498 files changed, 38034 insertions, 10893 deletions
diff --git a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt index e41b2d59ca7f..f591ab782dbc 100644 --- a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt +++ b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt @@ -47,6 +47,9 @@ Optional properties: Valid values are between 0 to 7, that maps to 273, 589, 899, 1222, 1480, 1806, 2147, 2464 ps Default value is 2, which corresponds to 899 ps +- rxlos-gpios: Input gpio from SFP+ module to indicate availability of + incoming signal. + Example: menetclk: menetclk { diff --git a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt index 30d487597ecb..fb40891ee606 100644 --- a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt +++ b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt @@ -6,9 +6,13 @@ Required properties: - reg: addresses and length of the register sets for the device, must be 6 pairs of register addresses and lengths - interrupts: interrupts for the devices, must be two interrupts +- #address-cells: must be 1, see dsa/dsa.txt +- #size-cells: must be 0, see dsa/dsa.txt + +Deprecated binding required properties: + - dsa,mii-bus: phandle to the MDIO bus controller, see dsa/dsa.txt - dsa,ethernet: phandle to the CPU network interface controller, see dsa/dsa.txt -- #size-cells: must be 0 - #address-cells: must be 2, see dsa/dsa.txt Subnodes: @@ -48,6 +52,45 @@ switch_top@f0b00000 { ethernet_switch@0 { compatible = "brcm,bcm7445-switch-v4.0"; #size-cells = <0>; + #address-cells = <1>; + reg = <0x0 0x40000 + 0x40000 0x110 + 0x40340 0x30 + 0x40380 0x30 + 0x40400 0x34 + 0x40600 0x208>; + reg-names = "core", "reg", intrl2_0", "intrl2_1", + "fcb, "acb"; + interrupts = <0 0x18 0 + 0 0x19 0>; + brcm,num-gphy = <1>; + brcm,num-rgmii-ports = <2>; + brcm,fcb-pause-override; + brcm,acb-packets-inflight; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + label = "gphy"; + reg = <0>; + }; + }; + }; +}; + +Example using the old DSA DeviceTree binding: + +switch_top@f0b00000 { + compatible = "simple-bus"; + #size-cells = <1>; + #address-cells = <1>; + ranges = <0 0xf0b00000 0x40804>; + + ethernet_switch@0 { + compatible = "brcm,bcm7445-switch-v4.0"; + #size-cells = <0>; #address-cells = <2>; reg = <0x0 0x40000 0x40000 0x110 diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt index b5a42df4c928..1506e948610c 100644 --- a/Documentation/devicetree/bindings/net/macb.txt +++ b/Documentation/devicetree/bindings/net/macb.txt @@ -21,6 +21,7 @@ Required properties: - clock-names: Tuple listing input clock names. Required elements: 'pclk', 'hclk' Optional elements: 'tx_clk' + Optional elements: 'rx_clk' applies to cdns,zynqmp-gem - clocks: Phandles to input clocks. 
Optional properties for PHY child node: diff --git a/Documentation/devicetree/bindings/net/xilinx_gmii2rgmii.txt b/Documentation/devicetree/bindings/net/xilinx_gmii2rgmii.txt new file mode 100644 index 000000000000..038dda48b8e6 --- /dev/null +++ b/Documentation/devicetree/bindings/net/xilinx_gmii2rgmii.txt @@ -0,0 +1,35 @@ +XILINX GMIITORGMII Converter Driver Device Tree Bindings +-------------------------------------------------------- + +The Gigabit Media Independent Interface (GMII) to Reduced Gigabit Media +Independent Interface (RGMII) core provides the RGMII between RGMII-compliant +Ethernet physical media devices (PHY) and the Gigabit Ethernet controller. +This core can be used in all three modes of operation(10/100/1000 Mb/s). +The Management Data Input/Output (MDIO) interface is used to configure the +Speed of operation. This core can switch dynamically between the three +Different speed modes by configuring the conveter register through mdio write. + +This converter sits between the ethernet MAC and the external phy. +MAC <==> GMII2RGMII <==> RGMII_PHY + +For more details about mdio please refer phy.txt file in the same directory. + +Required properties: +- compatible : Should be "xlnx,gmii-to-rgmii-1.0" +- reg : The ID number for the phy, usually a small integer +- phy-handle : Should point to the external phy device. + See ethernet.txt file in the same directory. + +Example: + mdio { + #address-cells = <1>; + #size-cells = <0>; + phy: ethernet-phy@0 { + ...... + }; + gmiitorgmii: gmiitorgmii@8 { + compatible = "xlnx,gmii-to-rgmii-1.0"; + reg = <8>; + phy-handle = <&phy>; + }; + }; diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX index 415154a487d0..a7697783ac4c 100644 --- a/Documentation/networking/00-INDEX +++ b/Documentation/networking/00-INDEX @@ -74,6 +74,8 @@ dns_resolver.txt - The DNS resolver module allows kernel servies to make DNS queries. driver.txt - Softnet driver issues. +ena.txt + - info on Amazon's Elastic Network Adapter (ENA) e100.txt - info on Intel's EtherExpress PRO/100 line of 10/100 boards e1000.txt diff --git a/Documentation/networking/batman-adv.txt b/Documentation/networking/batman-adv.txt index 1b5e7a7f2185..8a8d3d96f6c6 100644 --- a/Documentation/networking/batman-adv.txt +++ b/Documentation/networking/batman-adv.txt @@ -43,10 +43,15 @@ new interfaces to verify the compatibility. There is no need to reload the module if you plug your USB wifi adapter into your ma- chine after batman advanced was initially loaded. -To activate a given interface simply write "bat0" into its -"mesh_iface" file inside the batman_adv subfolder: +The batman-adv soft-interface can be created using the iproute2 +tool "ip" -# echo bat0 > /sys/class/net/eth0/batman_adv/mesh_iface +# ip link add name bat0 type batadv + +To activate a given interface simply attach it to the "bat0" +interface + +# ip link set dev eth0 master bat0 Repeat this step for all interfaces you wish to add. Now batman starts using/broadcasting on this/these interface(s). 
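The same attach step can also be done from a program over rtnetlink, which is what ip(8) uses underneath. The sketch below is illustrative only and is not part of batman-adv; the function name and interface names are placeholders and most error handling is trimmed.

#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

/* Rough equivalent of "ip link set dev eth0 master bat0". */
static int attach_to_bat0(const char *slave, const char *master)
{
	struct {
		struct nlmsghdr nh;
		struct ifinfomsg ifi;
		char attrbuf[64];
	} req;
	struct rtattr *rta;
	unsigned int master_idx = if_nametoindex(master);
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0 || !master_idx)
		return -1;

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
	req.nh.nlmsg_flags = NLM_F_REQUEST;
	req.nh.nlmsg_type = RTM_NEWLINK;
	req.ifi.ifi_family = AF_UNSPEC;
	req.ifi.ifi_index = if_nametoindex(slave);

	/* IFLA_MASTER carries the ifindex of the batadv soft-interface. */
	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nh.nlmsg_len));
	rta->rta_type = IFLA_MASTER;
	rta->rta_len = RTA_LENGTH(sizeof(master_idx));
	memcpy(RTA_DATA(rta), &master_idx, sizeof(master_idx));
	req.nh.nlmsg_len = NLMSG_ALIGN(req.nh.nlmsg_len) + rta->rta_len;

	if (send(fd, &req, req.nh.nlmsg_len, 0) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

Writing an IFLA_MASTER value of 0 detaches the interface again, the programmatic counterpart of the "ip link set dev eth0 nomaster" command shown further below.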
@@ -56,10 +61,10 @@ By reading the "iface_status" file you can check its status: # cat /sys/class/net/eth0/batman_adv/iface_status # active -To deactivate an interface you have to write "none" into its -"mesh_iface" file: +To deactivate an interface you have to detach it from the +"bat0" interface: -# echo none > /sys/class/net/eth0/batman_adv/mesh_iface +# ip link set dev eth0 nomaster All mesh wide settings can be found in batman's own interface diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt index f20c884c048a..a4e55c76d371 100644 --- a/Documentation/networking/dsa/dsa.txt +++ b/Documentation/networking/dsa/dsa.txt @@ -227,9 +227,9 @@ to address individual switches in the tree. dsa_switch: structure describing a switch device in the tree, referencing a dsa_switch_tree as a backpointer, slave network devices, master network device, -and a reference to the backing dsa_switch_driver +and a reference to the backing dsa_switch_ops -dsa_switch_driver: structure referencing function pointers, see below for a full +dsa_switch_ops: structure referencing function pointers, see below for a full description. Design limitations @@ -357,10 +357,10 @@ regular HWMON devices in /sys/class/hwmon/. Driver development ================== -DSA switch drivers need to implement a dsa_switch_driver structure which will +DSA switch drivers need to implement a dsa_switch_ops structure which will contain the various members described below. -register_switch_driver() registers this dsa_switch_driver in its internal list +register_switch_driver() registers this dsa_switch_ops in its internal list of drivers to probe for. unregister_switch_driver() does the exact opposite. Unless requested differently by setting the priv_size member accordingly, DSA @@ -379,7 +379,7 @@ Switch configuration buses, return a non-NULL string - setup: setup function for the switch, this function is responsible for setting - up the dsa_switch_driver private structure with all it needs: register maps, + up the dsa_switch_ops private structure with all it needs: register maps, interrupts, mutexes, locks etc.. This function is also expected to properly configure the switch to separate all network interfaces from each other, that is, they should be isolated by the switch hardware itself, typically by creating diff --git a/Documentation/networking/ena.txt b/Documentation/networking/ena.txt new file mode 100644 index 000000000000..2b4b6f57e549 --- /dev/null +++ b/Documentation/networking/ena.txt @@ -0,0 +1,305 @@ +Linux kernel driver for Elastic Network Adapter (ENA) family: +============================================================= + +Overview: +========= +ENA is a networking interface designed to make good use of modern CPU +features and system architectures. + +The ENA device exposes a lightweight management interface with a +minimal set of memory mapped registers and extendable command set +through an Admin Queue. + +The driver supports a range of ENA devices, is link-speed independent +(i.e., the same driver is used for 10GbE, 25GbE, 40GbE, etc.), and has +a negotiated and extendable feature set. + +Some ENA devices support SR-IOV. This driver is used for both the +SR-IOV Physical Function (PF) and Virtual Function (VF) devices. 
+
+ENA devices enable high speed and low overhead network traffic
+processing by providing multiple Tx/Rx queue pairs (the maximum number
+is advertised by the device via the Admin Queue), a dedicated MSI-X
+interrupt vector per Tx/Rx queue pair, adaptive interrupt moderation,
+and CPU cacheline optimized data placement.
+
+The ENA driver supports industry standard TCP/IP offload features such
+as checksum offload and TCP transmit segmentation offload (TSO).
+Receive-side scaling (RSS) is supported for multi-core scaling.
+
+The ENA driver and its corresponding devices implement health
+monitoring mechanisms such as a watchdog, enabling the device and driver
+to recover in a manner transparent to the application, as well as
+debug logs.
+
+Some of the ENA devices support a working mode called Low-latency
+Queue (LLQ), which saves several more microseconds.
+
+Supported PCI vendor ID/device IDs:
+===================================
+1d0f:0ec2 - ENA PF
+1d0f:1ec2 - ENA PF with LLQ support
+1d0f:ec20 - ENA VF
+1d0f:ec21 - ENA VF with LLQ support
+
+ENA Source Code Directory Structure:
+====================================
+ena_com.[ch] - Management communication layer. This layer is
+ responsible for handling all the management
+ (admin) communication between the device and the
+ driver.
+ena_eth_com.[ch] - Tx/Rx data path.
+ena_admin_defs.h - Definition of ENA management interface.
+ena_eth_io_defs.h - Definition of ENA data path interface.
+ena_common_defs.h - Common definitions for ena_com layer.
+ena_regs_defs.h - Definition of ENA PCI memory-mapped (MMIO) registers.
+ena_netdev.[ch] - Main Linux kernel driver.
+ena_sysfs.[ch] - Sysfs files.
+ena_ethtool.c - ethtool callbacks.
+ena_pci_id_tbl.h - Supported device IDs.
+
+Management Interface:
+=====================
+The ENA management interface is exposed by means of:
+- PCIe Configuration Space
+- Device Registers
+- Admin Queue (AQ) and Admin Completion Queue (ACQ)
+- Asynchronous Event Notification Queue (AENQ)
+
+ENA device MMIO Registers are accessed only during driver
+initialization and are not involved in further normal device
+operation.
+
+AQ is used for submitting management commands, and the
+results/responses are reported asynchronously through ACQ.
+
+ENA introduces a very small set of management commands with room for
+vendor-specific extensions. Most of the management operations are
+framed in a generic Get/Set feature command.
+
+The following admin queue commands are supported:
+- Create I/O submission queue
+- Create I/O completion queue
+- Destroy I/O submission queue
+- Destroy I/O completion queue
+- Get feature
+- Set feature
+- Configure AENQ
+- Get statistics
+
+Refer to ena_admin_defs.h for the list of supported Get/Set Feature
+properties.
+
+The Asynchronous Event Notification Queue (AENQ) is a uni-directional
+queue used by the ENA device to send to the driver events that cannot
+be reported using ACQ. AENQ events are subdivided into groups. Each
+group may have multiple syndromes, as shown below.
+
+The events are:
+ Group Syndrome
+ Link state change - X -
+ Fatal error - X -
+ Notification Suspend traffic
+ Notification Resume traffic
+ Keep-Alive - X -
+
+ACQ and AENQ share the same MSI-X vector.
+
+Keep-Alive is a special mechanism that allows monitoring of the
+device's health. The driver maintains a watchdog (WD) handler which,
+if fired, logs the current state and statistics then resets and
+restarts the ENA device and driver. A Keep-Alive event is delivered by
+the device every second.
The driver re-arms the WD upon reception of a +Keep-Alive event. A missed Keep-Alive event causes the WD handler to +fire. + +Data Path Interface: +==================== +I/O operations are based on Tx and Rx Submission Queues (Tx SQ and Rx +SQ correspondingly). Each SQ has a completion queue (CQ) associated +with it. + +The SQs and CQs are implemented as descriptor rings in contiguous +physical memory. + +The ENA driver supports two Queue Operation modes for Tx SQs: +- Regular mode + * In this mode the Tx SQs reside in the host's memory. The ENA + device fetches the ENA Tx descriptors and packet data from host + memory. +- Low Latency Queue (LLQ) mode or "push-mode". + * In this mode the driver pushes the transmit descriptors and the + first 128 bytes of the packet directly to the ENA device memory + space. The rest of the packet payload is fetched by the + device. For this operation mode, the driver uses a dedicated PCI + device memory BAR, which is mapped with write-combine capability. + +The Rx SQs support only the regular mode. + +Note: Not all ENA devices support LLQ, and this feature is negotiated + with the device upon initialization. If the ENA device does not + support LLQ mode, the driver falls back to the regular mode. + +The driver supports multi-queue for both Tx and Rx. This has various +benefits: +- Reduced CPU/thread/process contention on a given Ethernet interface. +- Cache miss rate on completion is reduced, particularly for data + cache lines that hold the sk_buff structures. +- Increased process-level parallelism when handling received packets. +- Increased data cache hit rate, by steering kernel processing of + packets to the CPU, where the application thread consuming the + packet is running. +- In hardware interrupt re-direction. + +Interrupt Modes: +================ +The driver assigns a single MSI-X vector per queue pair (for both Tx +and Rx directions). The driver assigns an additional dedicated MSI-X vector +for management (for ACQ and AENQ). + +Management interrupt registration is performed when the Linux kernel +probes the adapter, and it is de-registered when the adapter is +removed. I/O queue interrupt registration is performed when the Linux +interface of the adapter is opened, and it is de-registered when the +interface is closed. + +The management interrupt is named: + ena-mgmnt@pci:<PCI domain:bus:slot.function> +and for each queue pair, an interrupt is named: + <interface name>-Tx-Rx-<queue index> + +The ENA device operates in auto-mask and auto-clear interrupt +modes. That is, once MSI-X is delivered to the host, its Cause bit is +automatically cleared and the interrupt is masked. The interrupt is +unmasked by the driver after NAPI processing is complete. + +Interrupt Moderation: +===================== +ENA driver and device can operate in conventional or adaptive interrupt +moderation mode. + +In conventional mode the driver instructs device to postpone interrupt +posting according to static interrupt delay value. The interrupt delay +value can be configured through ethtool(8). The following ethtool +parameters are supported by the driver: tx-usecs, rx-usecs + +In adaptive interrupt moderation mode the interrupt delay value is +updated by the driver dynamically and adjusted every NAPI cycle +according to the traffic nature. + +By default ENA driver applies adaptive coalescing on Rx traffic and +conventional coalescing on Tx traffic. + +Adaptive coalescing can be switched on/off through ethtool(8) +adaptive_rx on|off parameter. 
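As an illustration of those knobs, the settings applied by "ethtool -C eth0 adaptive-rx on tx-usecs 64" can equally be issued through the SIOCETHTOOL ioctl. This is a minimal userspace sketch, not part of the driver; the interface name "eth0" and the delay values are assumptions.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_coalesce ecoal = { .cmd = ETHTOOL_GCOALESCE };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed interface */
	ifr.ifr_data = (char *)&ecoal;

	/* Read the current settings so unrelated fields are preserved. */
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GCOALESCE");
		return 1;
	}

	/* Adaptive moderation on Rx, a static 64 usec delay on Tx. */
	ecoal.cmd = ETHTOOL_SCOALESCE;
	ecoal.use_adaptive_rx_coalesce = 1;
	ecoal.tx_coalesce_usecs = 64;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_SCOALESCE");
		return 1;
	}

	close(fd);
	return 0;
}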
+
+The driver chooses the interrupt delay value according to the number of
+bytes and packets received between interrupt unmasking and interrupt
+posting. The driver uses an interrupt delay table that subdivides the
+range of received bytes/packets into 5 levels and assigns an interrupt
+delay value to each level.
+
+The user can enable/disable adaptive moderation, modify the interrupt
+delay table and restore its default values through sysfs.
+
+The rx_copybreak is initialized by default to ENA_DEFAULT_RX_COPYBREAK
+and can be configured by the ETHTOOL_STUNABLE command of the
+SIOCETHTOOL ioctl.
+
+SKB:
+The driver allocates an SKB for each frame received from Rx handling in
+NAPI context. The allocation method depends on the size of the packet.
+If the frame length is larger than rx_copybreak, napi_get_frags()
+is used, otherwise netdev_alloc_skb_ip_align() is used, the buffer
+content is copied (by CPU) to the SKB, and the buffer is recycled.
+
+Statistics:
+===========
+The user can obtain ENA device and driver statistics using ethtool.
+The driver can collect regular or extended statistics (including
+per-queue stats) from the device.
+
+In addition, the driver logs the stats to syslog upon device reset.
+
+MTU:
+====
+The driver supports an arbitrarily large MTU with a maximum that is
+negotiated with the device. The driver configures MTU using the
+SetFeature command (ENA_ADMIN_MTU property). The user can change MTU
+via ip(8) and similar legacy tools.
+
+Stateless Offloads:
+===================
+The ENA driver supports:
+- TSO over IPv4/IPv6
+- TSO with ECN
+- IPv4 header checksum offload
+- TCP/UDP over IPv4/IPv6 checksum offloads
+
+RSS:
+====
+- The ENA device supports RSS that allows flexible Rx traffic
+ steering.
+- Toeplitz and CRC32 hash functions are supported.
+- Different combinations of L2/L3/L4 fields can be configured as
+ inputs for hash functions.
+- The driver configures RSS settings using the AQ SetFeature command
+ (ENA_ADMIN_RSS_HASH_FUNCTION, ENA_ADMIN_RSS_HASH_INPUT and
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG properties).
+- If the NETIF_F_RXHASH flag is set, the 32-bit result of the hash
+ function delivered in the Rx CQ descriptor is set in the received
+ SKB.
+- The user can provide a hash key, hash function, and configure the
+ indirection table through ethtool(8).
+
+DATA PATH:
+==========
+Tx:
+---
+ena_start_xmit() is called by the stack. This function does the following:
+- Maps data buffers (skb->data and frags).
+- Populates ena_buf for the push buffer (if the driver and device are
+ in push mode.)
+- Prepares ENA bufs for the remaining frags.
+- Allocates a new request ID from the empty req_id ring. The request
+ ID is the index of the packet in the Tx info. This is used for
+ out-of-order TX completions.
+- Adds the packet to the proper place in the Tx ring.
+- Calls ena_com_prepare_tx(), an ENA communication layer function that
+ converts the ena_bufs to ENA descriptors (and adds meta ENA descriptors
+ as needed.)
+ * This function also copies the ENA descriptors and the push buffer
+ to the Device memory space (if in push mode.)
+- Writes a doorbell to the ENA device.
+- When the ENA device finishes sending the packet, a completion
+ interrupt is raised.
+- The interrupt handler schedules NAPI.
+- The ena_clean_tx_irq() function is called. This function handles the
+ completion descriptors generated by the ENA, with a single
+ completion descriptor per completed packet.
+ * req_id is retrieved from the completion descriptor.
The tx_info of + the packet is retrieved via the req_id. The data buffers are + unmapped and req_id is returned to the empty req_id ring. + * The function stops when the completion descriptors are completed or + the budget is reached. + +Rx: +--- +- When a packet is received from the ENA device. +- The interrupt handler schedules NAPI. +- The ena_clean_rx_irq() function is called. This function calls + ena_rx_pkt(), an ENA communication layer function, which returns the + number of descriptors used for a new unhandled packet, and zero if + no new packet is found. +- Then it calls the ena_clean_rx_irq() function. +- ena_eth_rx_skb() checks packet length: + * If the packet is small (len < rx_copybreak), the driver allocates + a SKB for the new packet, and copies the packet payload into the + SKB data buffer. + - In this way the original data buffer is not passed to the stack + and is reused for future Rx packets. + * Otherwise the function unmaps the Rx buffer, then allocates the + new SKB structure and hooks the Rx buffer to the SKB frags. +- The new SKB is updated with the necessary information (protocol, + checksum hw verify result, etc.), and then passed to the network + stack, using the NAPI interface function napi_gro_receive(). diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 9ae929395b24..3db8c67d2c8d 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -575,32 +575,33 @@ tcp_syncookies - BOOLEAN unconditionally generation of syncookies. tcp_fastopen - INTEGER - Enable TCP Fast Open feature (draft-ietf-tcpm-fastopen) to send data - in the opening SYN packet. To use this feature, the client application - must use sendmsg() or sendto() with MSG_FASTOPEN flag rather than - connect() to perform a TCP handshake automatically. + Enable TCP Fast Open (RFC7413) to send and accept data in the opening + SYN packet. - The values (bitmap) are - 1: Enables sending data in the opening SYN on the client w/ MSG_FASTOPEN. - 2: Enables TCP Fast Open on the server side, i.e., allowing data in - a SYN packet to be accepted and passed to the application before - 3-way hand shake finishes. - 4: Send data in the opening SYN regardless of cookie availability and - without a cookie option. - 0x100: Accept SYN data w/o validating the cookie. - 0x200: Accept data-in-SYN w/o any cookie option present. - 0x400/0x800: Enable Fast Open on all listeners regardless of the - TCP_FASTOPEN socket option. The two different flags designate two - different ways of setting max_qlen without the TCP_FASTOPEN socket - option. + The client support is enabled by flag 0x1 (on by default). The client + then must use sendmsg() or sendto() with the MSG_FASTOPEN flag, + rather than connect() to send data in SYN. - Default: 1 + The server support is enabled by flag 0x2 (off by default). Then + either enable for all listeners with another flag (0x400) or + enable individual listeners via TCP_FASTOPEN socket option with + the option value being the length of the syn-data backlog. - Note that the client & server side Fast Open flags (1 and 2 - respectively) must be also enabled before the rest of flags can take - effect. + The values (bitmap) are + 0x1: (client) enables sending data in the opening SYN on the client. + 0x2: (server) enables the server support, i.e., allowing data in + a SYN packet to be accepted and passed to the + application before 3-way handshake finishes. 
+ 0x4: (client) send data in the opening SYN regardless of cookie + availability and without a cookie option. + 0x200: (server) accept data-in-SYN w/o any cookie option present. + 0x400: (server) enable all listeners to support Fast Open by + default without explicit TCP_FASTOPEN socket option. + + Default: 0x1 - See include/net/tcp.h and the code for more details. + Note that that additional client or server features are only + effective if the basic support (0x1 and 0x2) are enabled respectively. tcp_syn_retries - INTEGER Number of times initial SYNs for an active TCP connection attempt diff --git a/Documentation/networking/strparser.txt b/Documentation/networking/strparser.txt new file mode 100644 index 000000000000..a0bf573dfa61 --- /dev/null +++ b/Documentation/networking/strparser.txt @@ -0,0 +1,136 @@ +Stream Parser +------------- + +The stream parser (strparser) is a utility that parses messages of an +application layer protocol running over a TCP connection. The stream +parser works in conjunction with an upper layer in the kernel to provide +kernel support for application layer messages. For instance, Kernel +Connection Multiplexor (KCM) uses the Stream Parser to parse messages +using a BPF program. + +Interface +--------- + +The API includes a context structure, a set of callbacks, utility +functions, and a data_ready function. The callbacks include +a parse_msg function that is called to perform parsing (e.g. +BPF parsing in case of KCM), and a rcv_msg function that is called +when a full message has been completed. + +A stream parser can be instantiated for a TCP connection. This is done +by: + +strp_init(struct strparser *strp, struct sock *csk, + struct strp_callbacks *cb) + +strp is a struct of type strparser that is allocated by the upper layer. +csk is the TCP socket associated with the stream parser. Callbacks are +called by the stream parser. + +Callbacks +--------- + +There are four callbacks: + +int (*parse_msg)(struct strparser *strp, struct sk_buff *skb); + + parse_msg is called to determine the length of the next message + in the stream. The upper layer must implement this function. It + should parse the sk_buff as containing the headers for the + next application layer messages in the stream. + + The skb->cb in the input skb is a struct strp_rx_msg. Only + the offset field is relevant in parse_msg and gives the offset + where the message starts in the skb. + + The return values of this function are: + + >0 : indicates length of successfully parsed message + 0 : indicates more data must be received to parse the message + -ESTRPIPE : current message should not be processed by the + kernel, return control of the socket to userspace which + can proceed to read the messages itself + other < 0 : Error is parsing, give control back to userspace + assuming that synchronization is lost and the stream + is unrecoverable (application expected to close TCP socket) + + In the case that an error is returned (return value is less than + zero) the stream parser will set the error on TCP socket and wake + it up. If parse_msg returned -ESTRPIPE and the stream parser had + previously read some bytes for the current message, then the error + set on the attached socket is ENODATA since the stream is + unrecoverable in that case. + +void (*rcv_msg)(struct strparser *strp, struct sk_buff *skb); + + rcv_msg is called when a full message has been received and + is queued. 
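A minimal userspace sketch of the tcp_fastopen usage documented above, not part of the patch: the client replaces connect() with sendto() and MSG_FASTOPEN, and a server listener opts in with the TCP_FASTOPEN option, whose value is the SYN-data backlog length. Addresses, buffer sizes and backlog values are assumptions.

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

/* Client side: data rides in the SYN, no separate connect() call.
 * Requires the 0x1 bit in net.ipv4.tcp_fastopen (on by default). */
static int tfo_client_send(const struct sockaddr_in *daddr,
			   const void *buf, size_t len)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	if (sendto(fd, buf, len, MSG_FASTOPEN,
		   (const struct sockaddr *)daddr, sizeof(*daddr)) < 0)
		return -1;
	return fd;
}

/* Server side: lfd is a bound, not-yet-listening TCP socket.  Requires
 * the 0x2 sysctl bit (or 0x400 to enable all listeners without this
 * socket option). */
static int tfo_listener(int lfd)
{
	int qlen = 5;	/* assumed SYN-data backlog length */

	if (setsockopt(lfd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen)) < 0)
		return -1;
	return listen(lfd, 16);
}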
The callee must consume the sk_buff; it can
+ call strp_pause to prevent any further messages from being
+ received in rcv_msg (see strp_pause below). This callback
+ must be set.
+
+ The skb->cb in the input skb is a struct strp_rx_msg. This
+ struct contains two fields: offset and full_len. Offset is
+ where the message starts in the skb, and full_len is the
+ length of the message. skb->len - offset may be greater
+ than full_len since strparser does not trim the skb.
+
+int (*read_sock_done)(struct strparser *strp, int err);
+
+ read_sock_done is called when the stream parser is done reading
+ the TCP socket. The stream parser may read multiple messages
+ in a loop and this function allows cleanup to occur when exiting
+ the loop. If the callback is not set (NULL in strp_init) a
+ default function is used.
+
+void (*abort_parser)(struct strparser *strp, int err);
+
+ This function is called when the stream parser encounters an error
+ in parsing. The default function stops the stream parser for the
+ TCP socket and sets the error in the socket. The default function
+ can be changed by setting the callback to non-NULL in strp_init.
+
+Functions
+---------
+
+The upper layer calls strp_tcp_data_ready when data is ready on the lower
+socket for strparser to process. This should be called from a data_ready
+callback that is set on the socket.
+
+strp_stop is called to completely stop stream parser operations. This
+is called internally when the stream parser encounters an error, and
+it is called from the upper layer when unattaching a TCP socket.
+
+strp_done is called to unattach the stream parser from the TCP socket.
+This must be called after the stream processor has been stopped.
+
+strp_check_rcv is called to check for new messages on the socket. This
+is normally called at initialization of a stream parser instance
+or after strp_unpause.
+
+Statistics
+----------
+
+Various counters are kept for each stream parser for a TCP socket.
+These are in the strp_stats structure. strp_aggr_stats is a convenience
+structure for accumulating statistics for multiple stream parser
+instances. save_strp_stats and aggregate_strp_stats are helper functions
+to save and aggregate statistics.
+
+Message assembly limits
+-----------------------
+
+The stream parser provides mechanisms to limit the resources consumed by
+message assembly.
+
+A timer is set when assembly starts for a new message. The message
+timeout is taken from rcvtime for the associated TCP socket. If the
+timer fires before assembly completes the stream parser is aborted
+and the ETIMEDOUT error is set on the TCP socket.
+
+Message length is limited to the receive buffer size of the associated
+TCP socket. If the length returned by parse_msg is greater than
+the socket buffer size then the stream parser is aborted with
+EMSGSIZE error set on the TCP socket. Note that this makes the
+maximum size of receive skbuffs for a socket with a stream parser
+to be 2*sk_rcvbuf of the TCP socket.
diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt
index 31c39115834d..44235e83799b 100644
--- a/Documentation/networking/switchdev.txt
+++ b/Documentation/networking/switchdev.txt
@@ -283,15 +283,10 @@ be sent to the port netdev for processing by the bridge driver. The bridge should not reflood the packet to the same ports the device flooded, otherwise there will be duplicate packets on the wire.
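Putting the strparser interface described above together, here is a minimal sketch of an upper layer whose messages start with a 4-byte big-endian length header. The header format, function names and pr_info() output are assumptions made for illustration; strp_init(), the callbacks and struct strp_rx_msg follow the text above.

#include <linux/skbuff.h>
#include <net/strparser.h>

/* parse_msg: peek at the assumed 4-byte big-endian length header and
 * return the full message length, or 0 if more data is needed. */
static int my_parse_msg(struct strparser *strp, struct sk_buff *skb)
{
	/* As described above, skb->cb holds a struct strp_rx_msg. */
	struct strp_rx_msg *rxm = (struct strp_rx_msg *)skb->cb;
	__be32 hdr;

	if (rxm->offset + sizeof(hdr) > skb->len)
		return 0;		/* need more data */
	if (skb_copy_bits(skb, rxm->offset, &hdr, sizeof(hdr)))
		return -EINVAL;

	return sizeof(hdr) + be32_to_cpu(hdr);	/* header + payload */
}

/* rcv_msg: a full message has been assembled; the callee must consume
 * the skb. */
static void my_rcv_msg(struct strparser *strp, struct sk_buff *skb)
{
	struct strp_rx_msg *rxm = (struct strp_rx_msg *)skb->cb;

	pr_info("message of %d bytes at offset %d\n",
		rxm->full_len, rxm->offset);
	kfree_skb(skb);
}

static int my_attach(struct strparser *strp, struct sock *csk)
{
	struct strp_callbacks cb = {
		.parse_msg = my_parse_msg,
		.rcv_msg   = my_rcv_msg,
		/* read_sock_done/abort_parser left NULL: defaults are used */
	};

	return strp_init(strp, csk, &cb);
}

The socket's data_ready callback would then call strp_tcp_data_ready(), and strp_done() would be called only after the parser has been stopped, as described above.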
-To avoid duplicate packets, the device/driver should mark a packet as already -forwarded using skb->offload_fwd_mark. The same mark is set on the device -ports in the domain using dev->offload_fwd_mark. If the skb->offload_fwd_mark -is non-zero and matches the forwarding egress port's dev->skb_mark, the kernel -will drop the skb right before transmit on the egress port, with the -understanding that the device already forwarded the packet on same egress port. -The driver can use switchdev_port_fwd_mark_set() to set a globally unique mark -for port's dev->offload_fwd_mark, based on the port's parent ID (switch ID) and -a group ifindex. +To avoid duplicate packets, the switch driver should mark a packet as already +forwarded by setting the skb->offload_fwd_mark bit. The bridge driver will mark +the skb using the ingress bridge port's mark and prevent it from being forwarded +through any bridge port with the same mark. It is possible for the switch device to not handle flooding and push the packets up to the bridge driver for flooding. This is not ideal as the number diff --git a/MAINTAINERS b/MAINTAINERS index 71aa5daeae8f..58b50296e0ee 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -636,6 +636,15 @@ F: drivers/tty/serial/altera_jtaguart.c F: include/linux/altera_uart.h F: include/linux/altera_jtaguart.h +AMAZON ETHERNET DRIVERS +M: Netanel Belgazal <netanel@annapurnalabs.com> +R: Saeed Bishara <saeed@annapurnalabs.com> +R: Zorik Machulsky <zorik@annapurnalabs.com> +L: netdev@vger.kernel.org +S: Supported +F: Documentation/networking/ena.txt +F: drivers/net/ethernet/amazon/ + AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER M: Tom Lendacky <thomas.lendacky@amd.com> M: Gary Hook <gary.hook@amd.com> @@ -9942,6 +9951,7 @@ F: net/rfkill/ RHASHTABLE M: Thomas Graf <tgraf@suug.ch> +M: Herbert Xu <herbert@gondor.apana.org.au> L: netdev@vger.kernel.org S: Maintained F: lib/rhashtable.c diff --git a/arch/arm64/boot/dts/apm/apm-mustang.dts b/arch/arm64/boot/dts/apm/apm-mustang.dts index b7fb5d9295c2..32a961c5e98a 100644 --- a/arch/arm64/boot/dts/apm/apm-mustang.dts +++ b/arch/arm64/boot/dts/apm/apm-mustang.dts @@ -74,6 +74,7 @@ &xgenet { status = "ok"; + rxlos-gpios = <&sbgpio 12 1>; }; &mmc0 { diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi index f1c2c713f9b0..d5c3435324e8 100644 --- a/arch/arm64/boot/dts/apm/apm-storm.dtsi +++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi @@ -923,7 +923,7 @@ /* mac address will be overwritten by the bootloader */ local-mac-address = [00 00 00 00 00 00]; phy-connection-type = "rgmii"; - phy-handle = <&menet0phy>,<&menetphy>; + phy-handle = <&menetphy>,<&menet0phy>; mdio { compatible = "apm,xgene-mdio"; #address-cells = <1>; diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c index 75dde903b238..81aaa505862c 100644 --- a/drivers/atm/fore200e.c +++ b/drivers/atm/fore200e.c @@ -2489,7 +2489,7 @@ static int fore200e_load_and_start_fw(struct fore200e *fore200e) { const struct firmware *firmware; struct device *device; - struct fw_header *fw_header; + const struct fw_header *fw_header; const __le32 *fw_data; u32 fw_size; u32 __iomem *load_addr; @@ -2511,9 +2511,9 @@ static int fore200e_load_and_start_fw(struct fore200e *fore200e) return err; } - fw_data = (__le32 *) firmware->data; + fw_data = (const __le32 *)firmware->data; fw_size = firmware->size / sizeof(u32); - fw_header = (struct fw_header *) firmware->data; + fw_header = (const struct fw_header *)firmware->data; load_addr = fore200e->virt_base + 
le32_to_cpu(fw_header->load_offset); DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n", diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 1af94e2d1a25..9b035b7d7f4f 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -550,4 +550,6 @@ config CRYPTO_DEV_ROCKCHIP This driver interfaces with the hardware crypto accelerator. Supporting cbc/ecb chainmode, and aes/des/des3_ede cipher mode. +source "drivers/crypto/chelsio/Kconfig" + endif # CRYPTO_HW diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index 3c6432dd09d9..ad7250fa1348 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -31,3 +31,4 @@ obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/ obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/ obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/ +obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/ diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig new file mode 100644 index 000000000000..4ce67fb9a880 --- /dev/null +++ b/drivers/crypto/chelsio/Kconfig @@ -0,0 +1,19 @@ +config CRYPTO_DEV_CHELSIO + tristate "Chelsio Crypto Co-processor Driver" + depends on CHELSIO_T4 + select CRYPTO_SHA1 + select CRYPTO_SHA256 + select CRYPTO_SHA512 + ---help--- + The Chelsio Crypto Co-processor driver for T6 adapters. + + For general information about Chelsio and our products, visit + our website at <http://www.chelsio.com>. + + For customer support, please visit our customer support page at + <http://www.chelsio.com/support.html>. + + Please send feedback to <linux-bugs@chelsio.com>. + + To compile this driver as a module, choose M here: the module + will be called chcr. diff --git a/drivers/crypto/chelsio/Makefile b/drivers/crypto/chelsio/Makefile new file mode 100644 index 000000000000..bebdf06687ad --- /dev/null +++ b/drivers/crypto/chelsio/Makefile @@ -0,0 +1,4 @@ +ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4 + +obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chcr.o +chcr-objs := chcr_core.o chcr_algo.o diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c new file mode 100644 index 000000000000..e4ddb921d7b3 --- /dev/null +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -0,0 +1,1525 @@ +/* + * This file is part of the Chelsio T6 Crypto driver for Linux. + * + * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Written and Maintained by: + * Manoj Malviya (manojmalviya@chelsio.com) + * Atul Gupta (atul.gupta@chelsio.com) + * Jitendra Lulla (jlulla@chelsio.com) + * Yeshaswi M R Gowda (yeshaswi@chelsio.com) + * Harsh Jain (harsh@chelsio.com) + */ + +#define pr_fmt(fmt) "chcr:" fmt + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/crypto.h> +#include <linux/cryptohash.h> +#include <linux/skbuff.h> +#include <linux/rtnetlink.h> +#include <linux/highmem.h> +#include <linux/scatterlist.h> + +#include <crypto/aes.h> +#include <crypto/algapi.h> +#include <crypto/hash.h> +#include <crypto/sha.h> +#include <crypto/internal/hash.h> + +#include "t4fw_api.h" +#include "t4_msg.h" +#include "chcr_core.h" +#include "chcr_algo.h" +#include "chcr_crypto.h" + +static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx) +{ + return ctx->crypto_ctx->ablkctx; +} + +static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx) +{ + return ctx->crypto_ctx->hmacctx; +} + +static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx) +{ + return ctx->dev->u_ctx; +} + +static inline int is_ofld_imm(const struct sk_buff *skb) +{ + return (skb->len <= CRYPTO_MAX_IMM_TX_PKT_LEN); +} + +/* + * sgl_len - calculates the size of an SGL of the given capacity + * @n: the number of SGL entries + * Calculates the number of flits needed for a scatter/gather list that + * can hold the given number of entries. + */ +static inline unsigned int sgl_len(unsigned int n) +{ + n--; + return (3 * n) / 2 + (n & 1) + 2; +} + +/* + * chcr_handle_resp - Unmap the DMA buffers associated with the request + * @req: crypto request + */ +int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input, + int error_status) +{ + struct crypto_tfm *tfm = req->tfm; + struct chcr_context *ctx = crypto_tfm_ctx(tfm); + struct uld_ctx *u_ctx = ULD_CTX(ctx); + struct chcr_req_ctx ctx_req; + struct cpl_fw6_pld *fw6_pld; + unsigned int digestsize, updated_digestsize; + + switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { + case CRYPTO_ALG_TYPE_BLKCIPHER: + ctx_req.req.ablk_req = (struct ablkcipher_request *)req; + ctx_req.ctx.ablk_ctx = + ablkcipher_request_ctx(ctx_req.req.ablk_req); + if (!error_status) { + fw6_pld = (struct cpl_fw6_pld *)input; + memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2], + AES_BLOCK_SIZE); + } + dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst, + ABLK_CTX(ctx)->dst_nents, DMA_FROM_DEVICE); + if (ctx_req.ctx.ablk_ctx->skb) { + kfree_skb(ctx_req.ctx.ablk_ctx->skb); + ctx_req.ctx.ablk_ctx->skb = NULL; + } + break; + + case CRYPTO_ALG_TYPE_AHASH: + ctx_req.req.ahash_req = (struct ahash_request *)req; + ctx_req.ctx.ahash_ctx = + ahash_request_ctx(ctx_req.req.ahash_req); + digestsize = + crypto_ahash_digestsize(crypto_ahash_reqtfm( + ctx_req.req.ahash_req)); + updated_digestsize = digestsize; + if (digestsize == SHA224_DIGEST_SIZE) + updated_digestsize = SHA256_DIGEST_SIZE; + else if (digestsize == SHA384_DIGEST_SIZE) + updated_digestsize = SHA512_DIGEST_SIZE; + if (ctx_req.ctx.ahash_ctx->skb) + ctx_req.ctx.ahash_ctx->skb = NULL; + if (ctx_req.ctx.ahash_ctx->result == 1) { + ctx_req.ctx.ahash_ctx->result = 0; + memcpy(ctx_req.req.ahash_req->result, input + + sizeof(struct cpl_fw6_pld), + 
digestsize); + } else { + memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input + + sizeof(struct cpl_fw6_pld), + updated_digestsize); + } + kfree(ctx_req.ctx.ahash_ctx->dummy_payload_ptr); + ctx_req.ctx.ahash_ctx->dummy_payload_ptr = NULL; + break; + } + return 0; +} + +/* + * calc_tx_flits_ofld - calculate # of flits for an offload packet + * @skb: the packet + * Returns the number of flits needed for the given offload packet. + * These packets are already fully constructed and no additional headers + * will be added. + */ +static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) +{ + unsigned int flits, cnt; + + if (is_ofld_imm(skb)) + return DIV_ROUND_UP(skb->len, 8); + + flits = skb_transport_offset(skb) / 8; /* headers */ + cnt = skb_shinfo(skb)->nr_frags; + if (skb_tail_pointer(skb) != skb_transport_header(skb)) + cnt++; + return flits + sgl_len(cnt); +} + +static struct shash_desc *chcr_alloc_shash(unsigned int ds) +{ + struct crypto_shash *base_hash = NULL; + struct shash_desc *desc; + + switch (ds) { + case SHA1_DIGEST_SIZE: + base_hash = crypto_alloc_shash("sha1-generic", 0, 0); + break; + case SHA224_DIGEST_SIZE: + base_hash = crypto_alloc_shash("sha224-generic", 0, 0); + break; + case SHA256_DIGEST_SIZE: + base_hash = crypto_alloc_shash("sha256-generic", 0, 0); + break; + case SHA384_DIGEST_SIZE: + base_hash = crypto_alloc_shash("sha384-generic", 0, 0); + break; + case SHA512_DIGEST_SIZE: + base_hash = crypto_alloc_shash("sha512-generic", 0, 0); + break; + } + if (IS_ERR(base_hash)) { + pr_err("Can not allocate sha-generic algo.\n"); + return (void *)base_hash; + } + + desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(base_hash), + GFP_KERNEL); + if (!desc) + return ERR_PTR(-ENOMEM); + desc->tfm = base_hash; + desc->flags = crypto_shash_get_flags(base_hash); + return desc; +} + +static int chcr_compute_partial_hash(struct shash_desc *desc, + char *iopad, char *result_hash, + int digest_size) +{ + struct sha1_state sha1_st; + struct sha256_state sha256_st; + struct sha512_state sha512_st; + int error; + + if (digest_size == SHA1_DIGEST_SIZE) { + error = crypto_shash_init(desc) ?: + crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?: + crypto_shash_export(desc, (void *)&sha1_st); + memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE); + } else if (digest_size == SHA224_DIGEST_SIZE) { + error = crypto_shash_init(desc) ?: + crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?: + crypto_shash_export(desc, (void *)&sha256_st); + memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE); + + } else if (digest_size == SHA256_DIGEST_SIZE) { + error = crypto_shash_init(desc) ?: + crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?: + crypto_shash_export(desc, (void *)&sha256_st); + memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE); + + } else if (digest_size == SHA384_DIGEST_SIZE) { + error = crypto_shash_init(desc) ?: + crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?: + crypto_shash_export(desc, (void *)&sha512_st); + memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE); + + } else if (digest_size == SHA512_DIGEST_SIZE) { + error = crypto_shash_init(desc) ?: + crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?: + crypto_shash_export(desc, (void *)&sha512_st); + memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE); + } else { + error = -EINVAL; + pr_err("Unknown digest size %d\n", digest_size); + } + return error; +} + +static void chcr_change_order(char *buf, int ds) +{ + int i; + + if (ds == SHA512_DIGEST_SIZE) { + for (i = 0; i < (ds / 
sizeof(u64)); i++) + *((__be64 *)buf + i) = + cpu_to_be64(*((u64 *)buf + i)); + } else { + for (i = 0; i < (ds / sizeof(u32)); i++) + *((__be32 *)buf + i) = + cpu_to_be32(*((u32 *)buf + i)); + } +} + +static inline int is_hmac(struct crypto_tfm *tfm) +{ + struct crypto_alg *alg = tfm->__crt_alg; + struct chcr_alg_template *chcr_crypto_alg = + container_of(__crypto_ahash_alg(alg), struct chcr_alg_template, + alg.hash); + if ((chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK) == + CRYPTO_ALG_SUB_TYPE_HASH_HMAC) + return 1; + return 0; +} + +static inline unsigned int ch_nents(struct scatterlist *sg, + unsigned int *total_size) +{ + unsigned int nents; + + for (nents = 0, *total_size = 0; sg; sg = sg_next(sg)) { + nents++; + *total_size += sg->length; + } + return nents; +} + +static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl, + struct scatterlist *sg, + struct phys_sge_parm *sg_param) +{ + struct phys_sge_pairs *to; + unsigned int out_buf_size = sg_param->obsize; + unsigned int nents = sg_param->nents, i, j, tot_len = 0; + + phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL) + | CPL_RX_PHYS_DSGL_ISRDMA_V(0)); + phys_cpl->pcirlxorder_to_noofsgentr = + htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) | + CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) | + CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) | + CPL_RX_PHYS_DSGL_PCITPHNT_V(0) | + CPL_RX_PHYS_DSGL_DCAID_V(0) | + CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nents)); + phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR; + phys_cpl->rss_hdr_int.qid = htons(sg_param->qid); + phys_cpl->rss_hdr_int.hash_val = 0; + to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl + + sizeof(struct cpl_rx_phys_dsgl)); + + for (i = 0; nents; to++) { + for (j = i; (nents && (j < (8 + i))); j++, nents--) { + to->len[j] = htons(sg->length); + to->addr[j] = cpu_to_be64(sg_dma_address(sg)); + if (out_buf_size) { + if (tot_len + sg_dma_len(sg) >= out_buf_size) { + to->len[j] = htons(out_buf_size - + tot_len); + return; + } + tot_len += sg_dma_len(sg); + } + sg = sg_next(sg); + } + } +} + +static inline unsigned +int map_writesg_phys_cpl(struct device *dev, struct cpl_rx_phys_dsgl *phys_cpl, + struct scatterlist *sg, struct phys_sge_parm *sg_param) +{ + if (!sg || !sg_param->nents) + return 0; + + sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE); + if (sg_param->nents == 0) { + pr_err("CHCR : DMA mapping failed\n"); + return -EINVAL; + } + write_phys_cpl(phys_cpl, sg, sg_param); + return 0; +} + +static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm) +{ + struct crypto_alg *alg = tfm->__crt_alg; + struct chcr_alg_template *chcr_crypto_alg = + container_of(alg, struct chcr_alg_template, alg.crypto); + + return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK; +} + +static inline void +write_sg_data_page_desc(struct sk_buff *skb, unsigned int *frags, + struct scatterlist *sg, unsigned int count) +{ + struct page *spage; + unsigned int page_len; + + skb->len += count; + skb->data_len += count; + skb->truesize += count; + while (count > 0) { + if (sg && (!(sg->length))) + break; + spage = sg_page(sg); + get_page(spage); + page_len = min(sg->length, count); + skb_fill_page_desc(skb, *frags, spage, sg->offset, page_len); + (*frags)++; + count -= page_len; + sg = sg_next(sg); + } +} + +static int generate_copy_rrkey(struct ablk_ctx *ablkctx, + struct _key_ctx *key_ctx) +{ + if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) { + get_aes_decrypt_key(key_ctx->key, ablkctx->key, + ablkctx->enckey_len << 3); + memset(key_ctx->key + ablkctx->enckey_len, 0, 
+ CHCR_AES_MAX_KEY_LEN - ablkctx->enckey_len); + } else { + memcpy(key_ctx->key, + ablkctx->key + (ablkctx->enckey_len >> 1), + ablkctx->enckey_len >> 1); + get_aes_decrypt_key(key_ctx->key + (ablkctx->enckey_len >> 1), + ablkctx->key, ablkctx->enckey_len << 2); + } + return 0; +} + +static inline void create_wreq(struct chcr_context *ctx, + struct fw_crypto_lookaside_wr *wreq, + void *req, struct sk_buff *skb, + int kctx_len, int hash_sz, + unsigned int phys_dsgl) +{ + struct uld_ctx *u_ctx = ULD_CTX(ctx); + struct ulp_txpkt *ulptx = (struct ulp_txpkt *)(wreq + 1); + struct ulptx_idata *sc_imm = (struct ulptx_idata *)(ulptx + 1); + int iv_loc = IV_DSGL; + int qid = u_ctx->lldi.rxq_ids[ctx->tx_channel_id]; + unsigned int immdatalen = 0, nr_frags = 0; + + if (is_ofld_imm(skb)) { + immdatalen = skb->data_len; + iv_loc = IV_IMMEDIATE; + } else { + nr_frags = skb_shinfo(skb)->nr_frags; + } + + wreq->op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen, + (kctx_len >> 4)); + wreq->pld_size_hash_size = + htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) | + FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz)); + wreq->len16_pkd = htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP( + (calc_tx_flits_ofld(skb) * 8), 16))); + wreq->cookie = cpu_to_be64((uintptr_t)req); + wreq->rx_chid_to_rx_q_id = + FILL_WR_RX_Q_ID(ctx->dev->tx_channel_id, qid, + (hash_sz) ? IV_NOP : iv_loc); + + ulptx->cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id); + ulptx->len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8), + 16) - ((sizeof(*wreq)) >> 4))); + + sc_imm->cmd_more = FILL_CMD_MORE(immdatalen); + sc_imm->len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + kctx_len + + ((hash_sz) ? DUMMY_BYTES : + (sizeof(struct cpl_rx_phys_dsgl) + + phys_dsgl)) + immdatalen); +} + +/** + * create_cipher_wr - form the WR for cipher operations + * @req: cipher req. + * @ctx: crypto driver context of the request. + * @qid: ingress qid where response of this WR should be received. 
+ * @op_type: encryption or decryption + */ +static struct sk_buff +*create_cipher_wr(struct crypto_async_request *req_base, + struct chcr_context *ctx, unsigned short qid, + unsigned short op_type) +{ + struct ablkcipher_request *req = (struct ablkcipher_request *)req_base; + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct uld_ctx *u_ctx = ULD_CTX(ctx); + struct ablk_ctx *ablkctx = ABLK_CTX(ctx); + struct sk_buff *skb = NULL; + struct _key_ctx *key_ctx; + struct fw_crypto_lookaside_wr *wreq; + struct cpl_tx_sec_pdu *sec_cpl; + struct cpl_rx_phys_dsgl *phys_cpl; + struct chcr_blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req); + struct phys_sge_parm sg_param; + unsigned int frags = 0, transhdr_len, phys_dsgl, dst_bufsize = 0; + unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len; + + if (!req->info) + return ERR_PTR(-EINVAL); + ablkctx->dst_nents = ch_nents(req->dst, &dst_bufsize); + ablkctx->enc = op_type; + + if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) || + (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE)) + return ERR_PTR(-EINVAL); + + phys_dsgl = get_space_for_phys_dsgl(ablkctx->dst_nents); + + kctx_len = sizeof(*key_ctx) + + (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16); + transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl); + skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), + GFP_ATOMIC); + if (!skb) + return ERR_PTR(-ENOMEM); + skb_reserve(skb, sizeof(struct sge_opaque_hdr)); + wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len); + + sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET); + sec_cpl->op_ivinsrtofst = + FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1, 1); + + sec_cpl->pldlen = htonl(ivsize + req->nbytes); + sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, + ivsize + 1, 0); + + sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, 0, + 0, 0); + sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0, + ablkctx->ciph_mode, + 0, 0, ivsize >> 1, 1); + sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0, + 0, 1, phys_dsgl); + + key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl)); + key_ctx->ctx_hdr = ablkctx->key_ctx_hdr; + if (op_type == CHCR_DECRYPT_OP) { + if (generate_copy_rrkey(ablkctx, key_ctx)) + goto map_fail1; + } else { + if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) { + memcpy(key_ctx->key, ablkctx->key, ablkctx->enckey_len); + } else { + memcpy(key_ctx->key, ablkctx->key + + (ablkctx->enckey_len >> 1), + ablkctx->enckey_len >> 1); + memcpy(key_ctx->key + + (ablkctx->enckey_len >> 1), + ablkctx->key, + ablkctx->enckey_len >> 1); + } + } + phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)key_ctx + kctx_len); + + memcpy(ablkctx->iv, req->info, ivsize); + sg_init_table(&ablkctx->iv_sg, 1); + sg_set_buf(&ablkctx->iv_sg, ablkctx->iv, ivsize); + sg_param.nents = ablkctx->dst_nents; + sg_param.obsize = dst_bufsize; + sg_param.qid = qid; + sg_param.align = 1; + if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst, + &sg_param)) + goto map_fail1; + + skb_set_transport_header(skb, transhdr_len); + write_sg_data_page_desc(skb, &frags, &ablkctx->iv_sg, ivsize); + write_sg_data_page_desc(skb, &frags, req->src, req->nbytes); + create_wreq(ctx, wreq, req, skb, kctx_len, 0, phys_dsgl); + req_ctx->skb = skb; + skb_get(skb); + return skb; +map_fail1: + kfree_skb(skb); + return ERR_PTR(-ENOMEM); +} + +static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen) +{ 
+ struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); + struct ablk_ctx *ablkctx = ABLK_CTX(ctx); + struct ablkcipher_alg *alg = crypto_ablkcipher_alg(tfm); + unsigned int ck_size, context_size; + u16 alignment = 0; + + if ((keylen < alg->min_keysize) || (keylen > alg->max_keysize)) + goto badkey_err; + + memcpy(ablkctx->key, key, keylen); + ablkctx->enckey_len = keylen; + if (keylen == AES_KEYSIZE_128) { + ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; + } else if (keylen == AES_KEYSIZE_192) { + alignment = 8; + ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; + } else if (keylen == AES_KEYSIZE_256) { + ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; + } else { + goto badkey_err; + } + + context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + + keylen + alignment) >> 4; + + ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, + 0, 0, context_size); + ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC; + return 0; +badkey_err: + crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + ablkctx->enckey_len = 0; + return -EINVAL; +} + +static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx) +{ + int ret = 0; + struct sge_ofld_txq *q; + struct adapter *adap = netdev2adap(dev); + + local_bh_disable(); + q = &adap->sge.ofldtxq[idx]; + spin_lock(&q->sendq.lock); + if (q->full) + ret = -1; + spin_unlock(&q->sendq.lock); + local_bh_enable(); + return ret; +} + +static int chcr_aes_encrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); + struct crypto_async_request *req_base = &req->base; + struct uld_ctx *u_ctx = ULD_CTX(ctx); + struct sk_buff *skb; + + if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], + ctx->tx_channel_id))) { + if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + return -EBUSY; + } + + skb = create_cipher_wr(req_base, ctx, + u_ctx->lldi.rxq_ids[ctx->tx_channel_id], + CHCR_ENCRYPT_OP); + if (IS_ERR(skb)) { + pr_err("chcr : %s : Failed to form WR. No memory\n", __func__); + return PTR_ERR(skb); + } + skb->dev = u_ctx->lldi.ports[0]; + set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id); + chcr_send_wr(skb); + return -EINPROGRESS; +} + +static int chcr_aes_decrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); + struct crypto_async_request *req_base = &req->base; + struct uld_ctx *u_ctx = ULD_CTX(ctx); + struct sk_buff *skb; + + if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], + ctx->tx_channel_id))) { + if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + return -EBUSY; + } + + skb = create_cipher_wr(req_base, ctx, u_ctx->lldi.rxq_ids[0], + CHCR_DECRYPT_OP); + if (IS_ERR(skb)) { + pr_err("chcr : %s : Failed to form WR. 
No memory\n", __func__); + return PTR_ERR(skb); + } + skb->dev = u_ctx->lldi.ports[0]; + set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id); + chcr_send_wr(skb); + return -EINPROGRESS; +} + +static int chcr_device_init(struct chcr_context *ctx) +{ + struct uld_ctx *u_ctx; + unsigned int id; + int err = 0, rxq_perchan, rxq_idx; + + id = smp_processor_id(); + if (!ctx->dev) { + err = assign_chcr_device(&ctx->dev); + if (err) { + pr_err("chcr device assignment fails\n"); + goto out; + } + u_ctx = ULD_CTX(ctx); + rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan; + ctx->dev->tx_channel_id = 0; + rxq_idx = ctx->dev->tx_channel_id * rxq_perchan; + rxq_idx += id % rxq_perchan; + spin_lock(&ctx->dev->lock_chcr_dev); + ctx->tx_channel_id = rxq_idx; + spin_unlock(&ctx->dev->lock_chcr_dev); + } +out: + return err; +} + +static int chcr_cra_init(struct crypto_tfm *tfm) +{ + tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx); + return chcr_device_init(crypto_tfm_ctx(tfm)); +} + +static int get_alg_config(struct algo_param *params, + unsigned int auth_size) +{ + switch (auth_size) { + case SHA1_DIGEST_SIZE: + params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160; + params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1; + params->result_size = SHA1_DIGEST_SIZE; + break; + case SHA224_DIGEST_SIZE: + params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; + params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224; + params->result_size = SHA256_DIGEST_SIZE; + break; + case SHA256_DIGEST_SIZE: + params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; + params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256; + params->result_size = SHA256_DIGEST_SIZE; + break; + case SHA384_DIGEST_SIZE: + params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; + params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384; + params->result_size = SHA512_DIGEST_SIZE; + break; + case SHA512_DIGEST_SIZE: + params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; + params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512; + params->result_size = SHA512_DIGEST_SIZE; + break; + default: + pr_err("chcr : ERROR, unsupported digest size\n"); + return -EINVAL; + } + return 0; +} + +static inline int +write_buffer_data_page_desc(struct chcr_ahash_req_ctx *req_ctx, + struct sk_buff *skb, unsigned int *frags, char *bfr, + u8 bfr_len) +{ + void *page_ptr = NULL; + + skb->len += bfr_len; + skb->data_len += bfr_len; + skb->truesize += bfr_len; + page_ptr = kmalloc(CHCR_HASH_MAX_BLOCK_SIZE_128, GFP_ATOMIC | GFP_DMA); + if (!page_ptr) + return -ENOMEM; + get_page(virt_to_page(page_ptr)); + req_ctx->dummy_payload_ptr = page_ptr; + memcpy(page_ptr, bfr, bfr_len); + skb_fill_page_desc(skb, *frags, virt_to_page(page_ptr), + offset_in_page(page_ptr), bfr_len); + (*frags)++; + return 0; +} + +/** + * create_final_hash_wr - Create hash work request + * @req - Cipher req base + */ +static struct sk_buff *create_final_hash_wr(struct ahash_request *req, + struct hash_wr_param *param) +{ + struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); + struct hmac_ctx *hmacctx = HMAC_CTX(ctx); + struct sk_buff *skb = NULL; + struct _key_ctx *key_ctx; + struct fw_crypto_lookaside_wr *wreq; + struct cpl_tx_sec_pdu *sec_cpl; + unsigned int frags = 0, transhdr_len, iopad_alignment = 0; + unsigned int digestsize = crypto_ahash_digestsize(tfm); + unsigned int kctx_len = sizeof(*key_ctx); + u8 hash_size_in_response = 0; + + iopad_alignment = KEYCTX_ALIGN_PAD(digestsize); + kctx_len += 
param->alg_prm.result_size + iopad_alignment; + if (param->opad_needed) + kctx_len += param->alg_prm.result_size + iopad_alignment; + + if (req_ctx->result) + hash_size_in_response = digestsize; + else + hash_size_in_response = param->alg_prm.result_size; + transhdr_len = HASH_TRANSHDR_SIZE(kctx_len); + skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), + GFP_ATOMIC); + if (!skb) + return skb; + + skb_reserve(skb, sizeof(struct sge_opaque_hdr)); + wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len); + memset(wreq, 0, transhdr_len); + + sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET); + sec_cpl->op_ivinsrtofst = + FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0, 0); + sec_cpl->pldlen = htonl(param->bfr_len + param->sg_len); + + sec_cpl->aadstart_cipherstop_hi = + FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0); + sec_cpl->cipherstop_lo_authinsert = + FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0); + sec_cpl->seqno_numivs = + FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode, + param->opad_needed, 0, 0); + + sec_cpl->ivgen_hdrlen = + FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0); + + key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl)); + memcpy(key_ctx->key, req_ctx->partial_hash, param->alg_prm.result_size); + + if (param->opad_needed) + memcpy(key_ctx->key + ((param->alg_prm.result_size <= 32) ? 32 : + CHCR_HASH_MAX_DIGEST_SIZE), + hmacctx->opad, param->alg_prm.result_size); + + key_ctx->ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY, + param->alg_prm.mk_size, 0, + param->opad_needed, + (kctx_len >> 4)); + sec_cpl->scmd1 = cpu_to_be64((u64)param->scmd1); + + skb_set_transport_header(skb, transhdr_len); + if (param->bfr_len != 0) + write_buffer_data_page_desc(req_ctx, skb, &frags, req_ctx->bfr, + param->bfr_len); + if (param->sg_len != 0) + write_sg_data_page_desc(skb, &frags, req->src, param->sg_len); + + create_wreq(ctx, wreq, req, skb, kctx_len, hash_size_in_response, + 0); + req_ctx->skb = skb; + skb_get(skb); + return skb; +} + +static int chcr_ahash_update(struct ahash_request *req) +{ + struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); + struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); + struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm)); + struct uld_ctx *u_ctx = NULL; + struct sk_buff *skb; + u8 remainder = 0, bs; + unsigned int nbytes = req->nbytes; + struct hash_wr_param params; + + bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); + + u_ctx = ULD_CTX(ctx); + if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], + ctx->tx_channel_id))) { + if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + return -EBUSY; + } + + if (nbytes + req_ctx->bfr_len >= bs) { + remainder = (nbytes + req_ctx->bfr_len) % bs; + nbytes = nbytes + req_ctx->bfr_len - remainder; + } else { + sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->bfr + + req_ctx->bfr_len, nbytes, 0); + req_ctx->bfr_len += nbytes; + return 0; + } + + params.opad_needed = 0; + params.more = 1; + params.last = 0; + params.sg_len = nbytes - req_ctx->bfr_len; + params.bfr_len = req_ctx->bfr_len; + params.scmd1 = 0; + get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); + req_ctx->result = 0; + req_ctx->data_len += params.sg_len + params.bfr_len; + skb = create_final_hash_wr(req, ¶ms); + if (!skb) + return -ENOMEM; + + req_ctx->bfr_len = remainder; + if (remainder) + sg_pcopy_to_buffer(req->src, sg_nents(req->src), + req_ctx->bfr, remainder, req->nbytes - + remainder); + skb->dev = u_ctx->lldi.ports[0]; + 
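/* Queue the WR on the cxgb4 offload txq for this channel; the response arrives asynchronously via cpl_fw6_pld_handler(), hence -EINPROGRESS below. */ +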
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id); + chcr_send_wr(skb); + + return -EINPROGRESS; +} + +static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1) +{ + memset(bfr_ptr, 0, bs); + *bfr_ptr = 0x80; + if (bs == 64) + *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3); + else + *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3); +} + +static int chcr_ahash_final(struct ahash_request *req) +{ + struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); + struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); + struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm)); + struct hash_wr_param params; + struct sk_buff *skb; + struct uld_ctx *u_ctx = NULL; + u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); + + u_ctx = ULD_CTX(ctx); + if (is_hmac(crypto_ahash_tfm(rtfm))) + params.opad_needed = 1; + else + params.opad_needed = 0; + params.sg_len = 0; + get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); + req_ctx->result = 1; + params.bfr_len = req_ctx->bfr_len; + req_ctx->data_len += params.bfr_len + params.sg_len; + if (req_ctx->bfr && (req_ctx->bfr_len == 0)) { + create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len); + params.last = 0; + params.more = 1; + params.scmd1 = 0; + params.bfr_len = bs; + + } else { + params.scmd1 = req_ctx->data_len; + params.last = 1; + params.more = 0; + } + skb = create_final_hash_wr(req, ¶ms); + skb->dev = u_ctx->lldi.ports[0]; + set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id); + chcr_send_wr(skb); + return -EINPROGRESS; +} + +static int chcr_ahash_finup(struct ahash_request *req) +{ + struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); + struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); + struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm)); + struct uld_ctx *u_ctx = NULL; + struct sk_buff *skb; + struct hash_wr_param params; + u8 bs; + + bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); + u_ctx = ULD_CTX(ctx); + + if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], + ctx->tx_channel_id))) { + if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + return -EBUSY; + } + + if (is_hmac(crypto_ahash_tfm(rtfm))) + params.opad_needed = 1; + else + params.opad_needed = 0; + + params.sg_len = req->nbytes; + params.bfr_len = req_ctx->bfr_len; + get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); + req_ctx->data_len += params.bfr_len + params.sg_len; + req_ctx->result = 1; + if (req_ctx->bfr && (req_ctx->bfr_len + req->nbytes) == 0) { + create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len); + params.last = 0; + params.more = 1; + params.scmd1 = 0; + params.bfr_len = bs; + } else { + params.scmd1 = req_ctx->data_len; + params.last = 1; + params.more = 0; + } + + skb = create_final_hash_wr(req, ¶ms); + if (!skb) + return -ENOMEM; + skb->dev = u_ctx->lldi.ports[0]; + set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id); + chcr_send_wr(skb); + + return -EINPROGRESS; +} + +static int chcr_ahash_digest(struct ahash_request *req) +{ + struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); + struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); + struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm)); + struct uld_ctx *u_ctx = NULL; + struct sk_buff *skb; + struct hash_wr_param params; + u8 bs; + + rtfm->init(req); + bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); + + u_ctx = ULD_CTX(ctx); + if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], + ctx->tx_channel_id))) { + if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + 
return -EBUSY; + } + + if (is_hmac(crypto_ahash_tfm(rtfm))) + params.opad_needed = 1; + else + params.opad_needed = 0; + + params.last = 0; + params.more = 0; + params.sg_len = req->nbytes; + params.bfr_len = 0; + params.scmd1 = 0; + get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); + req_ctx->result = 1; + req_ctx->data_len += params.bfr_len + params.sg_len; + + if (req_ctx->bfr && req->nbytes == 0) { + create_last_hash_block(req_ctx->bfr, bs, 0); + params.more = 1; + params.bfr_len = bs; + } + + skb = create_final_hash_wr(req, ¶ms); + if (!skb) + return -ENOMEM; + + skb->dev = u_ctx->lldi.ports[0]; + set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id); + chcr_send_wr(skb); + return -EINPROGRESS; +} + +static int chcr_ahash_export(struct ahash_request *areq, void *out) +{ + struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); + struct chcr_ahash_req_ctx *state = out; + + state->bfr_len = req_ctx->bfr_len; + state->data_len = req_ctx->data_len; + memcpy(state->bfr, req_ctx->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128); + memcpy(state->partial_hash, req_ctx->partial_hash, + CHCR_HASH_MAX_DIGEST_SIZE); + return 0; +} + +static int chcr_ahash_import(struct ahash_request *areq, const void *in) +{ + struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); + struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in; + + req_ctx->bfr_len = state->bfr_len; + req_ctx->data_len = state->data_len; + req_ctx->dummy_payload_ptr = NULL; + memcpy(req_ctx->bfr, state->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128); + memcpy(req_ctx->partial_hash, state->partial_hash, + CHCR_HASH_MAX_DIGEST_SIZE); + return 0; +} + +static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); + struct hmac_ctx *hmacctx = HMAC_CTX(ctx); + unsigned int digestsize = crypto_ahash_digestsize(tfm); + unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); + unsigned int i, err = 0, updated_digestsize; + + /* + * use the key to calculate the ipad and opad. ipad will sent with the + * first request's data. 
opad will be sent with the final hash result + * ipad in hmacctx->ipad and opad in hmacctx->opad location + */ + if (!hmacctx->desc) + return -EINVAL; + if (keylen > bs) { + err = crypto_shash_digest(hmacctx->desc, key, keylen, + hmacctx->ipad); + if (err) + goto out; + keylen = digestsize; + } else { + memcpy(hmacctx->ipad, key, keylen); + } + memset(hmacctx->ipad + keylen, 0, bs - keylen); + memcpy(hmacctx->opad, hmacctx->ipad, bs); + + for (i = 0; i < bs / sizeof(int); i++) { + *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA; + *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA; + } + + updated_digestsize = digestsize; + if (digestsize == SHA224_DIGEST_SIZE) + updated_digestsize = SHA256_DIGEST_SIZE; + else if (digestsize == SHA384_DIGEST_SIZE) + updated_digestsize = SHA512_DIGEST_SIZE; + err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->ipad, + hmacctx->ipad, digestsize); + if (err) + goto out; + chcr_change_order(hmacctx->ipad, updated_digestsize); + + err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->opad, + hmacctx->opad, digestsize); + if (err) + goto out; + chcr_change_order(hmacctx->opad, updated_digestsize); +out: + return err; +} + +static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int key_len) +{ + struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); + struct ablk_ctx *ablkctx = ABLK_CTX(ctx); + int status = 0; + unsigned short context_size = 0; + + if ((key_len == (AES_KEYSIZE_128 << 1)) || + (key_len == (AES_KEYSIZE_256 << 1))) { + memcpy(ablkctx->key, key, key_len); + ablkctx->enckey_len = key_len; + context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4; + ablkctx->key_ctx_hdr = + FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ? + CHCR_KEYCTX_CIPHER_KEY_SIZE_128 : + CHCR_KEYCTX_CIPHER_KEY_SIZE_256, + CHCR_KEYCTX_NO_KEY, 1, + 0, context_size); + ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS; + } else { + crypto_tfm_set_flags((struct crypto_tfm *)tfm, + CRYPTO_TFM_RES_BAD_KEY_LEN); + ablkctx->enckey_len = 0; + status = -EINVAL; + } + return status; +} + +static int chcr_sha_init(struct ahash_request *areq) +{ + struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + int digestsize = crypto_ahash_digestsize(tfm); + + req_ctx->data_len = 0; + req_ctx->dummy_payload_ptr = NULL; + req_ctx->bfr_len = 0; + req_ctx->skb = NULL; + req_ctx->result = 0; + copy_hash_init_values(req_ctx->partial_hash, digestsize); + return 0; +} + +static int chcr_sha_cra_init(struct crypto_tfm *tfm) +{ + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct chcr_ahash_req_ctx)); + return chcr_device_init(crypto_tfm_ctx(tfm)); +} + +static int chcr_hmac_init(struct ahash_request *areq) +{ + struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); + struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq); + struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm)); + struct hmac_ctx *hmacctx = HMAC_CTX(ctx); + unsigned int digestsize = crypto_ahash_digestsize(rtfm); + unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); + + chcr_sha_init(areq); + req_ctx->data_len = bs; + if (is_hmac(crypto_ahash_tfm(rtfm))) { + if (digestsize == SHA224_DIGEST_SIZE) + memcpy(req_ctx->partial_hash, hmacctx->ipad, + SHA256_DIGEST_SIZE); + else if (digestsize == SHA384_DIGEST_SIZE) + memcpy(req_ctx->partial_hash, hmacctx->ipad, + SHA512_DIGEST_SIZE); + else + memcpy(req_ctx->partial_hash, hmacctx->ipad, + digestsize); + } + return 0; +} + +static int 
chcr_hmac_cra_init(struct crypto_tfm *tfm) +{ + struct chcr_context *ctx = crypto_tfm_ctx(tfm); + struct hmac_ctx *hmacctx = HMAC_CTX(ctx); + unsigned int digestsize = + crypto_ahash_digestsize(__crypto_ahash_cast(tfm)); + + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct chcr_ahash_req_ctx)); + hmacctx->desc = chcr_alloc_shash(digestsize); + if (IS_ERR(hmacctx->desc)) + return PTR_ERR(hmacctx->desc); + return chcr_device_init(crypto_tfm_ctx(tfm)); +} + +static void chcr_free_shash(struct shash_desc *desc) +{ + crypto_free_shash(desc->tfm); + kfree(desc); +} + +static void chcr_hmac_cra_exit(struct crypto_tfm *tfm) +{ + struct chcr_context *ctx = crypto_tfm_ctx(tfm); + struct hmac_ctx *hmacctx = HMAC_CTX(ctx); + + if (hmacctx->desc) { + chcr_free_shash(hmacctx->desc); + hmacctx->desc = NULL; + } +} + +static struct chcr_alg_template driver_algs[] = { + /* AES-CBC */ + { + .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + .is_registered = 0, + .alg.crypto = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc(aes-chcr)", + .cra_priority = CHCR_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct chcr_context) + + sizeof(struct ablk_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = chcr_cra_init, + .cra_exit = NULL, + .cra_u.ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = chcr_aes_cbc_setkey, + .encrypt = chcr_aes_encrypt, + .decrypt = chcr_aes_decrypt, + } + } + }, + { + .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + .is_registered = 0, + .alg.crypto = { + .cra_name = "xts(aes)", + .cra_driver_name = "xts(aes-chcr)", + .cra_priority = CHCR_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct chcr_context) + + sizeof(struct ablk_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = chcr_cra_init, + .cra_exit = NULL, + .cra_u = { + .ablkcipher = { + .min_keysize = 2 * AES_MIN_KEY_SIZE, + .max_keysize = 2 * AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = chcr_aes_xts_setkey, + .encrypt = chcr_aes_encrypt, + .decrypt = chcr_aes_decrypt, + } + } + } + }, + /* SHA */ + { + .type = CRYPTO_ALG_TYPE_AHASH, + .is_registered = 0, + .alg.hash = { + .halg.digestsize = SHA1_DIGEST_SIZE, + .halg.base = { + .cra_name = "sha1", + .cra_driver_name = "sha1-chcr", + .cra_blocksize = SHA1_BLOCK_SIZE, + } + } + }, + { + .type = CRYPTO_ALG_TYPE_AHASH, + .is_registered = 0, + .alg.hash = { + .halg.digestsize = SHA256_DIGEST_SIZE, + .halg.base = { + .cra_name = "sha256", + .cra_driver_name = "sha256-chcr", + .cra_blocksize = SHA256_BLOCK_SIZE, + } + } + }, + { + .type = CRYPTO_ALG_TYPE_AHASH, + .is_registered = 0, + .alg.hash = { + .halg.digestsize = SHA224_DIGEST_SIZE, + .halg.base = { + .cra_name = "sha224", + .cra_driver_name = "sha224-chcr", + .cra_blocksize = SHA224_BLOCK_SIZE, + } + } + }, + { + .type = CRYPTO_ALG_TYPE_AHASH, + .is_registered = 0, + .alg.hash = { + .halg.digestsize = SHA384_DIGEST_SIZE, + .halg.base = { + .cra_name = "sha384", + .cra_driver_name = "sha384-chcr", + .cra_blocksize = SHA384_BLOCK_SIZE, + } + } + }, + { + .type = CRYPTO_ALG_TYPE_AHASH, + .is_registered = 0, + .alg.hash = { + .halg.digestsize = SHA512_DIGEST_SIZE, + .halg.base = { + .cra_name = "sha512", + .cra_driver_name = "sha512-chcr", + .cra_blocksize 
= SHA512_BLOCK_SIZE, + } + } + }, + /* HMAC */ + { + .type = CRYPTO_ALG_TYPE_HMAC, + .is_registered = 0, + .alg.hash = { + .halg.digestsize = SHA1_DIGEST_SIZE, + .halg.base = { + .cra_name = "hmac(sha1)", + .cra_driver_name = "hmac(sha1-chcr)", + .cra_blocksize = SHA1_BLOCK_SIZE, + } + } + }, + { + .type = CRYPTO_ALG_TYPE_HMAC, + .is_registered = 0, + .alg.hash = { + .halg.digestsize = SHA224_DIGEST_SIZE, + .halg.base = { + .cra_name = "hmac(sha224)", + .cra_driver_name = "hmac(sha224-chcr)", + .cra_blocksize = SHA224_BLOCK_SIZE, + } + } + }, + { + .type = CRYPTO_ALG_TYPE_HMAC, + .is_registered = 0, + .alg.hash = { + .halg.digestsize = SHA256_DIGEST_SIZE, + .halg.base = { + .cra_name = "hmac(sha256)", + .cra_driver_name = "hmac(sha256-chcr)", + .cra_blocksize = SHA256_BLOCK_SIZE, + } + } + }, + { + .type = CRYPTO_ALG_TYPE_HMAC, + .is_registered = 0, + .alg.hash = { + .halg.digestsize = SHA384_DIGEST_SIZE, + .halg.base = { + .cra_name = "hmac(sha384)", + .cra_driver_name = "hmac(sha384-chcr)", + .cra_blocksize = SHA384_BLOCK_SIZE, + } + } + }, + { + .type = CRYPTO_ALG_TYPE_HMAC, + .is_registered = 0, + .alg.hash = { + .halg.digestsize = SHA512_DIGEST_SIZE, + .halg.base = { + .cra_name = "hmac(sha512)", + .cra_driver_name = "hmac(sha512-chcr)", + .cra_blocksize = SHA512_BLOCK_SIZE, + } + } + }, +}; + +/* + * chcr_unregister_alg - Deregister crypto algorithms with + * kernel framework. + */ +static int chcr_unregister_alg(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { + switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) { + case CRYPTO_ALG_TYPE_ABLKCIPHER: + if (driver_algs[i].is_registered) + crypto_unregister_alg( + &driver_algs[i].alg.crypto); + break; + case CRYPTO_ALG_TYPE_AHASH: + if (driver_algs[i].is_registered) + crypto_unregister_ahash( + &driver_algs[i].alg.hash); + break; + } + driver_algs[i].is_registered = 0; + } + return 0; +} + +#define SZ_AHASH_CTX sizeof(struct chcr_context) +#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx)) +#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx) +#define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC) + +/* + * chcr_register_alg - Register crypto algorithms with kernel framework. 
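+ * Ablkcipher entries are registered as-is; ahash entries have their request callbacks, flags and context sizes filled in here before registration.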
+ */ +static int chcr_register_alg(void) +{ + struct crypto_alg ai; + struct ahash_alg *a_hash; + int err = 0, i; + char *name = NULL; + + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { + if (driver_algs[i].is_registered) + continue; + switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) { + case CRYPTO_ALG_TYPE_ABLKCIPHER: + err = crypto_register_alg(&driver_algs[i].alg.crypto); + name = driver_algs[i].alg.crypto.cra_driver_name; + break; + case CRYPTO_ALG_TYPE_AHASH: + a_hash = &driver_algs[i].alg.hash; + a_hash->update = chcr_ahash_update; + a_hash->final = chcr_ahash_final; + a_hash->finup = chcr_ahash_finup; + a_hash->digest = chcr_ahash_digest; + a_hash->export = chcr_ahash_export; + a_hash->import = chcr_ahash_import; + a_hash->halg.statesize = SZ_AHASH_REQ_CTX; + a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY; + a_hash->halg.base.cra_module = THIS_MODULE; + a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS; + a_hash->halg.base.cra_alignmask = 0; + a_hash->halg.base.cra_exit = NULL; + a_hash->halg.base.cra_type = &crypto_ahash_type; + + if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) { + a_hash->halg.base.cra_init = chcr_hmac_cra_init; + a_hash->halg.base.cra_exit = chcr_hmac_cra_exit; + a_hash->init = chcr_hmac_init; + a_hash->setkey = chcr_ahash_setkey; + a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX; + } else { + a_hash->init = chcr_sha_init; + a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX; + a_hash->halg.base.cra_init = chcr_sha_cra_init; + } + err = crypto_register_ahash(&driver_algs[i].alg.hash); + ai = driver_algs[i].alg.hash.halg.base; + name = ai.cra_driver_name; + break; + } + if (err) { + pr_err("chcr : %s : Algorithm registration failed\n", + name); + goto register_err; + } else { + driver_algs[i].is_registered = 1; + } + } + return 0; + +register_err: + chcr_unregister_alg(); + return err; +} + +/* + * start_crypto - Register the crypto algorithms. + * This should called once when the first device comesup. After this + * kernel will start calling driver APIs for crypto operations. + */ +int start_crypto(void) +{ + return chcr_register_alg(); +} + +/* + * stop_crypto - Deregister all the crypto algorithms with kernel. + * This should be called once when the last device goes down. After this + * kernel will not call the driver API for crypto operations. + */ +int stop_crypto(void) +{ + chcr_unregister_alg(); + return 0; +} diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h new file mode 100644 index 000000000000..ec64fbcdeb49 --- /dev/null +++ b/drivers/crypto/chelsio/chcr_algo.h @@ -0,0 +1,471 @@ +/* + * This file is part of the Chelsio T6 Crypto driver for Linux. + * + * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef __CHCR_ALGO_H__ +#define __CHCR_ALGO_H__ + +/* Crypto key context */ +#define KEY_CONTEXT_CTX_LEN_S 24 +#define KEY_CONTEXT_CTX_LEN_M 0xff +#define KEY_CONTEXT_CTX_LEN_V(x) ((x) << KEY_CONTEXT_CTX_LEN_S) +#define KEY_CONTEXT_CTX_LEN_G(x) \ + (((x) >> KEY_CONTEXT_CTX_LEN_S) & KEY_CONTEXT_CTX_LEN_M) + +#define KEY_CONTEXT_DUAL_CK_S 12 +#define KEY_CONTEXT_DUAL_CK_M 0x1 +#define KEY_CONTEXT_DUAL_CK_V(x) ((x) << KEY_CONTEXT_DUAL_CK_S) +#define KEY_CONTEXT_DUAL_CK_G(x) \ +(((x) >> KEY_CONTEXT_DUAL_CK_S) & KEY_CONTEXT_DUAL_CK_M) +#define KEY_CONTEXT_DUAL_CK_F KEY_CONTEXT_DUAL_CK_V(1U) + +#define KEY_CONTEXT_SALT_PRESENT_S 10 +#define KEY_CONTEXT_SALT_PRESENT_M 0x1 +#define KEY_CONTEXT_SALT_PRESENT_V(x) ((x) << KEY_CONTEXT_SALT_PRESENT_S) +#define KEY_CONTEXT_SALT_PRESENT_G(x) \ + (((x) >> KEY_CONTEXT_SALT_PRESENT_S) & \ + KEY_CONTEXT_SALT_PRESENT_M) +#define KEY_CONTEXT_SALT_PRESENT_F KEY_CONTEXT_SALT_PRESENT_V(1U) + +#define KEY_CONTEXT_VALID_S 0 +#define KEY_CONTEXT_VALID_M 0x1 +#define KEY_CONTEXT_VALID_V(x) ((x) << KEY_CONTEXT_VALID_S) +#define KEY_CONTEXT_VALID_G(x) \ + (((x) >> KEY_CONTEXT_VALID_S) & \ + KEY_CONTEXT_VALID_M) +#define KEY_CONTEXT_VALID_F KEY_CONTEXT_VALID_V(1U) + +#define KEY_CONTEXT_CK_SIZE_S 6 +#define KEY_CONTEXT_CK_SIZE_M 0xf +#define KEY_CONTEXT_CK_SIZE_V(x) ((x) << KEY_CONTEXT_CK_SIZE_S) +#define KEY_CONTEXT_CK_SIZE_G(x) \ + (((x) >> KEY_CONTEXT_CK_SIZE_S) & KEY_CONTEXT_CK_SIZE_M) + +#define KEY_CONTEXT_MK_SIZE_S 2 +#define KEY_CONTEXT_MK_SIZE_M 0xf +#define KEY_CONTEXT_MK_SIZE_V(x) ((x) << KEY_CONTEXT_MK_SIZE_S) +#define KEY_CONTEXT_MK_SIZE_G(x) \ + (((x) >> KEY_CONTEXT_MK_SIZE_S) & KEY_CONTEXT_MK_SIZE_M) + +#define KEY_CONTEXT_OPAD_PRESENT_S 11 +#define KEY_CONTEXT_OPAD_PRESENT_M 0x1 +#define KEY_CONTEXT_OPAD_PRESENT_V(x) ((x) << KEY_CONTEXT_OPAD_PRESENT_S) +#define KEY_CONTEXT_OPAD_PRESENT_G(x) \ + (((x) >> KEY_CONTEXT_OPAD_PRESENT_S) & \ + KEY_CONTEXT_OPAD_PRESENT_M) +#define KEY_CONTEXT_OPAD_PRESENT_F KEY_CONTEXT_OPAD_PRESENT_V(1U) + +#define CHCR_HASH_MAX_DIGEST_SIZE 64 +#define CHCR_MAX_SHA_DIGEST_SIZE 64 + +#define IPSEC_TRUNCATED_ICV_SIZE 12 +#define TLS_TRUNCATED_HMAC_SIZE 10 +#define CBCMAC_DIGEST_SIZE 16 +#define MAX_HASH_NAME 20 + +#define SHA1_INIT_STATE_5X4B 5 +#define SHA256_INIT_STATE_8X4B 8 +#define SHA512_INIT_STATE_8X8B 8 +#define SHA1_INIT_STATE SHA1_INIT_STATE_5X4B +#define SHA224_INIT_STATE SHA256_INIT_STATE_8X4B +#define SHA256_INIT_STATE SHA256_INIT_STATE_8X4B +#define SHA384_INIT_STATE SHA512_INIT_STATE_8X8B +#define SHA512_INIT_STATE SHA512_INIT_STATE_8X8B + +#define DUMMY_BYTES 16 + +#define IPAD_DATA 0x36363636 +#define OPAD_DATA 0x5c5c5c5c + +#define TRANSHDR_SIZE(alignedkctx_len)\ + (sizeof(struct ulptx_idata) +\ + sizeof(struct ulp_txpkt) +\ + sizeof(struct fw_crypto_lookaside_wr) +\ + sizeof(struct 
cpl_tx_sec_pdu) +\ + (alignedkctx_len)) +#define CIPHER_TRANSHDR_SIZE(alignedkctx_len, sge_pairs) \ + (TRANSHDR_SIZE(alignedkctx_len) + sge_pairs +\ + sizeof(struct cpl_rx_phys_dsgl)) +#define HASH_TRANSHDR_SIZE(alignedkctx_len)\ + (TRANSHDR_SIZE(alignedkctx_len) + DUMMY_BYTES) + +#define SEC_CPL_OFFSET (sizeof(struct fw_crypto_lookaside_wr) + \ + sizeof(struct ulp_txpkt) + \ + sizeof(struct ulptx_idata)) + +#define FILL_SEC_CPL_OP_IVINSR(id, len, hldr, ofst) \ + htonl( \ + CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) | \ + CPL_TX_SEC_PDU_RXCHID_V((id)) | \ + CPL_TX_SEC_PDU_ACKFOLLOWS_V(0) | \ + CPL_TX_SEC_PDU_ULPTXLPBK_V(1) | \ + CPL_TX_SEC_PDU_CPLLEN_V((len)) | \ + CPL_TX_SEC_PDU_PLACEHOLDER_V((hldr)) | \ + CPL_TX_SEC_PDU_IVINSRTOFST_V((ofst))) + +#define FILL_SEC_CPL_CIPHERSTOP_HI(a_start, a_stop, c_start, c_stop_hi) \ + htonl( \ + CPL_TX_SEC_PDU_AADSTART_V((a_start)) | \ + CPL_TX_SEC_PDU_AADSTOP_V((a_stop)) | \ + CPL_TX_SEC_PDU_CIPHERSTART_V((c_start)) | \ + CPL_TX_SEC_PDU_CIPHERSTOP_HI_V((c_stop_hi))) + +#define FILL_SEC_CPL_AUTHINSERT(c_stop_lo, a_start, a_stop, a_inst) \ + htonl( \ + CPL_TX_SEC_PDU_CIPHERSTOP_LO_V((c_stop_lo)) | \ + CPL_TX_SEC_PDU_AUTHSTART_V((a_start)) | \ + CPL_TX_SEC_PDU_AUTHSTOP_V((a_stop)) | \ + CPL_TX_SEC_PDU_AUTHINSERT_V((a_inst))) + +#define FILL_SEC_CPL_SCMD0_SEQNO(ctrl, seq, cmode, amode, opad, size, nivs) \ + htonl( \ + SCMD_SEQ_NO_CTRL_V(0) | \ + SCMD_STATUS_PRESENT_V(0) | \ + SCMD_PROTO_VERSION_V(CHCR_SCMD_PROTO_VERSION_GENERIC) | \ + SCMD_ENC_DEC_CTRL_V((ctrl)) | \ + SCMD_CIPH_AUTH_SEQ_CTRL_V((seq)) | \ + SCMD_CIPH_MODE_V((cmode)) | \ + SCMD_AUTH_MODE_V((amode)) | \ + SCMD_HMAC_CTRL_V((opad)) | \ + SCMD_IV_SIZE_V((size)) | \ + SCMD_NUM_IVS_V((nivs))) + +#define FILL_SEC_CPL_IVGEN_HDRLEN(last, more, ctx_in, mac, ivdrop, len) htonl( \ + SCMD_ENB_DBGID_V(0) | \ + SCMD_IV_GEN_CTRL_V(0) | \ + SCMD_LAST_FRAG_V((last)) | \ + SCMD_MORE_FRAGS_V((more)) | \ + SCMD_TLS_COMPPDU_V(0) | \ + SCMD_KEY_CTX_INLINE_V((ctx_in)) | \ + SCMD_TLS_FRAG_ENABLE_V(0) | \ + SCMD_MAC_ONLY_V((mac)) | \ + SCMD_AADIVDROP_V((ivdrop)) | \ + SCMD_HDR_LEN_V((len))) + +#define FILL_KEY_CTX_HDR(ck_size, mk_size, d_ck, opad, ctx_len) \ + htonl(KEY_CONTEXT_VALID_V(1) | \ + KEY_CONTEXT_CK_SIZE_V((ck_size)) | \ + KEY_CONTEXT_MK_SIZE_V(mk_size) | \ + KEY_CONTEXT_DUAL_CK_V((d_ck)) | \ + KEY_CONTEXT_OPAD_PRESENT_V((opad)) | \ + KEY_CONTEXT_SALT_PRESENT_V(1) | \ + KEY_CONTEXT_CTX_LEN_V((ctx_len))) + +#define FILL_WR_OP_CCTX_SIZE(len, ctx_len) \ + htonl( \ + FW_CRYPTO_LOOKASIDE_WR_OPCODE_V( \ + FW_CRYPTO_LOOKASIDE_WR) | \ + FW_CRYPTO_LOOKASIDE_WR_COMPL_V(0) | \ + FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V((len)) | \ + FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(1) | \ + FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V((ctx_len))) + +#define FILL_WR_RX_Q_ID(cid, qid, wr_iv) \ + htonl( \ + FW_CRYPTO_LOOKASIDE_WR_RX_CHID_V((cid)) | \ + FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_V((qid)) | \ + FW_CRYPTO_LOOKASIDE_WR_LCB_V(0) | \ + FW_CRYPTO_LOOKASIDE_WR_IV_V((wr_iv))) + +#define FILL_ULPTX_CMD_DEST(cid) \ + htonl(ULPTX_CMD_V(ULP_TX_PKT) | \ + ULP_TXPKT_DEST_V(0) | \ + ULP_TXPKT_DATAMODIFY_V(0) | \ + ULP_TXPKT_CHANNELID_V((cid)) | \ + ULP_TXPKT_RO_V(1) | \ + ULP_TXPKT_FID_V(0)) + +#define KEYCTX_ALIGN_PAD(bs) ({unsigned int _bs = (bs);\ + _bs == SHA1_DIGEST_SIZE ? 12 : 0; }) + +#define FILL_PLD_SIZE_HASH_SIZE(payload_sgl_len, sgl_lengths, total_frags) \ + htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(payload_sgl_len ? 
\ + sgl_lengths[total_frags] : 0) |\ + FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(0)) + +#define FILL_LEN_PKD(calc_tx_flits_ofld, skb) \ + htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP((\ + calc_tx_flits_ofld(skb) * 8), 16))) + +#define FILL_CMD_MORE(immdatalen) htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) |\ + ULP_TX_SC_MORE_V((immdatalen) ? 0 : 1)) + +#define MAX_NK 8 +#define CRYPTO_MAX_IMM_TX_PKT_LEN 256 + +struct algo_param { + unsigned int auth_mode; + unsigned int mk_size; + unsigned int result_size; +}; + +struct hash_wr_param { + unsigned int opad_needed; + unsigned int more; + unsigned int last; + struct algo_param alg_prm; + unsigned int sg_len; + unsigned int bfr_len; + u64 scmd1; +}; + +enum { + AES_KEYLENGTH_128BIT = 128, + AES_KEYLENGTH_192BIT = 192, + AES_KEYLENGTH_256BIT = 256 +}; + +enum { + KEYLENGTH_3BYTES = 3, + KEYLENGTH_4BYTES = 4, + KEYLENGTH_6BYTES = 6, + KEYLENGTH_8BYTES = 8 +}; + +enum { + NUMBER_OF_ROUNDS_10 = 10, + NUMBER_OF_ROUNDS_12 = 12, + NUMBER_OF_ROUNDS_14 = 14, +}; + +/* + * CCM defines values of 4, 6, 8, 10, 12, 14, and 16 octets, + * where they indicate the size of the integrity check value (ICV) + */ +enum { + AES_CCM_ICV_4 = 4, + AES_CCM_ICV_6 = 6, + AES_CCM_ICV_8 = 8, + AES_CCM_ICV_10 = 10, + AES_CCM_ICV_12 = 12, + AES_CCM_ICV_14 = 14, + AES_CCM_ICV_16 = 16 +}; + +struct hash_op_params { + unsigned char mk_size; + unsigned char pad_align; + unsigned char auth_mode; + char hash_name[MAX_HASH_NAME]; + unsigned short block_size; + unsigned short word_size; + unsigned short ipad_size; +}; + +struct phys_sge_pairs { + __be16 len[8]; + __be64 addr[8]; +}; + +struct phys_sge_parm { + unsigned int nents; + unsigned int obsize; + unsigned short qid; + unsigned char align; +}; + +struct crypto_result { + struct completion completion; + int err; +}; + +static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = { + SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, +}; + +static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = { + SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, + SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7, +}; + +static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = { + SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, + SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7, +}; + +static const u64 sha384_init[SHA512_DIGEST_SIZE / 8] = { + SHA384_H0, SHA384_H1, SHA384_H2, SHA384_H3, + SHA384_H4, SHA384_H5, SHA384_H6, SHA384_H7, +}; + +static const u64 sha512_init[SHA512_DIGEST_SIZE / 8] = { + SHA512_H0, SHA512_H1, SHA512_H2, SHA512_H3, + SHA512_H4, SHA512_H5, SHA512_H6, SHA512_H7, +}; + +static inline void copy_hash_init_values(char *key, int digestsize) +{ + u8 i; + __be32 *dkey = (__be32 *)key; + u64 *ldkey = (u64 *)key; + __be64 *sha384 = (__be64 *)sha384_init; + __be64 *sha512 = (__be64 *)sha512_init; + + switch (digestsize) { + case SHA1_DIGEST_SIZE: + for (i = 0; i < SHA1_INIT_STATE; i++) + dkey[i] = cpu_to_be32(sha1_init[i]); + break; + case SHA224_DIGEST_SIZE: + for (i = 0; i < SHA224_INIT_STATE; i++) + dkey[i] = cpu_to_be32(sha224_init[i]); + break; + case SHA256_DIGEST_SIZE: + for (i = 0; i < SHA256_INIT_STATE; i++) + dkey[i] = cpu_to_be32(sha256_init[i]); + break; + case SHA384_DIGEST_SIZE: + for (i = 0; i < SHA384_INIT_STATE; i++) + ldkey[i] = be64_to_cpu(sha384[i]); + break; + case SHA512_DIGEST_SIZE: + for (i = 0; i < SHA512_INIT_STATE; i++) + ldkey[i] = be64_to_cpu(sha512[i]); + break; + } +} + +static const u8 sgl_lengths[20] = { + 0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 12, 13, 13, 14, 15 +}; + +/* Number of len fields(8) * size of one addr field */ +#define 
PHYSDSGL_MAX_LEN_SIZE 16 + +static inline u16 get_space_for_phys_dsgl(unsigned int sgl_entr) +{ + /* len field size + addr field size */ + return ((sgl_entr >> 3) + ((sgl_entr % 8) ? + 1 : 0)) * PHYSDSGL_MAX_LEN_SIZE + + (sgl_entr << 3) + ((sgl_entr % 2 ? 1 : 0) << 3); +} + +/* The AES s-transform matrix (s-box). */ +static const u8 aes_sbox[256] = { + 99, 124, 119, 123, 242, 107, 111, 197, 48, 1, 103, 43, 254, 215, + 171, 118, 202, 130, 201, 125, 250, 89, 71, 240, 173, 212, 162, 175, + 156, 164, 114, 192, 183, 253, 147, 38, 54, 63, 247, 204, 52, 165, + 229, 241, 113, 216, 49, 21, 4, 199, 35, 195, 24, 150, 5, 154, 7, + 18, 128, 226, 235, 39, 178, 117, 9, 131, 44, 26, 27, 110, 90, + 160, 82, 59, 214, 179, 41, 227, 47, 132, 83, 209, 0, 237, 32, + 252, 177, 91, 106, 203, 190, 57, 74, 76, 88, 207, 208, 239, 170, + 251, 67, 77, 51, 133, 69, 249, 2, 127, 80, 60, 159, 168, 81, + 163, 64, 143, 146, 157, 56, 245, 188, 182, 218, 33, 16, 255, 243, + 210, 205, 12, 19, 236, 95, 151, 68, 23, 196, 167, 126, 61, 100, + 93, 25, 115, 96, 129, 79, 220, 34, 42, 144, 136, 70, 238, 184, + 20, 222, 94, 11, 219, 224, 50, 58, 10, 73, 6, 36, 92, 194, + 211, 172, 98, 145, 149, 228, 121, 231, 200, 55, 109, 141, 213, 78, + 169, 108, 86, 244, 234, 101, 122, 174, 8, 186, 120, 37, 46, 28, 166, + 180, 198, 232, 221, 116, 31, 75, 189, 139, 138, 112, 62, 181, 102, + 72, 3, 246, 14, 97, 53, 87, 185, 134, 193, 29, 158, 225, 248, + 152, 17, 105, 217, 142, 148, 155, 30, 135, 233, 206, 85, 40, 223, + 140, 161, 137, 13, 191, 230, 66, 104, 65, 153, 45, 15, 176, 84, + 187, 22 +}; + +static u32 aes_ks_subword(const u32 w) +{ + u8 bytes[4]; + + *(u32 *)(&bytes[0]) = w; + bytes[0] = aes_sbox[bytes[0]]; + bytes[1] = aes_sbox[bytes[1]]; + bytes[2] = aes_sbox[bytes[2]]; + bytes[3] = aes_sbox[bytes[3]]; + return *(u32 *)(&bytes[0]); +} + +static u32 round_constant[11] = { + 0x01000000, 0x02000000, 0x04000000, 0x08000000, + 0x10000000, 0x20000000, 0x40000000, 0x80000000, + 0x1B000000, 0x36000000, 0x6C000000 +}; + +/* dec_key - OUTPUT - Reverse round key + * key - INPUT - key + * keylength - INPUT - length of the key in number of bits + */ +static inline void get_aes_decrypt_key(unsigned char *dec_key, + const unsigned char *key, + unsigned int keylength) +{ + u32 temp; + u32 w_ring[MAX_NK]; + int i, j, k = 0; + u8 nr, nk; + + switch (keylength) { + case AES_KEYLENGTH_128BIT: + nk = KEYLENGTH_4BYTES; + nr = NUMBER_OF_ROUNDS_10; + break; + + case AES_KEYLENGTH_192BIT: + nk = KEYLENGTH_6BYTES; + nr = NUMBER_OF_ROUNDS_12; + break; + case AES_KEYLENGTH_256BIT: + nk = KEYLENGTH_8BYTES; + nr = NUMBER_OF_ROUNDS_14; + break; + default: + return; + } + for (i = 0; i < nk; i++ ) + w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]); + + i = 0; + temp = w_ring[nk - 1]; + while(i + nk < (nr + 1) * 4) { + if(!(i % nk)) { + /* RotWord(temp) */ + temp = (temp << 8) | (temp >> 24); + temp = aes_ks_subword(temp); + temp ^= round_constant[i / nk]; + } + else if (nk == 8 && (i % 4 == 0)) + temp = aes_ks_subword(temp); + w_ring[i % nk] ^= temp; + temp = w_ring[i % nk]; + i++; + } + for (k = 0, j = i % nk; k < nk; k++) { + *((u32 *)dec_key + k) = htonl(w_ring[j]); + j--; + if(j < 0) + j += nk; + } +} + +#endif /* __CHCR_ALGO_H__ */ diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c new file mode 100644 index 000000000000..2f6156b672ce --- /dev/null +++ b/drivers/crypto/chelsio/chcr_core.c @@ -0,0 +1,240 @@ +/** + * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux. 
+ * + * Copyright (C) 2011-2016 Chelsio Communications. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written and Maintained by: + * Manoj Malviya (manojmalviya@chelsio.com) + * Atul Gupta (atul.gupta@chelsio.com) + * Jitendra Lulla (jlulla@chelsio.com) + * Yeshaswi M R Gowda (yeshaswi@chelsio.com) + * Harsh Jain (harsh@chelsio.com) + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/skbuff.h> + +#include <crypto/aes.h> +#include <crypto/hash.h> + +#include "t4_msg.h" +#include "chcr_core.h" +#include "cxgb4_uld.h" + +static LIST_HEAD(uld_ctx_list); +static DEFINE_MUTEX(dev_mutex); +static atomic_t dev_count; + +typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input); +static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input); +static void *chcr_uld_add(const struct cxgb4_lld_info *lld); +static int chcr_uld_state_change(void *handle, enum cxgb4_state state); + +static chcr_handler_func work_handlers[NUM_CPL_CMDS] = { + [CPL_FW6_PLD] = cpl_fw6_pld_handler, +}; + +static struct cxgb4_pci_uld_info chcr_uld_info = { + .name = DRV_MODULE_NAME, + .nrxq = 4, + .rxq_size = 1024, + .nciq = 0, + .ciq_size = 0, + .add = chcr_uld_add, + .state_change = chcr_uld_state_change, + .rx_handler = chcr_uld_rx_handler, +}; + +int assign_chcr_device(struct chcr_dev **dev) +{ + struct uld_ctx *u_ctx; + + /* + * Which device to use if multiple devices are available TODO + * May be select the device based on round robin. One session + * must go to the same device to maintain the ordering. + */ + mutex_lock(&dev_mutex); /* TODO ? */ + u_ctx = list_first_entry(&uld_ctx_list, struct uld_ctx, entry); + if (!u_ctx) { + mutex_unlock(&dev_mutex); + return -ENXIO; + } + + *dev = u_ctx->dev; + mutex_unlock(&dev_mutex); + return 0; +} + +static int chcr_dev_add(struct uld_ctx *u_ctx) +{ + struct chcr_dev *dev; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENXIO; + + spin_lock_init(&dev->lock_chcr_dev); + u_ctx->dev = dev; + dev->u_ctx = u_ctx; + atomic_inc(&dev_count); + return 0; +} + +static int chcr_dev_remove(struct uld_ctx *u_ctx) +{ + kfree(u_ctx->dev); + u_ctx->dev = NULL; + atomic_dec(&dev_count); + return 0; +} + +static int cpl_fw6_pld_handler(struct chcr_dev *dev, + unsigned char *input) +{ + struct crypto_async_request *req; + struct cpl_fw6_pld *fw6_pld; + u32 ack_err_status = 0; + int error_status = 0; + + fw6_pld = (struct cpl_fw6_pld *)input; + req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu( + fw6_pld->data[1]); + + ack_err_status = + ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4)); + if (ack_err_status) { + if (CHK_MAC_ERR_BIT(ack_err_status) || + CHK_PAD_ERR_BIT(ack_err_status)) + error_status = -EINVAL; + } + /* call completion callback with failure status */ + if (req) { + if (!chcr_handle_resp(req, input, error_status)) + req->complete(req, error_status); + else + return -EINVAL; + } else { + pr_err("Incorrect request address from the firmware\n"); + return -EFAULT; + } + return 0; +} + +int chcr_send_wr(struct sk_buff *skb) +{ + return cxgb4_ofld_send(skb->dev, skb); +} + +static void *chcr_uld_add(const struct cxgb4_lld_info *lld) +{ + struct uld_ctx *u_ctx; + + /* Create the device and add it in the device list */ + u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL); + if (!u_ctx) { + u_ctx = ERR_PTR(-ENOMEM); + goto out; + } + 
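/* Cache the lower-layer driver info and add the context to the global list; the chcr_dev itself is allocated later from chcr_uld_state_change() when the adapter reports CXGB4_STATE_UP. */ +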
u_ctx->lldi = *lld; + mutex_lock(&dev_mutex); + list_add_tail(&u_ctx->entry, &uld_ctx_list); + mutex_unlock(&dev_mutex); +out: + return u_ctx; +} + +int chcr_uld_rx_handler(void *handle, const __be64 *rsp, + const struct pkt_gl *pgl) +{ + struct uld_ctx *u_ctx = (struct uld_ctx *)handle; + struct chcr_dev *dev = u_ctx->dev; + const struct cpl_act_establish *rpl = (struct cpl_act_establish + *)rsp; + + if (rpl->ot.opcode != CPL_FW6_PLD) { + pr_err("Unsupported opcode\n"); + return 0; + } + + if (!pgl) + work_handlers[rpl->ot.opcode](dev, (unsigned char *)&rsp[1]); + else + work_handlers[rpl->ot.opcode](dev, pgl->va); + return 0; +} + +static int chcr_uld_state_change(void *handle, enum cxgb4_state state) +{ + struct uld_ctx *u_ctx = handle; + int ret = 0; + + switch (state) { + case CXGB4_STATE_UP: + if (!u_ctx->dev) { + ret = chcr_dev_add(u_ctx); + if (ret != 0) + return ret; + } + if (atomic_read(&dev_count) == 1) + ret = start_crypto(); + break; + + case CXGB4_STATE_DETACH: + if (u_ctx->dev) { + mutex_lock(&dev_mutex); + chcr_dev_remove(u_ctx); + mutex_unlock(&dev_mutex); + } + if (!atomic_read(&dev_count)) + stop_crypto(); + break; + + case CXGB4_STATE_START_RECOVERY: + case CXGB4_STATE_DOWN: + default: + break; + } + return ret; +} + +static int __init chcr_crypto_init(void) +{ + if (cxgb4_register_pci_uld(CXGB4_PCI_ULD1, &chcr_uld_info)) { + pr_err("ULD register fail: No chcr crypto support in cxgb4"); + return -1; + } + + return 0; +} + +static void __exit chcr_crypto_exit(void) +{ + struct uld_ctx *u_ctx, *tmp; + + if (atomic_read(&dev_count)) + stop_crypto(); + + /* Remove all devices from list */ + mutex_lock(&dev_mutex); + list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) { + if (u_ctx->dev) + chcr_dev_remove(u_ctx); + kfree(u_ctx); + } + mutex_unlock(&dev_mutex); + cxgb4_unregister_pci_uld(CXGB4_PCI_ULD1); +} + +module_init(chcr_crypto_init); +module_exit(chcr_crypto_exit); + +MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards."); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Chelsio Communications"); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h new file mode 100644 index 000000000000..2a5c671a4232 --- /dev/null +++ b/drivers/crypto/chelsio/chcr_core.h @@ -0,0 +1,80 @@ +/* + * This file is part of the Chelsio T6 Crypto driver for Linux. + * + * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef __CHCR_CORE_H__ +#define __CHCR_CORE_H__ + +#include <crypto/algapi.h> +#include "t4_hw.h" +#include "cxgb4.h" +#include "cxgb4_uld.h" + +#define DRV_MODULE_NAME "chcr" +#define DRV_VERSION "1.0.0.0" + +#define MAX_PENDING_REQ_TO_HW 20 +#define CHCR_TEST_RESPONSE_TIMEOUT 1000 + +#define PAD_ERROR_BIT 1 +#define CHK_PAD_ERR_BIT(x) (((x) >> PAD_ERROR_BIT) & 1) + +#define MAC_ERROR_BIT 0 +#define CHK_MAC_ERR_BIT(x) (((x) >> MAC_ERROR_BIT) & 1) + +struct uld_ctx; + +struct chcr_dev { + /* Request submited to h/w and waiting for response. */ + spinlock_t lock_chcr_dev; + struct crypto_queue pending_queue; + struct uld_ctx *u_ctx; + unsigned char tx_channel_id; +}; + +struct uld_ctx { + struct list_head entry; + struct cxgb4_lld_info lldi; + struct chcr_dev *dev; +}; + +int assign_chcr_device(struct chcr_dev **dev); +int chcr_send_wr(struct sk_buff *skb); +int start_crypto(void); +int stop_crypto(void); +int chcr_uld_rx_handler(void *handle, const __be64 *rsp, + const struct pkt_gl *pgl); +int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input, + int err); +#endif /* __CHCR_CORE_H__ */ diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h new file mode 100644 index 000000000000..d7d75605da8b --- /dev/null +++ b/drivers/crypto/chelsio/chcr_crypto.h @@ -0,0 +1,203 @@ +/* + * This file is part of the Chelsio T6 Crypto driver for Linux. + * + * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#ifndef __CHCR_CRYPTO_H__ +#define __CHCR_CRYPTO_H__ + +/* Define following if h/w is not dropping the AAD and IV data before + * giving the processed data + */ + +#define CHCR_CRA_PRIORITY 300 + +#define CHCR_AES_MAX_KEY_LEN (2 * (AES_MAX_KEY_SIZE)) /* consider xts */ +#define CHCR_MAX_CRYPTO_IV_LEN 16 /* AES IV len */ + +#define CHCR_MAX_AUTHENC_AES_KEY_LEN 32 /* max aes key length*/ +#define CHCR_MAX_AUTHENC_SHA_KEY_LEN 128 /* max sha key length*/ + +#define CHCR_GIVENCRYPT_OP 2 +/* CPL/SCMD parameters */ + +#define CHCR_ENCRYPT_OP 0 +#define CHCR_DECRYPT_OP 1 + +#define CHCR_SCMD_SEQ_NO_CTRL_32BIT 1 +#define CHCR_SCMD_SEQ_NO_CTRL_48BIT 2 +#define CHCR_SCMD_SEQ_NO_CTRL_64BIT 3 + +#define CHCR_SCMD_PROTO_VERSION_GENERIC 4 + +#define CHCR_SCMD_AUTH_CTRL_AUTH_CIPHER 0 +#define CHCR_SCMD_AUTH_CTRL_CIPHER_AUTH 1 + +#define CHCR_SCMD_CIPHER_MODE_NOP 0 +#define CHCR_SCMD_CIPHER_MODE_AES_CBC 1 +#define CHCR_SCMD_CIPHER_MODE_GENERIC_AES 4 +#define CHCR_SCMD_CIPHER_MODE_AES_XTS 6 + +#define CHCR_SCMD_AUTH_MODE_NOP 0 +#define CHCR_SCMD_AUTH_MODE_SHA1 1 +#define CHCR_SCMD_AUTH_MODE_SHA224 2 +#define CHCR_SCMD_AUTH_MODE_SHA256 3 +#define CHCR_SCMD_AUTH_MODE_SHA512_224 5 +#define CHCR_SCMD_AUTH_MODE_SHA512_256 6 +#define CHCR_SCMD_AUTH_MODE_SHA512_384 7 +#define CHCR_SCMD_AUTH_MODE_SHA512_512 8 + +#define CHCR_SCMD_HMAC_CTRL_NOP 0 +#define CHCR_SCMD_HMAC_CTRL_NO_TRUNC 1 + +#define CHCR_SCMD_IVGEN_CTRL_HW 0 +#define CHCR_SCMD_IVGEN_CTRL_SW 1 +/* This are not really mac key size. They are intermediate values + * of sha engine and its size + */ +#define CHCR_KEYCTX_MAC_KEY_SIZE_128 0 +#define CHCR_KEYCTX_MAC_KEY_SIZE_160 1 +#define CHCR_KEYCTX_MAC_KEY_SIZE_192 2 +#define CHCR_KEYCTX_MAC_KEY_SIZE_256 3 +#define CHCR_KEYCTX_MAC_KEY_SIZE_512 4 +#define CHCR_KEYCTX_CIPHER_KEY_SIZE_128 0 +#define CHCR_KEYCTX_CIPHER_KEY_SIZE_192 1 +#define CHCR_KEYCTX_CIPHER_KEY_SIZE_256 2 +#define CHCR_KEYCTX_NO_KEY 15 + +#define CHCR_CPL_FW4_PLD_IV_OFFSET (5 * 64) /* bytes. flt #5 and #6 */ +#define CHCR_CPL_FW4_PLD_HASH_RESULT_OFFSET (7 * 64) /* bytes. flt #7 */ +#define CHCR_CPL_FW4_PLD_DATA_SIZE (4 * 64) /* bytes. 
flt #4 to #7 */ + +#define KEY_CONTEXT_HDR_SALT_AND_PAD 16 +#define flits_to_bytes(x) (x * 8) + +#define IV_NOP 0 +#define IV_IMMEDIATE 1 +#define IV_DSGL 2 + +#define CRYPTO_ALG_SUB_TYPE_MASK 0x0f000000 +#define CRYPTO_ALG_SUB_TYPE_HASH_HMAC 0x01000000 +#define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\ + CRYPTO_ALG_SUB_TYPE_HASH_HMAC) + +#define MAX_SALT 4 +#define MAX_SCRATCH_PAD_SIZE 32 + +#define CHCR_HASH_MAX_BLOCK_SIZE_64 64 +#define CHCR_HASH_MAX_BLOCK_SIZE_128 128 + +/* Aligned to 128 bit boundary */ +struct _key_ctx { + __be32 ctx_hdr; + u8 salt[MAX_SALT]; + __be64 reserverd; + unsigned char key[0]; +}; + +struct ablk_ctx { + u8 enc; + unsigned int processed_len; + __be32 key_ctx_hdr; + unsigned int enckey_len; + unsigned int dst_nents; + struct scatterlist iv_sg; + u8 key[CHCR_AES_MAX_KEY_LEN]; + u8 iv[CHCR_MAX_CRYPTO_IV_LEN]; + unsigned char ciph_mode; +}; + +struct hmac_ctx { + struct shash_desc *desc; + u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128]; + u8 opad[CHCR_HASH_MAX_BLOCK_SIZE_128]; +}; + +struct __crypto_ctx { + struct hmac_ctx hmacctx[0]; + struct ablk_ctx ablkctx[0]; +}; + +struct chcr_context { + struct chcr_dev *dev; + unsigned char tx_channel_id; + struct __crypto_ctx crypto_ctx[0]; +}; + +struct chcr_ahash_req_ctx { + u32 result; + char bfr[CHCR_HASH_MAX_BLOCK_SIZE_128]; + u8 bfr_len; + /* DMA the partial hash in it */ + u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE]; + u64 data_len; /* Data len till time */ + void *dummy_payload_ptr; + /* SKB which is being sent to the hardware for processing */ + struct sk_buff *skb; +}; + +struct chcr_blkcipher_req_ctx { + struct sk_buff *skb; +}; + +struct chcr_alg_template { + u32 type; + u32 is_registered; + union { + struct crypto_alg crypto; + struct ahash_alg hash; + } alg; +}; + +struct chcr_req_ctx { + union { + struct ahash_request *ahash_req; + struct ablkcipher_request *ablk_req; + } req; + union { + struct chcr_ahash_req_ctx *ahash_ctx; + struct chcr_blkcipher_req_ctx *ablk_ctx; + } ctx; +}; + +struct sge_opaque_hdr { + void *dev; + dma_addr_t addr[MAX_SKB_FRAGS + 1]; +}; + +typedef struct sk_buff *(*create_wr_t)(struct crypto_async_request *req, + struct chcr_context *ctx, + unsigned short qid, + unsigned short op_type); + +#endif /* __CHCR_CRYPTO_H__ */ diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 308a358e5b46..35a9f718e669 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -747,14 +747,16 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf, static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, struct ib_ucontext *context, struct mlx5_ib_cq *cq, - int entries, struct mlx5_create_cq_mbox_in **cqb, + int entries, u32 **cqb, int *cqe_size, int *index, int *inlen) { struct mlx5_ib_create_cq ucmd; size_t ucmdlen; int page_shift; + __be64 *pas; int npages; int ncont; + void *cqc; int err; ucmdlen = @@ -792,14 +794,20 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n", ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont); - *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * ncont; + *inlen = MLX5_ST_SZ_BYTES(create_cq_in) + + MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont; *cqb = mlx5_vzalloc(*inlen); if (!*cqb) { err = -ENOMEM; goto err_db; } - mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0); - (*cqb)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT; + + pas = 
(__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas); + mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, pas, 0); + + cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context); + MLX5_SET(cqc, cqc, log_page_size, + page_shift - MLX5_ADAPTER_PAGE_SHIFT); *index = to_mucontext(context)->uuari.uars[0].index; @@ -834,9 +842,10 @@ static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf) static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, int entries, int cqe_size, - struct mlx5_create_cq_mbox_in **cqb, - int *index, int *inlen) + u32 **cqb, int *index, int *inlen) { + __be64 *pas; + void *cqc; int err; err = mlx5_db_alloc(dev->mdev, &cq->db); @@ -853,15 +862,21 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, init_cq_buf(cq, &cq->buf); - *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages; + *inlen = MLX5_ST_SZ_BYTES(create_cq_in) + + MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages; *cqb = mlx5_vzalloc(*inlen); if (!*cqb) { err = -ENOMEM; goto err_buf; } - mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas); - (*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT; + pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas); + mlx5_fill_page_array(&cq->buf.buf, pas); + + cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context); + MLX5_SET(cqc, cqc, log_page_size, + cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); + *index = dev->mdev->priv.uuari.uars[0].index; return 0; @@ -895,11 +910,12 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, { int entries = attr->cqe; int vector = attr->comp_vector; - struct mlx5_create_cq_mbox_in *cqb = NULL; struct mlx5_ib_dev *dev = to_mdev(ibdev); struct mlx5_ib_cq *cq; int uninitialized_var(index); int uninitialized_var(inlen); + u32 *cqb = NULL; + void *cqc; int cqe_size; unsigned int irqn; int eqn; @@ -945,19 +961,20 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, INIT_WORK(&cq->notify_work, notify_soft_wc_handler); } - cq->cqe_size = cqe_size; - cqb->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5; - - if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN) - cqb->ctx.cqe_sz_flags |= (1 << 1); - - cqb->ctx.log_sz_usr_page = cpu_to_be32((ilog2(entries) << 24) | index); err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn); if (err) goto err_cqb; - cqb->ctx.c_eqn = cpu_to_be16(eqn); - cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma); + cq->cqe_size = cqe_size; + + cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context); + MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size)); + MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries)); + MLX5_SET(cqc, cqc, uar_page, index); + MLX5_SET(cqc, cqc, c_eqn, eqn); + MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma); + if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN) + MLX5_SET(cqc, cqc, oi, 1); err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen); if (err) @@ -1088,27 +1105,15 @@ void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq) int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) { - struct mlx5_modify_cq_mbox_in *in; struct mlx5_ib_dev *dev = to_mdev(cq->device); struct mlx5_ib_cq *mcq = to_mcq(cq); int err; - u32 fsel; if (!MLX5_CAP_GEN(dev->mdev, cq_moderation)) return -ENOSYS; - in = kzalloc(sizeof(*in), GFP_KERNEL); - if (!in) - return -ENOMEM; - - in->cqn = cpu_to_be32(mcq->mcq.cqn); - fsel = (MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT); - in->ctx.cq_period = cpu_to_be16(cq_period); - in->ctx.cq_max_count = cpu_to_be16(cq_count); - in->field_select = 
cpu_to_be32(fsel); - err = mlx5_core_modify_cq(dev->mdev, &mcq->mcq, in, sizeof(*in)); - kfree(in); - + err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq, + cq_period, cq_count); if (err) mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn); @@ -1241,9 +1246,11 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) { struct mlx5_ib_dev *dev = to_mdev(ibcq->device); struct mlx5_ib_cq *cq = to_mcq(ibcq); - struct mlx5_modify_cq_mbox_in *in; + void *cqc; + u32 *in; int err; int npas; + __be64 *pas; int page_shift; int inlen; int uninitialized_var(cqe_size); @@ -1285,28 +1292,37 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) if (err) goto ex; - inlen = sizeof(*in) + npas * sizeof(in->pas[0]); + inlen = MLX5_ST_SZ_BYTES(modify_cq_in) + + MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas; + in = mlx5_vzalloc(inlen); if (!in) { err = -ENOMEM; goto ex_resize; } + pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas); if (udata) mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift, - in->pas, 0); + pas, 0); else - mlx5_fill_page_array(&cq->resize_buf->buf, in->pas); - - in->field_select = cpu_to_be32(MLX5_MODIFY_CQ_MASK_LOG_SIZE | - MLX5_MODIFY_CQ_MASK_PG_OFFSET | - MLX5_MODIFY_CQ_MASK_PG_SIZE); - in->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT; - in->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5; - in->ctx.page_offset = 0; - in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(entries) << 24); - in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE); - in->cqn = cpu_to_be32(cq->mcq.cqn); + mlx5_fill_page_array(&cq->resize_buf->buf, pas); + + MLX5_SET(modify_cq_in, in, + modify_field_select_resize_field_select.resize_field_select.resize_field_select, + MLX5_MODIFY_CQ_MASK_LOG_SIZE | + MLX5_MODIFY_CQ_MASK_PG_OFFSET | + MLX5_MODIFY_CQ_MASK_PG_SIZE); + + cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context); + + MLX5_SET(cqc, cqc, log_page_size, + page_shift - MLX5_ADAPTER_PAGE_SHIFT); + MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size)); + MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries)); + + MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE); + MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn); err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen); if (err) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 1b4094baa2de..8db6fdff092f 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -232,23 +232,19 @@ static int set_roce_addr(struct ib_device *device, u8 port_num, const union ib_gid *gid, const struct ib_gid_attr *attr) { - struct mlx5_ib_dev *dev = to_mdev(device); - u32 in[MLX5_ST_SZ_DW(set_roce_address_in)]; - u32 out[MLX5_ST_SZ_DW(set_roce_address_out)]; + struct mlx5_ib_dev *dev = to_mdev(device); + u32 in[MLX5_ST_SZ_DW(set_roce_address_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0}; void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address); enum rdma_link_layer ll = mlx5_ib_port_link_layer(device, port_num); if (ll != IB_LINK_LAYER_ETHERNET) return -EINVAL; - memset(in, 0, sizeof(in)); - ib_gid_to_mlx5_roce_addr(gid, attr, in_addr); MLX5_SET(set_roce_address_in, in, roce_address_index, index); MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS); - - memset(out, 0, sizeof(out)); return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out)); } @@ -751,8 +747,7 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port, &props->active_width); if (err) goto out; - err = 
mlx5_query_port_proto_oper(mdev, &props->active_speed, MLX5_PTYS_IB, - port); + err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port); if (err) goto out; diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 372385d0f993..a59034aaa297 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -504,7 +504,7 @@ struct mlx5_ib_mr { int umred; int npages; struct mlx5_ib_dev *dev; - struct mlx5_create_mkey_mbox_out out; + u32 out[MLX5_ST_SZ_DW(create_mkey_out)]; struct mlx5_core_sig_ctx *sig; int live; void *descs_alloc; diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 4b021305c321..6f7e34753abc 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -135,20 +135,10 @@ static void reg_mr_callback(int status, void *context) return; } - if (mr->out.hdr.status) { - mlx5_ib_warn(dev, "failed - status %d, syndorme 0x%x\n", - mr->out.hdr.status, - be32_to_cpu(mr->out.hdr.syndrome)); - kfree(mr); - dev->fill_delay = 1; - mod_timer(&dev->delay_timer, jiffies + HZ); - return; - } - spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags); key = dev->mdev->priv.mkey_key++; spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags); - mr->mmkey.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key; + mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key; cache->last_add = jiffies; @@ -170,16 +160,19 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num) { struct mlx5_mr_cache *cache = &dev->cache; struct mlx5_cache_ent *ent = &cache->ent[c]; - struct mlx5_create_mkey_mbox_in *in; + int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); struct mlx5_ib_mr *mr; int npages = 1 << ent->order; + void *mkc; + u32 *in; int err = 0; int i; - in = kzalloc(sizeof(*in), GFP_KERNEL); + in = kzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); for (i = 0; i < num; i++) { if (ent->pending >= MAX_PENDING_REG_MR) { err = -EAGAIN; @@ -194,18 +187,22 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num) mr->order = ent->order; mr->umred = 1; mr->dev = dev; - in->seg.status = MLX5_MKEY_STATUS_FREE; - in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2); - in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); - in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN; - in->seg.log2_page_size = 12; + + MLX5_SET(mkc, mkc, free, 1); + MLX5_SET(mkc, mkc, umr_en, 1); + MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT); + + MLX5_SET(mkc, mkc, qpn, 0xffffff); + MLX5_SET(mkc, mkc, translations_octword_size, (npages + 1) / 2); + MLX5_SET(mkc, mkc, log_page_size, 12); spin_lock_irq(&ent->lock); ent->pending++; spin_unlock_irq(&ent->lock); - err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, - sizeof(*in), reg_mr_callback, - mr, &mr->out); + err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey, + in, inlen, + mr->out, sizeof(mr->out), + reg_mr_callback, mr); if (err) { spin_lock_irq(&ent->lock); ent->pending--; @@ -670,30 +667,38 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev) struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc) { struct mlx5_ib_dev *dev = to_mdev(pd->device); + int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); struct mlx5_core_dev *mdev = dev->mdev; - struct mlx5_create_mkey_mbox_in *in; - struct mlx5_mkey_seg *seg; struct mlx5_ib_mr *mr; + void *mkc; + u32 *in; int err; mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) return 
ERR_PTR(-ENOMEM); - in = kzalloc(sizeof(*in), GFP_KERNEL); + in = kzalloc(inlen, GFP_KERNEL); if (!in) { err = -ENOMEM; goto err_free; } - seg = &in->seg; - seg->flags = convert_access(acc) | MLX5_ACCESS_MODE_PA; - seg->flags_pd = cpu_to_be32(to_mpd(pd)->pdn | MLX5_MKEY_LEN64); - seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); - seg->start_addr = 0; + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); + + MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA); + MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC)); + MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE)); + MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ)); + MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE)); + MLX5_SET(mkc, mkc, lr, 1); - err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, sizeof(*in), NULL, NULL, - NULL); + MLX5_SET(mkc, mkc, length64, 1); + MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn); + MLX5_SET(mkc, mkc, qpn, 0xffffff); + MLX5_SET64(mkc, mkc, start_addr, 0); + + err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen); if (err) goto err_in; @@ -1063,9 +1068,11 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd, int page_shift, int access_flags) { struct mlx5_ib_dev *dev = to_mdev(pd->device); - struct mlx5_create_mkey_mbox_in *in; struct mlx5_ib_mr *mr; + __be64 *pas; + void *mkc; int inlen; + u32 *in; int err; bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg)); @@ -1073,31 +1080,41 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd, if (!mr) return ERR_PTR(-ENOMEM); - inlen = sizeof(*in) + sizeof(*in->pas) * ((npages + 1) / 2) * 2; + inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + + sizeof(*pas) * ((npages + 1) / 2) * 2; in = mlx5_vzalloc(inlen); if (!in) { err = -ENOMEM; goto err_1; } - mlx5_ib_populate_pas(dev, umem, page_shift, in->pas, + pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt); + mlx5_ib_populate_pas(dev, umem, page_shift, pas, pg_cap ? MLX5_IB_MTT_PRESENT : 0); - /* The MLX5_MKEY_INBOX_PG_ACCESS bit allows setting the access flags + /* The pg_access bit allows setting the access flags * in the page list submitted with the command. */ - in->flags = pg_cap ? 
cpu_to_be32(MLX5_MKEY_INBOX_PG_ACCESS) : 0; - in->seg.flags = convert_access(access_flags) | - MLX5_ACCESS_MODE_MTT; - in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn); - in->seg.start_addr = cpu_to_be64(virt_addr); - in->seg.len = cpu_to_be64(length); - in->seg.bsfs_octo_size = 0; - in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift)); - in->seg.log2_page_size = page_shift; - in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); - in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length, - 1 << page_shift)); - err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen, NULL, - NULL, NULL); + MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap)); + + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); + MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT); + MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC)); + MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE)); + MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ)); + MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE)); + MLX5_SET(mkc, mkc, lr, 1); + + MLX5_SET64(mkc, mkc, start_addr, virt_addr); + MLX5_SET64(mkc, mkc, len, length); + MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn); + MLX5_SET(mkc, mkc, bsf_octword_size, 0); + MLX5_SET(mkc, mkc, translations_octword_size, + get_octo_len(virt_addr, length, 1 << page_shift)); + MLX5_SET(mkc, mkc, log_page_size, page_shift); + MLX5_SET(mkc, mkc, qpn, 0xffffff); + MLX5_SET(create_mkey_in, in, translations_octword_actual_size, + get_octo_len(virt_addr, length, 1 << page_shift)); + + err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen); if (err) { mlx5_ib_warn(dev, "create mkey failed\n"); goto err_2; @@ -1523,30 +1540,32 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, u32 max_num_sg) { struct mlx5_ib_dev *dev = to_mdev(pd->device); - struct mlx5_create_mkey_mbox_in *in; - struct mlx5_ib_mr *mr; + int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); int ndescs = ALIGN(max_num_sg, 4); + struct mlx5_ib_mr *mr; + void *mkc; + u32 *in; int err; mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); - in = kzalloc(sizeof(*in), GFP_KERNEL); + in = kzalloc(inlen, GFP_KERNEL); if (!in) { err = -ENOMEM; goto err_free; } - in->seg.status = MLX5_MKEY_STATUS_FREE; - in->seg.xlt_oct_size = cpu_to_be32(ndescs); - in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); - in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn); + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); + MLX5_SET(mkc, mkc, free, 1); + MLX5_SET(mkc, mkc, translations_octword_size, ndescs); + MLX5_SET(mkc, mkc, qpn, 0xffffff); + MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn); if (mr_type == IB_MR_TYPE_MEM_REG) { - mr->access_mode = MLX5_ACCESS_MODE_MTT; - in->seg.log2_page_size = PAGE_SHIFT; - + mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT; + MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT); err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, sizeof(u64)); if (err) @@ -1555,7 +1574,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, mr->desc_size = sizeof(u64); mr->max_descs = ndescs; } else if (mr_type == IB_MR_TYPE_SG_GAPS) { - mr->access_mode = MLX5_ACCESS_MODE_KLM; + mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS; err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, sizeof(struct mlx5_klm)); @@ -1566,9 +1585,8 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, } else if (mr_type == IB_MR_TYPE_SIGNATURE) { u32 psv_index[2]; - in->seg.flags_pd = cpu_to_be32(be32_to_cpu(in->seg.flags_pd) | - MLX5_MKEY_BSF_EN); - 
in->seg.bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE); + MLX5_SET(mkc, mkc, bsf_en, 1); + MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE); mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL); if (!mr->sig) { err = -ENOMEM; @@ -1581,7 +1599,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, if (err) goto err_free_sig; - mr->access_mode = MLX5_ACCESS_MODE_KLM; + mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS; mr->sig->psv_memory.psv_idx = psv_index[0]; mr->sig->psv_wire.psv_idx = psv_index[1]; @@ -1595,9 +1613,10 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, goto err_free_in; } - in->seg.flags = MLX5_PERM_UMR_EN | mr->access_mode; - err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, sizeof(*in), - NULL, NULL, NULL); + MLX5_SET(mkc, mkc, access_mode, mr->access_mode); + MLX5_SET(mkc, mkc, umr_en, 1); + + err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen); if (err) goto err_destroy_psv; @@ -1633,8 +1652,10 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type, struct ib_udata *udata) { struct mlx5_ib_dev *dev = to_mdev(pd->device); - struct mlx5_create_mkey_mbox_in *in = NULL; + int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); struct mlx5_ib_mw *mw = NULL; + u32 *in = NULL; + void *mkc; int ndescs; int err; struct mlx5_ib_alloc_mw req = {}; @@ -1658,23 +1679,24 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type, ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4); mw = kzalloc(sizeof(*mw), GFP_KERNEL); - in = kzalloc(sizeof(*in), GFP_KERNEL); + in = kzalloc(inlen, GFP_KERNEL); if (!mw || !in) { err = -ENOMEM; goto free; } - in->seg.status = MLX5_MKEY_STATUS_FREE; - in->seg.xlt_oct_size = cpu_to_be32(ndescs); - in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn); - in->seg.flags = MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_KLM | - MLX5_PERM_LOCAL_READ; - if (type == IB_MW_TYPE_2) - in->seg.flags_pd |= cpu_to_be32(MLX5_MKEY_REMOTE_INVAL); - in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); - - err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, sizeof(*in), - NULL, NULL, NULL); + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); + + MLX5_SET(mkc, mkc, free, 1); + MLX5_SET(mkc, mkc, translations_octword_size, ndescs); + MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn); + MLX5_SET(mkc, mkc, umr_en, 1); + MLX5_SET(mkc, mkc, lr, 1); + MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_KLMS); + MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2))); + MLX5_SET(mkc, mkc, qpn, 0xffffff); + + err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen); if (err) goto free; @@ -1811,7 +1833,7 @@ int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, mr->desc_size * mr->max_descs, DMA_TO_DEVICE); - if (mr->access_mode == MLX5_ACCESS_MODE_KLM) + if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS) n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset); else n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 0dd7d93cac95..f3c943f6458e 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -726,7 +726,7 @@ err_umem: static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, struct mlx5_ib_qp *qp, struct ib_udata *udata, struct ib_qp_init_attr *attr, - struct mlx5_create_qp_mbox_in **in, + u32 **in, struct mlx5_ib_create_qp_resp *resp, int *inlen, struct mlx5_ib_qp_base *base) { @@ -739,6 +739,8 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, u32 offset 
= 0; int uuarn; int ncont = 0; + __be64 *pas; + void *qpc; int err; err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)); @@ -795,20 +797,24 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, ubuffer->umem = NULL; } - *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont; + *inlen = MLX5_ST_SZ_BYTES(create_qp_in) + + MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont; *in = mlx5_vzalloc(*inlen); if (!*in) { err = -ENOMEM; goto err_umem; } + + pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas); if (ubuffer->umem) - mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift, - (*in)->pas, 0); - (*in)->ctx.log_pg_sz_remote_qpn = - cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24); - (*in)->ctx.params2 = cpu_to_be32(offset << 6); + mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift, pas, 0); + + qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc); - (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index); + MLX5_SET(qpc, qpc, log_page_size, page_shift - MLX5_ADAPTER_PAGE_SHIFT); + MLX5_SET(qpc, qpc, page_offset, offset); + + MLX5_SET(qpc, qpc, uar_page, uar_index); resp->uuar_index = uuarn; qp->uuarn = uuarn; @@ -857,12 +863,13 @@ static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp, static int create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *init_attr, struct mlx5_ib_qp *qp, - struct mlx5_create_qp_mbox_in **in, int *inlen, + u32 **in, int *inlen, struct mlx5_ib_qp_base *base) { enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW; struct mlx5_uuar_info *uuari; int uar_index; + void *qpc; int uuarn; int err; @@ -902,25 +909,29 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, } qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt); - *inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages; + *inlen = MLX5_ST_SZ_BYTES(create_qp_in) + + MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages; *in = mlx5_vzalloc(*inlen); if (!*in) { err = -ENOMEM; goto err_buf; } - (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index); - (*in)->ctx.log_pg_sz_remote_qpn = - cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24); + + qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc); + MLX5_SET(qpc, qpc, uar_page, uar_index); + MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); + /* Set "fast registration enabled" for all kernel QPs */ - (*in)->ctx.params1 |= cpu_to_be32(1 << 11); - (*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4); + MLX5_SET(qpc, qpc, fre, 1); + MLX5_SET(qpc, qpc, rlky, 1); if (init_attr->create_flags & mlx5_ib_create_qp_sqpn_qp1()) { - (*in)->ctx.deth_sqpn = cpu_to_be32(1); + MLX5_SET(qpc, qpc, deth_sqpn, 1); qp->flags |= MLX5_IB_QP_SQPN_QP1; } - mlx5_fill_page_array(&qp->buf, (*in)->pas); + mlx5_fill_page_array(&qp->buf, + (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas)); err = mlx5_db_alloc(dev->mdev, &qp->db); if (err) { @@ -974,15 +985,15 @@ static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn); } -static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr) +static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr) { if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) || (attr->qp_type == IB_QPT_XRC_INI)) - return cpu_to_be32(MLX5_SRQ_RQ); + return MLX5_SRQ_RQ; else if (!qp->has_rq) - return cpu_to_be32(MLX5_ZERO_LEN_RQ); + return MLX5_ZERO_LEN_RQ; else - return cpu_to_be32(MLX5_NON_ZERO_RQ); + return MLX5_NON_ZERO_RQ; } static int is_connected(enum ib_qp_type qp_type) @@ -996,13 +1007,10 
@@ static int is_connected(enum ib_qp_type qp_type) static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev, struct mlx5_ib_sq *sq, u32 tdn) { - u32 in[MLX5_ST_SZ_DW(create_tis_in)]; + u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0}; void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); - memset(in, 0, sizeof(in)); - MLX5_SET(tisc, tisc, transport_domain, tdn); - return mlx5_core_create_tis(dev->mdev, in, sizeof(in), &sq->tisn); } @@ -1191,7 +1199,7 @@ static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev, } static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, - struct mlx5_create_qp_mbox_in *in, + u32 *in, struct ib_pd *pd) { struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; @@ -1461,18 +1469,18 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, struct ib_udata *udata, struct mlx5_ib_qp *qp) { struct mlx5_ib_resources *devr = &dev->devr; + int inlen = MLX5_ST_SZ_BYTES(create_qp_in); struct mlx5_core_dev *mdev = dev->mdev; - struct mlx5_ib_qp_base *base; struct mlx5_ib_create_qp_resp resp; - struct mlx5_create_qp_mbox_in *in; - struct mlx5_ib_create_qp ucmd; struct mlx5_ib_cq *send_cq; struct mlx5_ib_cq *recv_cq; unsigned long flags; - int inlen = sizeof(*in); - int err; u32 uidx = MLX5_IB_DEFAULT_UIDX; + struct mlx5_ib_create_qp ucmd; + struct mlx5_ib_qp_base *base; void *qpc; + u32 *in; + int err; base = init_attr->qp_type == IB_QPT_RAW_PACKET ? &qp->raw_packet_qp.rq.base : @@ -1600,7 +1608,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, if (err) return err; } else { - in = mlx5_vzalloc(sizeof(*in)); + in = mlx5_vzalloc(inlen); if (!in) return -ENOMEM; @@ -1610,26 +1618,29 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, if (is_sqp(init_attr->qp_type)) qp->port = init_attr->port_num; - in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 | - MLX5_QP_PM_MIGRATED << 11); + qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); + + MLX5_SET(qpc, qpc, st, to_mlx5_st(init_attr->qp_type)); + MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR) - in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn); + MLX5_SET(qpc, qpc, pd, to_mpd(pd ? 
pd : devr->p0)->pdn); else - in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE); + MLX5_SET(qpc, qpc, latency_sensitive, 1); + if (qp->wq_sig) - in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG); + MLX5_SET(qpc, qpc, wq_signature, 1); if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK) - in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST); + MLX5_SET(qpc, qpc, block_lb_mc, 1); if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL) - in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_MASTER); + MLX5_SET(qpc, qpc, cd_master, 1); if (qp->flags & MLX5_IB_QP_MANAGED_SEND) - in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_SLAVE_SEND); + MLX5_SET(qpc, qpc, cd_slave_send, 1); if (qp->flags & MLX5_IB_QP_MANAGED_RECV) - in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_SLAVE_RECV); + MLX5_SET(qpc, qpc, cd_slave_receive, 1); if (qp->scat_cqe && is_connected(init_attr->qp_type)) { int rcqe_sz; @@ -1639,71 +1650,68 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq); if (rcqe_sz == 128) - in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE; + MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE); else - in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE; + MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE); if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) { if (scqe_sz == 128) - in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE; + MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE); else - in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE; + MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE); } } if (qp->rq.wqe_cnt) { - in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4); - in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3; + MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4); + MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt)); } - in->ctx.rq_type_srqn = get_rx_type(qp, init_attr); + MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr)); if (qp->sq.wqe_cnt) - in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11); + MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt)); else - in->ctx.sq_crq_size |= cpu_to_be16(0x8000); + MLX5_SET(qpc, qpc, no_sq, 1); /* Set default resources */ switch (init_attr->qp_type) { case IB_QPT_XRC_TGT: - in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn); - in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn); - in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn); - in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn); + MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn); + MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn); + MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn); + MLX5_SET(qpc, qpc, xrcd, to_mxrcd(init_attr->xrcd)->xrcdn); break; case IB_QPT_XRC_INI: - in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn); - in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn); - in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn); + MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn); + MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn); + MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn); break; default: if (init_attr->srq) { - in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn); - in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn); + MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x0)->xrcdn); + MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(init_attr->srq)->msrq.srqn); } else { - in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn); - in->ctx.rq_type_srqn |= - cpu_to_be32(to_msrq(devr->s1)->msrq.srqn); + MLX5_SET(qpc, 
qpc, xrcd, to_mxrcd(devr->x1)->xrcdn); + MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s1)->msrq.srqn); } } if (init_attr->send_cq) - in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn); + MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn); if (init_attr->recv_cq) - in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn); + MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(init_attr->recv_cq)->mcq.cqn); - in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma); + MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma); - if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) { - qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); - /* 0xffffff means we ask to work with cqe version 0 */ + /* 0xffffff means we ask to work with cqe version 0 */ + if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) MLX5_SET(qpc, qpc, user_index, uidx); - } + /* we use IB_QP_CREATE_IPOIB_UD_LSO to indicates ipoib qp */ if (init_attr->qp_type == IB_QPT_UD && (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)) { - qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1); qp->flags |= MLX5_IB_QP_LSO; } @@ -1860,7 +1868,6 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) { struct mlx5_ib_cq *send_cq, *recv_cq; struct mlx5_ib_qp_base *base = &qp->trans_qp.base; - struct mlx5_modify_qp_mbox_in *in; unsigned long flags; int err; @@ -1873,16 +1880,12 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) &qp->raw_packet_qp.rq.base : &qp->trans_qp.base; - in = kzalloc(sizeof(*in), GFP_KERNEL); - if (!in) - return; - if (qp->state != IB_QPS_RESET) { if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET) { mlx5_ib_qp_disable_pagefaults(qp); err = mlx5_core_qp_modify(dev->mdev, - MLX5_CMD_OP_2RST_QP, in, 0, - &base->mqp); + MLX5_CMD_OP_2RST_QP, 0, + NULL, &base->mqp); } else { err = modify_raw_packet_qp(dev, qp, MLX5_CMD_OP_2RST_QP); @@ -1924,8 +1927,6 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) base->mqp.qpn); } - kfree(in); - if (qp->create_type == MLX5_QP_KERNEL) destroy_qp_kernel(dev, qp); else if (qp->create_type == MLX5_QP_USER) @@ -2511,7 +2512,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, struct mlx5_ib_qp_base *base = &qp->trans_qp.base; struct mlx5_ib_cq *send_cq, *recv_cq; struct mlx5_qp_context *context; - struct mlx5_modify_qp_mbox_in *in; struct mlx5_ib_pd *pd; enum mlx5_qp_state mlx5_cur, mlx5_new; enum mlx5_qp_optpar optpar; @@ -2520,11 +2520,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, int err; u16 op; - in = kzalloc(sizeof(*in), GFP_KERNEL); - if (!in) + context = kzalloc(sizeof(*context), GFP_KERNEL); + if (!context) return -ENOMEM; - context = &in->ctx; err = to_mlx5_st(ibqp->qp_type); if (err < 0) { mlx5_ib_dbg(dev, "unsupported qp type %d\n", ibqp->qp_type); @@ -2689,12 +2688,11 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, op = optab[mlx5_cur][mlx5_new]; optpar = ib_mask_to_mlx5_opt(attr_mask); optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st]; - in->optparam = cpu_to_be32(optpar); if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) err = modify_raw_packet_qp(dev, qp, op); else - err = mlx5_core_qp_modify(dev->mdev, op, in, sqd_event, + err = mlx5_core_qp_modify(dev->mdev, op, optpar, context, &base->mqp); if (err) goto out; @@ -2735,7 +2733,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, } out: - kfree(in); + kfree(context); return err; } @@ -2968,7 +2966,7 @@ static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr, memset(umr, 0, 
sizeof(*umr)); - if (mr->access_mode == MLX5_ACCESS_MODE_KLM) + if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS) /* KLMs take twice the size of MTTs */ ndescs *= 2; @@ -3111,9 +3109,9 @@ static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg, memset(seg, 0, sizeof(*seg)); - if (mr->access_mode == MLX5_ACCESS_MODE_MTT) + if (mr->access_mode == MLX5_MKC_ACCESS_MODE_MTT) seg->log2_page_size = ilog2(mr->ibmr.page_size); - else if (mr->access_mode == MLX5_ACCESS_MODE_KLM) + else if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS) /* KLMs take twice the size of MTTs */ ndescs *= 2; @@ -3454,7 +3452,7 @@ static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg, memset(seg, 0, sizeof(*seg)); seg->flags = get_umr_flags(wr->access_flags) | - MLX5_ACCESS_MODE_KLM; + MLX5_MKC_ACCESS_MODE_KLMS; seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00); seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 | MLX5_MKEY_BSF_EN | pdn); @@ -4320,21 +4318,24 @@ static int query_raw_packet_qp_state(struct mlx5_ib_dev *dev, static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, struct ib_qp_attr *qp_attr) { - struct mlx5_query_qp_mbox_out *outb; + int outlen = MLX5_ST_SZ_BYTES(query_qp_out); struct mlx5_qp_context *context; int mlx5_state; + u32 *outb; int err = 0; - outb = kzalloc(sizeof(*outb), GFP_KERNEL); + outb = kzalloc(outlen, GFP_KERNEL); if (!outb) return -ENOMEM; - context = &outb->ctx; err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb, - sizeof(*outb)); + outlen); if (err) goto out; + /* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */ + context = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, outb, qpc); + mlx5_state = be32_to_cpu(context->flags) >> 28; qp->state = to_ib_qp_state(mlx5_state); diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 71f0e791355b..b3d02759c226 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -600,7 +600,6 @@ static int ems_usb_start(struct ems_usb *dev) /* create a URB, and a buffer for it */ urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { - netdev_err(netdev, "No memory left for URBs\n"); err = -ENOMEM; break; } @@ -752,10 +751,8 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne /* create a URB, and a buffer for it, and copy the data to the URB */ urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!urb) { - netdev_err(netdev, "No memory left for URBs\n"); + if (!urb) goto nomem; - } buf = usb_alloc_coherent(dev->udev, size, GFP_ATOMIC, &urb->transfer_dma); if (!buf) { @@ -1007,10 +1004,8 @@ static int ems_usb_probe(struct usb_interface *intf, dev->tx_contexts[i].echo_index = MAX_TX_URBS; dev->intr_urb = usb_alloc_urb(0, GFP_KERNEL); - if (!dev->intr_urb) { - dev_err(&intf->dev, "Couldn't alloc intr URB\n"); + if (!dev->intr_urb) goto cleanup_candev; - } dev->intr_in_buffer = kzalloc(INTR_IN_BUFFER_SIZE, GFP_KERNEL); if (!dev->intr_in_buffer) diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index 784a9002fbb9..be928ce62d32 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -558,8 +558,6 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev) /* create a URB, and a buffer for it */ urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { - dev_warn(dev->udev->dev.parent, - "No memory left for URBs\n"); err = -ENOMEM; break; } @@ -730,7 +728,6 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb, /* create a URB, and a buffer for it, and copy the data to 
the URB */ urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { - netdev_err(netdev, "No memory left for URBs\n"); stats->tx_dropped++; dev_kfree_skb(skb); goto nourbmem; diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 6f0cbc38782e..77e3cc06a30c 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -493,10 +493,8 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, /* create a URB, and a buffer for it */ urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!urb) { - netdev_err(netdev, "No memory left for URB\n"); + if (!urb) goto nomem_urb; - } hf = usb_alloc_coherent(dev->udev, sizeof(*hf), GFP_ATOMIC, &urb->transfer_dma); @@ -600,11 +598,8 @@ static int gs_can_open(struct net_device *netdev) /* alloc rx urb */ urb = usb_alloc_urb(0, GFP_KERNEL); - if (!urb) { - netdev_err(netdev, - "No memory left for URB\n"); + if (!urb) return -ENOMEM; - } /* alloc rx buffer */ buf = usb_alloc_coherent(dev->udev, diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 6f1f3b675ff5..d51e0c401b48 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -787,10 +787,8 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, int err; urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!urb) { - netdev_err(netdev, "No memory left for URBs\n"); + if (!urb) return -ENOMEM; - } buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC); if (!buf) { @@ -1393,8 +1391,6 @@ static int kvaser_usb_setup_rx_urbs(struct kvaser_usb *dev) urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { - dev_warn(dev->udev->dev.parent, - "No memory left for URBs\n"); err = -ENOMEM; break; } @@ -1670,7 +1666,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { - netdev_err(netdev, "No memory left for URBs\n"); stats->tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index bfb91d8fa460..c06382cdfdfe 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -399,7 +399,6 @@ static int peak_usb_start(struct peak_usb_device *dev) /* create a URB, and a buffer for it, to receive usb messages */ urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { - netdev_err(netdev, "No memory left for URBs\n"); err = -ENOMEM; break; } @@ -454,7 +453,6 @@ static int peak_usb_start(struct peak_usb_device *dev) /* create a URB and a buffer for it, to transmit usb messages */ urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { - netdev_err(netdev, "No memory left for URBs\n"); err = -ENOMEM; break; } @@ -651,10 +649,8 @@ static int peak_usb_restart(struct peak_usb_device *dev) /* first allocate a urb to handle the asynchronous steps */ urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!urb) { - netdev_err(dev->netdev, "no memory left for urb\n"); + if (!urb) return -ENOMEM; - } /* also allocate enough space for the commands to send */ buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_ATOMIC); diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index a731720f1d13..108a30e15097 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -623,10 +623,8 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb, /* create a URB, and a buffer for it, and copy the data to the URB */ urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!urb) { - netdev_err(netdev, "No memory left for URBs\n"); + if (!urb) goto nomem; - } 
buf = usb_alloc_coherent(priv->udev, size, GFP_ATOMIC, &urb->transfer_dma); @@ -748,7 +746,6 @@ static int usb_8dev_start(struct usb_8dev_priv *priv) /* create a URB, and a buffer for it */ urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { - netdev_err(netdev, "No memory left for URBs\n"); err = -ENOMEM; break; } diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 8f4544394f44..de6d04429a70 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -16,6 +16,7 @@ config NET_DSA_BCM_SF2 select FIXED_PHY select BCM7XXX_PHY select MDIO_BCM_UNIMAC + select B53 ---help--- This enables support for the Broadcom Starfighter 2 Ethernet switch chips. diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index bda37d336736..1299104a87d4 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -167,6 +167,65 @@ static const struct b53_mib_desc b53_mibs[] = { #define B53_MIBS_SIZE ARRAY_SIZE(b53_mibs) +static const struct b53_mib_desc b53_mibs_58xx[] = { + { 8, 0x00, "TxOctets" }, + { 4, 0x08, "TxDropPkts" }, + { 4, 0x0c, "TxQPKTQ0" }, + { 4, 0x10, "TxBroadcastPkts" }, + { 4, 0x14, "TxMulticastPkts" }, + { 4, 0x18, "TxUnicastPKts" }, + { 4, 0x1c, "TxCollisions" }, + { 4, 0x20, "TxSingleCollision" }, + { 4, 0x24, "TxMultipleCollision" }, + { 4, 0x28, "TxDeferredCollision" }, + { 4, 0x2c, "TxLateCollision" }, + { 4, 0x30, "TxExcessiveCollision" }, + { 4, 0x34, "TxFrameInDisc" }, + { 4, 0x38, "TxPausePkts" }, + { 4, 0x3c, "TxQPKTQ1" }, + { 4, 0x40, "TxQPKTQ2" }, + { 4, 0x44, "TxQPKTQ3" }, + { 4, 0x48, "TxQPKTQ4" }, + { 4, 0x4c, "TxQPKTQ5" }, + { 8, 0x50, "RxOctets" }, + { 4, 0x58, "RxUndersizePkts" }, + { 4, 0x5c, "RxPausePkts" }, + { 4, 0x60, "RxPkts64Octets" }, + { 4, 0x64, "RxPkts65to127Octets" }, + { 4, 0x68, "RxPkts128to255Octets" }, + { 4, 0x6c, "RxPkts256to511Octets" }, + { 4, 0x70, "RxPkts512to1023Octets" }, + { 4, 0x74, "RxPkts1024toMaxPktsOctets" }, + { 4, 0x78, "RxOversizePkts" }, + { 4, 0x7c, "RxJabbers" }, + { 4, 0x80, "RxAlignmentErrors" }, + { 4, 0x84, "RxFCSErrors" }, + { 8, 0x88, "RxGoodOctets" }, + { 4, 0x90, "RxDropPkts" }, + { 4, 0x94, "RxUnicastPkts" }, + { 4, 0x98, "RxMulticastPkts" }, + { 4, 0x9c, "RxBroadcastPkts" }, + { 4, 0xa0, "RxSAChanges" }, + { 4, 0xa4, "RxFragments" }, + { 4, 0xa8, "RxJumboPkt" }, + { 4, 0xac, "RxSymblErr" }, + { 4, 0xb0, "InRangeErrCount" }, + { 4, 0xb4, "OutRangeErrCount" }, + { 4, 0xb8, "EEELpiEvent" }, + { 4, 0xbc, "EEELpiDuration" }, + { 4, 0xc0, "RxDiscard" }, + { 4, 0xc8, "TxQPKTQ6" }, + { 4, 0xcc, "TxQPKTQ7" }, + { 4, 0xd0, "TxPkts64Octets" }, + { 4, 0xd4, "TxPkts65to127Octets" }, + { 4, 0xd8, "TxPkts128to255Octets" }, + { 4, 0xdc, "TxPkts256to511Ocets" }, + { 4, 0xe0, "TxPkts512to1023Ocets" }, + { 4, 0xe4, "TxPkts1024toMaxPktOcets" }, +}; + +#define B53_MIBS_58XX_SIZE ARRAY_SIZE(b53_mibs_58xx) + static int b53_do_vlan_op(struct b53_device *dev, u8 op) { unsigned int i; @@ -635,6 +694,8 @@ static const struct b53_mib_desc *b53_get_mib(struct b53_device *dev) return b53_mibs_65; else if (is63xx(dev)) return b53_mibs_63xx; + else if (is58xx(dev)) + return b53_mibs_58xx; else return b53_mibs; } @@ -645,6 +706,8 @@ static unsigned int b53_get_mib_size(struct b53_device *dev) return B53_MIBS_65_SIZE; else if (is63xx(dev)) return B53_MIBS_63XX_SIZE; + else if (is58xx(dev)) + return B53_MIBS_58XX_SIZE; else return B53_MIBS_SIZE; } @@ -1252,9 +1315,21 @@ static int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge) { struct b53_device *dev = 
ds_to_priv(ds); + s8 cpu_port = ds->dst->cpu_port; u16 pvlan, reg; unsigned int i; + /* Make this port leave the all VLANs join since we will have proper + * VLAN entries from now on + */ + if (is58xx(dev)) { + b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg); + reg &= ~BIT(port); + if ((reg & BIT(cpu_port)) == BIT(cpu_port)) + reg &= ~BIT(cpu_port); + b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg); + } + dev->ports[port].bridge_dev = bridge; b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); @@ -1287,6 +1362,7 @@ static void b53_br_leave(struct dsa_switch *ds, int port) struct b53_device *dev = ds_to_priv(ds); struct net_device *bridge = dev->ports[port].bridge_dev; struct b53_vlan *vl = &dev->vlans[0]; + s8 cpu_port = ds->dst->cpu_port; unsigned int i; u16 pvlan, reg, pvid; @@ -1316,10 +1392,19 @@ static void b53_br_leave(struct dsa_switch *ds, int port) else pvid = 0; - b53_get_vlan_entry(dev, pvid, vl); - vl->members |= BIT(port) | BIT(dev->cpu_port); - vl->untag |= BIT(port) | BIT(dev->cpu_port); - b53_set_vlan_entry(dev, pvid, vl); + /* Make this port join all VLANs without VLAN entries */ + if (is58xx(dev)) { + b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg); + reg |= BIT(port); + if (!(reg & BIT(cpu_port))) + reg |= BIT(cpu_port); + b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg); + } else { + b53_get_vlan_entry(dev, pvid, vl); + vl->members |= BIT(port) | BIT(dev->cpu_port); + vl->untag |= BIT(port) | BIT(dev->cpu_port); + b53_set_vlan_entry(dev, pvid, vl); + } } static void b53_br_set_stp_state(struct dsa_switch *ds, int port, @@ -1373,8 +1458,13 @@ static void b53_br_set_stp_state(struct dsa_switch *ds, int port, b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg); } -static struct dsa_switch_driver b53_switch_ops = { - .tag_protocol = DSA_TAG_PROTO_NONE, +static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds) +{ + return DSA_TAG_PROTO_NONE; +} + +static struct dsa_switch_ops b53_switch_ops = { + .get_tag_protocol = b53_get_tag_protocol, .setup = b53_setup, .set_addr = b53_set_addr, .get_strings = b53_get_strings, @@ -1593,11 +1683,22 @@ static const struct b53_chip_data b53_switch_chips[] = { .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, }, + { + .chip_id = BCM7445_DEVICE_ID, + .dev_name = "BCM7445", + .vlans = 4096, + .enabled_ports = 0x1ff, + .arl_entries = 4, + .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, }; static int b53_switch_init(struct b53_device *dev) { - struct dsa_switch *ds = dev->ds; unsigned int i; int ret; @@ -1613,7 +1714,6 @@ static int b53_switch_init(struct b53_device *dev) dev->vta_regs[1] = chip->vta_regs[1]; dev->vta_regs[2] = chip->vta_regs[2]; dev->jumbo_pm_reg = chip->jumbo_pm_reg; - ds->drv = &b53_switch_ops; dev->cpu_port = chip->cpu_port; dev->num_vlans = chip->vlans; dev->num_arl_entries = chip->arl_entries; @@ -1681,7 +1781,8 @@ static int b53_switch_init(struct b53_device *dev) return 0; } -struct b53_device *b53_switch_alloc(struct device *base, struct b53_io_ops *ops, +struct b53_device *b53_switch_alloc(struct device *base, + const struct b53_io_ops *ops, void *priv) { struct dsa_switch *ds; @@ -1700,6 +1801,7 @@ struct b53_device *b53_switch_alloc(struct device *base, struct b53_io_ops *ops, dev->ds = ds; dev->priv = priv; dev->ops = ops; + ds->ops = &b53_switch_ops; mutex_init(&dev->reg_mutex); 
mutex_init(&dev->stats_mutex); diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c index aa87c3fffdac..477a16b5660a 100644 --- a/drivers/net/dsa/b53/b53_mdio.c +++ b/drivers/net/dsa/b53/b53_mdio.c @@ -267,7 +267,7 @@ static int b53_mdio_phy_write16(struct b53_device *dev, int addr, int reg, return mdiobus_write_nested(bus, addr, reg, value); } -static struct b53_io_ops b53_mdio_ops = { +static const struct b53_io_ops b53_mdio_ops = { .read8 = b53_mdio_read8, .read16 = b53_mdio_read16, .read32 = b53_mdio_read32, diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c index 77ffc4312808..cc9e6bd83e0e 100644 --- a/drivers/net/dsa/b53/b53_mmap.c +++ b/drivers/net/dsa/b53/b53_mmap.c @@ -208,7 +208,7 @@ static int b53_mmap_write64(struct b53_device *dev, u8 page, u8 reg, return 0; } -static struct b53_io_ops b53_mmap_ops = { +static const struct b53_io_ops b53_mmap_ops = { .read8 = b53_mmap_read8, .read16 = b53_mmap_read16, .read32 = b53_mmap_read32, diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 835a744f206e..76672dae412d 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -60,6 +60,7 @@ enum { BCM53018_DEVICE_ID = 0x53018, BCM53019_DEVICE_ID = 0x53019, BCM58XX_DEVICE_ID = 0x5800, + BCM7445_DEVICE_ID = 0x7445, }; #define B53_N_PORTS 9 @@ -174,6 +175,12 @@ static inline int is5301x(struct b53_device *dev) dev->chip_id == BCM53019_DEVICE_ID; } +static inline int is58xx(struct b53_device *dev) +{ + return dev->chip_id == BCM58XX_DEVICE_ID || + dev->chip_id == BCM7445_DEVICE_ID; +} + #define B53_CPU_PORT_25 5 #define B53_CPU_PORT 8 @@ -182,7 +189,8 @@ static inline int is_cpu_port(struct b53_device *dev, int port) return dev->cpu_port; } -struct b53_device *b53_switch_alloc(struct device *base, struct b53_io_ops *ops, +struct b53_device *b53_switch_alloc(struct device *base, + const struct b53_io_ops *ops, void *priv); int b53_switch_detect(struct b53_device *dev); diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h index a0b453ea34c9..dac0af4e2cd0 100644 --- a/drivers/net/dsa/b53/b53_regs.h +++ b/drivers/net/dsa/b53/b53_regs.h @@ -309,6 +309,9 @@ /* Port VLAN mask (16 bit) IMP port is always 8, also on 5325 & co */ #define B53_PVLAN_PORT_MASK(i) ((i) * 2) +/* Join all VLANs register (16 bit) */ +#define B53_JOIN_ALL_VLAN_EN 0x50 + /************************************************************************* * 802.1Q Page Registers *************************************************************************/ diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c index 2bda0b5f1578..f89f5308a99b 100644 --- a/drivers/net/dsa/b53/b53_spi.c +++ b/drivers/net/dsa/b53/b53_spi.c @@ -270,7 +270,7 @@ static int b53_spi_write64(struct b53_device *dev, u8 page, u8 reg, u64 value) return spi_write(spi, txbuf, sizeof(txbuf)); } -static struct b53_io_ops b53_spi_ops = { +static const struct b53_io_ops b53_spi_ops = { .read8 = b53_spi_read8, .read16 = b53_spi_read16, .read32 = b53_spi_read32, @@ -317,8 +317,6 @@ static int b53_spi_remove(struct spi_device *spi) static struct spi_driver b53_spi_driver = { .driver = { .name = "b53-switch", - .bus = &spi_bus_type, - .owner = THIS_MODULE, }, .probe = b53_spi_probe, .remove = b53_spi_remove, diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c index 3e2d4a5fcd5a..8a62b6a69703 100644 --- a/drivers/net/dsa/b53/b53_srab.c +++ b/drivers/net/dsa/b53/b53_srab.c @@ -344,7 +344,7 @@ err: return ret; } 
-static struct b53_io_ops b53_srab_ops = { +static const struct b53_io_ops b53_srab_ops = { .read8 = b53_srab_read8, .read16 = b53_srab_read16, .read32 = b53_srab_read32, diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index b2b838724a9b..51f1fc0dddc5 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -29,130 +29,21 @@ #include <linux/brcmphy.h> #include <linux/etherdevice.h> #include <net/switchdev.h> +#include <linux/platform_data/b53.h> #include "bcm_sf2.h" #include "bcm_sf2_regs.h" +#include "b53/b53_priv.h" +#include "b53/b53_regs.h" -/* String, offset, and register size in bytes if different from 4 bytes */ -static const struct bcm_sf2_hw_stats bcm_sf2_mib[] = { - { "TxOctets", 0x000, 8 }, - { "TxDropPkts", 0x020 }, - { "TxQPKTQ0", 0x030 }, - { "TxBroadcastPkts", 0x040 }, - { "TxMulticastPkts", 0x050 }, - { "TxUnicastPKts", 0x060 }, - { "TxCollisions", 0x070 }, - { "TxSingleCollision", 0x080 }, - { "TxMultipleCollision", 0x090 }, - { "TxDeferredCollision", 0x0a0 }, - { "TxLateCollision", 0x0b0 }, - { "TxExcessiveCollision", 0x0c0 }, - { "TxFrameInDisc", 0x0d0 }, - { "TxPausePkts", 0x0e0 }, - { "TxQPKTQ1", 0x0f0 }, - { "TxQPKTQ2", 0x100 }, - { "TxQPKTQ3", 0x110 }, - { "TxQPKTQ4", 0x120 }, - { "TxQPKTQ5", 0x130 }, - { "RxOctets", 0x140, 8 }, - { "RxUndersizePkts", 0x160 }, - { "RxPausePkts", 0x170 }, - { "RxPkts64Octets", 0x180 }, - { "RxPkts65to127Octets", 0x190 }, - { "RxPkts128to255Octets", 0x1a0 }, - { "RxPkts256to511Octets", 0x1b0 }, - { "RxPkts512to1023Octets", 0x1c0 }, - { "RxPkts1024toMaxPktsOctets", 0x1d0 }, - { "RxOversizePkts", 0x1e0 }, - { "RxJabbers", 0x1f0 }, - { "RxAlignmentErrors", 0x200 }, - { "RxFCSErrors", 0x210 }, - { "RxGoodOctets", 0x220, 8 }, - { "RxDropPkts", 0x240 }, - { "RxUnicastPkts", 0x250 }, - { "RxMulticastPkts", 0x260 }, - { "RxBroadcastPkts", 0x270 }, - { "RxSAChanges", 0x280 }, - { "RxFragments", 0x290 }, - { "RxJumboPkt", 0x2a0 }, - { "RxSymblErr", 0x2b0 }, - { "InRangeErrCount", 0x2c0 }, - { "OutRangeErrCount", 0x2d0 }, - { "EEELpiEvent", 0x2e0 }, - { "EEELpiDuration", 0x2f0 }, - { "RxDiscard", 0x300, 8 }, - { "TxQPKTQ6", 0x320 }, - { "TxQPKTQ7", 0x330 }, - { "TxPkts64Octets", 0x340 }, - { "TxPkts65to127Octets", 0x350 }, - { "TxPkts128to255Octets", 0x360 }, - { "TxPkts256to511Ocets", 0x370 }, - { "TxPkts512to1023Ocets", 0x380 }, - { "TxPkts1024toMaxPktOcets", 0x390 }, -}; - -#define BCM_SF2_STATS_SIZE ARRAY_SIZE(bcm_sf2_mib) - -static void bcm_sf2_sw_get_strings(struct dsa_switch *ds, - int port, uint8_t *data) -{ - unsigned int i; - - for (i = 0; i < BCM_SF2_STATS_SIZE; i++) - memcpy(data + i * ETH_GSTRING_LEN, - bcm_sf2_mib[i].string, ETH_GSTRING_LEN); -} - -static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds, - int port, uint64_t *data) -{ - struct bcm_sf2_priv *priv = ds_to_priv(ds); - const struct bcm_sf2_hw_stats *s; - unsigned int i; - u64 val = 0; - u32 offset; - - mutex_lock(&priv->stats_mutex); - - /* Now fetch the per-port counters */ - for (i = 0; i < BCM_SF2_STATS_SIZE; i++) { - s = &bcm_sf2_mib[i]; - - /* Do a latched 64-bit read if needed */ - offset = s->reg + CORE_P_MIB_OFFSET(port); - if (s->sizeof_stat == 8) - val = core_readq(priv, offset); - else - val = core_readl(priv, offset); - - data[i] = (u64)val; - } - - mutex_unlock(&priv->stats_mutex); -} - -static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds) -{ - return BCM_SF2_STATS_SIZE; -} - -static const char *bcm_sf2_sw_drv_probe(struct device *dsa_dev, - struct device *host_dev, int sw_addr, - void **_priv) +static enum 
dsa_tag_protocol bcm_sf2_sw_get_tag_protocol(struct dsa_switch *ds) { - struct bcm_sf2_priv *priv; - - priv = devm_kzalloc(dsa_dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return NULL; - *_priv = priv; - - return "Broadcom Starfighter 2"; + return DSA_TAG_PROTO_BRCM; } static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); unsigned int i; u32 reg; @@ -172,7 +63,7 @@ static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); u32 reg, val; /* Enable the port memories */ @@ -237,7 +128,7 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); u32 reg; reg = core_readl(priv, CORE_EEE_EN_CTRL); @@ -250,7 +141,7 @@ static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable) static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); u32 reg; reg = reg_readl(priv, REG_SPHY_CNTRL); @@ -324,7 +215,7 @@ static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv, static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, struct phy_device *phy) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); s8 cpu_port = ds->dst[ds->index].cpu_port; u32 reg; @@ -380,7 +271,7 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, static void bcm_sf2_port_disable(struct dsa_switch *ds, int port, struct phy_device *phy) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); u32 off, reg; if (priv->wol_ports_mask & (1 << port)) @@ -412,7 +303,7 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port, static int bcm_sf2_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); struct ethtool_eee *p = &priv->port_sts[port].eee; int ret; @@ -430,7 +321,7 @@ static int bcm_sf2_eee_init(struct dsa_switch *ds, int port, static int bcm_sf2_sw_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); struct ethtool_eee *p = &priv->port_sts[port].eee; u32 reg; @@ -445,7 +336,7 @@ static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port, struct phy_device *phydev, struct ethtool_eee *e) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); struct ethtool_eee *p = &priv->port_sts[port].eee; p->eee_enabled = e->eee_enabled; @@ -461,469 +352,6 @@ static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port, return 0; } -static int bcm_sf2_fast_age_op(struct bcm_sf2_priv *priv) -{ - unsigned int timeout = 1000; - u32 reg; - - reg = core_readl(priv, CORE_FAST_AGE_CTRL); - reg |= EN_AGE_PORT | EN_AGE_VLAN | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE; - core_writel(priv, reg, CORE_FAST_AGE_CTRL); - - do { - reg = core_readl(priv, CORE_FAST_AGE_CTRL); - if (!(reg & FAST_AGE_STR_DONE)) - break; - - cpu_relax(); - } while (timeout--); - - if (!timeout) - 
return -ETIMEDOUT; - - core_writel(priv, 0, CORE_FAST_AGE_CTRL); - - return 0; -} - -/* Fast-ageing of ARL entries for a given port, equivalent to an ARL - * flush for that port. - */ -static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port) -{ - struct bcm_sf2_priv *priv = ds_to_priv(ds); - - core_writel(priv, port, CORE_FAST_AGE_PORT); - - return bcm_sf2_fast_age_op(priv); -} - -static int bcm_sf2_sw_fast_age_vlan(struct bcm_sf2_priv *priv, u16 vid) -{ - core_writel(priv, vid, CORE_FAST_AGE_VID); - - return bcm_sf2_fast_age_op(priv); -} - -static int bcm_sf2_vlan_op_wait(struct bcm_sf2_priv *priv) -{ - unsigned int timeout = 10; - u32 reg; - - do { - reg = core_readl(priv, CORE_ARLA_VTBL_RWCTRL); - if (!(reg & ARLA_VTBL_STDN)) - return 0; - - usleep_range(1000, 2000); - } while (timeout--); - - return -ETIMEDOUT; -} - -static int bcm_sf2_vlan_op(struct bcm_sf2_priv *priv, u8 op) -{ - core_writel(priv, ARLA_VTBL_STDN | op, CORE_ARLA_VTBL_RWCTRL); - - return bcm_sf2_vlan_op_wait(priv); -} - -static void bcm_sf2_set_vlan_entry(struct bcm_sf2_priv *priv, u16 vid, - struct bcm_sf2_vlan *vlan) -{ - int ret; - - core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR); - core_writel(priv, vlan->untag << UNTAG_MAP_SHIFT | vlan->members, - CORE_ARLA_VTBL_ENTRY); - - ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_WRITE); - if (ret) - pr_err("failed to write VLAN entry\n"); -} - -static int bcm_sf2_get_vlan_entry(struct bcm_sf2_priv *priv, u16 vid, - struct bcm_sf2_vlan *vlan) -{ - u32 entry; - int ret; - - core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR); - - ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_READ); - if (ret) - return ret; - - entry = core_readl(priv, CORE_ARLA_VTBL_ENTRY); - vlan->members = entry & FWD_MAP_MASK; - vlan->untag = (entry >> UNTAG_MAP_SHIFT) & UNTAG_MAP_MASK; - - return 0; -} - -static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port, - struct net_device *bridge) -{ - struct bcm_sf2_priv *priv = ds_to_priv(ds); - s8 cpu_port = ds->dst->cpu_port; - unsigned int i; - u32 reg, p_ctl; - - /* Make this port leave the all VLANs join since we will have proper - * VLAN entries from now on - */ - reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN); - reg &= ~BIT(port); - if ((reg & BIT(cpu_port)) == BIT(cpu_port)) - reg &= ~BIT(cpu_port); - core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN); - - priv->port_sts[port].bridge_dev = bridge; - p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port)); - - for (i = 0; i < priv->hw_params.num_ports; i++) { - if (priv->port_sts[i].bridge_dev != bridge) - continue; - - /* Add this local port to the remote port VLAN control - * membership and update the remote port bitmask - */ - reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i)); - reg |= 1 << port; - core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i)); - priv->port_sts[i].vlan_ctl_mask = reg; - - p_ctl |= 1 << i; - } - - /* Configure the local port VLAN control membership to include - * remote ports and update the local port bitmask - */ - core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port)); - priv->port_sts[port].vlan_ctl_mask = p_ctl; - - return 0; -} - -static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port) -{ - struct bcm_sf2_priv *priv = ds_to_priv(ds); - struct net_device *bridge = priv->port_sts[port].bridge_dev; - s8 cpu_port = ds->dst->cpu_port; - unsigned int i; - u32 reg, p_ctl; - - p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port)); - - for (i = 0; i < priv->hw_params.num_ports; i++) { - /* Don't touch the remaining ports */ - if 
(priv->port_sts[i].bridge_dev != bridge) - continue; - - reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i)); - reg &= ~(1 << port); - core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i)); - priv->port_sts[port].vlan_ctl_mask = reg; - - /* Prevent self removal to preserve isolation */ - if (port != i) - p_ctl &= ~(1 << i); - } - - core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port)); - priv->port_sts[port].vlan_ctl_mask = p_ctl; - priv->port_sts[port].bridge_dev = NULL; - - /* Make this port join all VLANs without VLAN entries */ - reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN); - reg |= BIT(port); - if (!(reg & BIT(cpu_port))) - reg |= BIT(cpu_port); - core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN); -} - -static void bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port, - u8 state) -{ - struct bcm_sf2_priv *priv = ds_to_priv(ds); - u8 hw_state, cur_hw_state; - u32 reg; - - reg = core_readl(priv, CORE_G_PCTL_PORT(port)); - cur_hw_state = reg & (G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT); - - switch (state) { - case BR_STATE_DISABLED: - hw_state = G_MISTP_DIS_STATE; - break; - case BR_STATE_LISTENING: - hw_state = G_MISTP_LISTEN_STATE; - break; - case BR_STATE_LEARNING: - hw_state = G_MISTP_LEARN_STATE; - break; - case BR_STATE_FORWARDING: - hw_state = G_MISTP_FWD_STATE; - break; - case BR_STATE_BLOCKING: - hw_state = G_MISTP_BLOCK_STATE; - break; - default: - pr_err("%s: invalid STP state: %d\n", __func__, state); - return; - } - - /* Fast-age ARL entries if we are moving a port from Learning or - * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening - * state (hw_state) - */ - if (cur_hw_state != hw_state) { - if (cur_hw_state >= G_MISTP_LEARN_STATE && - hw_state <= G_MISTP_LISTEN_STATE) { - if (bcm_sf2_sw_fast_age_port(ds, port)) { - pr_err("%s: fast-ageing failed\n", __func__); - return; - } - } - } - - reg = core_readl(priv, CORE_G_PCTL_PORT(port)); - reg &= ~(G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT); - reg |= hw_state; - core_writel(priv, reg, CORE_G_PCTL_PORT(port)); -} - -/* Address Resolution Logic routines */ -static int bcm_sf2_arl_op_wait(struct bcm_sf2_priv *priv) -{ - unsigned int timeout = 10; - u32 reg; - - do { - reg = core_readl(priv, CORE_ARLA_RWCTL); - if (!(reg & ARL_STRTDN)) - return 0; - - usleep_range(1000, 2000); - } while (timeout--); - - return -ETIMEDOUT; -} - -static int bcm_sf2_arl_rw_op(struct bcm_sf2_priv *priv, unsigned int op) -{ - u32 cmd; - - if (op > ARL_RW) - return -EINVAL; - - cmd = core_readl(priv, CORE_ARLA_RWCTL); - cmd &= ~IVL_SVL_SELECT; - cmd |= ARL_STRTDN; - if (op) - cmd |= ARL_RW; - else - cmd &= ~ARL_RW; - core_writel(priv, cmd, CORE_ARLA_RWCTL); - - return bcm_sf2_arl_op_wait(priv); -} - -static int bcm_sf2_arl_read(struct bcm_sf2_priv *priv, u64 mac, - u16 vid, struct bcm_sf2_arl_entry *ent, u8 *idx, - bool is_valid) -{ - unsigned int i; - int ret; - - ret = bcm_sf2_arl_op_wait(priv); - if (ret) - return ret; - - /* Read the 4 bins */ - for (i = 0; i < 4; i++) { - u64 mac_vid; - u32 fwd_entry; - - mac_vid = core_readq(priv, CORE_ARLA_MACVID_ENTRY(i)); - fwd_entry = core_readl(priv, CORE_ARLA_FWD_ENTRY(i)); - bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry); - - if (ent->is_valid && is_valid) { - *idx = i; - return 0; - } - - /* This is the MAC we just deleted */ - if (!is_valid && (mac_vid & mac)) - return 0; - } - - return -ENOENT; -} - -static int bcm_sf2_arl_op(struct bcm_sf2_priv *priv, int op, int port, - const unsigned char *addr, u16 vid, bool is_valid) -{ - struct bcm_sf2_arl_entry ent; - u32 fwd_entry; - u64 
mac, mac_vid = 0; - u8 idx = 0; - int ret; - - /* Convert the array into a 64-bit MAC */ - mac = bcm_sf2_mac_to_u64(addr); - - /* Perform a read for the given MAC and VID */ - core_writeq(priv, mac, CORE_ARLA_MAC); - core_writel(priv, vid, CORE_ARLA_VID); - - /* Issue a read operation for this MAC */ - ret = bcm_sf2_arl_rw_op(priv, 1); - if (ret) - return ret; - - ret = bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid); - /* If this is a read, just finish now */ - if (op) - return ret; - - /* We could not find a matching MAC, so reset to a new entry */ - if (ret) { - fwd_entry = 0; - idx = 0; - } - - memset(&ent, 0, sizeof(ent)); - ent.port = port; - ent.is_valid = is_valid; - ent.vid = vid; - ent.is_static = true; - memcpy(ent.mac, addr, ETH_ALEN); - bcm_sf2_arl_from_entry(&mac_vid, &fwd_entry, &ent); - - core_writeq(priv, mac_vid, CORE_ARLA_MACVID_ENTRY(idx)); - core_writel(priv, fwd_entry, CORE_ARLA_FWD_ENTRY(idx)); - - ret = bcm_sf2_arl_rw_op(priv, 0); - if (ret) - return ret; - - /* Re-read the entry to check */ - return bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid); -} - -static int bcm_sf2_sw_fdb_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) -{ - /* We do not need to do anything specific here yet */ - return 0; -} - -static void bcm_sf2_sw_fdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) -{ - struct bcm_sf2_priv *priv = ds_to_priv(ds); - - if (bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, true)) - pr_err("%s: failed to add MAC address\n", __func__); -} - -static int bcm_sf2_sw_fdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb) -{ - struct bcm_sf2_priv *priv = ds_to_priv(ds); - - return bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, false); -} - -static int bcm_sf2_arl_search_wait(struct bcm_sf2_priv *priv) -{ - unsigned timeout = 1000; - u32 reg; - - do { - reg = core_readl(priv, CORE_ARLA_SRCH_CTL); - if (!(reg & ARLA_SRCH_STDN)) - return 0; - - if (reg & ARLA_SRCH_VLID) - return 0; - - usleep_range(1000, 2000); - } while (timeout--); - - return -ETIMEDOUT; -} - -static void bcm_sf2_arl_search_rd(struct bcm_sf2_priv *priv, u8 idx, - struct bcm_sf2_arl_entry *ent) -{ - u64 mac_vid; - u32 fwd_entry; - - mac_vid = core_readq(priv, CORE_ARLA_SRCH_RSLT_MACVID(idx)); - fwd_entry = core_readl(priv, CORE_ARLA_SRCH_RSLT(idx)); - bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry); -} - -static int bcm_sf2_sw_fdb_copy(struct net_device *dev, int port, - const struct bcm_sf2_arl_entry *ent, - struct switchdev_obj_port_fdb *fdb, - int (*cb)(struct switchdev_obj *obj)) -{ - if (!ent->is_valid) - return 0; - - if (port != ent->port) - return 0; - - ether_addr_copy(fdb->addr, ent->mac); - fdb->vid = ent->vid; - fdb->ndm_state = ent->is_static ? 
NUD_NOARP : NUD_REACHABLE; - - return cb(&fdb->obj); -} - -static int bcm_sf2_sw_fdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_fdb *fdb, - int (*cb)(struct switchdev_obj *obj)) -{ - struct bcm_sf2_priv *priv = ds_to_priv(ds); - struct net_device *dev = ds->ports[port].netdev; - struct bcm_sf2_arl_entry results[2]; - unsigned int count = 0; - int ret; - - /* Start search operation */ - core_writel(priv, ARLA_SRCH_STDN, CORE_ARLA_SRCH_CTL); - - do { - ret = bcm_sf2_arl_search_wait(priv); - if (ret) - return ret; - - /* Read both entries, then return their values back */ - bcm_sf2_arl_search_rd(priv, 0, &results[0]); - ret = bcm_sf2_sw_fdb_copy(dev, port, &results[0], fdb, cb); - if (ret) - return ret; - - bcm_sf2_arl_search_rd(priv, 1, &results[1]); - ret = bcm_sf2_sw_fdb_copy(dev, port, &results[1], fdb, cb); - if (ret) - return ret; - - if (!results[0].is_valid && !results[1].is_valid) - break; - - } while (count++ < CORE_ARLA_NUM_ENTRIES); - - return 0; -} - static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr, int regnum, u16 val) { @@ -1036,12 +464,10 @@ static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv) static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv) { - intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); + intrl2_0_mask_set(priv, 0xffffffff); intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); - intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); - intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); + intrl2_1_mask_set(priv, 0xffffffff); intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); - intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); } static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv, @@ -1082,7 +508,7 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv, static int bcm_sf2_mdio_register(struct dsa_switch *ds) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); struct device_node *dn; static int index; int err; @@ -1146,14 +572,9 @@ static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv) of_node_put(priv->master_mii_dn); } -static int bcm_sf2_sw_set_addr(struct dsa_switch *ds, u8 *addr) -{ - return 0; -} - static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); /* The BCM7xxx PHY driver expects to find the integrated PHY revision * in bits 15:8 and the patch level in bits 7:0 which is exactly what @@ -1166,7 +587,7 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port) static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phydev) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); u32 id_mode_dis = 0, port_mode; const char *str = NULL; u32 reg; @@ -1246,7 +667,7 @@ force_link: static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, struct fixed_phy_status *status) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); u32 duplex, pause; u32 reg; @@ -1298,7 +719,7 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, static int bcm_sf2_sw_suspend(struct dsa_switch *ds) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); unsigned int port; bcm_sf2_intr_disable(priv); @@ -1318,7 +739,7 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds) static int bcm_sf2_sw_resume(struct dsa_switch *ds) { - struct 
bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); unsigned int port; int ret; @@ -1345,7 +766,7 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port, struct ethtool_wolinfo *wol) { struct net_device *p = ds->dst[ds->index].master_netdev; - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); struct ethtool_wolinfo pwol; /* Get the parent device WoL settings */ @@ -1368,7 +789,7 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port, struct ethtool_wolinfo *wol) { struct net_device *p = ds->dst[ds->index].master_netdev; - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); s8 cpu_port = ds->dst[ds->index].cpu_port; struct ethtool_wolinfo pwol; @@ -1393,43 +814,32 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port, return p->ethtool_ops->set_wol(p, wol); } -static void bcm_sf2_enable_vlan(struct bcm_sf2_priv *priv, bool enable) +static int bcm_sf2_vlan_op_wait(struct bcm_sf2_priv *priv) { - u32 mgmt, vc0, vc1, vc4, vc5; + unsigned int timeout = 10; + u32 reg; - mgmt = core_readl(priv, CORE_SWMODE); - vc0 = core_readl(priv, CORE_VLAN_CTRL0); - vc1 = core_readl(priv, CORE_VLAN_CTRL1); - vc4 = core_readl(priv, CORE_VLAN_CTRL4); - vc5 = core_readl(priv, CORE_VLAN_CTRL5); + do { + reg = core_readl(priv, CORE_ARLA_VTBL_RWCTRL); + if (!(reg & ARLA_VTBL_STDN)) + return 0; - mgmt &= ~SW_FWDG_MODE; + usleep_range(1000, 2000); + } while (timeout--); - if (enable) { - vc0 |= VLAN_EN | VLAN_LEARN_MODE_IVL; - vc1 |= EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP; - vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT); - vc4 |= INGR_VID_CHK_DROP; - vc5 |= DROP_VTABLE_MISS | EN_VID_FFF_FWD; - } else { - vc0 &= ~(VLAN_EN | VLAN_LEARN_MODE_IVL); - vc1 &= ~(EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP); - vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT); - vc5 &= ~(DROP_VTABLE_MISS | EN_VID_FFF_FWD); - vc4 |= INGR_VID_CHK_VID_VIOL_IMP; - } + return -ETIMEDOUT; +} + +static int bcm_sf2_vlan_op(struct bcm_sf2_priv *priv, u8 op) +{ + core_writel(priv, ARLA_VTBL_STDN | op, CORE_ARLA_VTBL_RWCTRL); - core_writel(priv, vc0, CORE_VLAN_CTRL0); - core_writel(priv, vc1, CORE_VLAN_CTRL1); - core_writel(priv, 0, CORE_VLAN_CTRL3); - core_writel(priv, vc4, CORE_VLAN_CTRL4); - core_writel(priv, vc5, CORE_VLAN_CTRL5); - core_writel(priv, mgmt, CORE_SWMODE); + return bcm_sf2_vlan_op_wait(priv); } static void bcm_sf2_sw_configure_vlan(struct dsa_switch *ds) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); unsigned int port; /* Clear all VLANs */ @@ -1443,162 +853,199 @@ static void bcm_sf2_sw_configure_vlan(struct dsa_switch *ds) } } -static int bcm_sf2_sw_vlan_filtering(struct dsa_switch *ds, int port, - bool vlan_filtering) +static int bcm_sf2_sw_setup(struct dsa_switch *ds) { + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + unsigned int port; + + /* Enable all valid ports and disable those unused */ + for (port = 0; port < priv->hw_params.num_ports; port++) { + /* IMP port receives special treatment */ + if ((1 << port) & ds->enabled_port_mask) + bcm_sf2_port_setup(ds, port, NULL); + else if (dsa_is_cpu_port(ds, port)) + bcm_sf2_imp_setup(ds, port); + else + bcm_sf2_port_disable(ds, port, NULL); + } + + bcm_sf2_sw_configure_vlan(ds); + return 0; } -static int bcm_sf2_sw_vlan_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans) +/* The SWITCH_CORE register space 
is managed by b53 but operates on a page + + * register basis so we need to translate that into an address that the + * bus-glue understands. + */ +#define SF2_PAGE_REG_MKADDR(page, reg) ((page) << 10 | (reg) << 2) + +static int bcm_sf2_core_read8(struct b53_device *dev, u8 page, u8 reg, + u8 *val) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = dev->priv; - bcm_sf2_enable_vlan(priv, true); + *val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg)); return 0; } -static void bcm_sf2_sw_vlan_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans) +static int bcm_sf2_core_read16(struct b53_device *dev, u8 page, u8 reg, + u16 *val) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); - bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; - bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; - s8 cpu_port = ds->dst->cpu_port; - struct bcm_sf2_vlan *vl; - u16 vid; + struct bcm_sf2_priv *priv = dev->priv; - for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { - vl = &priv->vlans[vid]; + *val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg)); - bcm_sf2_get_vlan_entry(priv, vid, vl); + return 0; +} - vl->members |= BIT(port) | BIT(cpu_port); - if (untagged) - vl->untag |= BIT(port) | BIT(cpu_port); - else - vl->untag &= ~(BIT(port) | BIT(cpu_port)); +static int bcm_sf2_core_read32(struct b53_device *dev, u8 page, u8 reg, + u32 *val) +{ + struct bcm_sf2_priv *priv = dev->priv; - bcm_sf2_set_vlan_entry(priv, vid, vl); - bcm_sf2_sw_fast_age_vlan(priv, vid); - } + *val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg)); - if (pvid) { - core_writel(priv, vlan->vid_end, CORE_DEFAULT_1Q_TAG_P(port)); - core_writel(priv, vlan->vid_end, - CORE_DEFAULT_1Q_TAG_P(cpu_port)); - bcm_sf2_sw_fast_age_vlan(priv, vid); - } + return 0; } -static int bcm_sf2_sw_vlan_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan) +static int bcm_sf2_core_read64(struct b53_device *dev, u8 page, u8 reg, + u64 *val) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); - bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; - s8 cpu_port = ds->dst->cpu_port; - struct bcm_sf2_vlan *vl; - u16 vid, pvid; - int ret; + struct bcm_sf2_priv *priv = dev->priv; - pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port)); + *val = core_readq(priv, SF2_PAGE_REG_MKADDR(page, reg)); - for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { - vl = &priv->vlans[vid]; - - ret = bcm_sf2_get_vlan_entry(priv, vid, vl); - if (ret) - return ret; - - vl->members &= ~BIT(port); - if ((vl->members & BIT(cpu_port)) == BIT(cpu_port)) - vl->members = 0; - if (pvid == vid) - pvid = 0; - if (untagged) { - vl->untag &= ~BIT(port); - if ((vl->untag & BIT(port)) == BIT(cpu_port)) - vl->untag = 0; - } + return 0; +} - bcm_sf2_set_vlan_entry(priv, vid, vl); - bcm_sf2_sw_fast_age_vlan(priv, vid); - } +static int bcm_sf2_core_write8(struct b53_device *dev, u8 page, u8 reg, + u8 value) +{ + struct bcm_sf2_priv *priv = dev->priv; - core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(port)); - core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(cpu_port)); - bcm_sf2_sw_fast_age_vlan(priv, vid); + core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg)); return 0; } -static int bcm_sf2_sw_vlan_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_vlan *vlan, - int (*cb)(struct switchdev_obj *obj)) +static int bcm_sf2_core_write16(struct b53_device *dev, u8 page, u8 reg, + u16 value) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); - struct 
bcm_sf2_port_status *p = &priv->port_sts[port]; - struct bcm_sf2_vlan *vl; - u16 vid, pvid; - int err = 0; + struct bcm_sf2_priv *priv = dev->priv; - pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port)); + core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg)); - for (vid = 0; vid < VLAN_N_VID; vid++) { - vl = &priv->vlans[vid]; + return 0; +} - if (!(vl->members & BIT(port))) - continue; +static int bcm_sf2_core_write32(struct b53_device *dev, u8 page, u8 reg, + u32 value) +{ + struct bcm_sf2_priv *priv = dev->priv; - vlan->vid_begin = vlan->vid_end = vid; - vlan->flags = 0; + core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg)); - if (vl->untag & BIT(port)) - vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; - if (p->pvid == vid) - vlan->flags |= BRIDGE_VLAN_INFO_PVID; + return 0; +} - err = cb(&vlan->obj); - if (err) - break; - } +static int bcm_sf2_core_write64(struct b53_device *dev, u8 page, u8 reg, + u64 value) +{ + struct bcm_sf2_priv *priv = dev->priv; - return err; + core_writeq(priv, value, SF2_PAGE_REG_MKADDR(page, reg)); + + return 0; } -static int bcm_sf2_sw_setup(struct dsa_switch *ds) +struct b53_io_ops bcm_sf2_io_ops = { + .read8 = bcm_sf2_core_read8, + .read16 = bcm_sf2_core_read16, + .read32 = bcm_sf2_core_read32, + .read48 = bcm_sf2_core_read64, + .read64 = bcm_sf2_core_read64, + .write8 = bcm_sf2_core_write8, + .write16 = bcm_sf2_core_write16, + .write32 = bcm_sf2_core_write32, + .write48 = bcm_sf2_core_write64, + .write64 = bcm_sf2_core_write64, +}; + +static int bcm_sf2_sw_probe(struct platform_device *pdev) { const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME; - struct bcm_sf2_priv *priv = ds_to_priv(ds); - struct device_node *dn; + struct device_node *dn = pdev->dev.of_node; + struct b53_platform_data *pdata; + struct bcm_sf2_priv *priv; + struct b53_device *dev; + struct dsa_switch *ds; void __iomem **base; - unsigned int port; + struct resource *r; unsigned int i; u32 reg, rev; int ret; + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + dev = b53_switch_alloc(&pdev->dev, &bcm_sf2_io_ops, priv); + if (!dev) + return -ENOMEM; + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + /* Auto-detection using standard registers will not work, so + * provide an indication of what kind of device we are for + * b53_common to work with + */ + pdata->chip_id = BCM7445_DEVICE_ID; + dev->pdata = pdata; + + priv->dev = dev; + ds = dev->ds; + + /* Override the parts that are non-standard wrt. 
normal b53 devices */ + ds->ops->get_tag_protocol = bcm_sf2_sw_get_tag_protocol; + ds->ops->setup = bcm_sf2_sw_setup; + ds->ops->get_phy_flags = bcm_sf2_sw_get_phy_flags; + ds->ops->adjust_link = bcm_sf2_sw_adjust_link; + ds->ops->fixed_link_update = bcm_sf2_sw_fixed_link_update; + ds->ops->suspend = bcm_sf2_sw_suspend; + ds->ops->resume = bcm_sf2_sw_resume; + ds->ops->get_wol = bcm_sf2_sw_get_wol; + ds->ops->set_wol = bcm_sf2_sw_set_wol; + ds->ops->port_enable = bcm_sf2_port_setup; + ds->ops->port_disable = bcm_sf2_port_disable; + ds->ops->get_eee = bcm_sf2_sw_get_eee; + ds->ops->set_eee = bcm_sf2_sw_set_eee; + + /* Avoid having DSA free our slave MDIO bus (checking for + * ds->slave_mii_bus and ds->ops->phy_read being non-NULL) + */ + ds->ops->phy_read = NULL; + + dev_set_drvdata(&pdev->dev, priv); + spin_lock_init(&priv->indir_lock); mutex_init(&priv->stats_mutex); - /* All the interesting properties are at the parent device_node - * level - */ - dn = ds->cd->of_node->parent; - bcm_sf2_identify_ports(priv, ds->cd->of_node); + bcm_sf2_identify_ports(priv, dn->child); priv->irq0 = irq_of_parse_and_map(dn, 0); priv->irq1 = irq_of_parse_and_map(dn, 1); base = &priv->core; for (i = 0; i < BCM_SF2_REGS_NUM; i++) { - *base = of_iomap(dn, i); - if (*base == NULL) { + r = platform_get_resource(pdev, IORESOURCE_MEM, i); + *base = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(*base)) { pr_err("unable to find register: %s\n", reg_names[i]); - ret = -ENOMEM; - goto out_unmap; + return PTR_ERR(*base); } base++; } @@ -1606,30 +1053,30 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds) ret = bcm_sf2_sw_rst(priv); if (ret) { pr_err("unable to software reset switch: %d\n", ret); - goto out_unmap; + return ret; } ret = bcm_sf2_mdio_register(ds); if (ret) { pr_err("failed to register MDIO bus\n"); - goto out_unmap; + return ret; } /* Disable all interrupts and request them */ bcm_sf2_intr_disable(priv); - ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0, - "switch_0", priv); + ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0, + "switch_0", priv); if (ret < 0) { pr_err("failed to request switch_0 IRQ\n"); goto out_mdio; } - ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0, - "switch_1", priv); + ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0, + "switch_1", priv); if (ret < 0) { pr_err("failed to request switch_1 IRQ\n"); - goto out_free_irq0; + goto out_mdio; } /* Reset the MIB counters */ @@ -1649,19 +1096,6 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds) &priv->hw_params.num_gphy)) priv->hw_params.num_gphy = 1; - /* Enable all valid ports and disable those unused */ - for (port = 0; port < priv->hw_params.num_ports; port++) { - /* IMP port receives special treatment */ - if ((1 << port) & ds->enabled_port_mask) - bcm_sf2_port_setup(ds, port, NULL); - else if (dsa_is_cpu_port(ds, port)) - bcm_sf2_imp_setup(ds, port); - else - bcm_sf2_port_disable(ds, port, NULL); - } - - bcm_sf2_sw_configure_vlan(ds); - rev = reg_readl(priv, REG_SWITCH_REVISION); priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) & SWITCH_TOP_REV_MASK; @@ -1670,6 +1104,10 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds) rev = reg_readl(priv, REG_PHY_REVISION); priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK; + ret = b53_switch_register(dev); + if (ret) + goto out_mdio; + pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n", priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff, priv->hw_params.core_rev >> 8, 
priv->hw_params.core_rev & 0xff, @@ -1677,66 +1115,60 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds) return 0; -out_free_irq0: - free_irq(priv->irq0, priv); out_mdio: bcm_sf2_mdio_unregister(priv); -out_unmap: - base = &priv->core; - for (i = 0; i < BCM_SF2_REGS_NUM; i++) { - if (*base) - iounmap(*base); - base++; - } return ret; } -static struct dsa_switch_driver bcm_sf2_switch_driver = { - .tag_protocol = DSA_TAG_PROTO_BRCM, - .probe = bcm_sf2_sw_drv_probe, - .setup = bcm_sf2_sw_setup, - .set_addr = bcm_sf2_sw_set_addr, - .get_phy_flags = bcm_sf2_sw_get_phy_flags, - .get_strings = bcm_sf2_sw_get_strings, - .get_ethtool_stats = bcm_sf2_sw_get_ethtool_stats, - .get_sset_count = bcm_sf2_sw_get_sset_count, - .adjust_link = bcm_sf2_sw_adjust_link, - .fixed_link_update = bcm_sf2_sw_fixed_link_update, - .suspend = bcm_sf2_sw_suspend, - .resume = bcm_sf2_sw_resume, - .get_wol = bcm_sf2_sw_get_wol, - .set_wol = bcm_sf2_sw_set_wol, - .port_enable = bcm_sf2_port_setup, - .port_disable = bcm_sf2_port_disable, - .get_eee = bcm_sf2_sw_get_eee, - .set_eee = bcm_sf2_sw_set_eee, - .port_bridge_join = bcm_sf2_sw_br_join, - .port_bridge_leave = bcm_sf2_sw_br_leave, - .port_stp_state_set = bcm_sf2_sw_br_set_stp_state, - .port_fdb_prepare = bcm_sf2_sw_fdb_prepare, - .port_fdb_add = bcm_sf2_sw_fdb_add, - .port_fdb_del = bcm_sf2_sw_fdb_del, - .port_fdb_dump = bcm_sf2_sw_fdb_dump, - .port_vlan_filtering = bcm_sf2_sw_vlan_filtering, - .port_vlan_prepare = bcm_sf2_sw_vlan_prepare, - .port_vlan_add = bcm_sf2_sw_vlan_add, - .port_vlan_del = bcm_sf2_sw_vlan_del, - .port_vlan_dump = bcm_sf2_sw_vlan_dump, -}; - -static int __init bcm_sf2_init(void) +static int bcm_sf2_sw_remove(struct platform_device *pdev) { - register_switch_driver(&bcm_sf2_switch_driver); + struct bcm_sf2_priv *priv = platform_get_drvdata(pdev); + + /* Disable all ports and interrupts */ + priv->wol_ports_mask = 0; + bcm_sf2_sw_suspend(priv->dev->ds); + dsa_unregister_switch(priv->dev->ds); + bcm_sf2_mdio_unregister(priv); return 0; } -module_init(bcm_sf2_init); -static void __exit bcm_sf2_exit(void) +#ifdef CONFIG_PM_SLEEP +static int bcm_sf2_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct bcm_sf2_priv *priv = platform_get_drvdata(pdev); + + return dsa_switch_suspend(priv->dev->ds); +} + +static int bcm_sf2_resume(struct device *dev) { - unregister_switch_driver(&bcm_sf2_switch_driver); + struct platform_device *pdev = to_platform_device(dev); + struct bcm_sf2_priv *priv = platform_get_drvdata(pdev); + + return dsa_switch_resume(priv->dev->ds); } -module_exit(bcm_sf2_exit); +#endif /* CONFIG_PM_SLEEP */ + +static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops, + bcm_sf2_suspend, bcm_sf2_resume); + +static const struct of_device_id bcm_sf2_of_match[] = { + { .compatible = "brcm,bcm7445-switch-v4.0" }, + { /* sentinel */ }, +}; + +static struct platform_driver bcm_sf2_driver = { + .probe = bcm_sf2_sw_probe, + .remove = bcm_sf2_sw_remove, + .driver = { + .name = "brcm-sf2", + .of_match_table = bcm_sf2_of_match, + .pm = &bcm_sf2_pm_ops, + }, +}; +module_platform_driver(bcm_sf2_driver); MODULE_AUTHOR("Broadcom Corporation"); MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip"); diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index dd446e466699..afe56686b3d7 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -26,6 +26,7 @@ #include <net/dsa.h> #include "bcm_sf2_regs.h" +#include "b53/b53_priv.h" struct bcm_sf2_hw_params { u16 top_rev; @@ -50,71 
+51,9 @@ struct bcm_sf2_port_status { struct ethtool_eee eee; - u32 vlan_ctl_mask; - u16 pvid; - - struct net_device *bridge_dev; -}; - -struct bcm_sf2_arl_entry { - u8 port; - u8 mac[ETH_ALEN]; - u16 vid; - u8 is_valid:1; - u8 is_age:1; - u8 is_static:1; + u16 vlan_ctl_mask; }; -struct bcm_sf2_vlan { - u16 members; - u16 untag; -}; - -static inline void bcm_sf2_mac_from_u64(u64 src, u8 *dst) -{ - unsigned int i; - - for (i = 0; i < ETH_ALEN; i++) - dst[ETH_ALEN - 1 - i] = (src >> (8 * i)) & 0xff; -} - -static inline u64 bcm_sf2_mac_to_u64(const u8 *src) -{ - unsigned int i; - u64 dst = 0; - - for (i = 0; i < ETH_ALEN; i++) - dst |= (u64)src[ETH_ALEN - 1 - i] << (8 * i); - - return dst; -} - -static inline void bcm_sf2_arl_to_entry(struct bcm_sf2_arl_entry *ent, - u64 mac_vid, u32 fwd_entry) -{ - memset(ent, 0, sizeof(*ent)); - ent->port = fwd_entry & PORTID_MASK; - ent->is_valid = !!(fwd_entry & ARL_VALID); - ent->is_age = !!(fwd_entry & ARL_AGE); - ent->is_static = !!(fwd_entry & ARL_STATIC); - bcm_sf2_mac_from_u64(mac_vid, ent->mac); - ent->vid = mac_vid >> VID_SHIFT; -} - -static inline void bcm_sf2_arl_from_entry(u64 *mac_vid, u32 *fwd_entry, - const struct bcm_sf2_arl_entry *ent) -{ - *mac_vid = bcm_sf2_mac_to_u64(ent->mac); - *mac_vid |= (u64)(ent->vid & VID_MASK) << VID_SHIFT; - *fwd_entry = ent->port & PORTID_MASK; - if (ent->is_valid) - *fwd_entry |= ARL_VALID; - if (ent->is_static) - *fwd_entry |= ARL_STATIC; - if (ent->is_age) - *fwd_entry |= ARL_AGE; -} - struct bcm_sf2_priv { /* Base registers, keep those in order with BCM_SF2_REGS_NAME */ void __iomem *core; @@ -134,6 +73,9 @@ struct bcm_sf2_priv { u32 irq1_stat; u32 irq1_mask; + /* Backing b53_device */ + struct b53_device *dev; + /* Mutex protecting access to the MIB counters */ struct mutex stats_mutex; @@ -155,16 +97,14 @@ struct bcm_sf2_priv { struct device_node *master_mii_dn; struct mii_bus *slave_mii_bus; struct mii_bus *master_mii_bus; - - /* Cache of programmed VLANs */ - struct bcm_sf2_vlan vlans[VLAN_N_VID]; }; -struct bcm_sf2_hw_stats { - const char *string; - u16 reg; - u8 sizeof_stat; -}; +static inline struct bcm_sf2_priv *bcm_sf2_to_priv(struct dsa_switch *ds) +{ + struct b53_device *dev = ds_to_priv(ds); + + return dev->priv; +} #define SF2_IO_MACRO(name) \ static inline u32 name##_readl(struct bcm_sf2_priv *priv, u32 off) \ diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h index 9f2a9cb42074..838fe373cd6f 100644 --- a/drivers/net/dsa/bcm_sf2_regs.h +++ b/drivers/net/dsa/bcm_sf2_regs.h @@ -115,14 +115,6 @@ #define RX_BCST_EN (1 << 2) #define RX_MCST_EN (1 << 3) #define RX_UCST_EN (1 << 4) -#define G_MISTP_STATE_SHIFT 5 -#define G_MISTP_NO_STP (0 << G_MISTP_STATE_SHIFT) -#define G_MISTP_DIS_STATE (1 << G_MISTP_STATE_SHIFT) -#define G_MISTP_BLOCK_STATE (2 << G_MISTP_STATE_SHIFT) -#define G_MISTP_LISTEN_STATE (3 << G_MISTP_STATE_SHIFT) -#define G_MISTP_LEARN_STATE (4 << G_MISTP_STATE_SHIFT) -#define G_MISTP_FWD_STATE (5 << G_MISTP_STATE_SHIFT) -#define G_MISTP_STATE_MASK 0x7 #define CORE_SWMODE 0x0002c #define SW_FWDG_MODE (1 << 0) @@ -205,75 +197,11 @@ #define BRCM_HDR_EN_P5 (1 << 1) #define BRCM_HDR_EN_P7 (1 << 2) -#define CORE_BRCM_HDR_CTRL2 0x0828 - -#define CORE_HL_PRTC_CTRL 0x0940 -#define ARP_EN (1 << 0) -#define RARP_EN (1 << 1) -#define DHCP_EN (1 << 2) -#define ICMPV4_EN (1 << 3) -#define ICMPV6_EN (1 << 4) -#define ICMPV6_FWD_MODE (1 << 5) -#define IGMP_DIP_EN (1 << 8) -#define IGMP_RPTLVE_EN (1 << 9) -#define IGMP_RTPLVE_FWD_MODE (1 << 10) -#define IGMP_QRY_EN (1 << 11) 
-#define IGMP_QRY_FWD_MODE (1 << 12) -#define IGMP_UKN_EN (1 << 13) -#define IGMP_UKN_FWD_MODE (1 << 14) -#define MLD_RPTDONE_EN (1 << 15) -#define MLD_RPTDONE_FWD_MODE (1 << 16) -#define MLD_QRY_EN (1 << 17) -#define MLD_QRY_FWD_MODE (1 << 18) - #define CORE_RST_MIB_CNT_EN 0x0950 #define CORE_BRCM_HDR_RX_DIS 0x0980 #define CORE_BRCM_HDR_TX_DIS 0x0988 -#define CORE_ARLA_NUM_ENTRIES 1024 - -#define CORE_ARLA_RWCTL 0x1400 -#define ARL_RW (1 << 0) -#define IVL_SVL_SELECT (1 << 6) -#define ARL_STRTDN (1 << 7) - -#define CORE_ARLA_MAC 0x1408 -#define CORE_ARLA_VID 0x1420 -#define ARLA_VIDTAB_INDX_MASK 0x1fff - -#define CORE_ARLA_MACVID0 0x1440 -#define MAC_MASK 0xffffffffff -#define VID_SHIFT 48 -#define VID_MASK 0xfff - -#define CORE_ARLA_FWD_ENTRY0 0x1460 -#define PORTID_MASK 0x1ff -#define ARL_CON_SHIFT 9 -#define ARL_CON_MASK 0x3 -#define ARL_PRI_SHIFT 11 -#define ARL_PRI_MASK 0x7 -#define ARL_AGE (1 << 14) -#define ARL_STATIC (1 << 15) -#define ARL_VALID (1 << 16) - -#define CORE_ARLA_MACVID_ENTRY(x) (CORE_ARLA_MACVID0 + ((x) * 0x40)) -#define CORE_ARLA_FWD_ENTRY(x) (CORE_ARLA_FWD_ENTRY0 + ((x) * 0x40)) - -#define CORE_ARLA_SRCH_CTL 0x1540 -#define ARLA_SRCH_VLID (1 << 0) -#define IVL_SVL_SELECT (1 << 6) -#define ARLA_SRCH_STDN (1 << 7) - -#define CORE_ARLA_SRCH_ADR 0x1544 -#define ARLA_SRCH_ADR_VALID (1 << 15) - -#define CORE_ARLA_SRCH_RSLT_0_MACVID 0x1580 -#define CORE_ARLA_SRCH_RSLT_0 0x15a0 - -#define CORE_ARLA_SRCH_RSLT_MACVID(x) (CORE_ARLA_SRCH_RSLT_0_MACVID + ((x) * 0x40)) -#define CORE_ARLA_SRCH_RSLT(x) (CORE_ARLA_SRCH_RSLT_0 + ((x) * 0x40)) - #define CORE_ARLA_VTBL_RWCTRL 0x1600 #define ARLA_VTBL_CMD_WRITE 0 #define ARLA_VTBL_CMD_READ 1 @@ -297,59 +225,9 @@ #define P_TXQ_PSM_VDD(x) (P_TXQ_PSM_VDD_MASK << \ ((x) * P_TXQ_PSM_VDD_SHIFT)) -#define CORE_P0_MIB_OFFSET 0x8000 -#define P_MIB_SIZE 0x400 -#define CORE_P_MIB_OFFSET(x) (CORE_P0_MIB_OFFSET + (x) * P_MIB_SIZE) - #define CORE_PORT_VLAN_CTL_PORT(x) (0xc400 + ((x) * 0x8)) #define PORT_VLAN_CTRL_MASK 0x1ff -#define CORE_VLAN_CTRL0 0xd000 -#define CHANGE_1P_VID_INNER (1 << 0) -#define CHANGE_1P_VID_OUTER (1 << 1) -#define CHANGE_1Q_VID (1 << 3) -#define VLAN_LEARN_MODE_SVL (0 << 5) -#define VLAN_LEARN_MODE_IVL (3 << 5) -#define VLAN_EN (1 << 7) - -#define CORE_VLAN_CTRL1 0xd004 -#define EN_RSV_MCAST_FWDMAP (1 << 2) -#define EN_RSV_MCAST_UNTAG (1 << 3) -#define EN_IPMC_BYPASS_FWDMAP (1 << 5) -#define EN_IPMC_BYPASS_UNTAG (1 << 6) - -#define CORE_VLAN_CTRL2 0xd008 -#define EN_MIIM_BYPASS_V_FWDMAP (1 << 2) -#define EN_GMRP_GVRP_V_FWDMAP (1 << 5) -#define EN_GMRP_GVRP_UNTAG_MAP (1 << 6) - -#define CORE_VLAN_CTRL3 0xd00c -#define EN_DROP_NON1Q_MASK 0x1ff - -#define CORE_VLAN_CTRL4 0xd014 -#define RESV_MCAST_FLOOD (1 << 1) -#define EN_DOUBLE_TAG_MASK 0x3 -#define EN_DOUBLE_TAG_SHIFT 2 -#define EN_MGE_REV_GMRP (1 << 4) -#define EN_MGE_REV_GVRP (1 << 5) -#define INGR_VID_CHK_SHIFT 6 -#define INGR_VID_CHK_MASK 0x3 -#define INGR_VID_CHK_FWD (0 << INGR_VID_CHK_SHIFT) -#define INGR_VID_CHK_DROP (1 << INGR_VID_CHK_SHIFT) -#define INGR_VID_CHK_NO_CHK (2 << INGR_VID_CHK_SHIFT) -#define INGR_VID_CHK_VID_VIOL_IMP (3 << INGR_VID_CHK_SHIFT) - -#define CORE_VLAN_CTRL5 0xd018 -#define EN_CPU_RX_BYP_INNER_CRCCHCK (1 << 0) -#define EN_VID_FFF_FWD (1 << 2) -#define DROP_VTABLE_MISS (1 << 3) -#define EGRESS_DIR_FRM_BYP_TRUNK_EN (1 << 4) -#define PRESV_NON1Q (1 << 6) - -#define CORE_VLAN_CTRL6 0xd01c -#define STRICT_SFD_DETECT (1 << 0) -#define DIS_ARL_BUST_LMIT (1 << 4) - #define CORE_DEFAULT_1Q_TAG_P(x) (0xd040 + ((x) * 8)) #define CFI_SHIFT 12 #define 
PRI_SHIFT		13
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index e36b40886bd8..7ff9d373a9ee 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -69,6 +69,11 @@ static const char *mv88e6060_get_name(struct mii_bus *bus, int sw_addr)
 	return NULL;
 }
 
+static enum dsa_tag_protocol mv88e6060_get_tag_protocol(struct dsa_switch *ds)
+{
+	return DSA_TAG_PROTO_TRAILER;
+}
+
 static const char *mv88e6060_drv_probe(struct device *dsa_dev,
 				       struct device *host_dev, int sw_addr,
 				       void **_priv)
@@ -247,8 +252,8 @@ mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
 	return reg_write(ds, addr, regnum, val);
 }
 
-static struct dsa_switch_driver mv88e6060_switch_driver = {
-	.tag_protocol	= DSA_TAG_PROTO_TRAILER,
+static struct dsa_switch_ops mv88e6060_switch_ops = {
+	.get_tag_protocol = mv88e6060_get_tag_protocol,
 	.probe		= mv88e6060_drv_probe,
 	.setup		= mv88e6060_setup,
 	.set_addr	= mv88e6060_set_addr,
@@ -258,14 +263,14 @@ static struct dsa_switch_driver mv88e6060_switch_driver = {
 
 static int __init mv88e6060_init(void)
 {
-	register_switch_driver(&mv88e6060_switch_driver);
+	register_switch_driver(&mv88e6060_switch_ops);
 	return 0;
 }
 module_init(mv88e6060_init);
 
 static void __exit mv88e6060_cleanup(void)
 {
-	unregister_switch_driver(&mv88e6060_switch_driver);
+	unregister_switch_driver(&mv88e6060_switch_ops);
 }
 module_exit(mv88e6060_cleanup);
 
diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig
index 490bc06f993e..ac77737bbd87 100644
--- a/drivers/net/dsa/mv88e6xxx/Kconfig
+++ b/drivers/net/dsa/mv88e6xxx/Kconfig
@@ -2,6 +2,7 @@ config NET_DSA_MV88E6XXX
 	tristate "Marvell 88E6xxx Ethernet switch fabric support"
 	depends on NET_DSA
 	select NET_DSA_TAG_EDSA
+	select NET_DSA_TAG_DSA
 	help
 	  This driver adds support for most of the Marvell 88E6xxx models of
 	  Ethernet switch chips, except 88E6060.
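The mv88e6060 hunk above, together with the mv88e6xxx changes further down, replaces the fixed .tag_protocol field of struct dsa_switch_driver with a .get_tag_protocol callback in the new struct dsa_switch_ops, so a driver can pick its tagging protocol at run time (mv88e6xxx returns EDSA only when the chip advertises MV88E6XXX_FLAG_EDSA and falls back to plain DSA otherwise, which is why the Kconfig above now selects both taggers). Below is a minimal sketch of the callback wiring under that scheme; the foo_* names and the returned protocol are purely illustrative and not part of this series.

#include <net/dsa.h>

/* Hypothetical driver stub: illustrates only the ops wiring. */
static enum dsa_tag_protocol foo_get_tag_protocol(struct dsa_switch *ds)
{
	/* A driver may inspect its chip or private state here and return
	 * a different protocol per device, as mv88e6xxx does for DSA/EDSA.
	 */
	return DSA_TAG_PROTO_TRAILER;
}

static int foo_setup(struct dsa_switch *ds)
{
	/* Normal hardware bring-up would go here. */
	return 0;
}

static struct dsa_switch_ops foo_switch_ops = {
	.get_tag_protocol	= foo_get_tag_protocol,
	.setup			= foo_setup,
};

Making the protocol a per-driver callback is what lets a single mv88e6xxx module drive both EDSA-capable and DSA-only chip generations without a build-time choice.
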
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 710679067594..4e697eea6e0f 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -216,25 +216,130 @@ static int mv88e6xxx_write(struct mv88e6xxx_chip *chip, return 0; } -/* Indirect write to single pointer-data register with an Update bit */ -static int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg, - u16 update) +static int mv88e6xxx_phy_read(struct mv88e6xxx_chip *chip, int phy, + int reg, u16 *val) { - u16 val; - int i, err; + int addr = phy; /* PHY devices addresses start at 0x0 */ + + if (!chip->phy_ops) + return -EOPNOTSUPP; + + return chip->phy_ops->read(chip, addr, reg, val); +} + +static int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy, + int reg, u16 val) +{ + int addr = phy; /* PHY devices addresses start at 0x0 */ + + if (!chip->phy_ops) + return -EOPNOTSUPP; + + return chip->phy_ops->write(chip, addr, reg, val); +} + +static int mv88e6xxx_phy_page_get(struct mv88e6xxx_chip *chip, int phy, u8 page) +{ + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_PHY_PAGE)) + return -EOPNOTSUPP; + + return mv88e6xxx_phy_write(chip, phy, PHY_PAGE, page); +} + +static void mv88e6xxx_phy_page_put(struct mv88e6xxx_chip *chip, int phy) +{ + int err; + + /* Restore PHY page Copper 0x0 for access via the registered MDIO bus */ + err = mv88e6xxx_phy_write(chip, phy, PHY_PAGE, PHY_PAGE_COPPER); + if (unlikely(err)) { + dev_err(chip->dev, "failed to restore PHY %d page Copper (%d)\n", + phy, err); + } +} + +static int mv88e6xxx_phy_page_read(struct mv88e6xxx_chip *chip, int phy, + u8 page, int reg, u16 *val) +{ + int err; + + /* There is no paging for registers 22 */ + if (reg == PHY_PAGE) + return -EINVAL; + + err = mv88e6xxx_phy_page_get(chip, phy, page); + if (!err) { + err = mv88e6xxx_phy_read(chip, phy, reg, val); + mv88e6xxx_phy_page_put(chip, phy); + } + + return err; +} + +static int mv88e6xxx_phy_page_write(struct mv88e6xxx_chip *chip, int phy, + u8 page, int reg, u16 val) +{ + int err; + + /* There is no paging for registers 22 */ + if (reg == PHY_PAGE) + return -EINVAL; + + err = mv88e6xxx_phy_page_get(chip, phy, page); + if (!err) { + err = mv88e6xxx_phy_write(chip, phy, PHY_PAGE, page); + mv88e6xxx_phy_page_put(chip, phy); + } + + return err; +} + +static int mv88e6xxx_serdes_read(struct mv88e6xxx_chip *chip, int reg, u16 *val) +{ + return mv88e6xxx_phy_page_read(chip, ADDR_SERDES, SERDES_PAGE_FIBER, + reg, val); +} + +static int mv88e6xxx_serdes_write(struct mv88e6xxx_chip *chip, int reg, u16 val) +{ + return mv88e6xxx_phy_page_write(chip, ADDR_SERDES, SERDES_PAGE_FIBER, + reg, val); +} + +static int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, + u16 mask) +{ + int i; + + for (i = 0; i < 16; i++) { + u16 val; + int err; - /* Wait until the previous operation is completed */ - for (i = 0; i < 16; ++i) { err = mv88e6xxx_read(chip, addr, reg, &val); if (err) return err; - if (!(val & BIT(15))) - break; + if (!(val & mask)) + return 0; + + usleep_range(1000, 2000); } - if (i == 16) - return -ETIMEDOUT; + dev_err(chip->dev, "Timeout while waiting for switch\n"); + return -ETIMEDOUT; +} + +/* Indirect write to single pointer-data register with an Update bit */ +static int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg, + u16 update) +{ + u16 val; + int err; + + /* Wait until the previous operation is completed */ + err = mv88e6xxx_wait(chip, addr, reg, BIT(15)); + if (err) + return err; /* Set the Update bit to 
trigger a write operation */ val = BIT(15) | update; @@ -260,26 +365,10 @@ static int _mv88e6xxx_reg_write(struct mv88e6xxx_chip *chip, int addr, return mv88e6xxx_write(chip, addr, reg, val); } -static int mv88e6xxx_mdio_read_direct(struct mv88e6xxx_chip *chip, - int addr, int regnum) -{ - if (addr >= 0) - return _mv88e6xxx_reg_read(chip, addr, regnum); - return 0xffff; -} - -static int mv88e6xxx_mdio_write_direct(struct mv88e6xxx_chip *chip, - int addr, int regnum, u16 val) -{ - if (addr >= 0) - return _mv88e6xxx_reg_write(chip, addr, regnum, val); - return 0; -} - static int mv88e6xxx_ppu_disable(struct mv88e6xxx_chip *chip) { int ret; - unsigned long timeout; + int i; ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_CONTROL); if (ret < 0) @@ -290,8 +379,7 @@ static int mv88e6xxx_ppu_disable(struct mv88e6xxx_chip *chip) if (ret) return ret; - timeout = jiffies + 1 * HZ; - while (time_before(jiffies, timeout)) { + for (i = 0; i < 16; i++) { ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATUS); if (ret < 0) return ret; @@ -307,8 +395,7 @@ static int mv88e6xxx_ppu_disable(struct mv88e6xxx_chip *chip) static int mv88e6xxx_ppu_enable(struct mv88e6xxx_chip *chip) { - int ret, err; - unsigned long timeout; + int ret, err, i; ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_CONTROL); if (ret < 0) @@ -319,8 +406,7 @@ static int mv88e6xxx_ppu_enable(struct mv88e6xxx_chip *chip) if (err) return err; - timeout = jiffies + 1 * HZ; - while (time_before(jiffies, timeout)) { + for (i = 0; i < 16; i++) { ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATUS); if (ret < 0) return ret; @@ -400,34 +486,44 @@ static void mv88e6xxx_ppu_state_init(struct mv88e6xxx_chip *chip) chip->ppu_timer.function = mv88e6xxx_ppu_reenable_timer; } -static int mv88e6xxx_mdio_read_ppu(struct mv88e6xxx_chip *chip, int addr, - int regnum) +static void mv88e6xxx_ppu_state_destroy(struct mv88e6xxx_chip *chip) { - int ret; + del_timer_sync(&chip->ppu_timer); +} - ret = mv88e6xxx_ppu_access_get(chip); - if (ret >= 0) { - ret = _mv88e6xxx_reg_read(chip, addr, regnum); +static int mv88e6xxx_phy_ppu_read(struct mv88e6xxx_chip *chip, int addr, + int reg, u16 *val) +{ + int err; + + err = mv88e6xxx_ppu_access_get(chip); + if (!err) { + err = mv88e6xxx_read(chip, addr, reg, val); mv88e6xxx_ppu_access_put(chip); } - return ret; + return err; } -static int mv88e6xxx_mdio_write_ppu(struct mv88e6xxx_chip *chip, int addr, - int regnum, u16 val) +static int mv88e6xxx_phy_ppu_write(struct mv88e6xxx_chip *chip, int addr, + int reg, u16 val) { - int ret; + int err; - ret = mv88e6xxx_ppu_access_get(chip); - if (ret >= 0) { - ret = _mv88e6xxx_reg_write(chip, addr, regnum, val); + err = mv88e6xxx_ppu_access_get(chip); + if (!err) { + err = mv88e6xxx_write(chip, addr, reg, val); mv88e6xxx_ppu_access_put(chip); } - return ret; + return err; } +static const struct mv88e6xxx_ops mv88e6xxx_phy_ppu_ops = { + .read = mv88e6xxx_phy_ppu_read, + .write = mv88e6xxx_phy_ppu_write, +}; + static bool mv88e6xxx_6065_family(struct mv88e6xxx_chip *chip) { return chip->info->family == MV88E6XXX_FAMILY_6065; @@ -819,130 +915,69 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, mutex_unlock(&chip->reg_lock); } -static int _mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int reg, int offset, - u16 mask) -{ - unsigned long timeout = jiffies + HZ / 10; - - while (time_before(jiffies, timeout)) { - int ret; - - ret = _mv88e6xxx_reg_read(chip, reg, offset); - if (ret < 0) - return ret; - if (!(ret & mask)) - return 0; - - usleep_range(1000, 2000); 
- } - return -ETIMEDOUT; -} - -static int mv88e6xxx_mdio_wait(struct mv88e6xxx_chip *chip) -{ - return _mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_SMI_OP, - GLOBAL2_SMI_OP_BUSY); -} - static int _mv88e6xxx_atu_wait(struct mv88e6xxx_chip *chip) { - return _mv88e6xxx_wait(chip, REG_GLOBAL, GLOBAL_ATU_OP, - GLOBAL_ATU_OP_BUSY); -} - -static int mv88e6xxx_mdio_read_indirect(struct mv88e6xxx_chip *chip, - int addr, int regnum) -{ - int ret; - - ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SMI_OP, - GLOBAL2_SMI_OP_22_READ | (addr << 5) | - regnum); - if (ret < 0) - return ret; - - ret = mv88e6xxx_mdio_wait(chip); - if (ret < 0) - return ret; - - ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL2, GLOBAL2_SMI_DATA); - - return ret; -} - -static int mv88e6xxx_mdio_write_indirect(struct mv88e6xxx_chip *chip, - int addr, int regnum, u16 val) -{ - int ret; - - ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SMI_DATA, val); - if (ret < 0) - return ret; - - ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SMI_OP, - GLOBAL2_SMI_OP_22_WRITE | (addr << 5) | - regnum); - - return mv88e6xxx_mdio_wait(chip); + return mv88e6xxx_wait(chip, REG_GLOBAL, GLOBAL_ATU_OP, + GLOBAL_ATU_OP_BUSY); } static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e) { struct mv88e6xxx_chip *chip = ds_to_priv(ds); - int reg; + u16 reg; + int err; if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEE)) return -EOPNOTSUPP; mutex_lock(&chip->reg_lock); - reg = mv88e6xxx_mdio_read_indirect(chip, port, 16); - if (reg < 0) + err = mv88e6xxx_phy_read(chip, port, 16, ®); + if (err) goto out; e->eee_enabled = !!(reg & 0x0200); e->tx_lpi_enabled = !!(reg & 0x0100); - reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_STATUS); - if (reg < 0) + err = mv88e6xxx_read(chip, REG_PORT(port), PORT_STATUS, ®); + if (err) goto out; e->eee_active = !!(reg & PORT_STATUS_EEE); - reg = 0; - out: mutex_unlock(&chip->reg_lock); - return reg; + + return err; } static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, struct phy_device *phydev, struct ethtool_eee *e) { struct mv88e6xxx_chip *chip = ds_to_priv(ds); - int reg; - int ret; + u16 reg; + int err; if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEE)) return -EOPNOTSUPP; mutex_lock(&chip->reg_lock); - ret = mv88e6xxx_mdio_read_indirect(chip, port, 16); - if (ret < 0) + err = mv88e6xxx_phy_read(chip, port, 16, ®); + if (err) goto out; - reg = ret & ~0x0300; + reg &= ~0x0300; if (e->eee_enabled) reg |= 0x0200; if (e->tx_lpi_enabled) reg |= 0x0100; - ret = mv88e6xxx_mdio_write_indirect(chip, port, 16, reg); + err = mv88e6xxx_phy_write(chip, port, 16, reg); out: mutex_unlock(&chip->reg_lock); - return ret; + return err; } static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_chip *chip, u16 fid, u16 cmd) @@ -1227,8 +1262,8 @@ static int _mv88e6xxx_port_pvid_set(struct mv88e6xxx_chip *chip, static int _mv88e6xxx_vtu_wait(struct mv88e6xxx_chip *chip) { - return _mv88e6xxx_wait(chip, REG_GLOBAL, GLOBAL_VTU_OP, - GLOBAL_VTU_OP_BUSY); + return mv88e6xxx_wait(chip, REG_GLOBAL, GLOBAL_VTU_OP, + GLOBAL_VTU_OP_BUSY); } static int _mv88e6xxx_vtu_cmd(struct mv88e6xxx_chip *chip, u16 op) @@ -2302,38 +2337,6 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port) mutex_unlock(&chip->reg_lock); } -static int _mv88e6xxx_mdio_page_write(struct mv88e6xxx_chip *chip, - int port, int page, int reg, int val) -{ - int ret; - - ret = mv88e6xxx_mdio_write_indirect(chip, port, 0x16, page); - if (ret < 0) - goto restore_page_0; - - ret = mv88e6xxx_mdio_write_indirect(chip, port, reg, 
val); -restore_page_0: - mv88e6xxx_mdio_write_indirect(chip, port, 0x16, 0x0); - - return ret; -} - -static int _mv88e6xxx_mdio_page_read(struct mv88e6xxx_chip *chip, - int port, int page, int reg) -{ - int ret; - - ret = mv88e6xxx_mdio_write_indirect(chip, port, 0x16, page); - if (ret < 0) - goto restore_page_0; - - ret = mv88e6xxx_mdio_read_indirect(chip, port, reg); -restore_page_0: - mv88e6xxx_mdio_write_indirect(chip, port, 0x16, 0x0); - - return ret; -} - static int mv88e6xxx_switch_reset(struct mv88e6xxx_chip *chip) { bool ppu_active = mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU_ACTIVE); @@ -2396,23 +2399,22 @@ static int mv88e6xxx_switch_reset(struct mv88e6xxx_chip *chip) return ret; } -static int mv88e6xxx_power_on_serdes(struct mv88e6xxx_chip *chip) +static int mv88e6xxx_serdes_power_on(struct mv88e6xxx_chip *chip) { - int ret; + u16 val; + int err; - ret = _mv88e6xxx_mdio_page_read(chip, REG_FIBER_SERDES, - PAGE_FIBER_SERDES, MII_BMCR); - if (ret < 0) - return ret; + /* Clear Power Down bit */ + err = mv88e6xxx_serdes_read(chip, MII_BMCR, &val); + if (err) + return err; - if (ret & BMCR_PDOWN) { - ret &= ~BMCR_PDOWN; - ret = _mv88e6xxx_mdio_page_write(chip, REG_FIBER_SERDES, - PAGE_FIBER_SERDES, MII_BMCR, - ret); + if (val & BMCR_PDOWN) { + val &= ~BMCR_PDOWN; + err = mv88e6xxx_serdes_write(chip, MII_BMCR, val); } - return ret; + return err; } static int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, @@ -2486,28 +2488,13 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP | PORT_CONTROL_STATE_FORWARDING; if (dsa_is_cpu_port(ds, port)) { - if (mv88e6xxx_6095_family(chip) || mv88e6xxx_6185_family(chip)) - reg |= PORT_CONTROL_DSA_TAG; - if (mv88e6xxx_6352_family(chip) || - mv88e6xxx_6351_family(chip) || - mv88e6xxx_6165_family(chip) || - mv88e6xxx_6097_family(chip) || - mv88e6xxx_6320_family(chip)) { + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_EDSA)) reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA | - PORT_CONTROL_FORWARD_UNKNOWN | PORT_CONTROL_FORWARD_UNKNOWN_MC; - } - - if (mv88e6xxx_6352_family(chip) || - mv88e6xxx_6351_family(chip) || - mv88e6xxx_6165_family(chip) || - mv88e6xxx_6097_family(chip) || - mv88e6xxx_6095_family(chip) || - mv88e6xxx_6065_family(chip) || - mv88e6xxx_6185_family(chip) || - mv88e6xxx_6320_family(chip)) { - reg |= PORT_CONTROL_EGRESS_ADD_TAG; - } + else + reg |= PORT_CONTROL_DSA_TAG; + reg |= PORT_CONTROL_EGRESS_ADD_TAG | + PORT_CONTROL_FORWARD_UNKNOWN; } if (dsa_is_dsa_port(ds, port)) { if (mv88e6xxx_6095_family(chip) || @@ -2535,7 +2522,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) /* If this port is connected to a SerDes, make sure the SerDes is not * powered down. */ - if (mv88e6xxx_6352_family(chip)) { + if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_SERDES)) { ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_STATUS); if (ret < 0) return ret; @@ -2543,7 +2530,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) if ((ret == PORT_STATUS_CMODE_100BASE_X) || (ret == PORT_STATUS_CMODE_1000BASE_X) || (ret == PORT_STATUS_CMODE_SGMII)) { - ret = mv88e6xxx_power_on_serdes(chip); + ret = mv88e6xxx_serdes_power_on(chip); if (ret < 0) return ret; } @@ -2635,10 +2622,13 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) /* Port Ethertype: use the Ethertype DSA Ethertype * value. 
*/ - ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), - PORT_ETH_TYPE, ETH_P_EDSA); - if (ret) - return ret; + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_EDSA)) { + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), + PORT_ETH_TYPE, ETH_P_EDSA); + if (ret) + return ret; + } + /* Tag Remap: use an identity 802.1p prio -> switch * prio mapping. */ @@ -2953,8 +2943,8 @@ static int mv88e6xxx_g2_clear_irl(struct mv88e6xxx_chip *chip) break; /* Wait for the operation to complete */ - err = _mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_IRL_CMD, - GLOBAL2_IRL_CMD_BUSY); + err = mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_IRL_CMD, + GLOBAL2_IRL_CMD_BUSY); if (err) break; } @@ -3008,9 +2998,9 @@ static int mv88e6xxx_g2_clear_pot(struct mv88e6xxx_chip *chip) static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip) { - return _mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_EEPROM_CMD, - GLOBAL2_EEPROM_CMD_BUSY | - GLOBAL2_EEPROM_CMD_RUNNING); + return mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_EEPROM_CMD, + GLOBAL2_EEPROM_CMD_BUSY | + GLOBAL2_EEPROM_CMD_RUNNING); } static int mv88e6xxx_g2_eeprom_cmd(struct mv88e6xxx_chip *chip, u16 cmd) @@ -3058,6 +3048,62 @@ static int mv88e6xxx_g2_eeprom_write16(struct mv88e6xxx_chip *chip, return mv88e6xxx_g2_eeprom_cmd(chip, cmd); } +static int mv88e6xxx_g2_smi_phy_wait(struct mv88e6xxx_chip *chip) +{ + return mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_SMI_PHY_CMD, + GLOBAL2_SMI_PHY_CMD_BUSY); +} + +static int mv88e6xxx_g2_smi_phy_cmd(struct mv88e6xxx_chip *chip, u16 cmd) +{ + int err; + + err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_SMI_PHY_CMD, cmd); + if (err) + return err; + + return mv88e6xxx_g2_smi_phy_wait(chip); +} + +static int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, int addr, + int reg, u16 *val) +{ + u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_READ_DATA | (addr << 5) | reg; + int err; + + err = mv88e6xxx_g2_smi_phy_wait(chip); + if (err) + return err; + + err = mv88e6xxx_g2_smi_phy_cmd(chip, cmd); + if (err) + return err; + + return mv88e6xxx_read(chip, REG_GLOBAL2, GLOBAL2_SMI_PHY_DATA, val); +} + +static int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, int addr, + int reg, u16 val) +{ + u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_WRITE_DATA | (addr << 5) | reg; + int err; + + err = mv88e6xxx_g2_smi_phy_wait(chip); + if (err) + return err; + + err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_SMI_PHY_DATA, val); + if (err) + return err; + + return mv88e6xxx_g2_smi_phy_cmd(chip, cmd); +} + +static const struct mv88e6xxx_ops mv88e6xxx_g2_smi_phy_ops = { + .read = mv88e6xxx_g2_smi_phy_read, + .write = mv88e6xxx_g2_smi_phy_write, +}; + static int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip) { u16 reg; @@ -3191,84 +3237,35 @@ static int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr) return err; } -#ifdef CONFIG_NET_DSA_HWMON -static int mv88e6xxx_mdio_page_read(struct dsa_switch *ds, int port, int page, - int reg) -{ - struct mv88e6xxx_chip *chip = ds_to_priv(ds); - int ret; - - mutex_lock(&chip->reg_lock); - ret = _mv88e6xxx_mdio_page_read(chip, port, page, reg); - mutex_unlock(&chip->reg_lock); - - return ret; -} - -static int mv88e6xxx_mdio_page_write(struct dsa_switch *ds, int port, int page, - int reg, int val) -{ - struct mv88e6xxx_chip *chip = ds_to_priv(ds); - int ret; - - mutex_lock(&chip->reg_lock); - ret = _mv88e6xxx_mdio_page_write(chip, port, page, reg, val); - mutex_unlock(&chip->reg_lock); - - return ret; -} -#endif - -static int mv88e6xxx_port_to_mdio_addr(struct mv88e6xxx_chip *chip, int port) -{ - if (port >= 0 && port < 
chip->info->num_ports) - return port; - return -EINVAL; -} - -static int mv88e6xxx_mdio_read(struct mii_bus *bus, int port, int regnum) +static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg) { struct mv88e6xxx_chip *chip = bus->priv; - int addr = mv88e6xxx_port_to_mdio_addr(chip, port); - int ret; + u16 val; + int err; - if (addr < 0) + if (phy >= chip->info->num_ports) return 0xffff; mutex_lock(&chip->reg_lock); - - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU)) - ret = mv88e6xxx_mdio_read_ppu(chip, addr, regnum); - else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_SMI_PHY)) - ret = mv88e6xxx_mdio_read_indirect(chip, addr, regnum); - else - ret = mv88e6xxx_mdio_read_direct(chip, addr, regnum); - + err = mv88e6xxx_phy_read(chip, phy, reg, &val); mutex_unlock(&chip->reg_lock); - return ret; + + return err ? err : val; } -static int mv88e6xxx_mdio_write(struct mii_bus *bus, int port, int regnum, - u16 val) +static int mv88e6xxx_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) { struct mv88e6xxx_chip *chip = bus->priv; - int addr = mv88e6xxx_port_to_mdio_addr(chip, port); - int ret; + int err; - if (addr < 0) + if (phy >= chip->info->num_ports) return 0xffff; mutex_lock(&chip->reg_lock); - - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU)) - ret = mv88e6xxx_mdio_write_ppu(chip, addr, regnum, val); - else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_SMI_PHY)) - ret = mv88e6xxx_mdio_write_indirect(chip, addr, regnum, val); - else - ret = mv88e6xxx_mdio_write_direct(chip, addr, regnum, val); - + err = mv88e6xxx_phy_write(chip, phy, reg, val); mutex_unlock(&chip->reg_lock); - return ret; + + return err; } static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, @@ -3278,9 +3275,6 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, struct mii_bus *bus; int err; - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU)) - mv88e6xxx_ppu_state_init(chip); - if (np) chip->mdio_np = of_get_child_by_name(np, "mdio"); @@ -3336,44 +3330,42 @@ static void mv88e6xxx_mdio_unregister(struct mv88e6xxx_chip *chip) static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp) { struct mv88e6xxx_chip *chip = ds_to_priv(ds); + u16 val; int ret; - int val; *temp = 0; mutex_lock(&chip->reg_lock); - ret = mv88e6xxx_mdio_write_direct(chip, 0x0, 0x16, 0x6); + ret = mv88e6xxx_phy_write(chip, 0x0, 0x16, 0x6); if (ret < 0) goto error; /* Enable temperature sensor */ - ret = mv88e6xxx_mdio_read_direct(chip, 0x0, 0x1a); + ret = mv88e6xxx_phy_read(chip, 0x0, 0x1a, &val); if (ret < 0) goto error; - ret = mv88e6xxx_mdio_write_direct(chip, 0x0, 0x1a, ret | (1 << 5)); + ret = mv88e6xxx_phy_write(chip, 0x0, 0x1a, val | (1 << 5)); if (ret < 0) goto error; /* Wait for temperature to stabilize */ usleep_range(10000, 12000); - val = mv88e6xxx_mdio_read_direct(chip, 0x0, 0x1a); - if (val < 0) { - ret = val; + ret = mv88e6xxx_phy_read(chip, 0x0, 0x1a, &val); + if (ret < 0) goto error; - } /* Disable temperature sensor */ - ret = mv88e6xxx_mdio_write_direct(chip, 0x0, 0x1a, ret & ~(1 << 5)); + ret = mv88e6xxx_phy_write(chip, 0x0, 0x1a, val & ~(1 << 5)); if (ret < 0) goto error; *temp = ((val & 0x1f) - 5) * 5; error: - mv88e6xxx_mdio_write_direct(chip, 0x0, 0x16, 0x0); + mv88e6xxx_phy_write(chip, 0x0, 0x16, 0x0); mutex_unlock(&chip->reg_lock); return ret; } @@ -3382,15 +3374,18 @@ static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp) { struct mv88e6xxx_chip *chip = ds_to_priv(ds); int phy = mv88e6xxx_6320_family(chip) ? 
3 : 0; + u16 val; int ret; *temp = 0; - ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 27); + mutex_lock(&chip->reg_lock); + ret = mv88e6xxx_phy_page_read(chip, phy, 6, 27, &val); + mutex_unlock(&chip->reg_lock); if (ret < 0) return ret; - *temp = (ret & 0xff) - 25; + *temp = (val & 0xff) - 25; return 0; } @@ -3412,6 +3407,7 @@ static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp) { struct mv88e6xxx_chip *chip = ds_to_priv(ds); int phy = mv88e6xxx_6320_family(chip) ? 3 : 0; + u16 val; int ret; if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT)) @@ -3419,11 +3415,13 @@ static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp) *temp = 0; - ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 26); + mutex_lock(&chip->reg_lock); + ret = mv88e6xxx_phy_page_read(chip, phy, 6, 26, &val); + mutex_unlock(&chip->reg_lock); if (ret < 0) return ret; - *temp = (((ret >> 8) & 0x1f) * 5) - 25; + *temp = (((val >> 8) & 0x1f) * 5) - 25; return 0; } @@ -3432,23 +3430,30 @@ static int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp) { struct mv88e6xxx_chip *chip = ds_to_priv(ds); int phy = mv88e6xxx_6320_family(chip) ? 3 : 0; - int ret; + u16 val; + int err; if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT)) return -EOPNOTSUPP; - ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 26); - if (ret < 0) - return ret; + mutex_lock(&chip->reg_lock); + err = mv88e6xxx_phy_page_read(chip, phy, 6, 26, &val); + if (err) + goto unlock; temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f); - return mv88e6xxx_mdio_page_write(ds, phy, 6, 26, - (ret & 0xe0ff) | (temp << 8)); + err = mv88e6xxx_phy_page_write(chip, phy, 6, 26, + (val & 0xe0ff) | (temp << 8)); +unlock: + mutex_unlock(&chip->reg_lock); + + return err; } static int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm) { struct mv88e6xxx_chip *chip = ds_to_priv(ds); int phy = mv88e6xxx_6320_family(chip) ? 
3 : 0; + u16 val; int ret; if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT)) @@ -3456,11 +3461,13 @@ static int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm) *alarm = false; - ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 26); + mutex_lock(&chip->reg_lock); + ret = mv88e6xxx_phy_page_read(chip, phy, 6, 26, &val); + mutex_unlock(&chip->reg_lock); if (ret < 0) return ret; - *alarm = !!(ret & 0x40); + *alarm = !!(val & 0x40); return 0; } @@ -3877,6 +3884,30 @@ static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev) return chip; } +static const struct mv88e6xxx_ops mv88e6xxx_phy_ops = { + .read = mv88e6xxx_read, + .write = mv88e6xxx_write, +}; + +static void mv88e6xxx_phy_init(struct mv88e6xxx_chip *chip) +{ + if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_SMI_PHY)) { + chip->phy_ops = &mv88e6xxx_g2_smi_phy_ops; + } else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU)) { + chip->phy_ops = &mv88e6xxx_phy_ppu_ops; + mv88e6xxx_ppu_state_init(chip); + } else { + chip->phy_ops = &mv88e6xxx_phy_ops; + } +} + +static void mv88e6xxx_phy_destroy(struct mv88e6xxx_chip *chip) +{ + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU)) { + mv88e6xxx_ppu_state_destroy(chip); + } +} + static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip, struct mii_bus *bus, int sw_addr) { @@ -3886,7 +3917,7 @@ static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip, if (sw_addr == 0) chip->smi_ops = &mv88e6xxx_smi_single_chip_ops; - else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_MULTI_CHIP)) + else if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_MULTI_CHIP)) chip->smi_ops = &mv88e6xxx_smi_multi_chip_ops; else return -EINVAL; @@ -3897,6 +3928,16 @@ static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip, return 0; } +static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_EDSA)) + return DSA_TAG_PROTO_EDSA; + + return DSA_TAG_PROTO_DSA; +} + static const char *mv88e6xxx_drv_probe(struct device *dsa_dev, struct device *host_dev, int sw_addr, void **priv) @@ -3924,6 +3965,8 @@ static const char *mv88e6xxx_drv_probe(struct device *dsa_dev, if (err) goto free; + mv88e6xxx_phy_init(chip); + err = mv88e6xxx_mdio_register(chip, NULL); if (err) goto free; @@ -3937,9 +3980,9 @@ free: return NULL; } -static struct dsa_switch_driver mv88e6xxx_switch_driver = { - .tag_protocol = DSA_TAG_PROTO_EDSA, +static struct dsa_switch_ops mv88e6xxx_switch_ops = { .probe = mv88e6xxx_drv_probe, + .get_tag_protocol = mv88e6xxx_get_tag_protocol, .setup = mv88e6xxx_setup, .set_addr = mv88e6xxx_set_addr, .adjust_link = mv88e6xxx_adjust_link, @@ -3986,7 +4029,7 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip, ds->dev = dev; ds->priv = chip; - ds->drv = &mv88e6xxx_switch_driver; + ds->ops = &mv88e6xxx_switch_ops; dev_set_drvdata(dev, ds); @@ -4025,6 +4068,8 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) if (err) return err; + mv88e6xxx_phy_init(chip); + chip->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS); if (IS_ERR(chip->reset)) return PTR_ERR(chip->reset); @@ -4051,6 +4096,7 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev) struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev); struct mv88e6xxx_chip *chip = ds_to_priv(ds); + mv88e6xxx_phy_destroy(chip); mv88e6xxx_unregister_switch(chip); mv88e6xxx_mdio_unregister(chip); } @@ -4076,7 +4122,7 @@ static struct mdio_driver mv88e6xxx_driver = { static int __init mv88e6xxx_init(void) { - 
register_switch_driver(&mv88e6xxx_switch_driver); + register_switch_driver(&mv88e6xxx_switch_ops); return mdio_driver_register(&mv88e6xxx_driver); } module_init(mv88e6xxx_init); @@ -4084,7 +4130,7 @@ module_init(mv88e6xxx_init); static void __exit mv88e6xxx_cleanup(void) { mdio_driver_unregister(&mv88e6xxx_driver); - unregister_switch_driver(&mv88e6xxx_switch_driver); + unregister_switch_driver(&mv88e6xxx_switch_ops); } module_exit(mv88e6xxx_cleanup); diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h index 48d6ea77f9bd..e157d4f69864 100644 --- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h @@ -30,9 +30,12 @@ #define SMI_CMD_OP_45_READ_DATA_INC ((3 << 10) | SMI_CMD_BUSY) #define SMI_DATA 0x01 -/* Fiber/SERDES Registers are located at SMI address F, page 1 */ -#define REG_FIBER_SERDES 0x0f -#define PAGE_FIBER_SERDES 0x01 +/* PHY Registers */ +#define PHY_PAGE 0x16 +#define PHY_PAGE_COPPER 0x00 + +#define ADDR_SERDES 0x0f +#define SERDES_PAGE_FIBER 0x01 #define REG_PORT(p) (0x10 + (p)) #define PORT_STATUS 0x00 @@ -329,17 +332,16 @@ #define GLOBAL2_EEPROM_DATA 0x15 #define GLOBAL2_PTP_AVB_OP 0x16 #define GLOBAL2_PTP_AVB_DATA 0x17 -#define GLOBAL2_SMI_OP 0x18 -#define GLOBAL2_SMI_OP_BUSY BIT(15) -#define GLOBAL2_SMI_OP_CLAUSE_22 BIT(12) -#define GLOBAL2_SMI_OP_22_WRITE ((1 << 10) | GLOBAL2_SMI_OP_BUSY | \ - GLOBAL2_SMI_OP_CLAUSE_22) -#define GLOBAL2_SMI_OP_22_READ ((2 << 10) | GLOBAL2_SMI_OP_BUSY | \ - GLOBAL2_SMI_OP_CLAUSE_22) -#define GLOBAL2_SMI_OP_45_WRITE_ADDR ((0 << 10) | GLOBAL2_SMI_OP_BUSY) -#define GLOBAL2_SMI_OP_45_WRITE_DATA ((1 << 10) | GLOBAL2_SMI_OP_BUSY) -#define GLOBAL2_SMI_OP_45_READ_DATA ((2 << 10) | GLOBAL2_SMI_OP_BUSY) -#define GLOBAL2_SMI_DATA 0x19 +#define GLOBAL2_SMI_PHY_CMD 0x18 +#define GLOBAL2_SMI_PHY_CMD_BUSY BIT(15) +#define GLOBAL2_SMI_PHY_CMD_MODE_22 BIT(12) +#define GLOBAL2_SMI_PHY_CMD_OP_22_WRITE_DATA ((0x1 << 10) | \ + GLOBAL2_SMI_PHY_CMD_MODE_22 | \ + GLOBAL2_SMI_PHY_CMD_BUSY) +#define GLOBAL2_SMI_PHY_CMD_OP_22_READ_DATA ((0x2 << 10) | \ + GLOBAL2_SMI_PHY_CMD_MODE_22 | \ + GLOBAL2_SMI_PHY_CMD_BUSY) +#define GLOBAL2_SMI_PHY_DATA 0x19 #define GLOBAL2_SCRATCH_MISC 0x1a #define GLOBAL2_SCRATCH_BUSY BIT(15) #define GLOBAL2_SCRATCH_REGISTER_SHIFT 8 @@ -384,10 +386,31 @@ enum mv88e6xxx_family { }; enum mv88e6xxx_cap { + /* Two different tag protocols can be used by the driver. All + * switches support DSA, but only later generations support + * EDSA. + */ + MV88E6XXX_CAP_EDSA, + /* Energy Efficient Ethernet. */ MV88E6XXX_CAP_EEE, + /* Multi-chip Addressing Mode. + * Some chips respond to only 2 registers of its own SMI device address + * when it is non-zero, and use indirect access to internal registers. + */ + MV88E6XXX_CAP_SMI_CMD, /* (0x00) SMI Command */ + MV88E6XXX_CAP_SMI_DATA, /* (0x01) SMI Data */ + + /* PHY Registers. + */ + MV88E6XXX_CAP_PHY_PAGE, /* (0x16) Page Register */ + + /* Fiber/SERDES Registers (SMI address F). + */ + MV88E6XXX_CAP_SERDES, + /* Switch Global 2 Registers. * The device contains a second set of global 16-bit registers. */ @@ -402,12 +425,8 @@ enum mv88e6xxx_cap { MV88E6XXX_CAP_G2_POT, /* (0x0f) Priority Override Table */ MV88E6XXX_CAP_G2_EEPROM_CMD, /* (0x14) EEPROM Command */ MV88E6XXX_CAP_G2_EEPROM_DATA, /* (0x15) EEPROM Data */ - - /* Multi-chip Addressing Mode. - * Some chips require an indirect SMI access when their SMI device - * address is not zero. See SMI_CMD and SMI_DATA. 
- */ - MV88E6XXX_CAP_MULTI_CHIP, + MV88E6XXX_CAP_G2_SMI_PHY_CMD, /* (0x18) SMI PHY Command */ + MV88E6XXX_CAP_G2_SMI_PHY_DATA, /* (0x19) SMI PHY Data */ /* PHY Polling Unit. * See GLOBAL_CONTROL_PPU_ENABLE and GLOBAL_STATUS_PPU_POLLING. @@ -415,12 +434,6 @@ enum mv88e6xxx_cap { MV88E6XXX_CAP_PPU, MV88E6XXX_CAP_PPU_ACTIVE, - /* SMI PHY Command and Data registers. - * This requires an indirect access to PHY registers through - * GLOBAL2_SMI_OP, otherwise direct access to PHY registers is done. - */ - MV88E6XXX_CAP_SMI_PHY, - /* Per VLAN Spanning Tree Unit (STU). * The Port State database, if present, is accessed through VTU * operations and dedicated SID registers. See GLOBAL_VTU_SID. @@ -440,7 +453,16 @@ enum mv88e6xxx_cap { }; /* Bitmask of capabilities */ +#define MV88E6XXX_FLAG_EDSA BIT(MV88E6XXX_CAP_EDSA) #define MV88E6XXX_FLAG_EEE BIT(MV88E6XXX_CAP_EEE) + +#define MV88E6XXX_FLAG_SMI_CMD BIT(MV88E6XXX_CAP_SMI_CMD) +#define MV88E6XXX_FLAG_SMI_DATA BIT(MV88E6XXX_CAP_SMI_DATA) + +#define MV88E6XXX_FLAG_PHY_PAGE BIT(MV88E6XXX_CAP_PHY_PAGE) + +#define MV88E6XXX_FLAG_SERDES BIT(MV88E6XXX_CAP_SERDES) + #define MV88E6XXX_FLAG_GLOBAL2 BIT(MV88E6XXX_CAP_GLOBAL2) #define MV88E6XXX_FLAG_G2_MGMT_EN_2X BIT(MV88E6XXX_CAP_G2_MGMT_EN_2X) #define MV88E6XXX_FLAG_G2_MGMT_EN_0X BIT(MV88E6XXX_CAP_G2_MGMT_EN_0X) @@ -452,10 +474,11 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAG_G2_POT BIT(MV88E6XXX_CAP_G2_POT) #define MV88E6XXX_FLAG_G2_EEPROM_CMD BIT(MV88E6XXX_CAP_G2_EEPROM_CMD) #define MV88E6XXX_FLAG_G2_EEPROM_DATA BIT(MV88E6XXX_CAP_G2_EEPROM_DATA) -#define MV88E6XXX_FLAG_MULTI_CHIP BIT(MV88E6XXX_CAP_MULTI_CHIP) +#define MV88E6XXX_FLAG_G2_SMI_PHY_CMD BIT(MV88E6XXX_CAP_G2_SMI_PHY_CMD) +#define MV88E6XXX_FLAG_G2_SMI_PHY_DATA BIT(MV88E6XXX_CAP_G2_SMI_PHY_DATA) + #define MV88E6XXX_FLAG_PPU BIT(MV88E6XXX_CAP_PPU) #define MV88E6XXX_FLAG_PPU_ACTIVE BIT(MV88E6XXX_CAP_PPU_ACTIVE) -#define MV88E6XXX_FLAG_SMI_PHY BIT(MV88E6XXX_CAP_SMI_PHY) #define MV88E6XXX_FLAG_STU BIT(MV88E6XXX_CAP_STU) #define MV88E6XXX_FLAG_TEMP BIT(MV88E6XXX_CAP_TEMP) #define MV88E6XXX_FLAG_TEMP_LIMIT BIT(MV88E6XXX_CAP_TEMP_LIMIT) @@ -471,28 +494,43 @@ enum mv88e6xxx_cap { (MV88E6XXX_FLAG_G2_IRL_CMD | \ MV88E6XXX_FLAG_G2_IRL_DATA) +/* Multi-chip Addressing Mode */ +#define MV88E6XXX_FLAGS_MULTI_CHIP \ + (MV88E6XXX_FLAG_SMI_CMD | \ + MV88E6XXX_FLAG_SMI_DATA) + /* Cross-chip Port VLAN Table */ #define MV88E6XXX_FLAGS_PVT \ (MV88E6XXX_FLAG_G2_PVT_ADDR | \ MV88E6XXX_FLAG_G2_PVT_DATA) +/* Fiber/SERDES Registers at SMI address F, page 1 */ +#define MV88E6XXX_FLAGS_SERDES \ + (MV88E6XXX_FLAG_PHY_PAGE | \ + MV88E6XXX_FLAG_SERDES) + +/* Indirect PHY access via Global2 SMI PHY registers */ +#define MV88E6XXX_FLAGS_SMI_PHY \ + (MV88E6XXX_FLAG_G2_SMI_PHY_CMD |\ + MV88E6XXX_FLAG_G2_SMI_PHY_DATA) + #define MV88E6XXX_FLAGS_FAMILY_6095 \ (MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ - MV88E6XXX_FLAG_MULTI_CHIP | \ MV88E6XXX_FLAG_PPU | \ - MV88E6XXX_FLAG_VTU) + MV88E6XXX_FLAG_VTU | \ + MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6097 \ (MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAG_MULTI_CHIP | \ MV88E6XXX_FLAG_PPU | \ MV88E6XXX_FLAG_STU | \ MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_IRL | \ + MV88E6XXX_FLAGS_MULTI_CHIP | \ MV88E6XXX_FLAGS_PVT) #define MV88E6XXX_FLAGS_FAMILY_6165 \ @@ -501,69 +539,73 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_SWITCH_MAC | \ MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAG_MULTI_CHIP | \ 
MV88E6XXX_FLAG_STU | \ MV88E6XXX_FLAG_TEMP | \ MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_IRL | \ + MV88E6XXX_FLAGS_MULTI_CHIP | \ MV88E6XXX_FLAGS_PVT) #define MV88E6XXX_FLAGS_FAMILY_6185 \ (MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ - MV88E6XXX_FLAG_MULTI_CHIP | \ + MV88E6XXX_FLAGS_MULTI_CHIP | \ MV88E6XXX_FLAG_PPU | \ MV88E6XXX_FLAG_VTU) #define MV88E6XXX_FLAGS_FAMILY_6320 \ - (MV88E6XXX_FLAG_EEE | \ + (MV88E6XXX_FLAG_EDSA | \ + MV88E6XXX_FLAG_EEE | \ MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_SWITCH_MAC | \ MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAG_MULTI_CHIP | \ MV88E6XXX_FLAG_PPU_ACTIVE | \ - MV88E6XXX_FLAG_SMI_PHY | \ MV88E6XXX_FLAG_TEMP | \ MV88E6XXX_FLAG_TEMP_LIMIT | \ MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_EEPROM16 | \ MV88E6XXX_FLAGS_IRL | \ - MV88E6XXX_FLAGS_PVT) + MV88E6XXX_FLAGS_MULTI_CHIP | \ + MV88E6XXX_FLAGS_PVT | \ + MV88E6XXX_FLAGS_SMI_PHY) #define MV88E6XXX_FLAGS_FAMILY_6351 \ - (MV88E6XXX_FLAG_GLOBAL2 | \ + (MV88E6XXX_FLAG_EDSA | \ + MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_SWITCH_MAC | \ MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAG_MULTI_CHIP | \ MV88E6XXX_FLAG_PPU_ACTIVE | \ - MV88E6XXX_FLAG_SMI_PHY | \ MV88E6XXX_FLAG_STU | \ MV88E6XXX_FLAG_TEMP | \ MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_IRL | \ - MV88E6XXX_FLAGS_PVT) + MV88E6XXX_FLAGS_MULTI_CHIP | \ + MV88E6XXX_FLAGS_PVT | \ + MV88E6XXX_FLAGS_SMI_PHY) #define MV88E6XXX_FLAGS_FAMILY_6352 \ - (MV88E6XXX_FLAG_EEE | \ + (MV88E6XXX_FLAG_EDSA | \ + MV88E6XXX_FLAG_EEE | \ MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_SWITCH_MAC | \ MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAG_MULTI_CHIP | \ MV88E6XXX_FLAG_PPU_ACTIVE | \ - MV88E6XXX_FLAG_SMI_PHY | \ MV88E6XXX_FLAG_STU | \ MV88E6XXX_FLAG_TEMP | \ MV88E6XXX_FLAG_TEMP_LIMIT | \ MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_EEPROM16 | \ MV88E6XXX_FLAGS_IRL | \ - MV88E6XXX_FLAGS_PVT) + MV88E6XXX_FLAGS_MULTI_CHIP | \ + MV88E6XXX_FLAGS_PVT | \ + MV88E6XXX_FLAGS_SERDES | \ + MV88E6XXX_FLAGS_SMI_PHY) struct mv88e6xxx_info { enum mv88e6xxx_family family; @@ -623,6 +665,7 @@ struct mv88e6xxx_chip { /* Handles automatic disabling and re-enabling of the PHY * polling unit. 
*/ + const struct mv88e6xxx_ops *phy_ops; struct mutex ppu_mutex; int ppu_disabled; struct work_struct ppu_work; diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 2ffd63463299..8cc7467b6c1f 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -24,6 +24,7 @@ source "drivers/net/ethernet/agere/Kconfig" source "drivers/net/ethernet/allwinner/Kconfig" source "drivers/net/ethernet/alteon/Kconfig" source "drivers/net/ethernet/altera/Kconfig" +source "drivers/net/ethernet/amazon/Kconfig" source "drivers/net/ethernet/amd/Kconfig" source "drivers/net/ethernet/apm/Kconfig" source "drivers/net/ethernet/apple/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 1d349e9aa9a6..a09423df83f2 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_NET_VENDOR_AGERE) += agere/ obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/ obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/ obj-$(CONFIG_ALTERA_TSE) += altera/ +obj-$(CONFIG_NET_VENDOR_AMAZON) += amazon/ obj-$(CONFIG_NET_VENDOR_AMD) += amd/ obj-$(CONFIG_NET_XGENE) += apm/ obj-$(CONFIG_NET_VENDOR_APPLE) += apple/ diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c index 38eaea18da23..00f9ee3fc3e5 100644 --- a/drivers/net/ethernet/adi/bfin_mac.c +++ b/drivers/net/ethernet/adi/bfin_mac.c @@ -192,8 +192,8 @@ static int desc_list_init(struct net_device *dev) goto init_error; skb_reserve(new_skb, NET_IP_ALIGN); - /* Invidate the data cache of skb->data range when it is write back - * cache. It will prevent overwritting the new data from DMA + /* Invalidate the data cache of skb->data range when it is write back + * cache. It will prevent overwriting the new data from DMA */ blackfin_dcache_invalidate_range((unsigned long)new_skb->head, (unsigned long)new_skb->end); @@ -1205,7 +1205,7 @@ static void bfin_mac_rx(struct bfin_mac_local *lp) } /* reserve 2 bytes for RXDWA padding */ skb_reserve(new_skb, NET_IP_ALIGN); - /* Invidate the data cache of skb->data range when it is write back + /* Invalidate the data cache of skb->data range when it is write back * cache. It will prevent overwritting the new data from DMA */ blackfin_dcache_invalidate_range((unsigned long)new_skb->head, @@ -1599,7 +1599,7 @@ static int bfin_mac_probe(struct platform_device *pdev) *(__le16 *) (&(ndev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI()); /* probe mac */ - /*todo: how to proble? which is revision_register */ + /*todo: how to probe? 
which is revision_register */ bfin_write_EMAC_ADDRLO(0x12345678); if (bfin_read_EMAC_ADDRLO() != 0x12345678) { dev_err(&pdev->dev, "Cannot detect Blackfin on-chip ethernet MAC controller!\n"); diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index bca07c5c94bd..f8df8248035e 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1105,27 +1105,6 @@ static void greth_set_msglevel(struct net_device *dev, u32 value) struct greth_private *greth = netdev_priv(dev); greth->msg_enable = value; } -static int greth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct greth_private *greth = netdev_priv(dev); - struct phy_device *phy = greth->phy; - - if (!phy) - return -ENODEV; - - return phy_ethtool_gset(phy, cmd); -} - -static int greth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct greth_private *greth = netdev_priv(dev); - struct phy_device *phy = greth->phy; - - if (!phy) - return -ENODEV; - - return phy_ethtool_sset(phy, cmd); -} static int greth_get_regs_len(struct net_device *dev) { @@ -1157,12 +1136,12 @@ static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, vo static const struct ethtool_ops greth_ethtool_ops = { .get_msglevel = greth_get_msglevel, .set_msglevel = greth_set_msglevel, - .get_settings = greth_get_settings, - .set_settings = greth_set_settings, .get_drvinfo = greth_get_drvinfo, .get_regs_len = greth_get_regs_len, .get_regs = greth_get_regs, .get_link = ethtool_op_get_link, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static struct net_device_ops greth_netdev_ops = { @@ -1224,7 +1203,7 @@ static int greth_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) static void greth_link_change(struct net_device *dev) { struct greth_private *greth = netdev_priv(dev); - struct phy_device *phydev = greth->phy; + struct phy_device *phydev = dev->phydev; unsigned long flags; int status_change = 0; u32 ctrl; @@ -1307,7 +1286,6 @@ static int greth_mdio_probe(struct net_device *dev) greth->link = 0; greth->speed = 0; greth->duplex = -1; - greth->phy = phy; return 0; } @@ -1325,6 +1303,7 @@ static int greth_mdio_init(struct greth_private *greth) { int ret; unsigned long timeout; + struct net_device *ndev = greth->netdev; greth->mdio = mdiobus_alloc(); if (!greth->mdio) { @@ -1349,15 +1328,16 @@ static int greth_mdio_init(struct greth_private *greth) goto unreg_mdio; } - phy_start(greth->phy); + phy_start(ndev->phydev); /* If Ethernet debug link is used make autoneg happen right away */ if (greth->edcl && greth_edcl == 1) { - phy_start_aneg(greth->phy); + phy_start_aneg(ndev->phydev); timeout = jiffies + 6*HZ; - while (!phy_aneg_done(greth->phy) && time_before(jiffies, timeout)) { + while (!phy_aneg_done(ndev->phydev) && + time_before(jiffies, timeout)) { } - phy_read_status(greth->phy); + phy_read_status(ndev->phydev); greth_link_change(greth->netdev); } @@ -1569,8 +1549,8 @@ static int greth_of_remove(struct platform_device *of_dev) dma_free_coherent(&of_dev->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys); - if (greth->phy) - phy_stop(greth->phy); + if (ndev->phydev) + phy_stop(ndev->phydev); mdiobus_unregister(greth->mdio); unregister_netdev(ndev); diff --git a/drivers/net/ethernet/aeroflex/greth.h b/drivers/net/ethernet/aeroflex/greth.h index 92dd918e4a83..9c07140a5d8d 100644 --- a/drivers/net/ethernet/aeroflex/greth.h +++ 
b/drivers/net/ethernet/aeroflex/greth.h @@ -123,7 +123,6 @@ struct greth_private { struct napi_struct napi; spinlock_t devlock; - struct phy_device *phy; struct mii_bus *mdio; unsigned int link; unsigned int speed; diff --git a/drivers/net/ethernet/amazon/Kconfig b/drivers/net/ethernet/amazon/Kconfig new file mode 100644 index 000000000000..99b30353541a --- /dev/null +++ b/drivers/net/ethernet/amazon/Kconfig @@ -0,0 +1,27 @@ +# +# Amazon network device configuration +# + +config NET_VENDOR_AMAZON + bool "Amazon Devices" + default y + ---help--- + If you have a network (Ethernet) device belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Amazon devices. If you say Y, you will be asked + for your specific device in the following questions. + +if NET_VENDOR_AMAZON + +config ENA_ETHERNET + tristate "Elastic Network Adapter (ENA) support" + depends on (PCI_MSI && X86) + ---help--- + This driver supports Elastic Network Adapter (ENA)" + + To compile this driver as a module, choose M here. + The module will be called ena. + +endif #NET_VENDOR_AMAZON diff --git a/drivers/net/ethernet/amazon/Makefile b/drivers/net/ethernet/amazon/Makefile new file mode 100644 index 000000000000..8e0b73f60d51 --- /dev/null +++ b/drivers/net/ethernet/amazon/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Amazon network device drivers. +# + +obj-$(CONFIG_ENA_ETHERNET) += ena/ diff --git a/drivers/net/ethernet/amazon/ena/Makefile b/drivers/net/ethernet/amazon/ena/Makefile new file mode 100644 index 000000000000..eaeeae06c5d9 --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the Elastic Network Adapter (ENA) device drivers. +# + +obj-$(CONFIG_ENA_ETHERNET) += ena.o + +ena-y := ena_netdev.o ena_com.o ena_eth_com.o ena_ethtool.o diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h new file mode 100644 index 000000000000..a46e749bf226 --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h @@ -0,0 +1,973 @@ +/* + * Copyright 2015 - 2016 Amazon.com, Inc. or its affiliates. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef _ENA_ADMIN_H_ +#define _ENA_ADMIN_H_ + +enum ena_admin_aq_opcode { + ENA_ADMIN_CREATE_SQ = 1, + + ENA_ADMIN_DESTROY_SQ = 2, + + ENA_ADMIN_CREATE_CQ = 3, + + ENA_ADMIN_DESTROY_CQ = 4, + + ENA_ADMIN_GET_FEATURE = 8, + + ENA_ADMIN_SET_FEATURE = 9, + + ENA_ADMIN_GET_STATS = 11, +}; + +enum ena_admin_aq_completion_status { + ENA_ADMIN_SUCCESS = 0, + + ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1, + + ENA_ADMIN_BAD_OPCODE = 2, + + ENA_ADMIN_UNSUPPORTED_OPCODE = 3, + + ENA_ADMIN_MALFORMED_REQUEST = 4, + + /* Additional status is provided in ACQ entry extended_status */ + ENA_ADMIN_ILLEGAL_PARAMETER = 5, + + ENA_ADMIN_UNKNOWN_ERROR = 6, +}; + +enum ena_admin_aq_feature_id { + ENA_ADMIN_DEVICE_ATTRIBUTES = 1, + + ENA_ADMIN_MAX_QUEUES_NUM = 2, + + ENA_ADMIN_RSS_HASH_FUNCTION = 10, + + ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11, + + ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12, + + ENA_ADMIN_MTU = 14, + + ENA_ADMIN_RSS_HASH_INPUT = 18, + + ENA_ADMIN_INTERRUPT_MODERATION = 20, + + ENA_ADMIN_AENQ_CONFIG = 26, + + ENA_ADMIN_LINK_CONFIG = 27, + + ENA_ADMIN_HOST_ATTR_CONFIG = 28, + + ENA_ADMIN_FEATURES_OPCODE_NUM = 32, +}; + +enum ena_admin_placement_policy_type { + /* descriptors and headers are in host memory */ + ENA_ADMIN_PLACEMENT_POLICY_HOST = 1, + + /* descriptors and headers are in device memory (a.k.a Low Latency + * Queue) + */ + ENA_ADMIN_PLACEMENT_POLICY_DEV = 3, +}; + +enum ena_admin_link_types { + ENA_ADMIN_LINK_SPEED_1G = 0x1, + + ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2, + + ENA_ADMIN_LINK_SPEED_5G = 0x4, + + ENA_ADMIN_LINK_SPEED_10G = 0x8, + + ENA_ADMIN_LINK_SPEED_25G = 0x10, + + ENA_ADMIN_LINK_SPEED_40G = 0x20, + + ENA_ADMIN_LINK_SPEED_50G = 0x40, + + ENA_ADMIN_LINK_SPEED_100G = 0x80, + + ENA_ADMIN_LINK_SPEED_200G = 0x100, + + ENA_ADMIN_LINK_SPEED_400G = 0x200, +}; + +enum ena_admin_completion_policy_type { + /* completion queue entry for each sq descriptor */ + ENA_ADMIN_COMPLETION_POLICY_DESC = 0, + + /* completion queue entry upon request in sq descriptor */ + ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1, + + /* current queue head pointer is updated in OS memory upon sq + * descriptor request + */ + ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2, + + /* current queue head pointer is updated in OS memory for each sq + * descriptor + */ + ENA_ADMIN_COMPLETION_POLICY_HEAD = 3, +}; + +/* basic stats return ena_admin_basic_stats while extanded stats return a + * buffer (string format) with additional statistics per queue and per + * device id + */ +enum ena_admin_get_stats_type { + ENA_ADMIN_GET_STATS_TYPE_BASIC = 0, + + ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1, +}; + +enum ena_admin_get_stats_scope { + ENA_ADMIN_SPECIFIC_QUEUE = 0, + + ENA_ADMIN_ETH_TRAFFIC = 1, +}; + +struct ena_admin_aq_common_desc { + /* 11:0 : command_id + * 15:12 : reserved12 + */ + u16 command_id; + + /* as appears in ena_admin_aq_opcode */ + u8 opcode; + + /* 0 : phase + * 1 : ctrl_data - control buffer address valid + * 2 : ctrl_data_indirect - control buffer address + * points to list of pages with addresses of control + * buffers + * 7:3 : reserved3 + */ + u8 flags; +}; + +/* used in ena_admin_aq_entry. Can point directly to control data, or to a + * page list chunk. Used also at the end of indirect mode page list chunks, + * for chaining. 
+ */ +struct ena_admin_ctrl_buff_info { + u32 length; + + struct ena_common_mem_addr address; +}; + +struct ena_admin_sq { + u16 sq_idx; + + /* 4:0 : reserved + * 7:5 : sq_direction - 0x1 - Tx; 0x2 - Rx + */ + u8 sq_identity; + + u8 reserved1; +}; + +struct ena_admin_aq_entry { + struct ena_admin_aq_common_desc aq_common_descriptor; + + union { + u32 inline_data_w1[3]; + + struct ena_admin_ctrl_buff_info control_buffer; + } u; + + u32 inline_data_w4[12]; +}; + +struct ena_admin_acq_common_desc { + /* command identifier to associate it with the aq descriptor + * 11:0 : command_id + * 15:12 : reserved12 + */ + u16 command; + + u8 status; + + /* 0 : phase + * 7:1 : reserved1 + */ + u8 flags; + + u16 extended_status; + + /* serves as a hint what AQ entries can be revoked */ + u16 sq_head_indx; +}; + +struct ena_admin_acq_entry { + struct ena_admin_acq_common_desc acq_common_descriptor; + + u32 response_specific_data[14]; +}; + +struct ena_admin_aq_create_sq_cmd { + struct ena_admin_aq_common_desc aq_common_descriptor; + + /* 4:0 : reserved0_w1 + * 7:5 : sq_direction - 0x1 - Tx, 0x2 - Rx + */ + u8 sq_identity; + + u8 reserved8_w1; + + /* 3:0 : placement_policy - Describing where the SQ + * descriptor ring and the SQ packet headers reside: + * 0x1 - descriptors and headers are in OS memory, + * 0x3 - descriptors and headers in device memory + * (a.k.a Low Latency Queue) + * 6:4 : completion_policy - Describing what policy + * to use for generation completion entry (cqe) in + * the CQ associated with this SQ: 0x0 - cqe for each + * sq descriptor, 0x1 - cqe upon request in sq + * descriptor, 0x2 - current queue head pointer is + * updated in OS memory upon sq descriptor request + * 0x3 - current queue head pointer is updated in OS + * memory for each sq descriptor + * 7 : reserved15_w1 + */ + u8 sq_caps_2; + + /* 0 : is_physically_contiguous - Described if the + * queue ring memory is allocated in physical + * contiguous pages or split. + * 7:1 : reserved17_w1 + */ + u8 sq_caps_3; + + /* associated completion queue id. This CQ must be created prior to + * SQ creation + */ + u16 cq_idx; + + /* submission queue depth in entries */ + u16 sq_depth; + + /* SQ physical base address in OS memory. This field should not be + * used for Low Latency queues. Has to be page aligned. + */ + struct ena_common_mem_addr sq_ba; + + /* specifies queue head writeback location in OS memory. Valid if + * completion_policy is set to completion_policy_head_on_demand or + * completion_policy_head. 
Has to be cache aligned + */ + struct ena_common_mem_addr sq_head_writeback; + + u32 reserved0_w7; + + u32 reserved0_w8; +}; + +enum ena_admin_sq_direction { + ENA_ADMIN_SQ_DIRECTION_TX = 1, + + ENA_ADMIN_SQ_DIRECTION_RX = 2, +}; + +struct ena_admin_acq_create_sq_resp_desc { + struct ena_admin_acq_common_desc acq_common_desc; + + u16 sq_idx; + + u16 reserved; + + /* queue doorbell address as an offset to PCIe MMIO REG BAR */ + u32 sq_doorbell_offset; + + /* low latency queue ring base address as an offset to PCIe MMIO + * LLQ_MEM BAR + */ + u32 llq_descriptors_offset; + + /* low latency queue headers' memory as an offset to PCIe MMIO + * LLQ_MEM BAR + */ + u32 llq_headers_offset; +}; + +struct ena_admin_aq_destroy_sq_cmd { + struct ena_admin_aq_common_desc aq_common_descriptor; + + struct ena_admin_sq sq; +}; + +struct ena_admin_acq_destroy_sq_resp_desc { + struct ena_admin_acq_common_desc acq_common_desc; +}; + +struct ena_admin_aq_create_cq_cmd { + struct ena_admin_aq_common_desc aq_common_descriptor; + + /* 4:0 : reserved5 + * 5 : interrupt_mode_enabled - if set, cq operates + * in interrupt mode, otherwise - polling + * 7:6 : reserved6 + */ + u8 cq_caps_1; + + /* 4:0 : cq_entry_size_words - size of CQ entry in + * 32-bit words, valid values: 4, 8. + * 7:5 : reserved7 + */ + u8 cq_caps_2; + + /* completion queue depth in # of entries. must be power of 2 */ + u16 cq_depth; + + /* msix vector assigned to this cq */ + u32 msix_vector; + + /* cq physical base address in OS memory. CQ must be physically + * contiguous + */ + struct ena_common_mem_addr cq_ba; +}; + +struct ena_admin_acq_create_cq_resp_desc { + struct ena_admin_acq_common_desc acq_common_desc; + + u16 cq_idx; + + /* actual cq depth in number of entries */ + u16 cq_actual_depth; + + u32 numa_node_register_offset; + + u32 cq_head_db_register_offset; + + u32 cq_interrupt_unmask_register_offset; +}; + +struct ena_admin_aq_destroy_cq_cmd { + struct ena_admin_aq_common_desc aq_common_descriptor; + + u16 cq_idx; + + u16 reserved1; +}; + +struct ena_admin_acq_destroy_cq_resp_desc { + struct ena_admin_acq_common_desc acq_common_desc; +}; + +/* ENA AQ Get Statistics command. Extended statistics are placed in control + * buffer pointed by AQ entry + */ +struct ena_admin_aq_get_stats_cmd { + struct ena_admin_aq_common_desc aq_common_descriptor; + + union { + /* command specific inline data */ + u32 inline_data_w1[3]; + + struct ena_admin_ctrl_buff_info control_buffer; + } u; + + /* stats type as defined in enum ena_admin_get_stats_type */ + u8 type; + + /* stats scope defined in enum ena_admin_get_stats_scope */ + u8 scope; + + u16 reserved3; + + /* queue id. used when scope is specific_queue */ + u16 queue_idx; + + /* device id, value 0xFFFF means mine. only privileged device can get + * stats of other device + */ + u16 device_id; +}; + +/* Basic Statistics Command. 
*/ +struct ena_admin_basic_stats { + u32 tx_bytes_low; + + u32 tx_bytes_high; + + u32 tx_pkts_low; + + u32 tx_pkts_high; + + u32 rx_bytes_low; + + u32 rx_bytes_high; + + u32 rx_pkts_low; + + u32 rx_pkts_high; + + u32 rx_drops_low; + + u32 rx_drops_high; +}; + +struct ena_admin_acq_get_stats_resp { + struct ena_admin_acq_common_desc acq_common_desc; + + struct ena_admin_basic_stats basic_stats; +}; + +struct ena_admin_get_set_feature_common_desc { + /* 1:0 : select - 0x1 - current value; 0x3 - default + * value + * 7:3 : reserved3 + */ + u8 flags; + + /* as appears in ena_admin_aq_feature_id */ + u8 feature_id; + + u16 reserved16; +}; + +struct ena_admin_device_attr_feature_desc { + u32 impl_id; + + u32 device_version; + + /* bitmap of ena_admin_aq_feature_id */ + u32 supported_features; + + u32 reserved3; + + /* Indicates how many bits are used physical address access. */ + u32 phys_addr_width; + + /* Indicates how many bits are used virtual address access. */ + u32 virt_addr_width; + + /* unicast MAC address (in Network byte order) */ + u8 mac_addr[6]; + + u8 reserved7[2]; + + u32 max_mtu; +}; + +struct ena_admin_queue_feature_desc { + /* including LLQs */ + u32 max_sq_num; + + u32 max_sq_depth; + + u32 max_cq_num; + + u32 max_cq_depth; + + u32 max_llq_num; + + u32 max_llq_depth; + + u32 max_header_size; + + /* Maximum Descriptors number, including meta descriptor, allowed for + * a single Tx packet + */ + u16 max_packet_tx_descs; + + /* Maximum Descriptors number allowed for a single Rx packet */ + u16 max_packet_rx_descs; +}; + +struct ena_admin_set_feature_mtu_desc { + /* exclude L2 */ + u32 mtu; +}; + +struct ena_admin_set_feature_host_attr_desc { + /* host OS info base address in OS memory. host info is 4KB of + * physically contiguous + */ + struct ena_common_mem_addr os_info_ba; + + /* host debug area base address in OS memory. 
debug area must be + * physically contiguous + */ + struct ena_common_mem_addr debug_ba; + + /* debug area size */ + u32 debug_area_size; +}; + +struct ena_admin_feature_intr_moder_desc { + /* interrupt delay granularity in usec */ + u16 intr_delay_resolution; + + u16 reserved; +}; + +struct ena_admin_get_feature_link_desc { + /* Link speed in Mb */ + u32 speed; + + /* bit field of enum ena_admin_link types */ + u32 supported; + + /* 0 : autoneg + * 1 : duplex - Full Duplex + * 31:2 : reserved2 + */ + u32 flags; +}; + +struct ena_admin_feature_aenq_desc { + /* bitmask for AENQ groups the device can report */ + u32 supported_groups; + + /* bitmask for AENQ groups to report */ + u32 enabled_groups; +}; + +struct ena_admin_feature_offload_desc { + /* 0 : TX_L3_csum_ipv4 + * 1 : TX_L4_ipv4_csum_part - The checksum field + * should be initialized with pseudo header checksum + * 2 : TX_L4_ipv4_csum_full + * 3 : TX_L4_ipv6_csum_part - The checksum field + * should be initialized with pseudo header checksum + * 4 : TX_L4_ipv6_csum_full + * 5 : tso_ipv4 + * 6 : tso_ipv6 + * 7 : tso_ecn + */ + u32 tx; + + /* Receive side supported stateless offload + * 0 : RX_L3_csum_ipv4 - IPv4 checksum + * 1 : RX_L4_ipv4_csum - TCP/UDP/IPv4 checksum + * 2 : RX_L4_ipv6_csum - TCP/UDP/IPv6 checksum + * 3 : RX_hash - Hash calculation + */ + u32 rx_supported; + + u32 rx_enabled; +}; + +enum ena_admin_hash_functions { + ENA_ADMIN_TOEPLITZ = 1, + + ENA_ADMIN_CRC32 = 2, +}; + +struct ena_admin_feature_rss_flow_hash_control { + u32 keys_num; + + u32 reserved; + + u32 key[10]; +}; + +struct ena_admin_feature_rss_flow_hash_function { + /* 7:0 : funcs - bitmask of ena_admin_hash_functions */ + u32 supported_func; + + /* 7:0 : selected_func - bitmask of + * ena_admin_hash_functions + */ + u32 selected_func; + + /* initial value */ + u32 init_val; +}; + +/* RSS flow hash protocols */ +enum ena_admin_flow_hash_proto { + ENA_ADMIN_RSS_TCP4 = 0, + + ENA_ADMIN_RSS_UDP4 = 1, + + ENA_ADMIN_RSS_TCP6 = 2, + + ENA_ADMIN_RSS_UDP6 = 3, + + ENA_ADMIN_RSS_IP4 = 4, + + ENA_ADMIN_RSS_IP6 = 5, + + ENA_ADMIN_RSS_IP4_FRAG = 6, + + ENA_ADMIN_RSS_NOT_IP = 7, + + ENA_ADMIN_RSS_PROTO_NUM = 16, +}; + +/* RSS flow hash fields */ +enum ena_admin_flow_hash_fields { + /* Ethernet Dest Addr */ + ENA_ADMIN_RSS_L2_DA = 0, + + /* Ethernet Src Addr */ + ENA_ADMIN_RSS_L2_SA = 1, + + /* ipv4/6 Dest Addr */ + ENA_ADMIN_RSS_L3_DA = 2, + + /* ipv4/6 Src Addr */ + ENA_ADMIN_RSS_L3_SA = 5, + + /* tcp/udp Dest Port */ + ENA_ADMIN_RSS_L4_DP = 6, + + /* tcp/udp Src Port */ + ENA_ADMIN_RSS_L4_SP = 7, +}; + +struct ena_admin_proto_input { + /* flow hash fields (bitwise according to ena_admin_flow_hash_fields) */ + u16 fields; + + u16 reserved2; +}; + +struct ena_admin_feature_rss_hash_control { + struct ena_admin_proto_input supported_fields[ENA_ADMIN_RSS_PROTO_NUM]; + + struct ena_admin_proto_input selected_fields[ENA_ADMIN_RSS_PROTO_NUM]; + + struct ena_admin_proto_input reserved2[ENA_ADMIN_RSS_PROTO_NUM]; + + struct ena_admin_proto_input reserved3[ENA_ADMIN_RSS_PROTO_NUM]; +}; + +struct ena_admin_feature_rss_flow_hash_input { + /* supported hash input sorting + * 1 : L3_sort - support swap L3 addresses if DA is + * smaller than SA + * 2 : L4_sort - support swap L4 ports if DP smaller + * SP + */ + u16 supported_input_sort; + + /* enabled hash input sorting + * 1 : enable_L3_sort - enable swap L3 addresses if + * DA smaller than SA + * 2 : enable_L4_sort - enable swap L4 ports if DP + * smaller than SP + */ + u16 enabled_input_sort; +}; + +enum ena_admin_os_type { + 
ENA_ADMIN_OS_LINUX = 1, + + ENA_ADMIN_OS_WIN = 2, + + ENA_ADMIN_OS_DPDK = 3, + + ENA_ADMIN_OS_FREEBSD = 4, + + ENA_ADMIN_OS_IPXE = 5, +}; + +struct ena_admin_host_info { + /* defined in enum ena_admin_os_type */ + u32 os_type; + + /* os distribution string format */ + u8 os_dist_str[128]; + + /* OS distribution numeric format */ + u32 os_dist; + + /* kernel version string format */ + u8 kernel_ver_str[32]; + + /* Kernel version numeric format */ + u32 kernel_ver; + + /* 7:0 : major + * 15:8 : minor + * 23:16 : sub_minor + */ + u32 driver_version; + + /* features bitmap */ + u32 supported_network_features[4]; +}; + +struct ena_admin_rss_ind_table_entry { + u16 cq_idx; + + u16 reserved; +}; + +struct ena_admin_feature_rss_ind_table { + /* min supported table size (2^min_size) */ + u16 min_size; + + /* max supported table size (2^max_size) */ + u16 max_size; + + /* table size (2^size) */ + u16 size; + + u16 reserved; + + /* index of the inline entry. 0xFFFFFFFF means invalid */ + u32 inline_index; + + /* used for updating single entry, ignored when setting the entire + * table through the control buffer. + */ + struct ena_admin_rss_ind_table_entry inline_entry; +}; + +struct ena_admin_get_feat_cmd { + struct ena_admin_aq_common_desc aq_common_descriptor; + + struct ena_admin_ctrl_buff_info control_buffer; + + struct ena_admin_get_set_feature_common_desc feat_common; + + u32 raw[11]; +}; + +struct ena_admin_get_feat_resp { + struct ena_admin_acq_common_desc acq_common_desc; + + union { + u32 raw[14]; + + struct ena_admin_device_attr_feature_desc dev_attr; + + struct ena_admin_queue_feature_desc max_queue; + + struct ena_admin_feature_aenq_desc aenq; + + struct ena_admin_get_feature_link_desc link; + + struct ena_admin_feature_offload_desc offload; + + struct ena_admin_feature_rss_flow_hash_function flow_hash_func; + + struct ena_admin_feature_rss_flow_hash_input flow_hash_input; + + struct ena_admin_feature_rss_ind_table ind_table; + + struct ena_admin_feature_intr_moder_desc intr_moderation; + } u; +}; + +struct ena_admin_set_feat_cmd { + struct ena_admin_aq_common_desc aq_common_descriptor; + + struct ena_admin_ctrl_buff_info control_buffer; + + struct ena_admin_get_set_feature_common_desc feat_common; + + union { + u32 raw[11]; + + /* mtu size */ + struct ena_admin_set_feature_mtu_desc mtu; + + /* host attributes */ + struct ena_admin_set_feature_host_attr_desc host_attr; + + /* AENQ configuration */ + struct ena_admin_feature_aenq_desc aenq; + + /* rss flow hash function */ + struct ena_admin_feature_rss_flow_hash_function flow_hash_func; + + /* rss flow hash input */ + struct ena_admin_feature_rss_flow_hash_input flow_hash_input; + + /* rss indirection table */ + struct ena_admin_feature_rss_ind_table ind_table; + } u; +}; + +struct ena_admin_set_feat_resp { + struct ena_admin_acq_common_desc acq_common_desc; + + union { + u32 raw[14]; + } u; +}; + +struct ena_admin_aenq_common_desc { + u16 group; + + u16 syndrom; + + /* 0 : phase */ + u8 flags; + + u8 reserved1[3]; + + u32 timestamp_low; + + u32 timestamp_high; +}; + +/* asynchronous event notification groups */ +enum ena_admin_aenq_group { + ENA_ADMIN_LINK_CHANGE = 0, + + ENA_ADMIN_FATAL_ERROR = 1, + + ENA_ADMIN_WARNING = 2, + + ENA_ADMIN_NOTIFICATION = 3, + + ENA_ADMIN_KEEP_ALIVE = 4, + + ENA_ADMIN_AENQ_GROUPS_NUM = 5, +}; + +enum ena_admin_aenq_notification_syndrom { + ENA_ADMIN_SUSPEND = 0, + + ENA_ADMIN_RESUME = 1, +}; + +struct ena_admin_aenq_entry { + struct ena_admin_aenq_common_desc aenq_common_desc; + + /* command specific 
inline data */ + u32 inline_data_w4[12]; +}; + +struct ena_admin_aenq_link_change_desc { + struct ena_admin_aenq_common_desc aenq_common_desc; + + /* 0 : link_status */ + u32 flags; +}; + +struct ena_admin_ena_mmio_req_read_less_resp { + u16 req_id; + + u16 reg_off; + + /* value is valid when poll is cleared */ + u32 reg_val; +}; + +/* aq_common_desc */ +#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0) +#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0) +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1 +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1) +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2 +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2) + +/* sq */ +#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT 5 +#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK GENMASK(7, 5) + +/* acq_common_desc */ +#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0) +#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0) + +/* aq_create_sq_cmd */ +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT 5 +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK GENMASK(7, 5) +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK GENMASK(3, 0) +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT 4 +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK GENMASK(6, 4) +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK BIT(0) + +/* aq_create_cq_cmd */ +#define ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT 5 +#define ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5) +#define ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0) + +/* get_set_feature_common_desc */ +#define ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0) + +/* get_feature_link_desc */ +#define ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK BIT(0) +#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT 1 +#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK BIT(1) + +/* feature_offload_desc */ +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK BIT(0) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT 1 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK BIT(1) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT 2 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK BIT(2) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT 3 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK BIT(3) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT 4 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK BIT(4) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT 5 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK BIT(5) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT 6 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK BIT(6) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT 7 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK BIT(7) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK BIT(0) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT 1 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK BIT(1) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT 2 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK BIT(2) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT 3 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK BIT(3) + +/* feature_rss_flow_hash_function */ +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK GENMASK(7, 0) +#define 
ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK GENMASK(7, 0) + +/* feature_rss_flow_hash_input */ +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT 1 +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1) +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT 2 +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK BIT(2) +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT 1 +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK BIT(1) +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT 2 +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK BIT(2) + +/* host_info */ +#define ENA_ADMIN_HOST_INFO_MAJOR_MASK GENMASK(7, 0) +#define ENA_ADMIN_HOST_INFO_MINOR_SHIFT 8 +#define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8) +#define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16 +#define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16) + +/* aenq_common_desc */ +#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0) + +/* aenq_link_change_desc */ +#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0) + +#endif /*_ENA_ADMIN_H_ */ diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c new file mode 100644 index 000000000000..3066d9c99984 --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -0,0 +1,2666 @@ +/* + * Copyright 2015 Amazon.com, Inc. or its affiliates. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "ena_com.h" + +/*****************************************************************************/ +/*****************************************************************************/ + +/* Timeout in micro-sec */ +#define ADMIN_CMD_TIMEOUT_US (1000000) + +#define ENA_ASYNC_QUEUE_DEPTH 4 +#define ENA_ADMIN_QUEUE_DEPTH 32 + +#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \ + ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \ + | (ENA_COMMON_SPEC_VERSION_MINOR)) + +#define ENA_CTRL_MAJOR 0 +#define ENA_CTRL_MINOR 0 +#define ENA_CTRL_SUB_MINOR 1 + +#define MIN_ENA_CTRL_VER \ + (((ENA_CTRL_MAJOR) << \ + (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \ + ((ENA_CTRL_MINOR) << \ + (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \ + (ENA_CTRL_SUB_MINOR)) + +#define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x))) +#define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32)) + +#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF + +/*****************************************************************************/ +/*****************************************************************************/ +/*****************************************************************************/ + +enum ena_cmd_status { + ENA_CMD_SUBMITTED, + ENA_CMD_COMPLETED, + /* Abort - canceled by the driver */ + ENA_CMD_ABORTED, +}; + +struct ena_comp_ctx { + struct completion wait_event; + struct ena_admin_acq_entry *user_cqe; + u32 comp_size; + enum ena_cmd_status status; + /* status from the device */ + u8 comp_status; + u8 cmd_opcode; + bool occupied; +}; + +struct ena_com_stats_ctx { + struct ena_admin_aq_get_stats_cmd get_cmd; + struct ena_admin_acq_get_stats_resp get_resp; +}; + +static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev, + struct ena_common_mem_addr *ena_addr, + dma_addr_t addr) +{ + if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) { + pr_err("dma address has more bits that the device supports\n"); + return -EINVAL; + } + + ena_addr->mem_addr_low = (u32)addr; + ena_addr->mem_addr_high = (u64)addr >> 32; + + return 0; +} + +static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue) +{ + struct ena_com_admin_sq *sq = &queue->sq; + u16 size = ADMIN_SQ_SIZE(queue->q_depth); + + sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr, + GFP_KERNEL); + + if (!sq->entries) { + pr_err("memory allocation failed"); + return -ENOMEM; + } + + sq->head = 0; + sq->tail = 0; + sq->phase = 1; + + sq->db_addr = NULL; + + return 0; +} + +static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue) +{ + struct ena_com_admin_cq *cq = &queue->cq; + u16 size = ADMIN_CQ_SIZE(queue->q_depth); + + cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr, + GFP_KERNEL); + + if (!cq->entries) { + pr_err("memory allocation failed"); + return -ENOMEM; + } + + cq->head = 0; + cq->phase = 1; + + return 0; +} + +static int ena_com_admin_init_aenq(struct ena_com_dev *dev, + struct ena_aenq_handlers *aenq_handlers) +{ + struct ena_com_aenq *aenq = &dev->aenq; + u32 addr_low, addr_high, aenq_caps; + u16 size; + + dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH; + size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH); + aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr, + GFP_KERNEL); + + if (!aenq->entries) { + pr_err("memory allocation failed"); + return -ENOMEM; + } + + aenq->head = aenq->q_depth; + aenq->phase = 1; + + addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr); + addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr); + + writel(addr_low, 
dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF); + writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF); + + aenq_caps = 0; + aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK; + aenq_caps |= (sizeof(struct ena_admin_aenq_entry) + << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) & + ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK; + writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF); + + if (unlikely(!aenq_handlers)) { + pr_err("aenq handlers pointer is NULL\n"); + return -EINVAL; + } + + aenq->aenq_handlers = aenq_handlers; + + return 0; +} + +static inline void comp_ctxt_release(struct ena_com_admin_queue *queue, + struct ena_comp_ctx *comp_ctx) +{ + comp_ctx->occupied = false; + atomic_dec(&queue->outstanding_cmds); +} + +static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue, + u16 command_id, bool capture) +{ + if (unlikely(command_id >= queue->q_depth)) { + pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n", + command_id, queue->q_depth); + return NULL; + } + + if (unlikely(queue->comp_ctx[command_id].occupied && capture)) { + pr_err("Completion context is occupied\n"); + return NULL; + } + + if (capture) { + atomic_inc(&queue->outstanding_cmds); + queue->comp_ctx[command_id].occupied = true; + } + + return &queue->comp_ctx[command_id]; +} + +static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, + struct ena_admin_aq_entry *cmd, + size_t cmd_size_in_bytes, + struct ena_admin_acq_entry *comp, + size_t comp_size_in_bytes) +{ + struct ena_comp_ctx *comp_ctx; + u16 tail_masked, cmd_id; + u16 queue_size_mask; + u16 cnt; + + queue_size_mask = admin_queue->q_depth - 1; + + tail_masked = admin_queue->sq.tail & queue_size_mask; + + /* In case of queue FULL */ + cnt = admin_queue->sq.tail - admin_queue->sq.head; + if (cnt >= admin_queue->q_depth) { + pr_debug("admin queue is FULL (tail %d head %d depth: %d)\n", + admin_queue->sq.tail, admin_queue->sq.head, + admin_queue->q_depth); + admin_queue->stats.out_of_space++; + return ERR_PTR(-ENOSPC); + } + + cmd_id = admin_queue->curr_cmd_id; + + cmd->aq_common_descriptor.flags |= admin_queue->sq.phase & + ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK; + + cmd->aq_common_descriptor.command_id |= cmd_id & + ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK; + + comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true); + if (unlikely(!comp_ctx)) + return ERR_PTR(-EINVAL); + + comp_ctx->status = ENA_CMD_SUBMITTED; + comp_ctx->comp_size = (u32)comp_size_in_bytes; + comp_ctx->user_cqe = comp; + comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode; + + reinit_completion(&comp_ctx->wait_event); + + memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes); + + admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) & + queue_size_mask; + + admin_queue->sq.tail++; + admin_queue->stats.submitted_cmd++; + + if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0)) + admin_queue->sq.phase = !admin_queue->sq.phase; + + writel(admin_queue->sq.tail, admin_queue->sq.db_addr); + + return comp_ctx; +} + +static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue) +{ + size_t size = queue->q_depth * sizeof(struct ena_comp_ctx); + struct ena_comp_ctx *comp_ctx; + u16 i; + + queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL); + if (unlikely(!queue->comp_ctx)) { + pr_err("memory allocation failed"); + return -ENOMEM; + } + + for (i = 0; i < queue->q_depth; i++) { + comp_ctx = get_comp_ctxt(queue, i, false); + if (comp_ctx) + 
init_completion(&comp_ctx->wait_event); + } + + return 0; +} + +static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, + struct ena_admin_aq_entry *cmd, + size_t cmd_size_in_bytes, + struct ena_admin_acq_entry *comp, + size_t comp_size_in_bytes) +{ + unsigned long flags; + struct ena_comp_ctx *comp_ctx; + + spin_lock_irqsave(&admin_queue->q_lock, flags); + if (unlikely(!admin_queue->running_state)) { + spin_unlock_irqrestore(&admin_queue->q_lock, flags); + return ERR_PTR(-ENODEV); + } + comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd, + cmd_size_in_bytes, + comp, + comp_size_in_bytes); + if (unlikely(IS_ERR(comp_ctx))) + admin_queue->running_state = false; + spin_unlock_irqrestore(&admin_queue->q_lock, flags); + + return comp_ctx; +} + +static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, + struct ena_com_create_io_ctx *ctx, + struct ena_com_io_sq *io_sq) +{ + size_t size; + int dev_node = 0; + + memset(&io_sq->desc_addr, 0x0, sizeof(struct ena_com_io_desc_addr)); + + io_sq->desc_entry_size = + (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? + sizeof(struct ena_eth_io_tx_desc) : + sizeof(struct ena_eth_io_rx_desc); + + size = io_sq->desc_entry_size * io_sq->q_depth; + + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { + dev_node = dev_to_node(ena_dev->dmadev); + set_dev_node(ena_dev->dmadev, ctx->numa_node); + io_sq->desc_addr.virt_addr = + dma_zalloc_coherent(ena_dev->dmadev, size, + &io_sq->desc_addr.phys_addr, + GFP_KERNEL); + set_dev_node(ena_dev->dmadev, dev_node); + if (!io_sq->desc_addr.virt_addr) { + io_sq->desc_addr.virt_addr = + dma_zalloc_coherent(ena_dev->dmadev, size, + &io_sq->desc_addr.phys_addr, + GFP_KERNEL); + } + } else { + dev_node = dev_to_node(ena_dev->dmadev); + set_dev_node(ena_dev->dmadev, ctx->numa_node); + io_sq->desc_addr.virt_addr = + devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); + set_dev_node(ena_dev->dmadev, dev_node); + if (!io_sq->desc_addr.virt_addr) { + io_sq->desc_addr.virt_addr = + devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); + } + } + + if (!io_sq->desc_addr.virt_addr) { + pr_err("memory allocation failed"); + return -ENOMEM; + } + + io_sq->tail = 0; + io_sq->next_to_comp = 0; + io_sq->phase = 1; + + return 0; +} + +static int ena_com_init_io_cq(struct ena_com_dev *ena_dev, + struct ena_com_create_io_ctx *ctx, + struct ena_com_io_cq *io_cq) +{ + size_t size; + int prev_node = 0; + + memset(&io_cq->cdesc_addr, 0x0, sizeof(struct ena_com_io_desc_addr)); + + /* Use the basic completion descriptor for Rx */ + io_cq->cdesc_entry_size_in_bytes = + (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? 
+ sizeof(struct ena_eth_io_tx_cdesc) : + sizeof(struct ena_eth_io_rx_cdesc_base); + + size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth; + + prev_node = dev_to_node(ena_dev->dmadev); + set_dev_node(ena_dev->dmadev, ctx->numa_node); + io_cq->cdesc_addr.virt_addr = + dma_zalloc_coherent(ena_dev->dmadev, size, + &io_cq->cdesc_addr.phys_addr, GFP_KERNEL); + set_dev_node(ena_dev->dmadev, prev_node); + if (!io_cq->cdesc_addr.virt_addr) { + io_cq->cdesc_addr.virt_addr = + dma_zalloc_coherent(ena_dev->dmadev, size, + &io_cq->cdesc_addr.phys_addr, + GFP_KERNEL); + } + + if (!io_cq->cdesc_addr.virt_addr) { + pr_err("memory allocation failed"); + return -ENOMEM; + } + + io_cq->phase = 1; + io_cq->head = 0; + + return 0; +} + +static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue, + struct ena_admin_acq_entry *cqe) +{ + struct ena_comp_ctx *comp_ctx; + u16 cmd_id; + + cmd_id = cqe->acq_common_descriptor.command & + ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK; + + comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false); + if (unlikely(!comp_ctx)) { + pr_err("comp_ctx is NULL. Changing the admin queue running state\n"); + admin_queue->running_state = false; + return; + } + + comp_ctx->status = ENA_CMD_COMPLETED; + comp_ctx->comp_status = cqe->acq_common_descriptor.status; + + if (comp_ctx->user_cqe) + memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size); + + if (!admin_queue->polling) + complete(&comp_ctx->wait_event); +} + +static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue) +{ + struct ena_admin_acq_entry *cqe = NULL; + u16 comp_num = 0; + u16 head_masked; + u8 phase; + + head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1); + phase = admin_queue->cq.phase; + + cqe = &admin_queue->cq.entries[head_masked]; + + /* Go over all the completions */ + while ((cqe->acq_common_descriptor.flags & + ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) { + /* Do not read the rest of the completion entry before the + * phase bit was validated + */ + rmb(); + ena_com_handle_single_admin_completion(admin_queue, cqe); + + head_masked++; + comp_num++; + if (unlikely(head_masked == admin_queue->q_depth)) { + head_masked = 0; + phase = !phase; + } + + cqe = &admin_queue->cq.entries[head_masked]; + } + + admin_queue->cq.head += comp_num; + admin_queue->cq.phase = phase; + admin_queue->sq.head += comp_num; + admin_queue->stats.completed_cmd += comp_num; +} + +static int ena_com_comp_status_to_errno(u8 comp_status) +{ + if (unlikely(comp_status != 0)) + pr_err("admin command failed[%u]\n", comp_status); + + if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR)) + return -EINVAL; + + switch (comp_status) { + case ENA_ADMIN_SUCCESS: + return 0; + case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE: + return -ENOMEM; + case ENA_ADMIN_UNSUPPORTED_OPCODE: + return -EPERM; + case ENA_ADMIN_BAD_OPCODE: + case ENA_ADMIN_MALFORMED_REQUEST: + case ENA_ADMIN_ILLEGAL_PARAMETER: + case ENA_ADMIN_UNKNOWN_ERROR: + return -EINVAL; + } + + return 0; +} + +static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx, + struct ena_com_admin_queue *admin_queue) +{ + unsigned long flags; + u32 start_time; + int ret; + + start_time = ((u32)jiffies_to_usecs(jiffies)); + + while (comp_ctx->status == ENA_CMD_SUBMITTED) { + if ((((u32)jiffies_to_usecs(jiffies)) - start_time) > + ADMIN_CMD_TIMEOUT_US) { + pr_err("Wait for completion (polling) timeout\n"); + /* ENA didn't have any completion */ + spin_lock_irqsave(&admin_queue->q_lock, flags); + 
admin_queue->stats.no_completion++; + admin_queue->running_state = false; + spin_unlock_irqrestore(&admin_queue->q_lock, flags); + + ret = -ETIME; + goto err; + } + + spin_lock_irqsave(&admin_queue->q_lock, flags); + ena_com_handle_admin_completion(admin_queue); + spin_unlock_irqrestore(&admin_queue->q_lock, flags); + + msleep(100); + } + + if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) { + pr_err("Command was aborted\n"); + spin_lock_irqsave(&admin_queue->q_lock, flags); + admin_queue->stats.aborted_cmd++; + spin_unlock_irqrestore(&admin_queue->q_lock, flags); + ret = -ENODEV; + goto err; + } + + WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n", + comp_ctx->status); + + ret = ena_com_comp_status_to_errno(comp_ctx->comp_status); +err: + comp_ctxt_release(admin_queue, comp_ctx); + return ret; +} + +static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx, + struct ena_com_admin_queue *admin_queue) +{ + unsigned long flags; + int ret; + + wait_for_completion_timeout(&comp_ctx->wait_event, + usecs_to_jiffies(ADMIN_CMD_TIMEOUT_US)); + + /* In case the command wasn't completed find out the root cause. + * There might be 2 kinds of errors + * 1) No completion (timeout reached) + * 2) There is completion but the device didn't get any msi-x interrupt. + */ + if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) { + spin_lock_irqsave(&admin_queue->q_lock, flags); + ena_com_handle_admin_completion(admin_queue); + admin_queue->stats.no_completion++; + spin_unlock_irqrestore(&admin_queue->q_lock, flags); + + if (comp_ctx->status == ENA_CMD_COMPLETED) + pr_err("The ena device have completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n", + comp_ctx->cmd_opcode); + else + pr_err("The ena device doesn't send any completion for the admin cmd %d status %d\n", + comp_ctx->cmd_opcode, comp_ctx->status); + + admin_queue->running_state = false; + ret = -ETIME; + goto err; + } + + ret = ena_com_comp_status_to_errno(comp_ctx->comp_status); +err: + comp_ctxt_release(admin_queue, comp_ctx); + return ret; +} + +/* This method read the hardware device register through posting writes + * and waiting for response + * On timeout the function will return ENA_MMIO_READ_TIMEOUT + */ +static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset) +{ + struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; + volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp = + mmio_read->read_resp; + u32 mmio_read_reg, ret; + unsigned long flags; + int i; + + might_sleep(); + + /* If readless is disabled, perform regular read */ + if (!mmio_read->readless_supported) + return readl(ena_dev->reg_bar + offset); + + spin_lock_irqsave(&mmio_read->lock, flags); + mmio_read->seq_num++; + + read_resp->req_id = mmio_read->seq_num + 0xDEAD; + mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) & + ENA_REGS_MMIO_REG_READ_REG_OFF_MASK; + mmio_read_reg |= mmio_read->seq_num & + ENA_REGS_MMIO_REG_READ_REQ_ID_MASK; + + /* make sure read_resp->req_id get updated before the hw can write + * there + */ + wmb(); + + writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF); + + for (i = 0; i < ENA_REG_READ_TIMEOUT; i++) { + if (read_resp->req_id == mmio_read->seq_num) + break; + + udelay(1); + } + + if (unlikely(i == ENA_REG_READ_TIMEOUT)) { + pr_err("reading reg failed for timeout. 
expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n", + mmio_read->seq_num, offset, read_resp->req_id, + read_resp->reg_off); + ret = ENA_MMIO_READ_TIMEOUT; + goto err; + } + + if (read_resp->reg_off != offset) { + pr_err("Read failure: wrong offset provided"); + ret = ENA_MMIO_READ_TIMEOUT; + } else { + ret = read_resp->reg_val; + } +err: + spin_unlock_irqrestore(&mmio_read->lock, flags); + + return ret; +} + +/* There are two types to wait for completion. + * Polling mode - wait until the completion is available. + * Async mode - wait on wait queue until the completion is ready + * (or the timeout expired). + * It is expected that the IRQ called ena_com_handle_admin_completion + * to mark the completions. + */ +static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx, + struct ena_com_admin_queue *admin_queue) +{ + if (admin_queue->polling) + return ena_com_wait_and_process_admin_cq_polling(comp_ctx, + admin_queue); + + return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx, + admin_queue); +} + +static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev, + struct ena_com_io_sq *io_sq) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_admin_aq_destroy_sq_cmd destroy_cmd; + struct ena_admin_acq_destroy_sq_resp_desc destroy_resp; + u8 direction; + int ret; + + memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd)); + + if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) + direction = ENA_ADMIN_SQ_DIRECTION_TX; + else + direction = ENA_ADMIN_SQ_DIRECTION_RX; + + destroy_cmd.sq.sq_identity |= (direction << + ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) & + ENA_ADMIN_SQ_SQ_DIRECTION_MASK; + + destroy_cmd.sq.sq_idx = io_sq->idx; + destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ; + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&destroy_cmd, + sizeof(destroy_cmd), + (struct ena_admin_acq_entry *)&destroy_resp, + sizeof(destroy_resp)); + + if (unlikely(ret && (ret != -ENODEV))) + pr_err("failed to destroy io sq error: %d\n", ret); + + return ret; +} + +static void ena_com_io_queue_free(struct ena_com_dev *ena_dev, + struct ena_com_io_sq *io_sq, + struct ena_com_io_cq *io_cq) +{ + size_t size; + + if (io_cq->cdesc_addr.virt_addr) { + size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth; + + dma_free_coherent(ena_dev->dmadev, size, + io_cq->cdesc_addr.virt_addr, + io_cq->cdesc_addr.phys_addr); + + io_cq->cdesc_addr.virt_addr = NULL; + } + + if (io_sq->desc_addr.virt_addr) { + size = io_sq->desc_entry_size * io_sq->q_depth; + + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) + dma_free_coherent(ena_dev->dmadev, size, + io_sq->desc_addr.virt_addr, + io_sq->desc_addr.phys_addr); + else + devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr); + + io_sq->desc_addr.virt_addr = NULL; + } +} + +static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout, + u16 exp_state) +{ + u32 val, i; + + for (i = 0; i < timeout; i++) { + val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); + + if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) { + pr_err("Reg read timeout occurred\n"); + return -ETIME; + } + + if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) == + exp_state) + return 0; + + /* The resolution of the timeout is 100ms */ + msleep(100); + } + + return -ETIME; +} + +static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev, + enum ena_admin_aq_feature_id feature_id) +{ + u32 feature_mask = 1 << feature_id; + + /* Device 
attributes is always supported */ + if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) && + !(ena_dev->supported_features & feature_mask)) + return false; + + return true; +} + +static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev, + struct ena_admin_get_feat_resp *get_resp, + enum ena_admin_aq_feature_id feature_id, + dma_addr_t control_buf_dma_addr, + u32 control_buff_size) +{ + struct ena_com_admin_queue *admin_queue; + struct ena_admin_get_feat_cmd get_cmd; + int ret; + + if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) { + pr_info("Feature %d isn't supported\n", feature_id); + return -EPERM; + } + + memset(&get_cmd, 0x0, sizeof(get_cmd)); + admin_queue = &ena_dev->admin_queue; + + get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE; + + if (control_buff_size) + get_cmd.aq_common_descriptor.flags = + ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; + else + get_cmd.aq_common_descriptor.flags = 0; + + ret = ena_com_mem_addr_set(ena_dev, + &get_cmd.control_buffer.address, + control_buf_dma_addr); + if (unlikely(ret)) { + pr_err("memory address set failed\n"); + return ret; + } + + get_cmd.control_buffer.length = control_buff_size; + + get_cmd.feat_common.feature_id = feature_id; + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *) + &get_cmd, + sizeof(get_cmd), + (struct ena_admin_acq_entry *) + get_resp, + sizeof(*get_resp)); + + if (unlikely(ret)) + pr_err("Failed to submit get_feature command %d error: %d\n", + feature_id, ret); + + return ret; +} + +static int ena_com_get_feature(struct ena_com_dev *ena_dev, + struct ena_admin_get_feat_resp *get_resp, + enum ena_admin_aq_feature_id feature_id) +{ + return ena_com_get_feature_ex(ena_dev, + get_resp, + feature_id, + 0, + 0); +} + +static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev) +{ + struct ena_rss *rss = &ena_dev->rss; + + rss->hash_key = + dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), + &rss->hash_key_dma_addr, GFP_KERNEL); + + if (unlikely(!rss->hash_key)) + return -ENOMEM; + + return 0; +} + +static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev) +{ + struct ena_rss *rss = &ena_dev->rss; + + if (rss->hash_key) + dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), + rss->hash_key, rss->hash_key_dma_addr); + rss->hash_key = NULL; +} + +static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev) +{ + struct ena_rss *rss = &ena_dev->rss; + + rss->hash_ctrl = + dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), + &rss->hash_ctrl_dma_addr, GFP_KERNEL); + + if (unlikely(!rss->hash_ctrl)) + return -ENOMEM; + + return 0; +} + +static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev) +{ + struct ena_rss *rss = &ena_dev->rss; + + if (rss->hash_ctrl) + dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), + rss->hash_ctrl, rss->hash_ctrl_dma_addr); + rss->hash_ctrl = NULL; +} + +static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev, + u16 log_size) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_get_feat_resp get_resp; + size_t tbl_size; + int ret; + + ret = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG); + if (unlikely(ret)) + return ret; + + if ((get_resp.u.ind_table.min_size > log_size) || + (get_resp.u.ind_table.max_size < log_size)) { + pr_err("indirect table size doesn't fit. 
requested size: %d while min is:%d and max %d\n", + 1 << log_size, 1 << get_resp.u.ind_table.min_size, + 1 << get_resp.u.ind_table.max_size); + return -EINVAL; + } + + tbl_size = (1ULL << log_size) * + sizeof(struct ena_admin_rss_ind_table_entry); + + rss->rss_ind_tbl = + dma_zalloc_coherent(ena_dev->dmadev, tbl_size, + &rss->rss_ind_tbl_dma_addr, GFP_KERNEL); + if (unlikely(!rss->rss_ind_tbl)) + goto mem_err1; + + tbl_size = (1ULL << log_size) * sizeof(u16); + rss->host_rss_ind_tbl = + devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL); + if (unlikely(!rss->host_rss_ind_tbl)) + goto mem_err2; + + rss->tbl_log_size = log_size; + + return 0; + +mem_err2: + tbl_size = (1ULL << log_size) * + sizeof(struct ena_admin_rss_ind_table_entry); + + dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, + rss->rss_ind_tbl_dma_addr); + rss->rss_ind_tbl = NULL; +mem_err1: + rss->tbl_log_size = 0; + return -ENOMEM; +} + +static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev) +{ + struct ena_rss *rss = &ena_dev->rss; + size_t tbl_size = (1ULL << rss->tbl_log_size) * + sizeof(struct ena_admin_rss_ind_table_entry); + + if (rss->rss_ind_tbl) + dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, + rss->rss_ind_tbl_dma_addr); + rss->rss_ind_tbl = NULL; + + if (rss->host_rss_ind_tbl) + devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl); + rss->host_rss_ind_tbl = NULL; +} + +static int ena_com_create_io_sq(struct ena_com_dev *ena_dev, + struct ena_com_io_sq *io_sq, u16 cq_idx) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_admin_aq_create_sq_cmd create_cmd; + struct ena_admin_acq_create_sq_resp_desc cmd_completion; + u8 direction; + int ret; + + memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_sq_cmd)); + + create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ; + + if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) + direction = ENA_ADMIN_SQ_DIRECTION_TX; + else + direction = ENA_ADMIN_SQ_DIRECTION_RX; + + create_cmd.sq_identity |= (direction << + ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) & + ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK; + + create_cmd.sq_caps_2 |= io_sq->mem_queue_type & + ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK; + + create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC << + ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) & + ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK; + + create_cmd.sq_caps_3 |= + ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK; + + create_cmd.cq_idx = cq_idx; + create_cmd.sq_depth = io_sq->q_depth; + + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { + ret = ena_com_mem_addr_set(ena_dev, + &create_cmd.sq_ba, + io_sq->desc_addr.phys_addr); + if (unlikely(ret)) { + pr_err("memory address set failed\n"); + return ret; + } + } + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&create_cmd, + sizeof(create_cmd), + (struct ena_admin_acq_entry *)&cmd_completion, + sizeof(cmd_completion)); + if (unlikely(ret)) { + pr_err("Failed to create IO SQ. 
error: %d\n", ret); + return ret; + } + + io_sq->idx = cmd_completion.sq_idx; + + io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + + (uintptr_t)cmd_completion.sq_doorbell_offset); + + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { + io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar + + cmd_completion.llq_headers_offset); + + io_sq->desc_addr.pbuf_dev_addr = + (u8 __iomem *)((uintptr_t)ena_dev->mem_bar + + cmd_completion.llq_descriptors_offset); + } + + pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth); + + return ret; +} + +static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_com_io_sq *io_sq; + u16 qid; + int i; + + for (i = 0; i < 1 << rss->tbl_log_size; i++) { + qid = rss->host_rss_ind_tbl[i]; + if (qid >= ENA_TOTAL_NUM_QUEUES) + return -EINVAL; + + io_sq = &ena_dev->io_sq_queues[qid]; + + if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX) + return -EINVAL; + + rss->rss_ind_tbl[i].cq_idx = io_sq->idx; + } + + return 0; +} + +static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev) +{ + u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 }; + struct ena_rss *rss = &ena_dev->rss; + u8 idx; + u16 i; + + for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++) + dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i; + + for (i = 0; i < 1 << rss->tbl_log_size; i++) { + if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES) + return -EINVAL; + idx = (u8)rss->rss_ind_tbl[i].cq_idx; + + if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES) + return -EINVAL; + + rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx]; + } + + return 0; +} + +static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev) +{ + size_t size; + + size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS; + + ena_dev->intr_moder_tbl = + devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); + if (!ena_dev->intr_moder_tbl) + return -ENOMEM; + + ena_com_config_default_interrupt_moderation_table(ena_dev); + + return 0; +} + +static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev, + u16 intr_delay_resolution) +{ + struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; + unsigned int i; + + if (!intr_delay_resolution) { + pr_err("Illegal intr_delay_resolution provided. 
Going to use default 1 usec resolution\n"); + intr_delay_resolution = 1; + } + ena_dev->intr_delay_resolution = intr_delay_resolution; + + /* update Rx */ + for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++) + intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution; + + /* update Tx */ + ena_dev->intr_moder_tx_interval /= intr_delay_resolution; +} + +/*****************************************************************************/ +/******************************* API ******************************/ +/*****************************************************************************/ + +int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue, + struct ena_admin_aq_entry *cmd, + size_t cmd_size, + struct ena_admin_acq_entry *comp, + size_t comp_size) +{ + struct ena_comp_ctx *comp_ctx; + int ret; + + comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size, + comp, comp_size); + if (unlikely(IS_ERR(comp_ctx))) { + pr_err("Failed to submit command [%ld]\n", PTR_ERR(comp_ctx)); + return PTR_ERR(comp_ctx); + } + + ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue); + if (unlikely(ret)) { + if (admin_queue->running_state) + pr_err("Failed to process command. ret = %d\n", ret); + else + pr_debug("Failed to process command. ret = %d\n", ret); + } + return ret; +} + +int ena_com_create_io_cq(struct ena_com_dev *ena_dev, + struct ena_com_io_cq *io_cq) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_admin_aq_create_cq_cmd create_cmd; + struct ena_admin_acq_create_cq_resp_desc cmd_completion; + int ret; + + memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_cq_cmd)); + + create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ; + + create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) & + ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK; + create_cmd.cq_caps_1 |= + ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK; + + create_cmd.msix_vector = io_cq->msix_vector; + create_cmd.cq_depth = io_cq->q_depth; + + ret = ena_com_mem_addr_set(ena_dev, + &create_cmd.cq_ba, + io_cq->cdesc_addr.phys_addr); + if (unlikely(ret)) { + pr_err("memory address set failed\n"); + return ret; + } + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&create_cmd, + sizeof(create_cmd), + (struct ena_admin_acq_entry *)&cmd_completion, + sizeof(cmd_completion)); + if (unlikely(ret)) { + pr_err("Failed to create IO CQ. 
error: %d\n", ret); + return ret; + } + + io_cq->idx = cmd_completion.cq_idx; + + io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + + cmd_completion.cq_interrupt_unmask_register_offset); + + if (cmd_completion.cq_head_db_register_offset) + io_cq->cq_head_db_reg = + (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + + cmd_completion.cq_head_db_register_offset); + + if (cmd_completion.numa_node_register_offset) + io_cq->numa_node_cfg_reg = + (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + + cmd_completion.numa_node_register_offset); + + pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth); + + return ret; +} + +int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid, + struct ena_com_io_sq **io_sq, + struct ena_com_io_cq **io_cq) +{ + if (qid >= ENA_TOTAL_NUM_QUEUES) { + pr_err("Invalid queue number %d but the max is %d\n", qid, + ENA_TOTAL_NUM_QUEUES); + return -EINVAL; + } + + *io_sq = &ena_dev->io_sq_queues[qid]; + *io_cq = &ena_dev->io_cq_queues[qid]; + + return 0; +} + +void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_comp_ctx *comp_ctx; + u16 i; + + if (!admin_queue->comp_ctx) + return; + + for (i = 0; i < admin_queue->q_depth; i++) { + comp_ctx = get_comp_ctxt(admin_queue, i, false); + if (unlikely(!comp_ctx)) + break; + + comp_ctx->status = ENA_CMD_ABORTED; + + complete(&comp_ctx->wait_event); + } +} + +void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + unsigned long flags; + + spin_lock_irqsave(&admin_queue->q_lock, flags); + while (atomic_read(&admin_queue->outstanding_cmds) != 0) { + spin_unlock_irqrestore(&admin_queue->q_lock, flags); + msleep(20); + spin_lock_irqsave(&admin_queue->q_lock, flags); + } + spin_unlock_irqrestore(&admin_queue->q_lock, flags); +} + +int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev, + struct ena_com_io_cq *io_cq) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_admin_aq_destroy_cq_cmd destroy_cmd; + struct ena_admin_acq_destroy_cq_resp_desc destroy_resp; + int ret; + + memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd)); + + destroy_cmd.cq_idx = io_cq->idx; + destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ; + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&destroy_cmd, + sizeof(destroy_cmd), + (struct ena_admin_acq_entry *)&destroy_resp, + sizeof(destroy_resp)); + + if (unlikely(ret && (ret != -ENODEV))) + pr_err("Failed to destroy IO CQ. 
error: %d\n", ret); + + return ret; +} + +bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev) +{ + return ena_dev->admin_queue.running_state; +} + +void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + unsigned long flags; + + spin_lock_irqsave(&admin_queue->q_lock, flags); + ena_dev->admin_queue.running_state = state; + spin_unlock_irqrestore(&admin_queue->q_lock, flags); +} + +void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev) +{ + u16 depth = ena_dev->aenq.q_depth; + + WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n"); + + /* Init head_db to mark that all entries in the queue + * are initially available + */ + writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); +} + +int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag) +{ + struct ena_com_admin_queue *admin_queue; + struct ena_admin_set_feat_cmd cmd; + struct ena_admin_set_feat_resp resp; + struct ena_admin_get_feat_resp get_resp; + int ret; + + ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG); + if (ret) { + pr_info("Can't get aenq configuration\n"); + return ret; + } + + if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) { + pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n", + get_resp.u.aenq.supported_groups, groups_flag); + return -EPERM; + } + + memset(&cmd, 0x0, sizeof(cmd)); + admin_queue = &ena_dev->admin_queue; + + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; + cmd.aq_common_descriptor.flags = 0; + cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG; + cmd.u.aenq.enabled_groups = groups_flag; + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct ena_admin_acq_entry *)&resp, + sizeof(resp)); + + if (unlikely(ret)) + pr_err("Failed to config AENQ ret: %d\n", ret); + + return ret; +} + +int ena_com_get_dma_width(struct ena_com_dev *ena_dev) +{ + u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF); + int width; + + if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) { + pr_err("Reg read timeout occurred\n"); + return -ETIME; + } + + width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >> + ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT; + + pr_debug("ENA dma width: %d\n", width); + + if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) { + pr_err("DMA width illegal value: %d\n", width); + return -EINVAL; + } + + ena_dev->dma_addr_bits = width; + + return width; +} + +int ena_com_validate_version(struct ena_com_dev *ena_dev) +{ + u32 ver; + u32 ctrl_ver; + u32 ctrl_ver_masked; + + /* Make sure the ENA version and the controller version are at least + * as the driver expects + */ + ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF); + ctrl_ver = ena_com_reg_bar_read32(ena_dev, + ENA_REGS_CONTROLLER_VERSION_OFF); + + if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || + (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) { + pr_err("Reg read timeout occurred\n"); + return -ETIME; + } + + pr_info("ena device version: %d.%d\n", + (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> + ENA_REGS_VERSION_MAJOR_VERSION_SHIFT, + ver & ENA_REGS_VERSION_MINOR_VERSION_MASK); + + if (ver < MIN_ENA_VER) { + pr_err("ENA version is lower than the minimal version the driver supports\n"); + return -1; + } + + pr_info("ena controller version: %d.%d.%d implementation version %d\n", + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >> + 
ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT, + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >> + ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT, + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK), + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >> + ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT); + + ctrl_ver_masked = + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) | + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) | + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK); + + /* Validate the ctrl version without the implementation ID */ + if (ctrl_ver_masked < MIN_ENA_CTRL_VER) { + pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n"); + return -1; + } + + return 0; +} + +void ena_com_admin_destroy(struct ena_com_dev *ena_dev) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_com_admin_cq *cq = &admin_queue->cq; + struct ena_com_admin_sq *sq = &admin_queue->sq; + struct ena_com_aenq *aenq = &ena_dev->aenq; + u16 size; + + if (admin_queue->comp_ctx) + devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx); + admin_queue->comp_ctx = NULL; + size = ADMIN_SQ_SIZE(admin_queue->q_depth); + if (sq->entries) + dma_free_coherent(ena_dev->dmadev, size, sq->entries, + sq->dma_addr); + sq->entries = NULL; + + size = ADMIN_CQ_SIZE(admin_queue->q_depth); + if (cq->entries) + dma_free_coherent(ena_dev->dmadev, size, cq->entries, + cq->dma_addr); + cq->entries = NULL; + + size = ADMIN_AENQ_SIZE(aenq->q_depth); + if (ena_dev->aenq.entries) + dma_free_coherent(ena_dev->dmadev, size, aenq->entries, + aenq->dma_addr); + aenq->entries = NULL; +} + +void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling) +{ + ena_dev->admin_queue.polling = polling; +} + +int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev) +{ + struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; + + spin_lock_init(&mmio_read->lock); + mmio_read->read_resp = + dma_zalloc_coherent(ena_dev->dmadev, + sizeof(*mmio_read->read_resp), + &mmio_read->read_resp_dma_addr, GFP_KERNEL); + if (unlikely(!mmio_read->read_resp)) + return -ENOMEM; + + ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); + + mmio_read->read_resp->req_id = 0x0; + mmio_read->seq_num = 0x0; + mmio_read->readless_supported = true; + + return 0; +} + +void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported) +{ + struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; + + mmio_read->readless_supported = readless_supported; +} + +void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev) +{ + struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; + + writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF); + writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF); + + dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp), + mmio_read->read_resp, mmio_read->read_resp_dma_addr); + + mmio_read->read_resp = NULL; +} + +void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev) +{ + struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; + u32 addr_low, addr_high; + + addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr); + addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr); + + writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF); + writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF); +} + +int ena_com_admin_init(struct ena_com_dev *ena_dev, + struct ena_aenq_handlers 
*aenq_handlers, + bool init_spinlock) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high; + int ret; + + dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); + + if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) { + pr_err("Reg read timeout occurred\n"); + return -ETIME; + } + + if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) { + pr_err("Device isn't ready, abort com init\n"); + return -ENODEV; + } + + admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH; + + admin_queue->q_dmadev = ena_dev->dmadev; + admin_queue->polling = false; + admin_queue->curr_cmd_id = 0; + + atomic_set(&admin_queue->outstanding_cmds, 0); + + if (init_spinlock) + spin_lock_init(&admin_queue->q_lock); + + ret = ena_com_init_comp_ctxt(admin_queue); + if (ret) + goto error; + + ret = ena_com_admin_init_sq(admin_queue); + if (ret) + goto error; + + ret = ena_com_admin_init_cq(admin_queue); + if (ret) + goto error; + + admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + + ENA_REGS_AQ_DB_OFF); + + addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr); + addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr); + + writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF); + writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF); + + addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr); + addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr); + + writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF); + writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF); + + aq_caps = 0; + aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK; + aq_caps |= (sizeof(struct ena_admin_aq_entry) << + ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) & + ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK; + + acq_caps = 0; + acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK; + acq_caps |= (sizeof(struct ena_admin_acq_entry) << + ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) & + ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK; + + writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF); + writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF); + ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers); + if (ret) + goto error; + + admin_queue->running_state = true; + + return 0; +error: + ena_com_admin_destroy(ena_dev); + + return ret; +} + +int ena_com_create_io_queue(struct ena_com_dev *ena_dev, + struct ena_com_create_io_ctx *ctx) +{ + struct ena_com_io_sq *io_sq; + struct ena_com_io_cq *io_cq; + int ret; + + if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) { + pr_err("Qid (%d) is bigger than max num of queues (%d)\n", + ctx->qid, ENA_TOTAL_NUM_QUEUES); + return -EINVAL; + } + + io_sq = &ena_dev->io_sq_queues[ctx->qid]; + io_cq = &ena_dev->io_cq_queues[ctx->qid]; + + memset(io_sq, 0x0, sizeof(struct ena_com_io_sq)); + memset(io_cq, 0x0, sizeof(struct ena_com_io_cq)); + + /* Init CQ */ + io_cq->q_depth = ctx->queue_size; + io_cq->direction = ctx->direction; + io_cq->qid = ctx->qid; + + io_cq->msix_vector = ctx->msix_vector; + + io_sq->q_depth = ctx->queue_size; + io_sq->direction = ctx->direction; + io_sq->qid = ctx->qid; + + io_sq->mem_queue_type = ctx->mem_queue_type; + + if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) + /* header length is limited to 8 bits */ + io_sq->tx_max_header_size = + min_t(u32, ena_dev->tx_max_header_size, SZ_256); + + ret = ena_com_init_io_sq(ena_dev, ctx, io_sq); + if (ret) + goto error; + ret = ena_com_init_io_cq(ena_dev, ctx, io_cq); + if (ret) + goto error; + + ret = 
ena_com_create_io_cq(ena_dev, io_cq); + if (ret) + goto error; + + ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx); + if (ret) + goto destroy_io_cq; + + return 0; + +destroy_io_cq: + ena_com_destroy_io_cq(ena_dev, io_cq); +error: + ena_com_io_queue_free(ena_dev, io_sq, io_cq); + return ret; +} + +void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid) +{ + struct ena_com_io_sq *io_sq; + struct ena_com_io_cq *io_cq; + + if (qid >= ENA_TOTAL_NUM_QUEUES) { + pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid, + ENA_TOTAL_NUM_QUEUES); + return; + } + + io_sq = &ena_dev->io_sq_queues[qid]; + io_cq = &ena_dev->io_cq_queues[qid]; + + ena_com_destroy_io_sq(ena_dev, io_sq); + ena_com_destroy_io_cq(ena_dev, io_cq); + + ena_com_io_queue_free(ena_dev, io_sq, io_cq); +} + +int ena_com_get_link_params(struct ena_com_dev *ena_dev, + struct ena_admin_get_feat_resp *resp) +{ + return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG); +} + +int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, + struct ena_com_dev_get_features_ctx *get_feat_ctx) +{ + struct ena_admin_get_feat_resp get_resp; + int rc; + + rc = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_DEVICE_ATTRIBUTES); + if (rc) + return rc; + + memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr, + sizeof(get_resp.u.dev_attr)); + ena_dev->supported_features = get_resp.u.dev_attr.supported_features; + + rc = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_MAX_QUEUES_NUM); + if (rc) + return rc; + + memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue, + sizeof(get_resp.u.max_queue)); + ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size; + + rc = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_AENQ_CONFIG); + if (rc) + return rc; + + memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq, + sizeof(get_resp.u.aenq)); + + rc = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_STATELESS_OFFLOAD_CONFIG); + if (rc) + return rc; + + memcpy(&get_feat_ctx->offload, &get_resp.u.offload, + sizeof(get_resp.u.offload)); + + return 0; +} + +void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev) +{ + ena_com_handle_admin_completion(&ena_dev->admin_queue); +} + +/* ena_handle_specific_aenq_event: + * return the handler that is relevant to the specific event group + */ +static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev, + u16 group) +{ + struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers; + + if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group]) + return aenq_handlers->handlers[group]; + + return aenq_handlers->unimplemented_handler; +} + +/* ena_aenq_intr_handler: + * handles the aenq incoming events. + * pop events from the queue and apply the specific handler + */ +void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) +{ + struct ena_admin_aenq_entry *aenq_e; + struct ena_admin_aenq_common_desc *aenq_common; + struct ena_com_aenq *aenq = &dev->aenq; + ena_aenq_handler handler_cb; + u16 masked_head, processed = 0; + u8 phase; + + masked_head = aenq->head & (aenq->q_depth - 1); + phase = aenq->phase; + aenq_e = &aenq->entries[masked_head]; /* Get first entry */ + aenq_common = &aenq_e->aenq_common_desc; + + /* Go over all the events */ + while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == + phase) { + pr_debug("AENQ! 
Group[%x] Syndrom[%x] timestamp: [%llus]\n", + aenq_common->group, aenq_common->syndrom, + (u64)aenq_common->timestamp_low + + ((u64)aenq_common->timestamp_high << 32)); + + /* Handle specific event*/ + handler_cb = ena_com_get_specific_aenq_cb(dev, + aenq_common->group); + handler_cb(data, aenq_e); /* call the actual event handler*/ + + /* Get next event entry */ + masked_head++; + processed++; + + if (unlikely(masked_head == aenq->q_depth)) { + masked_head = 0; + phase = !phase; + } + aenq_e = &aenq->entries[masked_head]; + aenq_common = &aenq_e->aenq_common_desc; + } + + aenq->head += processed; + aenq->phase = phase; + + /* Don't update aenq doorbell if there weren't any processed events */ + if (!processed) + return; + + /* write the aenq doorbell after all AENQ descriptors were read */ + mb(); + writel((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); +} + +int ena_com_dev_reset(struct ena_com_dev *ena_dev) +{ + u32 stat, timeout, cap, reset_val; + int rc; + + stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); + cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF); + + if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) || + (cap == ENA_MMIO_READ_TIMEOUT))) { + pr_err("Reg read32 timeout occurred\n"); + return -ETIME; + } + + if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) { + pr_err("Device isn't ready, can't reset device\n"); + return -EINVAL; + } + + timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >> + ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT; + if (timeout == 0) { + pr_err("Invalid timeout value\n"); + return -EINVAL; + } + + /* start reset */ + reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK; + writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); + + /* Write again the MMIO read request address */ + ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); + + rc = wait_for_reset_state(ena_dev, timeout, + ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK); + if (rc != 0) { + pr_err("Reset indication didn't turn on\n"); + return rc; + } + + /* reset done */ + writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); + rc = wait_for_reset_state(ena_dev, timeout, 0); + if (rc != 0) { + pr_err("Reset indication didn't turn off\n"); + return rc; + } + + return 0; +} + +static int ena_get_dev_stats(struct ena_com_dev *ena_dev, + struct ena_com_stats_ctx *ctx, + enum ena_admin_get_stats_type type) +{ + struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd; + struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp; + struct ena_com_admin_queue *admin_queue; + int ret; + + admin_queue = &ena_dev->admin_queue; + + get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS; + get_cmd->aq_common_descriptor.flags = 0; + get_cmd->type = type; + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)get_cmd, + sizeof(*get_cmd), + (struct ena_admin_acq_entry *)get_resp, + sizeof(*get_resp)); + + if (unlikely(ret)) + pr_err("Failed to get stats. 
error: %d\n", ret); + + return ret; +} + +int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev, + struct ena_admin_basic_stats *stats) +{ + struct ena_com_stats_ctx ctx; + int ret; + + memset(&ctx, 0x0, sizeof(ctx)); + ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC); + if (likely(ret == 0)) + memcpy(stats, &ctx.get_resp.basic_stats, + sizeof(ctx.get_resp.basic_stats)); + + return ret; +} + +int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu) +{ + struct ena_com_admin_queue *admin_queue; + struct ena_admin_set_feat_cmd cmd; + struct ena_admin_set_feat_resp resp; + int ret; + + if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) { + pr_info("Feature %d isn't supported\n", ENA_ADMIN_MTU); + return -EPERM; + } + + memset(&cmd, 0x0, sizeof(cmd)); + admin_queue = &ena_dev->admin_queue; + + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; + cmd.aq_common_descriptor.flags = 0; + cmd.feat_common.feature_id = ENA_ADMIN_MTU; + cmd.u.mtu.mtu = mtu; + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct ena_admin_acq_entry *)&resp, + sizeof(resp)); + + if (unlikely(ret)) + pr_err("Failed to set mtu %d. error: %d\n", mtu, ret); + + return ret; +} + +int ena_com_get_offload_settings(struct ena_com_dev *ena_dev, + struct ena_admin_feature_offload_desc *offload) +{ + int ret; + struct ena_admin_get_feat_resp resp; + + ret = ena_com_get_feature(ena_dev, &resp, + ENA_ADMIN_STATELESS_OFFLOAD_CONFIG); + if (unlikely(ret)) { + pr_err("Failed to get offload capabilities %d\n", ret); + return ret; + } + + memcpy(offload, &resp.u.offload, sizeof(resp.u.offload)); + + return 0; +} + +int ena_com_set_hash_function(struct ena_com_dev *ena_dev) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_set_feat_cmd cmd; + struct ena_admin_set_feat_resp resp; + struct ena_admin_get_feat_resp get_resp; + int ret; + + if (!ena_com_check_supported_feature_id(ena_dev, + ENA_ADMIN_RSS_HASH_FUNCTION)) { + pr_info("Feature %d isn't supported\n", + ENA_ADMIN_RSS_HASH_FUNCTION); + return -EPERM; + } + + /* Validate hash function is supported */ + ret = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_RSS_HASH_FUNCTION); + if (unlikely(ret)) + return ret; + + if (get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func)) { + pr_err("Func hash %d isn't supported by device, abort\n", + rss->hash_func); + return -EPERM; + } + + memset(&cmd, 0x0, sizeof(cmd)); + + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; + cmd.aq_common_descriptor.flags = + ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; + cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION; + cmd.u.flow_hash_func.init_val = rss->hash_init_val; + cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func; + + ret = ena_com_mem_addr_set(ena_dev, + &cmd.control_buffer.address, + rss->hash_key_dma_addr); + if (unlikely(ret)) { + pr_err("memory address set failed\n"); + return ret; + } + + cmd.control_buffer.length = sizeof(*rss->hash_key); + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct ena_admin_acq_entry *)&resp, + sizeof(resp)); + if (unlikely(ret)) { + pr_err("Failed to set hash function %d. 
error: %d\n", + rss->hash_func, ret); + return -EINVAL; + } + + return 0; +} + +int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, + enum ena_admin_hash_functions func, + const u8 *key, u16 key_len, u32 init_val) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_get_feat_resp get_resp; + struct ena_admin_feature_rss_flow_hash_control *hash_key = + rss->hash_key; + int rc; + + /* Make sure size is a mult of DWs */ + if (unlikely(key_len & 0x3)) + return -EINVAL; + + rc = ena_com_get_feature_ex(ena_dev, &get_resp, + ENA_ADMIN_RSS_HASH_FUNCTION, + rss->hash_key_dma_addr, + sizeof(*rss->hash_key)); + if (unlikely(rc)) + return rc; + + if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) { + pr_err("Flow hash function %d isn't supported\n", func); + return -EPERM; + } + + switch (func) { + case ENA_ADMIN_TOEPLITZ: + if (key_len > sizeof(hash_key->key)) { + pr_err("key len (%hu) is bigger than the max supported (%zu)\n", + key_len, sizeof(hash_key->key)); + return -EINVAL; + } + + memcpy(hash_key->key, key, key_len); + rss->hash_init_val = init_val; + hash_key->keys_num = key_len >> 2; + break; + case ENA_ADMIN_CRC32: + rss->hash_init_val = init_val; + break; + default: + pr_err("Invalid hash function (%d)\n", func); + return -EINVAL; + } + + rc = ena_com_set_hash_function(ena_dev); + + /* Restore the old function */ + if (unlikely(rc)) + ena_com_get_hash_function(ena_dev, NULL, NULL); + + return rc; +} + +int ena_com_get_hash_function(struct ena_com_dev *ena_dev, + enum ena_admin_hash_functions *func, + u8 *key) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_get_feat_resp get_resp; + struct ena_admin_feature_rss_flow_hash_control *hash_key = + rss->hash_key; + int rc; + + rc = ena_com_get_feature_ex(ena_dev, &get_resp, + ENA_ADMIN_RSS_HASH_FUNCTION, + rss->hash_key_dma_addr, + sizeof(*rss->hash_key)); + if (unlikely(rc)) + return rc; + + rss->hash_func = get_resp.u.flow_hash_func.selected_func; + if (func) + *func = rss->hash_func; + + if (key) + memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2); + + return 0; +} + +int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev, + enum ena_admin_flow_hash_proto proto, + u16 *fields) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_get_feat_resp get_resp; + int rc; + + rc = ena_com_get_feature_ex(ena_dev, &get_resp, + ENA_ADMIN_RSS_HASH_INPUT, + rss->hash_ctrl_dma_addr, + sizeof(*rss->hash_ctrl)); + if (unlikely(rc)) + return rc; + + if (fields) + *fields = rss->hash_ctrl->selected_fields[proto].fields; + + return 0; +} + +int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl; + struct ena_admin_set_feat_cmd cmd; + struct ena_admin_set_feat_resp resp; + int ret; + + if (!ena_com_check_supported_feature_id(ena_dev, + ENA_ADMIN_RSS_HASH_INPUT)) { + pr_info("Feature %d isn't supported\n", ENA_ADMIN_RSS_HASH_INPUT); + return -EPERM; + } + + memset(&cmd, 0x0, sizeof(cmd)); + + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; + cmd.aq_common_descriptor.flags = + ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; + cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT; + cmd.u.flow_hash_input.enabled_input_sort = + ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK | + ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK; + + ret = ena_com_mem_addr_set(ena_dev, + &cmd.control_buffer.address, + 
rss->hash_ctrl_dma_addr); + if (unlikely(ret)) { + pr_err("memory address set failed\n"); + return ret; + } + cmd.control_buffer.length = sizeof(*hash_ctrl); + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct ena_admin_acq_entry *)&resp, + sizeof(resp)); + if (unlikely(ret)) + pr_err("Failed to set hash input. error: %d\n", ret); + + return ret; +} + +int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_feature_rss_hash_control *hash_ctrl = + rss->hash_ctrl; + u16 available_fields = 0; + int rc, i; + + /* Get the supported hash input */ + rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL); + if (unlikely(rc)) + return rc; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields = + ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | + ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields = + ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | + ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields = + ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | + ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields = + ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | + ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields = + ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields = + ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields = + ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields = + ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA; + + for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) { + available_fields = hash_ctrl->selected_fields[i].fields & + hash_ctrl->supported_fields[i].fields; + if (available_fields != hash_ctrl->selected_fields[i].fields) { + pr_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n", + i, hash_ctrl->supported_fields[i].fields, + hash_ctrl->selected_fields[i].fields); + return -EPERM; + } + } + + rc = ena_com_set_hash_ctrl(ena_dev); + + /* In case of failure, restore the old hash ctrl */ + if (unlikely(rc)) + ena_com_get_hash_ctrl(ena_dev, 0, NULL); + + return rc; +} + +int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev, + enum ena_admin_flow_hash_proto proto, + u16 hash_fields) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl; + u16 supported_fields; + int rc; + + if (proto >= ENA_ADMIN_RSS_PROTO_NUM) { + pr_err("Invalid proto num (%u)\n", proto); + return -EINVAL; + } + + /* Get the ctrl table */ + rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL); + if (unlikely(rc)) + return rc; + + /* Make sure all the fields are supported */ + supported_fields = hash_ctrl->supported_fields[proto].fields; + if ((hash_fields & supported_fields) != hash_fields) { + pr_err("proto %d doesn't support the required fields %x. 
supports only: %x\n", + proto, hash_fields, supported_fields); + } + + hash_ctrl->selected_fields[proto].fields = hash_fields; + + rc = ena_com_set_hash_ctrl(ena_dev); + + /* In case of failure, restore the old hash ctrl */ + if (unlikely(rc)) + ena_com_get_hash_ctrl(ena_dev, 0, NULL); + + return 0; +} + +int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev, + u16 entry_idx, u16 entry_value) +{ + struct ena_rss *rss = &ena_dev->rss; + + if (unlikely(entry_idx >= (1 << rss->tbl_log_size))) + return -EINVAL; + + if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES))) + return -EINVAL; + + rss->host_rss_ind_tbl[entry_idx] = entry_value; + + return 0; +} + +int ena_com_indirect_table_set(struct ena_com_dev *ena_dev) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_set_feat_cmd cmd; + struct ena_admin_set_feat_resp resp; + int ret; + + if (!ena_com_check_supported_feature_id( + ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) { + pr_info("Feature %d isn't supported\n", + ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG); + return -EPERM; + } + + ret = ena_com_ind_tbl_convert_to_device(ena_dev); + if (ret) { + pr_err("Failed to convert host indirection table to device table\n"); + return ret; + } + + memset(&cmd, 0x0, sizeof(cmd)); + + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; + cmd.aq_common_descriptor.flags = + ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; + cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG; + cmd.u.ind_table.size = rss->tbl_log_size; + cmd.u.ind_table.inline_index = 0xFFFFFFFF; + + ret = ena_com_mem_addr_set(ena_dev, + &cmd.control_buffer.address, + rss->rss_ind_tbl_dma_addr); + if (unlikely(ret)) { + pr_err("memory address set failed\n"); + return ret; + } + + cmd.control_buffer.length = (1ULL << rss->tbl_log_size) * + sizeof(struct ena_admin_rss_ind_table_entry); + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct ena_admin_acq_entry *)&resp, + sizeof(resp)); + + if (unlikely(ret)) + pr_err("Failed to set indirect table. 
error: %d\n", ret); + + return ret; +} + +int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_get_feat_resp get_resp; + u32 tbl_size; + int i, rc; + + tbl_size = (1ULL << rss->tbl_log_size) * + sizeof(struct ena_admin_rss_ind_table_entry); + + rc = ena_com_get_feature_ex(ena_dev, &get_resp, + ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, + rss->rss_ind_tbl_dma_addr, + tbl_size); + if (unlikely(rc)) + return rc; + + if (!ind_tbl) + return 0; + + rc = ena_com_ind_tbl_convert_from_device(ena_dev); + if (unlikely(rc)) + return rc; + + for (i = 0; i < (1 << rss->tbl_log_size); i++) + ind_tbl[i] = rss->host_rss_ind_tbl[i]; + + return 0; +} + +int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size) +{ + int rc; + + memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss)); + + rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size); + if (unlikely(rc)) + goto err_indr_tbl; + + rc = ena_com_hash_key_allocate(ena_dev); + if (unlikely(rc)) + goto err_hash_key; + + rc = ena_com_hash_ctrl_init(ena_dev); + if (unlikely(rc)) + goto err_hash_ctrl; + + return 0; + +err_hash_ctrl: + ena_com_hash_key_destroy(ena_dev); +err_hash_key: + ena_com_indirect_table_destroy(ena_dev); +err_indr_tbl: + + return rc; +} + +void ena_com_rss_destroy(struct ena_com_dev *ena_dev) +{ + ena_com_indirect_table_destroy(ena_dev); + ena_com_hash_key_destroy(ena_dev); + ena_com_hash_ctrl_destroy(ena_dev); + + memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss)); +} + +int ena_com_allocate_host_info(struct ena_com_dev *ena_dev) +{ + struct ena_host_attribute *host_attr = &ena_dev->host_attr; + + host_attr->host_info = + dma_zalloc_coherent(ena_dev->dmadev, SZ_4K, + &host_attr->host_info_dma_addr, GFP_KERNEL); + if (unlikely(!host_attr->host_info)) + return -ENOMEM; + + return 0; +} + +int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev, + u32 debug_area_size) +{ + struct ena_host_attribute *host_attr = &ena_dev->host_attr; + + host_attr->debug_area_virt_addr = + dma_zalloc_coherent(ena_dev->dmadev, debug_area_size, + &host_attr->debug_area_dma_addr, GFP_KERNEL); + if (unlikely(!host_attr->debug_area_virt_addr)) { + host_attr->debug_area_size = 0; + return -ENOMEM; + } + + host_attr->debug_area_size = debug_area_size; + + return 0; +} + +void ena_com_delete_host_info(struct ena_com_dev *ena_dev) +{ + struct ena_host_attribute *host_attr = &ena_dev->host_attr; + + if (host_attr->host_info) { + dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info, + host_attr->host_info_dma_addr); + host_attr->host_info = NULL; + } +} + +void ena_com_delete_debug_area(struct ena_com_dev *ena_dev) +{ + struct ena_host_attribute *host_attr = &ena_dev->host_attr; + + if (host_attr->debug_area_virt_addr) { + dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size, + host_attr->debug_area_virt_addr, + host_attr->debug_area_dma_addr); + host_attr->debug_area_virt_addr = NULL; + } +} + +int ena_com_set_host_attributes(struct ena_com_dev *ena_dev) +{ + struct ena_host_attribute *host_attr = &ena_dev->host_attr; + struct ena_com_admin_queue *admin_queue; + struct ena_admin_set_feat_cmd cmd; + struct ena_admin_set_feat_resp resp; + + int ret; + + if (!ena_com_check_supported_feature_id(ena_dev, + ENA_ADMIN_HOST_ATTR_CONFIG)) { + pr_warn("Set host attribute isn't supported\n"); + return -EPERM; + } + + memset(&cmd, 0x0, sizeof(cmd)); + admin_queue = &ena_dev->admin_queue; + + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; + 
cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG; + + ret = ena_com_mem_addr_set(ena_dev, + &cmd.u.host_attr.debug_ba, + host_attr->debug_area_dma_addr); + if (unlikely(ret)) { + pr_err("memory address set failed\n"); + return ret; + } + + ret = ena_com_mem_addr_set(ena_dev, + &cmd.u.host_attr.os_info_ba, + host_attr->host_info_dma_addr); + if (unlikely(ret)) { + pr_err("memory address set failed\n"); + return ret; + } + + cmd.u.host_attr.debug_area_size = host_attr->debug_area_size; + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct ena_admin_acq_entry *)&resp, + sizeof(resp)); + + if (unlikely(ret)) + pr_err("Failed to set host attributes: %d\n", ret); + + return ret; +} + +/* Interrupt moderation */ +bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev) +{ + return ena_com_check_supported_feature_id(ena_dev, + ENA_ADMIN_INTERRUPT_MODERATION); +} + +int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, + u32 tx_coalesce_usecs) +{ + if (!ena_dev->intr_delay_resolution) { + pr_err("Illegal interrupt delay granularity value\n"); + return -EFAULT; + } + + ena_dev->intr_moder_tx_interval = tx_coalesce_usecs / + ena_dev->intr_delay_resolution; + + return 0; +} + +int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev, + u32 rx_coalesce_usecs) +{ + if (!ena_dev->intr_delay_resolution) { + pr_err("Illegal interrupt delay granularity value\n"); + return -EFAULT; + } + + /* We use LOWEST entry of moderation table for storing + * nonadaptive interrupt coalescing values + */ + ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval = + rx_coalesce_usecs / ena_dev->intr_delay_resolution; + + return 0; +} + +void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev) +{ + if (ena_dev->intr_moder_tbl) + devm_kfree(ena_dev->dmadev, ena_dev->intr_moder_tbl); + ena_dev->intr_moder_tbl = NULL; +} + +int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev) +{ + struct ena_admin_get_feat_resp get_resp; + u16 delay_resolution; + int rc; + + rc = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_INTERRUPT_MODERATION); + + if (rc) { + if (rc == -EPERM) { + pr_info("Feature %d isn't supported\n", + ENA_ADMIN_INTERRUPT_MODERATION); + rc = 0; + } else { + pr_err("Failed to get interrupt moderation admin cmd. 
rc: %d\n", + rc); + } + + /* no moderation supported, disable adaptive support */ + ena_com_disable_adaptive_moderation(ena_dev); + return rc; + } + + rc = ena_com_init_interrupt_moderation_table(ena_dev); + if (rc) + goto err; + + /* if moderation is supported by device we set adaptive moderation */ + delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution; + ena_com_update_intr_delay_resolution(ena_dev, delay_resolution); + ena_com_enable_adaptive_moderation(ena_dev); + + return 0; +err: + ena_com_destroy_interrupt_moderation(ena_dev); + return rc; +} + +void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev) +{ + struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; + + if (!intr_moder_tbl) + return; + + intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval = + ENA_INTR_LOWEST_USECS; + intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval = + ENA_INTR_LOWEST_PKTS; + intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval = + ENA_INTR_LOWEST_BYTES; + + intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval = + ENA_INTR_LOW_USECS; + intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval = + ENA_INTR_LOW_PKTS; + intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval = + ENA_INTR_LOW_BYTES; + + intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval = + ENA_INTR_MID_USECS; + intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval = + ENA_INTR_MID_PKTS; + intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval = + ENA_INTR_MID_BYTES; + + intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval = + ENA_INTR_HIGH_USECS; + intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval = + ENA_INTR_HIGH_PKTS; + intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval = + ENA_INTR_HIGH_BYTES; + + intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval = + ENA_INTR_HIGHEST_USECS; + intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval = + ENA_INTR_HIGHEST_PKTS; + intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval = + ENA_INTR_HIGHEST_BYTES; +} + +unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev) +{ + return ena_dev->intr_moder_tx_interval; +} + +unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev) +{ + struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; + + if (intr_moder_tbl) + return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval; + + return 0; +} + +void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev, + enum ena_intr_moder_level level, + struct ena_intr_moder_entry *entry) +{ + struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; + + if (level >= ENA_INTR_MAX_NUM_OF_LEVELS) + return; + + intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval; + if (ena_dev->intr_delay_resolution) + intr_moder_tbl[level].intr_moder_interval /= + ena_dev->intr_delay_resolution; + intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval; + + /* use hardcoded value until ethtool supports bytecount parameter */ + if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED) + intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval; +} + +void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev, + enum ena_intr_moder_level level, + struct ena_intr_moder_entry *entry) +{ + struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; + + if (level >= ENA_INTR_MAX_NUM_OF_LEVELS) + return; + + entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval; + 
if (ena_dev->intr_delay_resolution) + entry->intr_moder_interval *= ena_dev->intr_delay_resolution; + entry->pkts_per_interval = + intr_moder_tbl[level].pkts_per_interval; + entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval; +} diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h new file mode 100644 index 000000000000..509d7b8e15ab --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_com.h @@ -0,0 +1,1038 @@ +/* + * Copyright 2015 Amazon.com, Inc. or its affiliates. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef ENA_COM +#define ENA_COM + +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/gfp.h> +#include <linux/sched.h> +#include <linux/sizes.h> +#include <linux/spinlock.h> +#include <linux/types.h> +#include <linux/wait.h> + +#include "ena_common_defs.h" +#include "ena_admin_defs.h" +#include "ena_eth_io_defs.h" +#include "ena_regs_defs.h" + +#undef pr_fmt +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#define ENA_MAX_NUM_IO_QUEUES 128U +/* We need two queues for each IO (one for Tx and one for Rx) */ +#define ENA_TOTAL_NUM_QUEUES (2 * (ENA_MAX_NUM_IO_QUEUES)) + +#define ENA_MAX_HANDLERS 256 + +#define ENA_MAX_PHYS_ADDR_SIZE_BITS 48 + +/* Unit in usec */ +#define ENA_REG_READ_TIMEOUT 200000 + +#define ADMIN_SQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aq_entry)) +#define ADMIN_CQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_acq_entry)) +#define ADMIN_AENQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aenq_entry)) + +/*****************************************************************************/ +/*****************************************************************************/ +/* ENA adaptive interrupt moderation settings */ + +#define ENA_INTR_LOWEST_USECS (0) +#define ENA_INTR_LOWEST_PKTS (3) +#define ENA_INTR_LOWEST_BYTES (2 * 1524) + +#define ENA_INTR_LOW_USECS (32) +#define ENA_INTR_LOW_PKTS (12) +#define ENA_INTR_LOW_BYTES (16 * 1024) + +#define ENA_INTR_MID_USECS (80) +#define ENA_INTR_MID_PKTS (48) +#define ENA_INTR_MID_BYTES (64 * 1024) + +#define ENA_INTR_HIGH_USECS (128) +#define ENA_INTR_HIGH_PKTS (96) +#define ENA_INTR_HIGH_BYTES (128 * 1024) + +#define ENA_INTR_HIGHEST_USECS (192) +#define ENA_INTR_HIGHEST_PKTS (128) +#define ENA_INTR_HIGHEST_BYTES (192 * 1024) + +#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 196 +#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 4 +#define ENA_INTR_DELAY_OLD_VALUE_WEIGHT 6 +#define ENA_INTR_DELAY_NEW_VALUE_WEIGHT 4 +#define ENA_INTR_MODER_LEVEL_STRIDE 2 +#define ENA_INTR_BYTE_COUNT_NOT_SUPPORTED 0xFFFFFF + +enum ena_intr_moder_level { + ENA_INTR_MODER_LOWEST = 0, + ENA_INTR_MODER_LOW, + ENA_INTR_MODER_MID, + ENA_INTR_MODER_HIGH, + ENA_INTR_MODER_HIGHEST, + ENA_INTR_MAX_NUM_OF_LEVELS, +}; + +struct ena_intr_moder_entry { + unsigned int intr_moder_interval; + unsigned int pkts_per_interval; + unsigned int bytes_per_interval; +}; + +enum queue_direction { + ENA_COM_IO_QUEUE_DIRECTION_TX, + ENA_COM_IO_QUEUE_DIRECTION_RX +}; + +struct ena_com_buf { + dma_addr_t paddr; /**< Buffer physical address */ + u16 len; /**< Buffer length in bytes */ +}; + +struct ena_com_rx_buf_info { + u16 len; + u16 req_id; +}; + +struct ena_com_io_desc_addr { + u8 __iomem *pbuf_dev_addr; /* LLQ address */ + u8 *virt_addr; + dma_addr_t phys_addr; +}; + +struct ena_com_tx_meta { + u16 mss; + u16 l3_hdr_len; + u16 l3_hdr_offset; + u16 l4_hdr_len; /* In words */ +}; + +struct ena_com_io_cq { + struct ena_com_io_desc_addr cdesc_addr; + + /* Interrupt unmask register */ + u32 __iomem *unmask_reg; + + /* The completion queue head doorbell register */ + u32 __iomem *cq_head_db_reg; + + /* numa configuration register (for TPH) */ + u32 __iomem *numa_node_cfg_reg; + + /* The value to write to the above register to unmask + * the interrupt of this queue + */ + u32 msix_vector; + + enum queue_direction direction; + + /* holds the number of cdesc of the current packet */ + u16 cur_rx_pkt_cdesc_count; + /* save the first cdesc idx of the current packet */ + u16 cur_rx_pkt_cdesc_start_idx; + + u16 q_depth; + /* Caller qid */ + u16 qid; + + /* Device queue 
index */ + u16 idx; + u16 head; + u16 last_head_update; + u8 phase; + u8 cdesc_entry_size_in_bytes; + +} ____cacheline_aligned; + +struct ena_com_io_sq { + struct ena_com_io_desc_addr desc_addr; + + u32 __iomem *db_addr; + u8 __iomem *header_addr; + + enum queue_direction direction; + enum ena_admin_placement_policy_type mem_queue_type; + + u32 msix_vector; + struct ena_com_tx_meta cached_tx_meta; + + u16 q_depth; + u16 qid; + + u16 idx; + u16 tail; + u16 next_to_comp; + u32 tx_max_header_size; + u8 phase; + u8 desc_entry_size; + u8 dma_addr_bits; +} ____cacheline_aligned; + +struct ena_com_admin_cq { + struct ena_admin_acq_entry *entries; + dma_addr_t dma_addr; + + u16 head; + u8 phase; +}; + +struct ena_com_admin_sq { + struct ena_admin_aq_entry *entries; + dma_addr_t dma_addr; + + u32 __iomem *db_addr; + + u16 head; + u16 tail; + u8 phase; + +}; + +struct ena_com_stats_admin { + u32 aborted_cmd; + u32 submitted_cmd; + u32 completed_cmd; + u32 out_of_space; + u32 no_completion; +}; + +struct ena_com_admin_queue { + void *q_dmadev; + spinlock_t q_lock; /* spinlock for the admin queue */ + struct ena_comp_ctx *comp_ctx; + u16 q_depth; + struct ena_com_admin_cq cq; + struct ena_com_admin_sq sq; + + /* Indicate if the admin queue should poll for completion */ + bool polling; + + u16 curr_cmd_id; + + /* Indicate that the ena was initialized and can + * process new admin commands + */ + bool running_state; + + /* Count the number of outstanding admin commands */ + atomic_t outstanding_cmds; + + struct ena_com_stats_admin stats; +}; + +struct ena_aenq_handlers; + +struct ena_com_aenq { + u16 head; + u8 phase; + struct ena_admin_aenq_entry *entries; + dma_addr_t dma_addr; + u16 q_depth; + struct ena_aenq_handlers *aenq_handlers; +}; + +struct ena_com_mmio_read { + struct ena_admin_ena_mmio_req_read_less_resp *read_resp; + dma_addr_t read_resp_dma_addr; + u16 seq_num; + bool readless_supported; + /* spin lock to ensure a single outstanding read */ + spinlock_t lock; +}; + +struct ena_rss { + /* Indirect table */ + u16 *host_rss_ind_tbl; + struct ena_admin_rss_ind_table_entry *rss_ind_tbl; + dma_addr_t rss_ind_tbl_dma_addr; + u16 tbl_log_size; + + /* Hash key */ + enum ena_admin_hash_functions hash_func; + struct ena_admin_feature_rss_flow_hash_control *hash_key; + dma_addr_t hash_key_dma_addr; + u32 hash_init_val; + + /* Flow Control */ + struct ena_admin_feature_rss_hash_control *hash_ctrl; + dma_addr_t hash_ctrl_dma_addr; + +}; + +struct ena_host_attribute { + /* Debug area */ + u8 *debug_area_virt_addr; + dma_addr_t debug_area_dma_addr; + u32 debug_area_size; + + /* Host information */ + struct ena_admin_host_info *host_info; + dma_addr_t host_info_dma_addr; +}; + +/* Each ena_dev is a PCI function. 
*/ +struct ena_com_dev { + struct ena_com_admin_queue admin_queue; + struct ena_com_aenq aenq; + struct ena_com_io_cq io_cq_queues[ENA_TOTAL_NUM_QUEUES]; + struct ena_com_io_sq io_sq_queues[ENA_TOTAL_NUM_QUEUES]; + u8 __iomem *reg_bar; + void __iomem *mem_bar; + void *dmadev; + + enum ena_admin_placement_policy_type tx_mem_queue_type; + u32 tx_max_header_size; + u16 stats_func; /* Selected function for extended statistic dump */ + u16 stats_queue; /* Selected queue for extended statistic dump */ + + struct ena_com_mmio_read mmio_read; + + struct ena_rss rss; + u32 supported_features; + u32 dma_addr_bits; + + struct ena_host_attribute host_attr; + bool adaptive_coalescing; + u16 intr_delay_resolution; + u32 intr_moder_tx_interval; + struct ena_intr_moder_entry *intr_moder_tbl; +}; + +struct ena_com_dev_get_features_ctx { + struct ena_admin_queue_feature_desc max_queues; + struct ena_admin_device_attr_feature_desc dev_attr; + struct ena_admin_feature_aenq_desc aenq; + struct ena_admin_feature_offload_desc offload; +}; + +struct ena_com_create_io_ctx { + enum ena_admin_placement_policy_type mem_queue_type; + enum queue_direction direction; + int numa_node; + u32 msix_vector; + u16 queue_size; + u16 qid; +}; + +typedef void (*ena_aenq_handler)(void *data, + struct ena_admin_aenq_entry *aenq_e); + +/* Holds aenq handlers. Indexed by AENQ event group */ +struct ena_aenq_handlers { + ena_aenq_handler handlers[ENA_MAX_HANDLERS]; + ena_aenq_handler unimplemented_handler; +}; + +/*****************************************************************************/ +/*****************************************************************************/ + +/* ena_com_mmio_reg_read_request_init - Init the mmio reg read mechanism + * @ena_dev: ENA communication layer struct + * + * Initialize the register read mechanism. + * + * @note: This method must be the first stage in the initialization sequence. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev); + +/* ena_com_set_mmio_read_mode - Enable/disable the mmio reg read mechanism + * @ena_dev: ENA communication layer struct + * @readless_supported: readless mode (enable/disable) + */ +void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, + bool readless_supported); + +/* ena_com_mmio_reg_read_request_write_dev_addr - Write the mmio reg read return + * value physical address. + * @ena_dev: ENA communication layer struct + */ +void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev); + +/* ena_com_mmio_reg_read_request_destroy - Destroy the mmio reg read mechanism + * @ena_dev: ENA communication layer struct + */ +void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev); + +/* ena_com_admin_init - Init the admin and the async queues + * @ena_dev: ENA communication layer struct + * @aenq_handlers: Those handlers to be called upon event. + * @init_spinlock: Indicate if this method should init the admin spinlock or + * the spinlock was init before (for example, in a case of FLR). + * + * Initialize the admin submission and completion queues. + * Initialize the asynchronous events notification queues. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_admin_init(struct ena_com_dev *ena_dev, + struct ena_aenq_handlers *aenq_handlers, + bool init_spinlock); + +/* ena_com_admin_destroy - Destroy the admin and the async events queues. 
+ * @ena_dev: ENA communication layer struct + * + * @note: Before calling this method, the caller must validate that the device + * won't send any additional admin completions/aenq. + * To achieve that, a FLR is recommended. + */ +void ena_com_admin_destroy(struct ena_com_dev *ena_dev); + +/* ena_com_dev_reset - Perform an FLR on the device. + * @ena_dev: ENA communication layer struct + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_dev_reset(struct ena_com_dev *ena_dev); + +/* ena_com_create_io_queue - Create io queue. + * @ena_dev: ENA communication layer struct + * @ctx - create context structure + * + * Create the submission and the completion queues. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_create_io_queue(struct ena_com_dev *ena_dev, + struct ena_com_create_io_ctx *ctx); + +/* ena_com_destroy_io_queue - Destroy IO queue with the queue id - qid. + * @ena_dev: ENA communication layer struct + * @qid - the caller virtual queue id. + */ +void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid); + +/* ena_com_get_io_handlers - Return the io queue handlers + * @ena_dev: ENA communication layer struct + * @qid - the caller virtual queue id. + * @io_sq - IO submission queue handler + * @io_cq - IO completion queue handler. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid, + struct ena_com_io_sq **io_sq, + struct ena_com_io_cq **io_cq); + +/* ena_com_admin_aenq_enable - ENAble asynchronous event notifications + * @ena_dev: ENA communication layer struct + * + * After this method, aenq events can be received via AENQ. + */ +void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev); + +/* ena_com_set_admin_running_state - Set the state of the admin queue + * @ena_dev: ENA communication layer struct + * + * Change the state of the admin queue (enable/disable) + */ +void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state); + +/* ena_com_get_admin_running_state - Get the admin queue state + * @ena_dev: ENA communication layer struct + * + * Retrieve the state of the admin queue (enable/disable) + * + * @return - current running state (enable/disable) + */ +bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev); + +/* ena_com_set_admin_polling_mode - Set the admin completion queue polling mode + * @ena_dev: ENA communication layer struct + * @polling: ENAble/Disable polling mode + * + * Set the admin completion mode. + */ +void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling); + +/* ena_com_get_ena_admin_polling_mode - Get the admin completion queue polling mode + * @ena_dev: ENA communication layer struct + * + * Get the admin completion mode. + * If polling mode is on, ena_com_execute_admin_command will perform a + * polling on the admin completion queue for the commands completion, + * otherwise it will wait on a wait event. + * + * @return state + */ +bool ena_com_get_ena_admin_polling_mode(struct ena_com_dev *ena_dev); + +/* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler + * @ena_dev: ENA communication layer struct + * + * This method goes over the admin completion queue and wakes up all the pending + * threads that wait on the commands wait event. + * + * @note: Should be called after MSI-X interrupt. 
+ */ +void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev); + +/* ena_com_aenq_intr_handler - AENQ interrupt handler + * @ena_dev: ENA communication layer struct + * + * This method goes over the async event notification queue and calls the proper + * aenq handler. + */ +void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data); + +/* ena_com_abort_admin_commands - Abort all the outstanding admin commands. + * @ena_dev: ENA communication layer struct + * + * This method aborts all the outstanding admin commands. + * The caller should then call ena_com_wait_for_abort_completion to make sure + * all the commands were completed. + */ +void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev); + +/* ena_com_wait_for_abort_completion - Wait for admin commands abort. + * @ena_dev: ENA communication layer struct + * + * This method waits until all the outstanding admin commands have completed. + */ +void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev); + +/* ena_com_validate_version - Validate the device parameters + * @ena_dev: ENA communication layer struct + * + * This method validates that the device parameters are the same as the saved + * parameters in ena_dev. + * This method is useful after device reset, to validate that the device mac address + * and the device offloads are the same as before the reset. + * + * @return - 0 on success, negative value otherwise. + */ +int ena_com_validate_version(struct ena_com_dev *ena_dev); + +/* ena_com_get_link_params - Retrieve physical link parameters. + * @ena_dev: ENA communication layer struct + * @resp: Link parameters + * + * Retrieve the physical link parameters, + * like speed, auto-negotiation and full duplex support. + * + * @return - 0 on Success, negative value otherwise. + */ +int ena_com_get_link_params(struct ena_com_dev *ena_dev, + struct ena_admin_get_feat_resp *resp); + +/* ena_com_get_dma_width - Retrieve physical dma address width the device + * supports. + * @ena_dev: ENA communication layer struct + * + * Retrieve the maximum physical address bits the device can handle. + * + * @return: > 0 on Success and negative value otherwise. + */ +int ena_com_get_dma_width(struct ena_com_dev *ena_dev); + +/* ena_com_set_aenq_config - Set aenq groups configurations + * @ena_dev: ENA communication layer struct + * @groups_flag: bit field flags of enum ena_admin_aenq_group. + * + * Configure which aenq event group the driver would like to receive. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag); + +/* ena_com_get_dev_attr_feat - Get device features + * @ena_dev: ENA communication layer struct + * @get_feat_ctx: returned context that contains the get features. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, + struct ena_com_dev_get_features_ctx *get_feat_ctx); + +/* ena_com_get_dev_basic_stats - Get device basic statistics + * @ena_dev: ENA communication layer struct + * @stats: stats return value + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev, + struct ena_admin_basic_stats *stats); + +/* ena_com_set_dev_mtu - Configure the device mtu. + * @ena_dev: ENA communication layer struct + * @mtu: mtu value + * + * @return: 0 on Success and negative value otherwise. 
+ */ +int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu); + +/* ena_com_get_offload_settings - Retrieve the device offloads capabilities + * @ena_dev: ENA communication layer struct + * @offload: offload return value + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_get_offload_settings(struct ena_com_dev *ena_dev, + struct ena_admin_feature_offload_desc *offload); + +/* ena_com_rss_init - Init RSS + * @ena_dev: ENA communication layer struct + * @log_size: indirection log size + * + * Allocate RSS/RFS resources. + * The caller then can configure rss using ena_com_set_hash_function, + * ena_com_set_hash_ctrl and ena_com_indirect_table_set. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size); + +/* ena_com_rss_destroy - Destroy rss + * @ena_dev: ENA communication layer struct + * + * Free all the RSS/RFS resources. + */ +void ena_com_rss_destroy(struct ena_com_dev *ena_dev); + +/* ena_com_fill_hash_function - Fill RSS hash function + * @ena_dev: ENA communication layer struct + * @func: The hash function (Toeplitz or crc) + * @key: Hash key (for toeplitz hash) + * @key_len: key length (max length 10 DW) + * @init_val: initial value for the hash function + * + * Fill the ena_dev resources with the desired hash function, hash key, key_len + * and key initial value (if needed by the hash function). + * To flush the key into the device the caller should call + * ena_com_set_hash_function. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, + enum ena_admin_hash_functions func, + const u8 *key, u16 key_len, u32 init_val); + +/* ena_com_set_hash_function - Flush the hash function and its dependencies to + * the device. + * @ena_dev: ENA communication layer struct + * + * Flush the hash function and its dependencies (key, key length and + * initial value) if needed. + * + * @note: Prior to this method the caller should call ena_com_fill_hash_function + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_set_hash_function(struct ena_com_dev *ena_dev); + +/* ena_com_get_hash_function - Retrieve the hash function and the hash key + * from the device. + * @ena_dev: ENA communication layer struct + * @func: hash function + * @key: hash key + * + * Retrieve the hash function and the hash key from the device. + * + * @note: If the caller called ena_com_fill_hash_function but didn't flush + * it to the device, the new configuration will be lost. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_get_hash_function(struct ena_com_dev *ena_dev, + enum ena_admin_hash_functions *func, + u8 *key); + +/* ena_com_fill_hash_ctrl - Fill RSS hash control + * @ena_dev: ENA communication layer struct. + * @proto: The protocol to configure. + * @hash_fields: bit mask of ena_admin_flow_hash_fields + * + * Fill the ena_dev resources with the desired hash control (the ethernet + * fields that take part in the hash) for a specific protocol. + * To flush the hash control to the device, the caller should call + * ena_com_set_hash_ctrl. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev, + enum ena_admin_flow_hash_proto proto, + u16 hash_fields); + +/* ena_com_set_hash_ctrl - Flush the hash control resources to the device. 
+ * @ena_dev: ENA communication layer struct + * + * Flush the hash control (the ethernet fields that take part in the hash) + * + * @note: Prior to this method the caller should call ena_com_fill_hash_ctrl. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev); + +/* ena_com_get_hash_ctrl - Retrieve the hash control from the device. + * @ena_dev: ENA communication layer struct + * @proto: The protocol to retrieve. + * @fields: bit mask of ena_admin_flow_hash_fields. + * + * Retrieve the hash control from the device. + * + * @note: If the caller called ena_com_fill_hash_ctrl but didn't flush + * it to the device, the new configuration will be lost. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev, + enum ena_admin_flow_hash_proto proto, + u16 *fields); + +/* ena_com_set_default_hash_ctrl - Set the hash control to a default + * configuration. + * @ena_dev: ENA communication layer struct + * + * Fill the ena_dev resources with the default hash control configuration. + * To flush the hash control to the device, the caller should call + * ena_com_set_hash_ctrl. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev); + +/* ena_com_indirect_table_fill_entry - Fill a single entry in the RSS + * indirection table + * @ena_dev: ENA communication layer struct. + * @entry_idx - indirection table entry. + * @entry_value - redirection value + * + * Fill a single entry of the RSS indirection table in the ena_dev resources. + * To flush the indirection table to the device, the caller should call + * ena_com_indirect_table_set. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev, + u16 entry_idx, u16 entry_value); + +/* ena_com_indirect_table_set - Flush the indirection table to the device. + * @ena_dev: ENA communication layer struct + * + * Flush the indirection table to the device. + * Prior to this method the caller should call ena_com_indirect_table_fill_entry + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_indirect_table_set(struct ena_com_dev *ena_dev); + +/* ena_com_indirect_table_get - Retrieve the indirection table from the device. + * @ena_dev: ENA communication layer struct + * @ind_tbl: indirection table + * + * Retrieve the RSS indirection table from the device. + * + * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flush + * it to the device, the new configuration will be lost. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl); + +/* ena_com_allocate_host_info - Allocate host info resources. + * @ena_dev: ENA communication layer struct + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_allocate_host_info(struct ena_com_dev *ena_dev); + +/* ena_com_allocate_debug_area - Allocate debug area. + * @ena_dev: ENA communication layer struct + * @debug_area_size - debug area size. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev, + u32 debug_area_size); + +/* ena_com_delete_debug_area - Free the debug area resources. + * @ena_dev: ENA communication layer struct + * + * Free the allocated debug area. 
+ */ +void ena_com_delete_debug_area(struct ena_com_dev *ena_dev); + +/* ena_com_delete_host_info - Free the host info resources. + * @ena_dev: ENA communication layer struct + * + * Free the allocated host info. + */ +void ena_com_delete_host_info(struct ena_com_dev *ena_dev); + +/* ena_com_set_host_attributes - Update the device with the host + * attributes (debug area and host info) base address. + * @ena_dev: ENA communication layer struct + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_set_host_attributes(struct ena_com_dev *ena_dev); + +/* ena_com_create_io_cq - Create io completion queue. + * @ena_dev: ENA communication layer struct + * @io_cq - io completion queue handler + + * Create IO completion queue. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_create_io_cq(struct ena_com_dev *ena_dev, + struct ena_com_io_cq *io_cq); + +/* ena_com_destroy_io_cq - Destroy io completion queue. + * @ena_dev: ENA communication layer struct + * @io_cq - io completion queue handler + + * Destroy IO completion queue. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev, + struct ena_com_io_cq *io_cq); + +/* ena_com_execute_admin_command - Execute admin command + * @admin_queue: admin queue. + * @cmd: the admin command to execute. + * @cmd_size: the command size. + * @cmd_completion: command completion return value. + * @cmd_comp_size: command completion size. + + * Submit an admin command and then wait until the device returns a + * completion. + * The completion will be copied into cmd_comp. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue, + struct ena_admin_aq_entry *cmd, + size_t cmd_size, + struct ena_admin_acq_entry *cmd_comp, + size_t cmd_comp_size); + +/* ena_com_init_interrupt_moderation - Init interrupt moderation + * @ena_dev: ENA communication layer struct + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev); + +/* ena_com_destroy_interrupt_moderation - Destroy interrupt moderation resources + * @ena_dev: ENA communication layer struct + */ +void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev); + +/* ena_com_interrupt_moderation_supported - Return if interrupt moderation + * capability is supported by the device. + * + * @return - supported or not. + */ +bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev); + +/* ena_com_config_default_interrupt_moderation_table - Restore the interrupt + * moderation table to the default parameters. + * @ena_dev: ENA communication layer struct + */ +void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev); + +/* ena_com_update_nonadaptive_moderation_interval_tx - Update the + * non-adaptive interval in Tx direction. + * @ena_dev: ENA communication layer struct + * @tx_coalesce_usecs: Interval in usec. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, + u32 tx_coalesce_usecs); + +/* ena_com_update_nonadaptive_moderation_interval_rx - Update the + * non-adaptive interval in Rx direction. + * @ena_dev: ENA communication layer struct + * @rx_coalesce_usecs: Interval in usec. + * + * @return - 0 on success, negative value on failure. 
+ */ +int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev, + u32 rx_coalesce_usecs); + +/* ena_com_get_nonadaptive_moderation_interval_tx - Retrieve the + * non-adaptive interval in Tx direction. + * @ena_dev: ENA communication layer struct + * + * @return - interval in usec + */ +unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev); + +/* ena_com_get_nonadaptive_moderation_interval_rx - Retrieve the + * non-adaptive interval in Rx direction. + * @ena_dev: ENA communication layer struct + * + * @return - interval in usec + */ +unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev); + +/* ena_com_init_intr_moderation_entry - Update a single entry in the interrupt + * moderation table. + * @ena_dev: ENA communication layer struct + * @level: Interrupt moderation table level + * @entry: Entry value + * + * Update a single entry in the interrupt moderation table. + */ +void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev, + enum ena_intr_moder_level level, + struct ena_intr_moder_entry *entry); + +/* ena_com_get_intr_moderation_entry - Init ena_intr_moder_entry. + * @ena_dev: ENA communication layer struct + * @level: Interrupt moderation table level + * @entry: Entry to fill. + * + * Initialize the entry according to the adaptive interrupt moderation table. + */ +void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev, + enum ena_intr_moder_level level, + struct ena_intr_moder_entry *entry); + +static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev) +{ + return ena_dev->adaptive_coalescing; +} + +static inline void ena_com_enable_adaptive_moderation(struct ena_com_dev *ena_dev) +{ + ena_dev->adaptive_coalescing = true; +} + +static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev) +{ + ena_dev->adaptive_coalescing = false; +} + +/* ena_com_calculate_interrupt_delay - Calculate new interrupt delay + * @ena_dev: ENA communication layer struct + * @pkts: Number of packets since the last update + * @bytes: Number of bytes received since the last update. + * @smoothed_interval: Returned interval + * @moder_tbl_idx: Current table level as input update new level as return + * value. + */ +static inline void ena_com_calculate_interrupt_delay(struct ena_com_dev *ena_dev, + unsigned int pkts, + unsigned int bytes, + unsigned int *smoothed_interval, + unsigned int *moder_tbl_idx) +{ + enum ena_intr_moder_level curr_moder_idx, new_moder_idx; + struct ena_intr_moder_entry *curr_moder_entry; + struct ena_intr_moder_entry *pred_moder_entry; + struct ena_intr_moder_entry *new_moder_entry; + struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; + unsigned int interval; + + /* We apply adaptive moderation on Rx path only. + * Tx uses static interrupt moderation. 
+ */ + if (!pkts || !bytes) + /* Tx interrupt, or spurious interrupt, + * in both cases we just use same delay values + */ + return; + + curr_moder_idx = (enum ena_intr_moder_level)(*moder_tbl_idx); + if (unlikely(curr_moder_idx >= ENA_INTR_MAX_NUM_OF_LEVELS)) { + pr_err("Wrong moderation index %u\n", curr_moder_idx); + return; + } + + curr_moder_entry = &intr_moder_tbl[curr_moder_idx]; + new_moder_idx = curr_moder_idx; + + if (curr_moder_idx == ENA_INTR_MODER_LOWEST) { + if ((pkts > curr_moder_entry->pkts_per_interval) || + (bytes > curr_moder_entry->bytes_per_interval)) + new_moder_idx = + (enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE); + } else { + pred_moder_entry = &intr_moder_tbl[curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE]; + + if ((pkts <= pred_moder_entry->pkts_per_interval) || + (bytes <= pred_moder_entry->bytes_per_interval)) + new_moder_idx = + (enum ena_intr_moder_level)(curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE); + else if ((pkts > curr_moder_entry->pkts_per_interval) || + (bytes > curr_moder_entry->bytes_per_interval)) { + if (curr_moder_idx != ENA_INTR_MODER_HIGHEST) + new_moder_idx = + (enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE); + } + } + new_moder_entry = &intr_moder_tbl[new_moder_idx]; + + interval = new_moder_entry->intr_moder_interval; + *smoothed_interval = ( + (interval * ENA_INTR_DELAY_NEW_VALUE_WEIGHT + + ENA_INTR_DELAY_OLD_VALUE_WEIGHT * (*smoothed_interval)) + 5) / + 10; + + *moder_tbl_idx = new_moder_idx; +} + +/* ena_com_update_intr_reg - Prepare interrupt register + * @intr_reg: interrupt register to update. + * @rx_delay_interval: Rx interval in usecs + * @tx_delay_interval: Tx interval in usecs + * @unmask: unask enable/disable + * + * Prepare interrupt update register with the supplied parameters. + */ +static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg, + u32 rx_delay_interval, + u32 tx_delay_interval, + bool unmask) +{ + intr_reg->intr_control = 0; + intr_reg->intr_control |= rx_delay_interval & + ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK; + + intr_reg->intr_control |= + (tx_delay_interval << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT) + & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK; + + if (unmask) + intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK; +} + +#endif /* !(ENA_COM) */ diff --git a/drivers/net/ethernet/amazon/ena/ena_common_defs.h b/drivers/net/ethernet/amazon/ena/ena_common_defs.h new file mode 100644 index 000000000000..bb8d73676eab --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_common_defs.h @@ -0,0 +1,48 @@ +/* + * Copyright 2015 - 2016 Amazon.com, Inc. or its affiliates. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _ENA_COMMON_H_ +#define _ENA_COMMON_H_ + +#define ENA_COMMON_SPEC_VERSION_MAJOR 0 /* */ +#define ENA_COMMON_SPEC_VERSION_MINOR 10 /* */ + +/* ENA operates with 48-bit memory addresses. ena_mem_addr_t */ +struct ena_common_mem_addr { + u32 mem_addr_low; + + u16 mem_addr_high; + + /* MBZ */ + u16 reserved16; +}; + +#endif /*_ENA_COMMON_H_ */ diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c new file mode 100644 index 000000000000..539c536464a5 --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c @@ -0,0 +1,501 @@ +/* + * Copyright 2015 Amazon.com, Inc. or its affiliates. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "ena_eth_com.h" + +static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc( + struct ena_com_io_cq *io_cq) +{ + struct ena_eth_io_rx_cdesc_base *cdesc; + u16 expected_phase, head_masked; + u16 desc_phase; + + head_masked = io_cq->head & (io_cq->q_depth - 1); + expected_phase = io_cq->phase; + + cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr + + (head_masked * io_cq->cdesc_entry_size_in_bytes)); + + desc_phase = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT; + + if (desc_phase != expected_phase) + return NULL; + + return cdesc; +} + +static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq) +{ + io_cq->head++; + + /* Switch phase bit in case of wrap around */ + if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0)) + io_cq->phase ^= 1; +} + +static inline void *get_sq_desc(struct ena_com_io_sq *io_sq) +{ + u16 tail_masked; + u32 offset; + + tail_masked = io_sq->tail & (io_sq->q_depth - 1); + + offset = tail_masked * io_sq->desc_entry_size; + + return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset); +} + +static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq) +{ + u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1); + u32 offset = tail_masked * io_sq->desc_entry_size; + + /* In case this queue isn't a LLQ */ + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) + return; + + memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset, + io_sq->desc_addr.virt_addr + offset, + io_sq->desc_entry_size); +} + +static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq) +{ + io_sq->tail++; + + /* Switch phase bit in case of wrap around */ + if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) + io_sq->phase ^= 1; +} + +static inline int ena_com_write_header(struct ena_com_io_sq *io_sq, + u8 *head_src, u16 header_len) +{ + u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1); + u8 __iomem *dev_head_addr = + io_sq->header_addr + (tail_masked * io_sq->tx_max_header_size); + + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) + return 0; + + if (unlikely(!io_sq->header_addr)) { + pr_err("Push buffer header ptr is NULL\n"); + return -EINVAL; + } + + memcpy_toio(dev_head_addr, head_src, header_len); + + return 0; +} + +static inline struct ena_eth_io_rx_cdesc_base * + ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx) +{ + idx &= (io_cq->q_depth - 1); + return (struct ena_eth_io_rx_cdesc_base *) + ((uintptr_t)io_cq->cdesc_addr.virt_addr + + idx * io_cq->cdesc_entry_size_in_bytes); +} + +static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq, + u16 *first_cdesc_idx) +{ + struct ena_eth_io_rx_cdesc_base *cdesc; + u16 count = 0, head_masked; + u32 last = 0; + + do { + cdesc = ena_com_get_next_rx_cdesc(io_cq); + if (!cdesc) + break; + + ena_com_cq_inc_head(io_cq); + count++; + last = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT; + } while (!last); + + if (last) { + *first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx; + count += io_cq->cur_rx_pkt_cdesc_count; + + head_masked = io_cq->head & (io_cq->q_depth - 1); + + io_cq->cur_rx_pkt_cdesc_count = 0; + io_cq->cur_rx_pkt_cdesc_start_idx = head_masked; + + pr_debug("ena q_id: %d packets were completed. 
first desc idx %u descs# %d\n", + io_cq->qid, *first_cdesc_idx, count); + } else { + io_cq->cur_rx_pkt_cdesc_count += count; + count = 0; + } + + return count; +} + +static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq, + struct ena_com_tx_ctx *ena_tx_ctx) +{ + int rc; + + if (ena_tx_ctx->meta_valid) { + rc = memcmp(&io_sq->cached_tx_meta, + &ena_tx_ctx->ena_meta, + sizeof(struct ena_com_tx_meta)); + + if (unlikely(rc != 0)) + return true; + } + + return false; +} + +static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq, + struct ena_com_tx_ctx *ena_tx_ctx) +{ + struct ena_eth_io_tx_meta_desc *meta_desc = NULL; + struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; + + meta_desc = get_sq_desc(io_sq); + memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc)); + + meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK; + + meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK; + + /* bits 0-9 of the mss */ + meta_desc->word2 |= (ena_meta->mss << + ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) & + ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK; + /* bits 10-13 of the mss */ + meta_desc->len_ctrl |= ((ena_meta->mss >> 10) << + ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) & + ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK; + + /* Extended meta desc */ + meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK; + meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK; + meta_desc->len_ctrl |= (io_sq->phase << + ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) & + ENA_ETH_IO_TX_META_DESC_PHASE_MASK; + + meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK; + meta_desc->word2 |= ena_meta->l3_hdr_len & + ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK; + meta_desc->word2 |= (ena_meta->l3_hdr_offset << + ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) & + ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK; + + meta_desc->word2 |= (ena_meta->l4_hdr_len << + ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) & + ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK; + + meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK; + + /* Cached the meta desc */ + memcpy(&io_sq->cached_tx_meta, ena_meta, + sizeof(struct ena_com_tx_meta)); + + ena_com_copy_curr_sq_desc_to_dev(io_sq); + ena_com_sq_update_tail(io_sq); +} + +static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx, + struct ena_eth_io_rx_cdesc_base *cdesc) +{ + ena_rx_ctx->l3_proto = cdesc->status & + ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK; + ena_rx_ctx->l4_proto = + (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT; + ena_rx_ctx->l3_csum_err = + (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT; + ena_rx_ctx->l4_csum_err = + (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT; + ena_rx_ctx->hash = cdesc->hash; + ena_rx_ctx->frag = + (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT; + + pr_debug("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash frag %d frag: %d cdesc_status: %x\n", + ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto, + ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err, + ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status); +} + +/*****************************************************************************/ +/***************************** API **********************************/ 
+/*****************************************************************************/ + +int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, + struct ena_com_tx_ctx *ena_tx_ctx, + int *nb_hw_desc) +{ + struct ena_eth_io_tx_desc *desc = NULL; + struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs; + void *push_header = ena_tx_ctx->push_header; + u16 header_len = ena_tx_ctx->header_len; + u16 num_bufs = ena_tx_ctx->num_bufs; + int total_desc, i, rc; + bool have_meta; + u64 addr_hi; + + WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type"); + + /* num_bufs +1 for potential meta desc */ + if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) { + pr_err("Not enough space in the tx queue\n"); + return -ENOMEM; + } + + if (unlikely(header_len > io_sq->tx_max_header_size)) { + pr_err("header size is too large %d max header: %d\n", + header_len, io_sq->tx_max_header_size); + return -EINVAL; + } + + /* start with pushing the header (if needed) */ + rc = ena_com_write_header(io_sq, push_header, header_len); + if (unlikely(rc)) + return rc; + + have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq, + ena_tx_ctx); + if (have_meta) + ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx); + + /* If the caller doesn't want send packets */ + if (unlikely(!num_bufs && !header_len)) { + *nb_hw_desc = have_meta ? 0 : 1; + return 0; + } + + desc = get_sq_desc(io_sq); + memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc)); + + /* Set first desc when we don't have meta descriptor */ + if (!have_meta) + desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK; + + desc->buff_addr_hi_hdr_sz |= (header_len << + ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) & + ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK; + desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) & + ENA_ETH_IO_TX_DESC_PHASE_MASK; + + desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK; + + /* Bits 0-9 */ + desc->meta_ctrl |= (ena_tx_ctx->req_id << + ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) & + ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK; + + desc->meta_ctrl |= (ena_tx_ctx->df << + ENA_ETH_IO_TX_DESC_DF_SHIFT) & + ENA_ETH_IO_TX_DESC_DF_MASK; + + /* Bits 10-15 */ + desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) << + ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) & + ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK; + + if (ena_tx_ctx->meta_valid) { + desc->meta_ctrl |= (ena_tx_ctx->tso_enable << + ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) & + ENA_ETH_IO_TX_DESC_TSO_EN_MASK; + desc->meta_ctrl |= ena_tx_ctx->l3_proto & + ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK; + desc->meta_ctrl |= (ena_tx_ctx->l4_proto << + ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) & + ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK; + desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable << + ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) & + ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK; + desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable << + ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) & + ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK; + desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial << + ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) & + ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK; + } + + for (i = 0; i < num_bufs; i++) { + /* The first desc share the same desc as the header */ + if (likely(i != 0)) { + ena_com_copy_curr_sq_desc_to_dev(io_sq); + ena_com_sq_update_tail(io_sq); + + desc = get_sq_desc(io_sq); + memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc)); + + desc->len_ctrl |= (io_sq->phase << + ENA_ETH_IO_TX_DESC_PHASE_SHIFT) & + ENA_ETH_IO_TX_DESC_PHASE_MASK; + } + + desc->len_ctrl |= ena_bufs->len & + ENA_ETH_IO_TX_DESC_LENGTH_MASK; + + addr_hi = ((ena_bufs->paddr & + 
GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32); + + desc->buff_addr_lo = (u32)ena_bufs->paddr; + desc->buff_addr_hi_hdr_sz |= addr_hi & + ENA_ETH_IO_TX_DESC_ADDR_HI_MASK; + ena_bufs++; + } + + /* set the last desc indicator */ + desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK; + + ena_com_copy_curr_sq_desc_to_dev(io_sq); + + ena_com_sq_update_tail(io_sq); + + total_desc = max_t(u16, num_bufs, 1); + total_desc += have_meta ? 1 : 0; + + *nb_hw_desc = total_desc; + return 0; +} + +int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, + struct ena_com_io_sq *io_sq, + struct ena_com_rx_ctx *ena_rx_ctx) +{ + struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0]; + struct ena_eth_io_rx_cdesc_base *cdesc = NULL; + u16 cdesc_idx = 0; + u16 nb_hw_desc; + u16 i; + + WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type"); + + nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx); + if (nb_hw_desc == 0) { + ena_rx_ctx->descs = nb_hw_desc; + return 0; + } + + pr_debug("fetch rx packet: queue %d completed desc: %d\n", io_cq->qid, + nb_hw_desc); + + if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) { + pr_err("Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc, + ena_rx_ctx->max_bufs); + return -ENOSPC; + } + + for (i = 0; i < nb_hw_desc; i++) { + cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i); + + ena_buf->len = cdesc->length; + ena_buf->req_id = cdesc->req_id; + ena_buf++; + } + + /* Update SQ head ptr */ + io_sq->next_to_comp += nb_hw_desc; + + pr_debug("[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid, + io_sq->next_to_comp); + + /* Get rx flags from the last pkt */ + ena_com_rx_set_flags(ena_rx_ctx, cdesc); + + ena_rx_ctx->descs = nb_hw_desc; + return 0; +} + +int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, + struct ena_com_buf *ena_buf, + u16 req_id) +{ + struct ena_eth_io_rx_desc *desc; + + WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type"); + + if (unlikely(ena_com_sq_empty_space(io_sq) == 0)) + return -ENOSPC; + + desc = get_sq_desc(io_sq); + memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc)); + + desc->length = ena_buf->len; + + desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK; + desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK; + desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK; + desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK; + + desc->req_id = req_id; + + desc->buff_addr_lo = (u32)ena_buf->paddr; + desc->buff_addr_hi = + ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32); + + ena_com_sq_update_tail(io_sq); + + return 0; +} + +int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id) +{ + u8 expected_phase, cdesc_phase; + struct ena_eth_io_tx_cdesc *cdesc; + u16 masked_head; + + masked_head = io_cq->head & (io_cq->q_depth - 1); + expected_phase = io_cq->phase; + + cdesc = (struct ena_eth_io_tx_cdesc *) + ((uintptr_t)io_cq->cdesc_addr.virt_addr + + (masked_head * io_cq->cdesc_entry_size_in_bytes)); + + /* When the current completion descriptor phase isn't the same as the + * expected, it mean that the device still didn't update + * this completion. 
+ */ + cdesc_phase = cdesc->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK; + if (cdesc_phase != expected_phase) + return -EAGAIN; + + ena_com_cq_inc_head(io_cq); + + *req_id = cdesc->req_id; + + return 0; +} diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h new file mode 100644 index 000000000000..bb53c3a4f8e9 --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h @@ -0,0 +1,160 @@ +/* + * Copyright 2015 Amazon.com, Inc. or its affiliates. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef ENA_ETH_COM_H_ +#define ENA_ETH_COM_H_ + +#include "ena_com.h" + +/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */ +#define ENA_COMP_HEAD_THRESH 4 + +struct ena_com_tx_ctx { + struct ena_com_tx_meta ena_meta; + struct ena_com_buf *ena_bufs; + /* For LLQ, header buffer - pushed to the device mem space */ + void *push_header; + + enum ena_eth_io_l3_proto_index l3_proto; + enum ena_eth_io_l4_proto_index l4_proto; + u16 num_bufs; + u16 req_id; + /* For regular queue, indicate the size of the header + * For LLQ, indicate the size of the pushed buffer + */ + u16 header_len; + + u8 meta_valid; + u8 tso_enable; + u8 l3_csum_enable; + u8 l4_csum_enable; + u8 l4_csum_partial; + u8 df; /* Don't fragment */ +}; + +struct ena_com_rx_ctx { + struct ena_com_rx_buf_info *ena_bufs; + enum ena_eth_io_l3_proto_index l3_proto; + enum ena_eth_io_l4_proto_index l4_proto; + bool l3_csum_err; + bool l4_csum_err; + /* fragmented packet */ + bool frag; + u32 hash; + u16 descs; + int max_bufs; +}; + +int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, + struct ena_com_tx_ctx *ena_tx_ctx, + int *nb_hw_desc); + +int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, + struct ena_com_io_sq *io_sq, + struct ena_com_rx_ctx *ena_rx_ctx); + +int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, + struct ena_com_buf *ena_buf, + u16 req_id); + +int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id); + +static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq, + struct ena_eth_io_intr_reg *intr_reg) +{ + writel(intr_reg->intr_control, io_cq->unmask_reg); +} + +static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq) +{ + u16 tail, next_to_comp, cnt; + + next_to_comp = io_sq->next_to_comp; + tail = io_sq->tail; + cnt = tail - next_to_comp; + + return io_sq->q_depth - 1 - cnt; +} + +static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq) +{ + u16 tail; + + tail = io_sq->tail; + + pr_debug("write submission queue doorbell for queue: %d tail: %d\n", + io_sq->qid, tail); + + writel(tail, io_sq->db_addr); + + return 0; +} + +static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq) +{ + u16 unreported_comp, head; + bool need_update; + + head = io_cq->head; + unreported_comp = head - io_cq->last_head_update; + need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH); + + if (io_cq->cq_head_db_reg && need_update) { + pr_debug("Write completion queue doorbell for queue %d: head: %d\n", + io_cq->qid, head); + writel(head, io_cq->cq_head_db_reg); + io_cq->last_head_update = head; + } + + return 0; +} + +static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq, + u8 numa_node) +{ + struct ena_eth_io_numa_node_cfg_reg numa_cfg; + + if (!io_cq->numa_node_cfg_reg) + return; + + numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK) + | ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK; + + writel(numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg); +} + +static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem) +{ + io_sq->next_to_comp += elem; +} + +#endif /* ENA_ETH_COM_H_ */ diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h new file mode 100644 index 000000000000..f320c58793a5 --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h @@ -0,0 +1,416 @@ +/* + * Copyright 2015 - 2016 Amazon.com, Inc. or its affiliates. 
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _ENA_ETH_IO_H_ +#define _ENA_ETH_IO_H_ + +enum ena_eth_io_l3_proto_index { + ENA_ETH_IO_L3_PROTO_UNKNOWN = 0, + + ENA_ETH_IO_L3_PROTO_IPV4 = 8, + + ENA_ETH_IO_L3_PROTO_IPV6 = 11, + + ENA_ETH_IO_L3_PROTO_FCOE = 21, + + ENA_ETH_IO_L3_PROTO_ROCE = 22, +}; + +enum ena_eth_io_l4_proto_index { + ENA_ETH_IO_L4_PROTO_UNKNOWN = 0, + + ENA_ETH_IO_L4_PROTO_TCP = 12, + + ENA_ETH_IO_L4_PROTO_UDP = 13, + + ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23, +}; + +struct ena_eth_io_tx_desc { + /* 15:0 : length - Buffer length in bytes, must + * include any packet trailers that the ENA supposed + * to update like End-to-End CRC, Authentication GMAC + * etc. This length must not include the + * 'Push_Buffer' length. This length must not include + * the 4-byte added in the end for 802.3 Ethernet FCS + * 21:16 : req_id_hi - Request ID[15:10] + * 22 : reserved22 - MBZ + * 23 : meta_desc - MBZ + * 24 : phase + * 25 : reserved1 - MBZ + * 26 : first - Indicates first descriptor in + * transaction + * 27 : last - Indicates last descriptor in + * transaction + * 28 : comp_req - Indicates whether completion + * should be posted, after packet is transmitted. + * Valid only for first descriptor + * 30:29 : reserved29 - MBZ + * 31 : reserved31 - MBZ + */ + u32 len_ctrl; + + /* 3:0 : l3_proto_idx - L3 protocol. This field + * required when l3_csum_en,l3_csum or tso_en are set. + * 4 : DF - IPv4 DF, must be 0 if packet is IPv4 and + * DF flags of the IPv4 header is 0. Otherwise must + * be set to 1 + * 6:5 : reserved5 + * 7 : tso_en - Enable TSO, For TCP only. + * 12:8 : l4_proto_idx - L4 protocol. This field need + * to be set when l4_csum_en or tso_en are set. + * 13 : l3_csum_en - enable IPv4 header checksum. + * 14 : l4_csum_en - enable TCP/UDP checksum. + * 15 : ethernet_fcs_dis - when set, the controller + * will not append the 802.3 Ethernet Frame Check + * Sequence to the packet + * 16 : reserved16 + * 17 : l4_csum_partial - L4 partial checksum. when + * set to 0, the ENA calculates the L4 checksum, + * where the Destination Address required for the + * TCP/UDP pseudo-header is taken from the actual + * packet L3 header. 
when set to 1, the ENA doesn't + * calculate the sum of the pseudo-header, instead, + * the checksum field of the L4 is used instead. When + * TSO enabled, the checksum of the pseudo-header + * must not include the tcp length field. L4 partial + * checksum should be used for IPv6 packet that + * contains Routing Headers. + * 20:18 : reserved18 - MBZ + * 21 : reserved21 - MBZ + * 31:22 : req_id_lo - Request ID[9:0] + */ + u32 meta_ctrl; + + u32 buff_addr_lo; + + /* address high and header size + * 15:0 : addr_hi - Buffer Pointer[47:32] + * 23:16 : reserved16_w2 + * 31:24 : header_length - Header length. For Low + * Latency Queues, this fields indicates the number + * of bytes written to the headers' memory. For + * normal queues, if packet is TCP or UDP, and longer + * than max_header_size, then this field should be + * set to the sum of L4 header offset and L4 header + * size(without options), otherwise, this field + * should be set to 0. For both modes, this field + * must not exceed the max_header_size. + * max_header_size value is reported by the Max + * Queues Feature descriptor + */ + u32 buff_addr_hi_hdr_sz; +}; + +struct ena_eth_io_tx_meta_desc { + /* 9:0 : req_id_lo - Request ID[9:0] + * 11:10 : reserved10 - MBZ + * 12 : reserved12 - MBZ + * 13 : reserved13 - MBZ + * 14 : ext_valid - if set, offset fields in Word2 + * are valid Also MSS High in Word 0 and bits [31:24] + * in Word 3 + * 15 : reserved15 + * 19:16 : mss_hi + * 20 : eth_meta_type - 0: Tx Metadata Descriptor, 1: + * Extended Metadata Descriptor + * 21 : meta_store - Store extended metadata in queue + * cache + * 22 : reserved22 - MBZ + * 23 : meta_desc - MBO + * 24 : phase + * 25 : reserved25 - MBZ + * 26 : first - Indicates first descriptor in + * transaction + * 27 : last - Indicates last descriptor in + * transaction + * 28 : comp_req - Indicates whether completion + * should be posted, after packet is transmitted. + * Valid only for first descriptor + * 30:29 : reserved29 - MBZ + * 31 : reserved31 - MBZ + */ + u32 len_ctrl; + + /* 5:0 : req_id_hi + * 31:6 : reserved6 - MBZ + */ + u32 word1; + + /* 7:0 : l3_hdr_len + * 15:8 : l3_hdr_off + * 21:16 : l4_hdr_len_in_words - counts the L4 header + * length in words. there is an explicit assumption + * that L4 header appears right after L3 header and + * L4 offset is based on l3_hdr_off+l3_hdr_len + * 31:22 : mss_lo + */ + u32 word2; + + u32 reserved; +}; + +struct ena_eth_io_tx_cdesc { + /* Request ID[15:0] */ + u16 req_id; + + u8 status; + + /* flags + * 0 : phase + * 7:1 : reserved1 + */ + u8 flags; + + u16 sub_qid; + + u16 sq_head_idx; +}; + +struct ena_eth_io_rx_desc { + /* In bytes. 0 means 64KB */ + u16 length; + + /* MBZ */ + u8 reserved2; + + /* 0 : phase + * 1 : reserved1 - MBZ + * 2 : first - Indicates first descriptor in + * transaction + * 3 : last - Indicates last descriptor in transaction + * 4 : comp_req + * 5 : reserved5 - MBO + * 7:6 : reserved6 - MBZ + */ + u8 ctrl; + + u16 req_id; + + /* MBZ */ + u16 reserved6; + + u32 buff_addr_lo; + + u16 buff_addr_hi; + + /* MBZ */ + u16 reserved16_w3; +}; + +/* 4-word format Note: all ethernet parsing information are valid only when + * last=1 + */ +struct ena_eth_io_rx_cdesc_base { + /* 4:0 : l3_proto_idx + * 6:5 : src_vlan_cnt + * 7 : reserved7 - MBZ + * 12:8 : l4_proto_idx + * 13 : l3_csum_err - when set, either the L3 + * checksum error detected, or, the controller didn't + * validate the checksum. 
This bit is valid only when + * l3_proto_idx indicates IPv4 packet + * 14 : l4_csum_err - when set, either the L4 + * checksum error detected, or, the controller didn't + * validate the checksum. This bit is valid only when + * l4_proto_idx indicates TCP/UDP packet, and, + * ipv4_frag is not set + * 15 : ipv4_frag - Indicates IPv4 fragmented packet + * 23:16 : reserved16 + * 24 : phase + * 25 : l3_csum2 - second checksum engine result + * 26 : first - Indicates first descriptor in + * transaction + * 27 : last - Indicates last descriptor in + * transaction + * 29:28 : reserved28 + * 30 : buffer - 0: Metadata descriptor. 1: Buffer + * Descriptor was used + * 31 : reserved31 + */ + u32 status; + + u16 length; + + u16 req_id; + + /* 32-bit hash result */ + u32 hash; + + u16 sub_qid; + + u16 reserved; +}; + +/* 8-word format */ +struct ena_eth_io_rx_cdesc_ext { + struct ena_eth_io_rx_cdesc_base base; + + u32 buff_addr_lo; + + u16 buff_addr_hi; + + u16 reserved16; + + u32 reserved_w6; + + u32 reserved_w7; +}; + +struct ena_eth_io_intr_reg { + /* 14:0 : rx_intr_delay + * 29:15 : tx_intr_delay + * 30 : intr_unmask + * 31 : reserved + */ + u32 intr_control; +}; + +struct ena_eth_io_numa_node_cfg_reg { + /* 7:0 : numa + * 30:8 : reserved + * 31 : enabled + */ + u32 numa_cfg; +}; + +/* tx_desc */ +#define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0) +#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16 +#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16) +#define ENA_ETH_IO_TX_DESC_META_DESC_SHIFT 23 +#define ENA_ETH_IO_TX_DESC_META_DESC_MASK BIT(23) +#define ENA_ETH_IO_TX_DESC_PHASE_SHIFT 24 +#define ENA_ETH_IO_TX_DESC_PHASE_MASK BIT(24) +#define ENA_ETH_IO_TX_DESC_FIRST_SHIFT 26 +#define ENA_ETH_IO_TX_DESC_FIRST_MASK BIT(26) +#define ENA_ETH_IO_TX_DESC_LAST_SHIFT 27 +#define ENA_ETH_IO_TX_DESC_LAST_MASK BIT(27) +#define ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT 28 +#define ENA_ETH_IO_TX_DESC_COMP_REQ_MASK BIT(28) +#define ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0) +#define ENA_ETH_IO_TX_DESC_DF_SHIFT 4 +#define ENA_ETH_IO_TX_DESC_DF_MASK BIT(4) +#define ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT 7 +#define ENA_ETH_IO_TX_DESC_TSO_EN_MASK BIT(7) +#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT 8 +#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8) +#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT 13 +#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK BIT(13) +#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT 14 +#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK BIT(14) +#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15 +#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15) +#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17 +#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17) +#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22 +#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22) +#define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0) +#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT 24 +#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24) + +/* tx_meta_desc */ +#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0) +#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14 +#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14) +#define ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT 16 +#define ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK GENMASK(19, 16) +#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20 +#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20) +#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21 +#define ENA_ETH_IO_TX_META_DESC_META_STORE_MASK BIT(21) +#define ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT 23 
+#define ENA_ETH_IO_TX_META_DESC_META_DESC_MASK BIT(23) +#define ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT 24 +#define ENA_ETH_IO_TX_META_DESC_PHASE_MASK BIT(24) +#define ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT 26 +#define ENA_ETH_IO_TX_META_DESC_FIRST_MASK BIT(26) +#define ENA_ETH_IO_TX_META_DESC_LAST_SHIFT 27 +#define ENA_ETH_IO_TX_META_DESC_LAST_MASK BIT(27) +#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT 28 +#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK BIT(28) +#define ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK GENMASK(5, 0) +#define ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK GENMASK(7, 0) +#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT 8 +#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK GENMASK(15, 8) +#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT 16 +#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16) +#define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22 +#define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22) + +/* tx_cdesc */ +#define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0) + +/* rx_desc */ +#define ENA_ETH_IO_RX_DESC_PHASE_MASK BIT(0) +#define ENA_ETH_IO_RX_DESC_FIRST_SHIFT 2 +#define ENA_ETH_IO_RX_DESC_FIRST_MASK BIT(2) +#define ENA_ETH_IO_RX_DESC_LAST_SHIFT 3 +#define ENA_ETH_IO_RX_DESC_LAST_MASK BIT(3) +#define ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT 4 +#define ENA_ETH_IO_RX_DESC_COMP_REQ_MASK BIT(4) + +/* rx_cdesc_base */ +#define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0) +#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5 +#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5) +#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8 +#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8) +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13 +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK BIT(13) +#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT 14 +#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14) +#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15 +#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15) +#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24 +#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24) +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25 +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK BIT(25) +#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT 26 +#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26) +#define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27 +#define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27) +#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30 +#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30) + +/* intr_reg */ +#define ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK GENMASK(14, 0) +#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT 15 +#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK GENMASK(29, 15) +#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30 +#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30) + +/* numa_node_cfg_reg */ +#define ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK GENMASK(7, 0) +#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31 +#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31) + +#endif /*_ENA_ETH_IO_H_ */ diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c new file mode 100644 index 000000000000..67b2338f8fb3 --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -0,0 +1,895 @@ +/* + * Copyright 2015 Amazon.com, Inc. or its affiliates. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/pci.h> + +#include "ena_netdev.h" + +struct ena_stats { + char name[ETH_GSTRING_LEN]; + int stat_offset; +}; + +#define ENA_STAT_ENA_COM_ENTRY(stat) { \ + .name = #stat, \ + .stat_offset = offsetof(struct ena_com_stats_admin, stat) \ +} + +#define ENA_STAT_ENTRY(stat, stat_type) { \ + .name = #stat, \ + .stat_offset = offsetof(struct ena_stats_##stat_type, stat) \ +} + +#define ENA_STAT_RX_ENTRY(stat) \ + ENA_STAT_ENTRY(stat, rx) + +#define ENA_STAT_TX_ENTRY(stat) \ + ENA_STAT_ENTRY(stat, tx) + +#define ENA_STAT_GLOBAL_ENTRY(stat) \ + ENA_STAT_ENTRY(stat, dev) + +static const struct ena_stats ena_stats_global_strings[] = { + ENA_STAT_GLOBAL_ENTRY(tx_timeout), + ENA_STAT_GLOBAL_ENTRY(io_suspend), + ENA_STAT_GLOBAL_ENTRY(io_resume), + ENA_STAT_GLOBAL_ENTRY(wd_expired), + ENA_STAT_GLOBAL_ENTRY(interface_up), + ENA_STAT_GLOBAL_ENTRY(interface_down), + ENA_STAT_GLOBAL_ENTRY(admin_q_pause), +}; + +static const struct ena_stats ena_stats_tx_strings[] = { + ENA_STAT_TX_ENTRY(cnt), + ENA_STAT_TX_ENTRY(bytes), + ENA_STAT_TX_ENTRY(queue_stop), + ENA_STAT_TX_ENTRY(queue_wakeup), + ENA_STAT_TX_ENTRY(dma_mapping_err), + ENA_STAT_TX_ENTRY(linearize), + ENA_STAT_TX_ENTRY(linearize_failed), + ENA_STAT_TX_ENTRY(napi_comp), + ENA_STAT_TX_ENTRY(tx_poll), + ENA_STAT_TX_ENTRY(doorbells), + ENA_STAT_TX_ENTRY(prepare_ctx_err), + ENA_STAT_TX_ENTRY(missing_tx_comp), + ENA_STAT_TX_ENTRY(bad_req_id), +}; + +static const struct ena_stats ena_stats_rx_strings[] = { + ENA_STAT_RX_ENTRY(cnt), + ENA_STAT_RX_ENTRY(bytes), + ENA_STAT_RX_ENTRY(refil_partial), + ENA_STAT_RX_ENTRY(bad_csum), + ENA_STAT_RX_ENTRY(page_alloc_fail), + ENA_STAT_RX_ENTRY(skb_alloc_fail), + ENA_STAT_RX_ENTRY(dma_mapping_err), + ENA_STAT_RX_ENTRY(bad_desc_num), + ENA_STAT_RX_ENTRY(rx_copybreak_pkt), +}; + +static const struct ena_stats ena_stats_ena_com_strings[] = { + ENA_STAT_ENA_COM_ENTRY(aborted_cmd), + ENA_STAT_ENA_COM_ENTRY(submitted_cmd), + ENA_STAT_ENA_COM_ENTRY(completed_cmd), + ENA_STAT_ENA_COM_ENTRY(out_of_space), + ENA_STAT_ENA_COM_ENTRY(no_completion), +}; + +#define ENA_STATS_ARRAY_GLOBAL ARRAY_SIZE(ena_stats_global_strings) +#define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings) +#define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings) +#define ENA_STATS_ARRAY_ENA_COM 
ARRAY_SIZE(ena_stats_ena_com_strings) + +static void ena_safe_update_stat(u64 *src, u64 *dst, + struct u64_stats_sync *syncp) +{ + unsigned int start; + + do { + start = u64_stats_fetch_begin_irq(syncp); + *(dst) = *src; + } while (u64_stats_fetch_retry_irq(syncp, start)); +} + +static void ena_queue_stats(struct ena_adapter *adapter, u64 **data) +{ + const struct ena_stats *ena_stats; + struct ena_ring *ring; + + u64 *ptr; + int i, j; + + for (i = 0; i < adapter->num_queues; i++) { + /* Tx stats */ + ring = &adapter->tx_ring[i]; + + for (j = 0; j < ENA_STATS_ARRAY_TX; j++) { + ena_stats = &ena_stats_tx_strings[j]; + + ptr = (u64 *)((uintptr_t)&ring->tx_stats + + (uintptr_t)ena_stats->stat_offset); + + ena_safe_update_stat(ptr, (*data)++, &ring->syncp); + } + + /* Rx stats */ + ring = &adapter->rx_ring[i]; + + for (j = 0; j < ENA_STATS_ARRAY_RX; j++) { + ena_stats = &ena_stats_rx_strings[j]; + + ptr = (u64 *)((uintptr_t)&ring->rx_stats + + (uintptr_t)ena_stats->stat_offset); + + ena_safe_update_stat(ptr, (*data)++, &ring->syncp); + } + } +} + +static void ena_dev_admin_queue_stats(struct ena_adapter *adapter, u64 **data) +{ + const struct ena_stats *ena_stats; + u32 *ptr; + int i; + + for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) { + ena_stats = &ena_stats_ena_com_strings[i]; + + ptr = (u32 *)((uintptr_t)&adapter->ena_dev->admin_queue.stats + + (uintptr_t)ena_stats->stat_offset); + + *(*data)++ = *ptr; + } +} + +static void ena_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + const struct ena_stats *ena_stats; + u64 *ptr; + int i; + + for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) { + ena_stats = &ena_stats_global_strings[i]; + + ptr = (u64 *)((uintptr_t)&adapter->dev_stats + + (uintptr_t)ena_stats->stat_offset); + + ena_safe_update_stat(ptr, data++, &adapter->syncp); + } + + ena_queue_stats(adapter, &data); + ena_dev_admin_queue_stats(adapter, &data); +} + +int ena_get_sset_count(struct net_device *netdev, int sset) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + + if (sset != ETH_SS_STATS) + return -EOPNOTSUPP; + + return adapter->num_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX) + + ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM; +} + +static void ena_queue_strings(struct ena_adapter *adapter, u8 **data) +{ + const struct ena_stats *ena_stats; + int i, j; + + for (i = 0; i < adapter->num_queues; i++) { + /* Tx stats */ + for (j = 0; j < ENA_STATS_ARRAY_TX; j++) { + ena_stats = &ena_stats_tx_strings[j]; + + snprintf(*data, ETH_GSTRING_LEN, + "queue_%u_tx_%s", i, ena_stats->name); + (*data) += ETH_GSTRING_LEN; + } + /* Rx stats */ + for (j = 0; j < ENA_STATS_ARRAY_RX; j++) { + ena_stats = &ena_stats_rx_strings[j]; + + snprintf(*data, ETH_GSTRING_LEN, + "queue_%u_rx_%s", i, ena_stats->name); + (*data) += ETH_GSTRING_LEN; + } + } +} + +static void ena_com_dev_strings(u8 **data) +{ + const struct ena_stats *ena_stats; + int i; + + for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) { + ena_stats = &ena_stats_ena_com_strings[i]; + + snprintf(*data, ETH_GSTRING_LEN, + "ena_admin_q_%s", ena_stats->name); + (*data) += ETH_GSTRING_LEN; + } +} + +static void ena_get_strings(struct net_device *netdev, u32 sset, u8 *data) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + const struct ena_stats *ena_stats; + int i; + + if (sset != ETH_SS_STATS) + return; + + for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) { + ena_stats = &ena_stats_global_strings[i]; + + memcpy(data, ena_stats->name, 
ETH_GSTRING_LEN);
+		data += ETH_GSTRING_LEN;
+	}
+
+	ena_queue_strings(adapter, &data);
+	ena_com_dev_strings(&data);
+}
+
+static int ena_get_link_ksettings(struct net_device *netdev,
+				  struct ethtool_link_ksettings *link_ksettings)
+{
+	struct ena_adapter *adapter = netdev_priv(netdev);
+	struct ena_com_dev *ena_dev = adapter->ena_dev;
+	struct ena_admin_get_feature_link_desc *link;
+	struct ena_admin_get_feat_resp feat_resp;
+	int rc;
+
+	rc = ena_com_get_link_params(ena_dev, &feat_resp);
+	if (rc)
+		return rc;
+
+	link = &feat_resp.u.link;
+	link_ksettings->base.speed = link->speed;
+
+	if (link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) {
+		ethtool_link_ksettings_add_link_mode(link_ksettings,
+						     supported, Autoneg);
+		ethtool_link_ksettings_add_link_mode(link_ksettings,
+						     advertising, Autoneg);
+	}
+
+	link_ksettings->base.autoneg =
+		(link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) ?
+		AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+	link_ksettings->base.duplex = DUPLEX_FULL;
+
+	return 0;
+}
+
+static int ena_get_coalesce(struct net_device *net_dev,
+			    struct ethtool_coalesce *coalesce)
+{
+	struct ena_adapter *adapter = netdev_priv(net_dev);
+	struct ena_com_dev *ena_dev = adapter->ena_dev;
+	struct ena_intr_moder_entry intr_moder_entry;
+
+	if (!ena_com_interrupt_moderation_supported(ena_dev)) {
+		/* the device doesn't support interrupt moderation */
+		return -EOPNOTSUPP;
+	}
+	coalesce->tx_coalesce_usecs =
+		ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) /
+			ena_dev->intr_delay_resolution;
+	if (!ena_com_get_adaptive_moderation_enabled(ena_dev)) {
+		coalesce->rx_coalesce_usecs =
+			ena_com_get_nonadaptive_moderation_interval_rx(ena_dev)
+			/ ena_dev->intr_delay_resolution;
+	} else {
+		ena_com_get_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_LOWEST, &intr_moder_entry);
+		coalesce->rx_coalesce_usecs_low = intr_moder_entry.intr_moder_interval;
+		coalesce->rx_max_coalesced_frames_low = intr_moder_entry.pkts_per_interval;
+
+		ena_com_get_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_MID, &intr_moder_entry);
+		coalesce->rx_coalesce_usecs = intr_moder_entry.intr_moder_interval;
+		coalesce->rx_max_coalesced_frames = intr_moder_entry.pkts_per_interval;
+
+		ena_com_get_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_HIGHEST, &intr_moder_entry);
+		coalesce->rx_coalesce_usecs_high = intr_moder_entry.intr_moder_interval;
+		coalesce->rx_max_coalesced_frames_high = intr_moder_entry.pkts_per_interval;
+	}
+	coalesce->use_adaptive_rx_coalesce =
+		ena_com_get_adaptive_moderation_enabled(ena_dev);
+
+	return 0;
+}
+
+static void ena_update_tx_rings_intr_moderation(struct ena_adapter *adapter)
+{
+	unsigned int val;
+	int i;
+
+	val = ena_com_get_nonadaptive_moderation_interval_tx(adapter->ena_dev);
+
+	for (i = 0; i < adapter->num_queues; i++)
+		adapter->tx_ring[i].smoothed_interval = val;
+}
+
+static int ena_set_coalesce(struct net_device *net_dev,
+			    struct ethtool_coalesce *coalesce)
+{
+	struct ena_adapter *adapter = netdev_priv(net_dev);
+	struct ena_com_dev *ena_dev = adapter->ena_dev;
+	struct ena_intr_moder_entry intr_moder_entry;
+	int rc;
+
+	if (!ena_com_interrupt_moderation_supported(ena_dev)) {
+		/* the device doesn't support interrupt moderation */
+		return -EOPNOTSUPP;
+	}
+
+	if (coalesce->rx_coalesce_usecs_irq ||
+	    coalesce->rx_max_coalesced_frames_irq ||
+	    coalesce->tx_coalesce_usecs_irq ||
+	    coalesce->tx_max_coalesced_frames ||
+	    coalesce->tx_max_coalesced_frames_irq ||
+	    coalesce->stats_block_coalesce_usecs ||
+	    
coalesce->use_adaptive_tx_coalesce || + coalesce->pkt_rate_low || + coalesce->tx_coalesce_usecs_low || + coalesce->tx_max_coalesced_frames_low || + coalesce->pkt_rate_high || + coalesce->tx_coalesce_usecs_high || + coalesce->tx_max_coalesced_frames_high || + coalesce->rate_sample_interval) + return -EINVAL; + + rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev, + coalesce->tx_coalesce_usecs); + if (rc) + return rc; + + ena_update_tx_rings_intr_moderation(adapter); + + if (ena_com_get_adaptive_moderation_enabled(ena_dev)) { + if (!coalesce->use_adaptive_rx_coalesce) { + ena_com_disable_adaptive_moderation(ena_dev); + rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, + coalesce->rx_coalesce_usecs); + return rc; + } + } else { /* was in non-adaptive mode */ + if (coalesce->use_adaptive_rx_coalesce) { + ena_com_enable_adaptive_moderation(ena_dev); + } else { + rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, + coalesce->rx_coalesce_usecs); + return rc; + } + } + + intr_moder_entry.intr_moder_interval = coalesce->rx_coalesce_usecs_low; + intr_moder_entry.pkts_per_interval = coalesce->rx_max_coalesced_frames_low; + intr_moder_entry.bytes_per_interval = ENA_INTR_BYTE_COUNT_NOT_SUPPORTED; + ena_com_init_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_LOWEST, &intr_moder_entry); + + intr_moder_entry.intr_moder_interval = coalesce->rx_coalesce_usecs; + intr_moder_entry.pkts_per_interval = coalesce->rx_max_coalesced_frames; + intr_moder_entry.bytes_per_interval = ENA_INTR_BYTE_COUNT_NOT_SUPPORTED; + ena_com_init_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_MID, &intr_moder_entry); + + intr_moder_entry.intr_moder_interval = coalesce->rx_coalesce_usecs_high; + intr_moder_entry.pkts_per_interval = coalesce->rx_max_coalesced_frames_high; + intr_moder_entry.bytes_per_interval = ENA_INTR_BYTE_COUNT_NOT_SUPPORTED; + ena_com_init_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_HIGHEST, &intr_moder_entry); + + return 0; +} + +static u32 ena_get_msglevel(struct net_device *netdev) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void ena_set_msglevel(struct net_device *netdev, u32 value) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = value; +} + +static void ena_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct ena_adapter *adapter = netdev_priv(dev); + + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(adapter->pdev), + sizeof(info->bus_info)); +} + +static void ena_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + struct ena_ring *tx_ring = &adapter->tx_ring[0]; + struct ena_ring *rx_ring = &adapter->rx_ring[0]; + + ring->rx_max_pending = rx_ring->ring_size; + ring->tx_max_pending = tx_ring->ring_size; + ring->rx_pending = rx_ring->ring_size; + ring->tx_pending = tx_ring->ring_size; +} + +static u32 ena_flow_hash_to_flow_type(u16 hash_fields) +{ + u32 data = 0; + + if (hash_fields & ENA_ADMIN_RSS_L2_DA) + data |= RXH_L2DA; + + if (hash_fields & ENA_ADMIN_RSS_L3_DA) + data |= RXH_IP_DST; + + if (hash_fields & ENA_ADMIN_RSS_L3_SA) + data |= RXH_IP_SRC; + + if (hash_fields & ENA_ADMIN_RSS_L4_DP) + data |= RXH_L4_B_2_3; + + if (hash_fields & ENA_ADMIN_RSS_L4_SP) + data |= RXH_L4_B_0_1; + + return data; +} + +static u16 
ena_flow_data_to_flow_hash(u32 hash_fields) +{ + u16 data = 0; + + if (hash_fields & RXH_L2DA) + data |= ENA_ADMIN_RSS_L2_DA; + + if (hash_fields & RXH_IP_DST) + data |= ENA_ADMIN_RSS_L3_DA; + + if (hash_fields & RXH_IP_SRC) + data |= ENA_ADMIN_RSS_L3_SA; + + if (hash_fields & RXH_L4_B_2_3) + data |= ENA_ADMIN_RSS_L4_DP; + + if (hash_fields & RXH_L4_B_0_1) + data |= ENA_ADMIN_RSS_L4_SP; + + return data; +} + +static int ena_get_rss_hash(struct ena_com_dev *ena_dev, + struct ethtool_rxnfc *cmd) +{ + enum ena_admin_flow_hash_proto proto; + u16 hash_fields; + int rc; + + cmd->data = 0; + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + proto = ENA_ADMIN_RSS_TCP4; + break; + case UDP_V4_FLOW: + proto = ENA_ADMIN_RSS_UDP4; + break; + case TCP_V6_FLOW: + proto = ENA_ADMIN_RSS_TCP6; + break; + case UDP_V6_FLOW: + proto = ENA_ADMIN_RSS_UDP6; + break; + case IPV4_FLOW: + proto = ENA_ADMIN_RSS_IP4; + break; + case IPV6_FLOW: + proto = ENA_ADMIN_RSS_IP6; + break; + case ETHER_FLOW: + proto = ENA_ADMIN_RSS_NOT_IP; + break; + case AH_V4_FLOW: + case ESP_V4_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + return -EOPNOTSUPP; + default: + return -EINVAL; + } + + rc = ena_com_get_hash_ctrl(ena_dev, proto, &hash_fields); + if (rc) { + /* If device don't have permission, return unsupported */ + if (rc == -EPERM) + rc = -EOPNOTSUPP; + return rc; + } + + cmd->data = ena_flow_hash_to_flow_type(hash_fields); + + return 0; +} + +static int ena_set_rss_hash(struct ena_com_dev *ena_dev, + struct ethtool_rxnfc *cmd) +{ + enum ena_admin_flow_hash_proto proto; + u16 hash_fields; + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + proto = ENA_ADMIN_RSS_TCP4; + break; + case UDP_V4_FLOW: + proto = ENA_ADMIN_RSS_UDP4; + break; + case TCP_V6_FLOW: + proto = ENA_ADMIN_RSS_TCP6; + break; + case UDP_V6_FLOW: + proto = ENA_ADMIN_RSS_UDP6; + break; + case IPV4_FLOW: + proto = ENA_ADMIN_RSS_IP4; + break; + case IPV6_FLOW: + proto = ENA_ADMIN_RSS_IP6; + break; + case ETHER_FLOW: + proto = ENA_ADMIN_RSS_NOT_IP; + break; + case AH_V4_FLOW: + case ESP_V4_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + return -EOPNOTSUPP; + default: + return -EINVAL; + } + + hash_fields = ena_flow_data_to_flow_hash(cmd->data); + + return ena_com_fill_hash_ctrl(ena_dev, proto, hash_fields); +} + +static int ena_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + int rc = 0; + + switch (info->cmd) { + case ETHTOOL_SRXFH: + rc = ena_set_rss_hash(adapter->ena_dev, info); + break; + case ETHTOOL_SRXCLSRLDEL: + case ETHTOOL_SRXCLSRLINS: + default: + netif_err(adapter, drv, netdev, + "Command parameter %d is not supported\n", info->cmd); + rc = -EOPNOTSUPP; + } + + return (rc == -EPERM) ? -EOPNOTSUPP : rc; +} + +static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info, + u32 *rules) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + int rc = 0; + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = adapter->num_queues; + rc = 0; + break; + case ETHTOOL_GRXFH: + rc = ena_get_rss_hash(adapter->ena_dev, info); + break; + case ETHTOOL_GRXCLSRLCNT: + case ETHTOOL_GRXCLSRULE: + case ETHTOOL_GRXCLSRLALL: + default: + netif_err(adapter, drv, netdev, + "Command parameter %d is not supported\n", info->cmd); + rc = -EOPNOTSUPP; + } + + return (rc == -EPERM) ? 
-EOPNOTSUPP : rc; +} + +static u32 ena_get_rxfh_indir_size(struct net_device *netdev) +{ + return ENA_RX_RSS_TABLE_SIZE; +} + +static u32 ena_get_rxfh_key_size(struct net_device *netdev) +{ + return ENA_HASH_KEY_SIZE; +} + +static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + enum ena_admin_hash_functions ena_func; + u8 func; + int rc; + + rc = ena_com_indirect_table_get(adapter->ena_dev, indir); + if (rc) + return rc; + + rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func, key); + if (rc) + return rc; + + switch (ena_func) { + case ENA_ADMIN_TOEPLITZ: + func = ETH_RSS_HASH_TOP; + break; + case ENA_ADMIN_CRC32: + func = ETH_RSS_HASH_XOR; + break; + default: + netif_err(adapter, drv, netdev, + "Command parameter is not supported\n"); + return -EOPNOTSUPP; + } + + if (hfunc) + *hfunc = func; + + return rc; +} + +static int ena_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + struct ena_com_dev *ena_dev = adapter->ena_dev; + enum ena_admin_hash_functions func; + int rc, i; + + if (indir) { + for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { + rc = ena_com_indirect_table_fill_entry(ena_dev, + ENA_IO_RXQ_IDX(indir[i]), + i); + if (unlikely(rc)) { + netif_err(adapter, drv, netdev, + "Cannot fill indirect table (index is too large)\n"); + return rc; + } + } + + rc = ena_com_indirect_table_set(ena_dev); + if (rc) { + netif_err(adapter, drv, netdev, + "Cannot set indirect table\n"); + return rc == -EPERM ? -EOPNOTSUPP : rc; + } + } + + switch (hfunc) { + case ETH_RSS_HASH_TOP: + func = ENA_ADMIN_TOEPLITZ; + break; + case ETH_RSS_HASH_XOR: + func = ENA_ADMIN_CRC32; + break; + default: + netif_err(adapter, drv, netdev, "Unsupported hfunc %d\n", + hfunc); + return -EOPNOTSUPP; + } + + if (key) { + rc = ena_com_fill_hash_function(ena_dev, func, key, + ENA_HASH_KEY_SIZE, + 0xFFFFFFFF); + if (unlikely(rc)) { + netif_err(adapter, drv, netdev, "Cannot fill key\n"); + return rc == -EPERM ? 
-EOPNOTSUPP : rc; + } + } + + return 0; +} + +static void ena_get_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + + channels->max_rx = ENA_MAX_NUM_IO_QUEUES; + channels->max_tx = ENA_MAX_NUM_IO_QUEUES; + channels->max_other = 0; + channels->max_combined = 0; + channels->rx_count = adapter->num_queues; + channels->tx_count = adapter->num_queues; + channels->other_count = 0; + channels->combined_count = 0; +} + +static int ena_get_tunable(struct net_device *netdev, + const struct ethtool_tunable *tuna, void *data) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + *(u32 *)data = adapter->rx_copybreak; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int ena_set_tunable(struct net_device *netdev, + const struct ethtool_tunable *tuna, + const void *data) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + int ret = 0; + u32 len; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + len = *(u32 *)data; + if (len > adapter->netdev->mtu) { + ret = -EINVAL; + break; + } + adapter->rx_copybreak = len; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static const struct ethtool_ops ena_ethtool_ops = { + .get_link_ksettings = ena_get_link_ksettings, + .get_drvinfo = ena_get_drvinfo, + .get_msglevel = ena_get_msglevel, + .set_msglevel = ena_set_msglevel, + .get_link = ethtool_op_get_link, + .get_coalesce = ena_get_coalesce, + .set_coalesce = ena_set_coalesce, + .get_ringparam = ena_get_ringparam, + .get_sset_count = ena_get_sset_count, + .get_strings = ena_get_strings, + .get_ethtool_stats = ena_get_ethtool_stats, + .get_rxnfc = ena_get_rxnfc, + .set_rxnfc = ena_set_rxnfc, + .get_rxfh_indir_size = ena_get_rxfh_indir_size, + .get_rxfh_key_size = ena_get_rxfh_key_size, + .get_rxfh = ena_get_rxfh, + .set_rxfh = ena_set_rxfh, + .get_channels = ena_get_channels, + .get_tunable = ena_get_tunable, + .set_tunable = ena_set_tunable, +}; + +void ena_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &ena_ethtool_ops; +} + +static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf) +{ + struct net_device *netdev = adapter->netdev; + u8 *strings_buf; + u64 *data_buf; + int strings_num; + int i, rc; + + strings_num = ena_get_sset_count(netdev, ETH_SS_STATS); + if (strings_num <= 0) { + netif_err(adapter, drv, netdev, "Can't get stats num\n"); + return; + } + + strings_buf = devm_kzalloc(&adapter->pdev->dev, + strings_num * ETH_GSTRING_LEN, + GFP_ATOMIC); + if (!strings_buf) { + netif_err(adapter, drv, netdev, + "failed to alloc strings_buf\n"); + return; + } + + data_buf = devm_kzalloc(&adapter->pdev->dev, + strings_num * sizeof(u64), + GFP_ATOMIC); + if (!data_buf) { + netif_err(adapter, drv, netdev, + "failed to allocate data buf\n"); + devm_kfree(&adapter->pdev->dev, strings_buf); + return; + } + + ena_get_strings(netdev, ETH_SS_STATS, strings_buf); + ena_get_ethtool_stats(netdev, NULL, data_buf); + + /* If there is a buffer, dump stats, otherwise print them to dmesg */ + if (buf) + for (i = 0; i < strings_num; i++) { + rc = snprintf(buf, ETH_GSTRING_LEN + sizeof(u64), + "%s %llu\n", + strings_buf + i * ETH_GSTRING_LEN, + data_buf[i]); + buf += rc; + } + else + for (i = 0; i < strings_num; i++) + netif_err(adapter, drv, netdev, "%s: %llu\n", + strings_buf + i * ETH_GSTRING_LEN, + data_buf[i]); + + devm_kfree(&adapter->pdev->dev, strings_buf); + 
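	/* data_buf is devm-allocated as well; freeing it here returns the
	 * memory right away instead of holding it until device teardown
	 */
+	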
devm_kfree(&adapter->pdev->dev, data_buf); +} + +void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf) +{ + if (!buf) + return; + + ena_dump_stats_ex(adapter, buf); +} + +void ena_dump_stats_to_dmesg(struct ena_adapter *adapter) +{ + ena_dump_stats_ex(adapter, NULL); +} diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c new file mode 100644 index 000000000000..bfeaec5bd7b9 --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -0,0 +1,3272 @@ +/* + * Copyright 2015 Amazon.com, Inc. or its affiliates. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#ifdef CONFIG_RFS_ACCEL +#include <linux/cpu_rmap.h> +#endif /* CONFIG_RFS_ACCEL */ +#include <linux/ethtool.h> +#include <linux/if_vlan.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/numa.h> +#include <linux/pci.h> +#include <linux/utsname.h> +#include <linux/version.h> +#include <linux/vmalloc.h> +#include <net/ip.h> + +#include "ena_netdev.h" +#include "ena_pci_id_tbl.h" + +static char version[] = DEVICE_NAME " v" DRV_MODULE_VERSION "\n"; + +MODULE_AUTHOR("Amazon.com, Inc. or its affiliates"); +MODULE_DESCRIPTION(DEVICE_NAME); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +/* Time in jiffies before concluding the transmitter is hung. 
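+ * On expiry, ena_tx_timeout() below only counts the event and sets
+ * ENA_FLAG_TRIGGER_RESET to request a device reset; it does not try to
+ * restart the queue from the timeout callback itself.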
*/ +#define TX_TIMEOUT (5 * HZ) + +#define ENA_NAPI_BUDGET 64 + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \ + NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR) +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +static struct ena_aenq_handlers aenq_handlers; + +static struct workqueue_struct *ena_wq; + +MODULE_DEVICE_TABLE(pci, ena_pci_tbl); + +static int ena_rss_init_default(struct ena_adapter *adapter); + +static void ena_tx_timeout(struct net_device *dev) +{ + struct ena_adapter *adapter = netdev_priv(dev); + + u64_stats_update_begin(&adapter->syncp); + adapter->dev_stats.tx_timeout++; + u64_stats_update_end(&adapter->syncp); + + netif_err(adapter, tx_err, dev, "Transmit time out\n"); + + /* Change the state of the device to trigger reset */ + set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); +} + +static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu) +{ + int i; + + for (i = 0; i < adapter->num_queues; i++) + adapter->rx_ring[i].mtu = mtu; +} + +static int ena_change_mtu(struct net_device *dev, int new_mtu) +{ + struct ena_adapter *adapter = netdev_priv(dev); + int ret; + + if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) { + netif_err(adapter, drv, dev, + "Invalid MTU setting. new_mtu: %d\n", new_mtu); + + return -EINVAL; + } + + ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu); + if (!ret) { + netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu); + update_rx_ring_mtu(adapter, new_mtu); + dev->mtu = new_mtu; + } else { + netif_err(adapter, drv, dev, "Failed to set MTU to %d\n", + new_mtu); + } + + return ret; +} + +static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter) +{ +#ifdef CONFIG_RFS_ACCEL + u32 i; + int rc; + + adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_queues); + if (!adapter->netdev->rx_cpu_rmap) + return -ENOMEM; + for (i = 0; i < adapter->num_queues; i++) { + int irq_idx = ENA_IO_IRQ_IDX(i); + + rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap, + adapter->msix_entries[irq_idx].vector); + if (rc) { + free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap); + adapter->netdev->rx_cpu_rmap = NULL; + return rc; + } + } +#endif /* CONFIG_RFS_ACCEL */ + return 0; +} + +static void ena_init_io_rings_common(struct ena_adapter *adapter, + struct ena_ring *ring, u16 qid) +{ + ring->qid = qid; + ring->pdev = adapter->pdev; + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + ring->napi = &adapter->ena_napi[qid].napi; + ring->adapter = adapter; + ring->ena_dev = adapter->ena_dev; + ring->per_napi_packets = 0; + ring->per_napi_bytes = 0; + ring->cpu = 0; + u64_stats_init(&ring->syncp); +} + +static void ena_init_io_rings(struct ena_adapter *adapter) +{ + struct ena_com_dev *ena_dev; + struct ena_ring *txr, *rxr; + int i; + + ena_dev = adapter->ena_dev; + + for (i = 0; i < adapter->num_queues; i++) { + txr = &adapter->tx_ring[i]; + rxr = &adapter->rx_ring[i]; + + /* TX/RX common ring state */ + ena_init_io_rings_common(adapter, txr, i); + ena_init_io_rings_common(adapter, rxr, i); + + /* TX specific ring state */ + txr->ring_size = adapter->tx_ring_size; + txr->tx_max_header_size = ena_dev->tx_max_header_size; + txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type; + txr->sgl_size = adapter->max_tx_sgl_size; + txr->smoothed_interval = + ena_com_get_nonadaptive_moderation_interval_tx(ena_dev); + + /* RX specific ring state */ + rxr->ring_size = adapter->rx_ring_size; + rxr->rx_copybreak = 
adapter->rx_copybreak; + rxr->sgl_size = adapter->max_rx_sgl_size; + rxr->smoothed_interval = + ena_com_get_nonadaptive_moderation_interval_rx(ena_dev); + } +} + +/* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors) + * @adapter: network interface device structure + * @qid: queue index + * + * Return 0 on success, negative on failure + */ +static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid) +{ + struct ena_ring *tx_ring = &adapter->tx_ring[qid]; + struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)]; + int size, i, node; + + if (tx_ring->tx_buffer_info) { + netif_err(adapter, ifup, + adapter->netdev, "tx_buffer_info info is not NULL"); + return -EEXIST; + } + + size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size; + node = cpu_to_node(ena_irq->cpu); + + tx_ring->tx_buffer_info = vzalloc_node(size, node); + if (!tx_ring->tx_buffer_info) { + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + return -ENOMEM; + } + + size = sizeof(u16) * tx_ring->ring_size; + tx_ring->free_tx_ids = vzalloc_node(size, node); + if (!tx_ring->free_tx_ids) { + tx_ring->free_tx_ids = vzalloc(size); + if (!tx_ring->free_tx_ids) { + vfree(tx_ring->tx_buffer_info); + return -ENOMEM; + } + } + + /* Req id ring for TX out of order completions */ + for (i = 0; i < tx_ring->ring_size; i++) + tx_ring->free_tx_ids[i] = i; + + /* Reset tx statistics */ + memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats)); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->cpu = ena_irq->cpu; + return 0; +} + +/* ena_free_tx_resources - Free I/O Tx Resources per Queue + * @adapter: network interface device structure + * @qid: queue index + * + * Free all transmit software resources + */ +static void ena_free_tx_resources(struct ena_adapter *adapter, int qid) +{ + struct ena_ring *tx_ring = &adapter->tx_ring[qid]; + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + vfree(tx_ring->free_tx_ids); + tx_ring->free_tx_ids = NULL; +} + +/* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues + * @adapter: private structure + * + * Return 0 on success, negative on failure + */ +static int ena_setup_all_tx_resources(struct ena_adapter *adapter) +{ + int i, rc = 0; + + for (i = 0; i < adapter->num_queues; i++) { + rc = ena_setup_tx_resources(adapter, i); + if (rc) + goto err_setup_tx; + } + + return 0; + +err_setup_tx: + + netif_err(adapter, ifup, adapter->netdev, + "Tx queue %d: allocation failed\n", i); + + /* rewind the index freeing the rings as we go */ + while (i--) + ena_free_tx_resources(adapter, i); + return rc; +} + +/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + */ +static void ena_free_all_io_tx_resources(struct ena_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_queues; i++) + ena_free_tx_resources(adapter, i); +} + +/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors) + * @adapter: network interface device structure + * @qid: queue index + * + * Returns 0 on success, negative on failure + */ +static int ena_setup_rx_resources(struct ena_adapter *adapter, + u32 qid) +{ + struct ena_ring *rx_ring = &adapter->rx_ring[qid]; + struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)]; + int size, node; + + if (rx_ring->rx_buffer_info) { + netif_err(adapter, ifup, adapter->netdev, + "rx_buffer_info is not NULL"); + return -EEXIST; + } + + /* alloc extra 
element so in rx path + * we can always prefetch rx_info + 1 + */ + size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1); + node = cpu_to_node(ena_irq->cpu); + + rx_ring->rx_buffer_info = vzalloc_node(size, node); + if (!rx_ring->rx_buffer_info) { + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + return -ENOMEM; + } + + /* Reset rx statistics */ + memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats)); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->cpu = ena_irq->cpu; + + return 0; +} + +/* ena_free_rx_resources - Free I/O Rx Resources + * @adapter: network interface device structure + * @qid: queue index + * + * Free all receive software resources + */ +static void ena_free_rx_resources(struct ena_adapter *adapter, + u32 qid) +{ + struct ena_ring *rx_ring = &adapter->rx_ring[qid]; + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; +} + +/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static int ena_setup_all_rx_resources(struct ena_adapter *adapter) +{ + int i, rc = 0; + + for (i = 0; i < adapter->num_queues; i++) { + rc = ena_setup_rx_resources(adapter, i); + if (rc) + goto err_setup_rx; + } + + return 0; + +err_setup_rx: + + netif_err(adapter, ifup, adapter->netdev, + "Rx queue %d: allocation failed\n", i); + + /* rewind the index freeing the rings as we go */ + while (i--) + ena_free_rx_resources(adapter, i); + return rc; +} + +/* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + */ +static void ena_free_all_io_rx_resources(struct ena_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_queues; i++) + ena_free_rx_resources(adapter, i); +} + +static inline int ena_alloc_rx_page(struct ena_ring *rx_ring, + struct ena_rx_buffer *rx_info, gfp_t gfp) +{ + struct ena_com_buf *ena_buf; + struct page *page; + dma_addr_t dma; + + /* if previous allocated page is not used */ + if (unlikely(rx_info->page)) + return 0; + + page = alloc_page(gfp); + if (unlikely(!page)) { + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->rx_stats.page_alloc_fail++; + u64_stats_update_end(&rx_ring->syncp); + return -ENOMEM; + } + + dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(rx_ring->dev, dma))) { + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->rx_stats.dma_mapping_err++; + u64_stats_update_end(&rx_ring->syncp); + + __free_page(page); + return -EIO; + } + netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, + "alloc page %p, rx_info %p\n", page, rx_info); + + rx_info->page = page; + rx_info->page_offset = 0; + ena_buf = &rx_info->ena_buf; + ena_buf->paddr = dma; + ena_buf->len = PAGE_SIZE; + + return 0; +} + +static void ena_free_rx_page(struct ena_ring *rx_ring, + struct ena_rx_buffer *rx_info) +{ + struct page *page = rx_info->page; + struct ena_com_buf *ena_buf = &rx_info->ena_buf; + + if (unlikely(!page)) { + netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, + "Trying to free unallocated buffer\n"); + return; + } + + dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE, + DMA_FROM_DEVICE); + + __free_page(page); + rx_info->page = NULL; +} + +static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num) +{ + u16 next_to_use; + u32 i; + int rc; + + next_to_use = rx_ring->next_to_use; + + for (i = 0; i < num; i++) { + 
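		/* back this descriptor slot with a freshly allocated page and
		 * post it to the device as a single Rx descriptor
		 */
+		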
struct ena_rx_buffer *rx_info = + &rx_ring->rx_buffer_info[next_to_use]; + + rc = ena_alloc_rx_page(rx_ring, rx_info, + __GFP_COLD | GFP_ATOMIC | __GFP_COMP); + if (unlikely(rc < 0)) { + netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, + "failed to alloc buffer for rx queue %d\n", + rx_ring->qid); + break; + } + rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq, + &rx_info->ena_buf, + next_to_use); + if (unlikely(rc)) { + netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev, + "failed to add buffer for rx queue %d\n", + rx_ring->qid); + break; + } + next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use, + rx_ring->ring_size); + } + + if (unlikely(i < num)) { + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->rx_stats.refil_partial++; + u64_stats_update_end(&rx_ring->syncp); + netdev_warn(rx_ring->netdev, + "refilled rx qid %d with only %d buffers (from %d)\n", + rx_ring->qid, i, num); + } + + if (likely(i)) { + /* Add memory barrier to make sure the desc were written before + * issue a doorbell + */ + wmb(); + ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); + } + + rx_ring->next_to_use = next_to_use; + + return i; +} + +static void ena_free_rx_bufs(struct ena_adapter *adapter, + u32 qid) +{ + struct ena_ring *rx_ring = &adapter->rx_ring[qid]; + u32 i; + + for (i = 0; i < rx_ring->ring_size; i++) { + struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i]; + + if (rx_info->page) + ena_free_rx_page(rx_ring, rx_info); + } +} + +/* ena_refill_all_rx_bufs - allocate all queues Rx buffers + * @adapter: board private structure + * + */ +static void ena_refill_all_rx_bufs(struct ena_adapter *adapter) +{ + struct ena_ring *rx_ring; + int i, rc, bufs_num; + + for (i = 0; i < adapter->num_queues; i++) { + rx_ring = &adapter->rx_ring[i]; + bufs_num = rx_ring->ring_size - 1; + rc = ena_refill_rx_bufs(rx_ring, bufs_num); + + if (unlikely(rc != bufs_num)) + netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev, + "refilling Queue %d failed. 
allocated %d buffers from: %d\n", + i, rc, bufs_num); + } +} + +static void ena_free_all_rx_bufs(struct ena_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_queues; i++) + ena_free_rx_bufs(adapter, i); +} + +/* ena_free_tx_bufs - Free Tx Buffers per Queue + * @tx_ring: TX ring for which buffers be freed + */ +static void ena_free_tx_bufs(struct ena_ring *tx_ring) +{ + u32 i; + + for (i = 0; i < tx_ring->ring_size; i++) { + struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; + struct ena_com_buf *ena_buf; + int nr_frags; + int j; + + if (!tx_info->skb) + continue; + + netdev_notice(tx_ring->netdev, + "free uncompleted tx skb qid %d idx 0x%x\n", + tx_ring->qid, i); + + ena_buf = tx_info->bufs; + dma_unmap_single(tx_ring->dev, + ena_buf->paddr, + ena_buf->len, + DMA_TO_DEVICE); + + /* unmap remaining mapped pages */ + nr_frags = tx_info->num_of_bufs - 1; + for (j = 0; j < nr_frags; j++) { + ena_buf++; + dma_unmap_page(tx_ring->dev, + ena_buf->paddr, + ena_buf->len, + DMA_TO_DEVICE); + } + + dev_kfree_skb_any(tx_info->skb); + } + netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->qid)); +} + +static void ena_free_all_tx_bufs(struct ena_adapter *adapter) +{ + struct ena_ring *tx_ring; + int i; + + for (i = 0; i < adapter->num_queues; i++) { + tx_ring = &adapter->tx_ring[i]; + ena_free_tx_bufs(tx_ring); + } +} + +static void ena_destroy_all_tx_queues(struct ena_adapter *adapter) +{ + u16 ena_qid; + int i; + + for (i = 0; i < adapter->num_queues; i++) { + ena_qid = ENA_IO_TXQ_IDX(i); + ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); + } +} + +static void ena_destroy_all_rx_queues(struct ena_adapter *adapter) +{ + u16 ena_qid; + int i; + + for (i = 0; i < adapter->num_queues; i++) { + ena_qid = ENA_IO_RXQ_IDX(i); + ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); + } +} + +static void ena_destroy_all_io_queues(struct ena_adapter *adapter) +{ + ena_destroy_all_tx_queues(adapter); + ena_destroy_all_rx_queues(adapter); +} + +static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) +{ + struct ena_tx_buffer *tx_info = NULL; + + if (likely(req_id < tx_ring->ring_size)) { + tx_info = &tx_ring->tx_buffer_info[req_id]; + if (likely(tx_info->skb)) + return 0; + } + + if (tx_info) + netif_err(tx_ring->adapter, tx_done, tx_ring->netdev, + "tx_info doesn't have valid skb\n"); + else + netif_err(tx_ring->adapter, tx_done, tx_ring->netdev, + "Invalid req_id: %hu\n", req_id); + + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.bad_req_id++; + u64_stats_update_end(&tx_ring->syncp); + + /* Trigger device reset */ + set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags); + return -EFAULT; +} + +static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) +{ + struct netdev_queue *txq; + bool above_thresh; + u32 tx_bytes = 0; + u32 total_done = 0; + u16 next_to_clean; + u16 req_id; + int tx_pkts = 0; + int rc; + + next_to_clean = tx_ring->next_to_clean; + txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid); + + while (tx_pkts < budget) { + struct ena_tx_buffer *tx_info; + struct sk_buff *skb; + struct ena_com_buf *ena_buf; + int i, nr_frags; + + rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, + &req_id); + if (rc) + break; + + rc = validate_tx_req_id(tx_ring, req_id); + if (rc) + break; + + tx_info = &tx_ring->tx_buffer_info[req_id]; + skb = tx_info->skb; + + /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */ + prefetch(&skb->end); + + tx_info->skb = NULL; + tx_info->last_jiffies = 0; + + if 
(likely(tx_info->num_of_bufs != 0)) { + ena_buf = tx_info->bufs; + + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(ena_buf, paddr), + dma_unmap_len(ena_buf, len), + DMA_TO_DEVICE); + + /* unmap remaining mapped pages */ + nr_frags = tx_info->num_of_bufs - 1; + for (i = 0; i < nr_frags; i++) { + ena_buf++; + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(ena_buf, paddr), + dma_unmap_len(ena_buf, len), + DMA_TO_DEVICE); + } + } + + netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, + "tx_poll: q %d skb %p completed\n", tx_ring->qid, + skb); + + tx_bytes += skb->len; + dev_kfree_skb(skb); + tx_pkts++; + total_done += tx_info->tx_descs; + + tx_ring->free_tx_ids[next_to_clean] = req_id; + next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean, + tx_ring->ring_size); + } + + tx_ring->next_to_clean = next_to_clean; + ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done); + ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq); + + netdev_tx_completed_queue(txq, tx_pkts, tx_bytes); + + netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, + "tx_poll: q %d done. total pkts: %d\n", + tx_ring->qid, tx_pkts); + + /* need to make the rings circular update visible to + * ena_start_xmit() before checking for netif_queue_stopped(). + */ + smp_mb(); + + above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) > + ENA_TX_WAKEUP_THRESH; + if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) { + __netif_tx_lock(txq, smp_processor_id()); + above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) > + ENA_TX_WAKEUP_THRESH; + if (netif_tx_queue_stopped(txq) && above_thresh) { + netif_tx_wake_queue(txq); + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.queue_wakeup++; + u64_stats_update_end(&tx_ring->syncp); + } + __netif_tx_unlock(txq); + } + + tx_ring->per_napi_bytes += tx_bytes; + tx_ring->per_napi_packets += tx_pkts; + + return tx_pkts; +} + +static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, + struct ena_com_rx_buf_info *ena_bufs, + u32 descs, + u16 *next_to_clean) +{ + struct sk_buff *skb; + struct ena_rx_buffer *rx_info = + &rx_ring->rx_buffer_info[*next_to_clean]; + u32 len; + u32 buf = 0; + void *va; + + len = ena_bufs[0].len; + if (unlikely(!rx_info->page)) { + netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, + "Page is NULL\n"); + return NULL; + } + + netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, + "rx_info %p page %p\n", + rx_info, rx_info->page); + + /* save virt address of first buffer */ + va = page_address(rx_info->page) + rx_info->page_offset; + prefetch(va + NET_IP_ALIGN); + + if (len <= rx_ring->rx_copybreak) { + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_copybreak); + if (unlikely(!skb)) { + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->rx_stats.skb_alloc_fail++; + u64_stats_update_end(&rx_ring->syncp); + netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, + "Failed to allocate skb\n"); + return NULL; + } + + netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, + "rx allocated small packet. len %d. 
data_len %d\n", + skb->len, skb->data_len); + + /* sync this buffer for CPU use */ + dma_sync_single_for_cpu(rx_ring->dev, + dma_unmap_addr(&rx_info->ena_buf, paddr), + len, + DMA_FROM_DEVICE); + skb_copy_to_linear_data(skb, va, len); + dma_sync_single_for_device(rx_ring->dev, + dma_unmap_addr(&rx_info->ena_buf, paddr), + len, + DMA_FROM_DEVICE); + + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, rx_ring->netdev); + *next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs, + rx_ring->ring_size); + return skb; + } + + skb = napi_get_frags(rx_ring->napi); + if (unlikely(!skb)) { + netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, + "Failed allocating skb\n"); + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->rx_stats.skb_alloc_fail++; + u64_stats_update_end(&rx_ring->syncp); + return NULL; + } + + do { + dma_unmap_page(rx_ring->dev, + dma_unmap_addr(&rx_info->ena_buf, paddr), + PAGE_SIZE, DMA_FROM_DEVICE); + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page, + rx_info->page_offset, len, PAGE_SIZE); + + netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, + "rx skb updated. len %d. data_len %d\n", + skb->len, skb->data_len); + + rx_info->page = NULL; + *next_to_clean = + ENA_RX_RING_IDX_NEXT(*next_to_clean, + rx_ring->ring_size); + if (likely(--descs == 0)) + break; + rx_info = &rx_ring->rx_buffer_info[*next_to_clean]; + len = ena_bufs[++buf].len; + } while (1); + + return skb; +} + +/* ena_rx_checksum - indicate in skb if hw indicated a good cksum + * @adapter: structure containing adapter specific data + * @ena_rx_ctx: received packet context/metadata + * @skb: skb currently being received and modified + */ +static inline void ena_rx_checksum(struct ena_ring *rx_ring, + struct ena_com_rx_ctx *ena_rx_ctx, + struct sk_buff *skb) +{ + /* Rx csum disabled */ + if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) { + skb->ip_summed = CHECKSUM_NONE; + return; + } + + /* For fragmented packets the checksum isn't valid */ + if (ena_rx_ctx->frag) { + skb->ip_summed = CHECKSUM_NONE; + return; + } + + /* if IP and error */ + if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) && + (ena_rx_ctx->l3_csum_err))) { + /* ipv4 checksum error */ + skb->ip_summed = CHECKSUM_NONE; + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->rx_stats.bad_csum++; + u64_stats_update_end(&rx_ring->syncp); + netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, + "RX IPv4 header checksum error\n"); + return; + } + + /* if TCP/UDP */ + if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) || + (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) { + if (unlikely(ena_rx_ctx->l4_csum_err)) { + /* TCP/UDP checksum error */ + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->rx_stats.bad_csum++; + u64_stats_update_end(&rx_ring->syncp); + netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, + "RX L4 checksum error\n"); + skb->ip_summed = CHECKSUM_NONE; + return; + } + + skb->ip_summed = CHECKSUM_UNNECESSARY; + } +} + +static void ena_set_rx_hash(struct ena_ring *rx_ring, + struct ena_com_rx_ctx *ena_rx_ctx, + struct sk_buff *skb) +{ + enum pkt_hash_types hash_type; + + if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) { + if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) || + (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) + + hash_type = PKT_HASH_TYPE_L4; + else + hash_type = PKT_HASH_TYPE_NONE; + + /* Override hash type if the packet is fragmented */ + if (ena_rx_ctx->frag) + hash_type = PKT_HASH_TYPE_NONE; + + skb_set_hash(skb, 
ena_rx_ctx->hash, hash_type); + } +} + +/* ena_clean_rx_irq - Cleanup RX irq + * @rx_ring: RX ring to clean + * @napi: napi handler + * @budget: how many packets driver is allowed to clean + * + * Returns the number of cleaned buffers. + */ +static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, + u32 budget) +{ + u16 next_to_clean = rx_ring->next_to_clean; + u32 res_budget, work_done; + + struct ena_com_rx_ctx ena_rx_ctx; + struct ena_adapter *adapter; + struct sk_buff *skb; + int refill_required; + int refill_threshold; + int rc = 0; + int total_len = 0; + int rx_copybreak_pkt = 0; + + netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, + "%s qid %d\n", __func__, rx_ring->qid); + res_budget = budget; + + do { + ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; + ena_rx_ctx.max_bufs = rx_ring->sgl_size; + ena_rx_ctx.descs = 0; + rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, + rx_ring->ena_com_io_sq, + &ena_rx_ctx); + if (unlikely(rc)) + goto error; + + if (unlikely(ena_rx_ctx.descs == 0)) + break; + + netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, + "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n", + rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto, + ena_rx_ctx.l4_proto, ena_rx_ctx.hash); + + /* allocate skb and fill it */ + skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs, + &next_to_clean); + + /* exit if we failed to retrieve a buffer */ + if (unlikely(!skb)) { + next_to_clean = ENA_RX_RING_IDX_ADD(next_to_clean, + ena_rx_ctx.descs, + rx_ring->ring_size); + break; + } + + ena_rx_checksum(rx_ring, &ena_rx_ctx, skb); + + ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb); + + skb_record_rx_queue(skb, rx_ring->qid); + + if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) { + total_len += rx_ring->ena_bufs[0].len; + rx_copybreak_pkt++; + napi_gro_receive(napi, skb); + } else { + total_len += skb->len; + napi_gro_frags(napi); + } + + res_budget--; + } while (likely(res_budget)); + + work_done = budget - res_budget; + rx_ring->per_napi_bytes += total_len; + rx_ring->per_napi_packets += work_done; + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->rx_stats.bytes += total_len; + rx_ring->rx_stats.cnt += work_done; + rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt; + u64_stats_update_end(&rx_ring->syncp); + + rx_ring->next_to_clean = next_to_clean; + + refill_required = ena_com_sq_empty_space(rx_ring->ena_com_io_sq); + refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER; + + /* Optimization, try to batch new rx buffers */ + if (refill_required > refill_threshold) { + ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq); + ena_refill_rx_bufs(rx_ring, refill_required); + } + + return work_done; + +error: + adapter = netdev_priv(rx_ring->netdev); + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->rx_stats.bad_desc_num++; + u64_stats_update_end(&rx_ring->syncp); + + /* Too many desc from the device. Trigger reset */ + set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); + + return 0; +} + +inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring, + struct ena_ring *tx_ring) +{ + /* We apply adaptive moderation on Rx path only. + * Tx uses static interrupt moderation. 
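+ * The per-napi packet and byte counters accumulated by the Rx poll
+ * handler feed the delay calculation below; the resulting
+ * smoothed_interval is written to the device the next time
+ * ena_io_poll() unmasks the interrupt.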
+ */ + ena_com_calculate_interrupt_delay(rx_ring->ena_dev, + rx_ring->per_napi_packets, + rx_ring->per_napi_bytes, + &rx_ring->smoothed_interval, + &rx_ring->moder_tbl_idx); + + /* Reset per napi packets/bytes */ + tx_ring->per_napi_packets = 0; + tx_ring->per_napi_bytes = 0; + rx_ring->per_napi_packets = 0; + rx_ring->per_napi_bytes = 0; +} + +static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring, + struct ena_ring *rx_ring) +{ + int cpu = get_cpu(); + int numa_node; + + /* Check only one ring since the 2 rings are running on the same cpu */ + if (likely(tx_ring->cpu == cpu)) + goto out; + + numa_node = cpu_to_node(cpu); + put_cpu(); + + if (numa_node != NUMA_NO_NODE) { + ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node); + ena_com_update_numa_node(rx_ring->ena_com_io_cq, numa_node); + } + + tx_ring->cpu = cpu; + rx_ring->cpu = cpu; + + return; +out: + put_cpu(); +} + +static int ena_io_poll(struct napi_struct *napi, int budget) +{ + struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi); + struct ena_ring *tx_ring, *rx_ring; + struct ena_eth_io_intr_reg intr_reg; + + u32 tx_work_done; + u32 rx_work_done; + int tx_budget; + int napi_comp_call = 0; + int ret; + + tx_ring = ena_napi->tx_ring; + rx_ring = ena_napi->rx_ring; + + tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER; + + if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) { + napi_complete_done(napi, 0); + return 0; + } + + tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget); + rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget); + + if ((budget > rx_work_done) && (tx_budget > tx_work_done)) { + napi_complete_done(napi, rx_work_done); + + napi_comp_call = 1; + /* Tx and Rx share the same interrupt vector */ + if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev)) + ena_adjust_intr_moderation(rx_ring, tx_ring); + + /* Update intr register: rx intr delay, tx intr delay and + * interrupt unmask + */ + ena_com_update_intr_reg(&intr_reg, + rx_ring->smoothed_interval, + tx_ring->smoothed_interval, + true); + + /* It is a shared MSI-X. Tx and Rx CQ have pointer to it. 
+ * So we use one of them to reach the intr reg + */ + ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg); + + ena_update_ring_numa_node(tx_ring, rx_ring); + + ret = rx_work_done; + } else { + ret = budget; + } + + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.napi_comp += napi_comp_call; + tx_ring->tx_stats.tx_poll++; + u64_stats_update_end(&tx_ring->syncp); + + return ret; +} + +static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data) +{ + struct ena_adapter *adapter = (struct ena_adapter *)data; + + ena_com_admin_q_comp_intr_handler(adapter->ena_dev); + + /* Don't call the aenq handler before probe is done */ + if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))) + ena_com_aenq_intr_handler(adapter->ena_dev, data); + + return IRQ_HANDLED; +} + +/* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx + * @irq: interrupt number + * @data: pointer to a network interface private napi device structure + */ +static irqreturn_t ena_intr_msix_io(int irq, void *data) +{ + struct ena_napi *ena_napi = data; + + napi_schedule(&ena_napi->napi); + + return IRQ_HANDLED; +} + +static int ena_enable_msix(struct ena_adapter *adapter, int num_queues) +{ + int i, msix_vecs, rc; + + if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) { + netif_err(adapter, probe, adapter->netdev, + "Error, MSI-X is already enabled\n"); + return -EPERM; + } + + /* Reserved the max msix vectors we might need */ + msix_vecs = ENA_MAX_MSIX_VEC(num_queues); + + netif_dbg(adapter, probe, adapter->netdev, + "trying to enable MSI-X, vectors %d\n", msix_vecs); + + adapter->msix_entries = vzalloc(msix_vecs * sizeof(struct msix_entry)); + + if (!adapter->msix_entries) + return -ENOMEM; + + for (i = 0; i < msix_vecs; i++) + adapter->msix_entries[i].entry = i; + + rc = pci_enable_msix(adapter->pdev, adapter->msix_entries, msix_vecs); + if (rc != 0) { + netif_err(adapter, probe, adapter->netdev, + "Failed to enable MSI-X, vectors %d rc %d\n", + msix_vecs, rc); + return -ENOSPC; + } + + netif_dbg(adapter, probe, adapter->netdev, "enable MSI-X, vectors %d\n", + msix_vecs); + + if (msix_vecs >= 1) { + if (ena_init_rx_cpu_rmap(adapter)) + netif_warn(adapter, probe, adapter->netdev, + "Failed to map IRQs to CPUs\n"); + } + + adapter->msix_vecs = msix_vecs; + set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags); + + return 0; +} + +static void ena_setup_mgmnt_intr(struct ena_adapter *adapter) +{ + u32 cpu; + + snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name, + ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s", + pci_name(adapter->pdev)); + adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = + ena_intr_msix_mgmnt; + adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter; + adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector = + adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector; + cpu = cpumask_first(cpu_online_mask); + adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu; + cpumask_set_cpu(cpu, + &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask); +} + +static void ena_setup_io_intr(struct ena_adapter *adapter) +{ + struct net_device *netdev; + int irq_idx, i, cpu; + + netdev = adapter->netdev; + + for (i = 0; i < adapter->num_queues; i++) { + irq_idx = ENA_IO_IRQ_IDX(i); + cpu = i % num_online_cpus(); + + snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE, + "%s-Tx-Rx-%d", netdev->name, i); + adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io; + adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i]; + adapter->irq_tbl[irq_idx].vector = + adapter->msix_entries[irq_idx].vector; + adapter->irq_tbl[irq_idx].cpu = cpu; + + 
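+ /* IO queues are spread round-robin over the online CPUs; the mask
+ * built here is only an affinity hint and is applied with
+ * irq_set_affinity_hint() when the vector is requested in
+ * ena_request_io_irq().
+ */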
cpumask_set_cpu(cpu, + &adapter->irq_tbl[irq_idx].affinity_hint_mask); + } +} + +static int ena_request_mgmnt_irq(struct ena_adapter *adapter) +{ + unsigned long flags = 0; + struct ena_irq *irq; + int rc; + + irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; + rc = request_irq(irq->vector, irq->handler, flags, irq->name, + irq->data); + if (rc) { + netif_err(adapter, probe, adapter->netdev, + "failed to request admin irq\n"); + return rc; + } + + netif_dbg(adapter, probe, adapter->netdev, + "set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n", + irq->affinity_hint_mask.bits[0], irq->vector); + + irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask); + + return rc; +} + +static int ena_request_io_irq(struct ena_adapter *adapter) +{ + unsigned long flags = 0; + struct ena_irq *irq; + int rc = 0, i, k; + + if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) { + netif_err(adapter, ifup, adapter->netdev, + "Failed to request I/O IRQ: MSI-X is not enabled\n"); + return -EINVAL; + } + + for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { + irq = &adapter->irq_tbl[i]; + rc = request_irq(irq->vector, irq->handler, flags, irq->name, + irq->data); + if (rc) { + netif_err(adapter, ifup, adapter->netdev, + "Failed to request I/O IRQ. index %d rc %d\n", + i, rc); + goto err; + } + + netif_dbg(adapter, ifup, adapter->netdev, + "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n", + i, irq->affinity_hint_mask.bits[0], irq->vector); + + irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask); + } + + return rc; + +err: + for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) { + irq = &adapter->irq_tbl[k]; + free_irq(irq->vector, irq->data); + } + + return rc; +} + +static void ena_free_mgmnt_irq(struct ena_adapter *adapter) +{ + struct ena_irq *irq; + + irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; + synchronize_irq(irq->vector); + irq_set_affinity_hint(irq->vector, NULL); + free_irq(irq->vector, irq->data); +} + +static void ena_free_io_irq(struct ena_adapter *adapter) +{ + struct ena_irq *irq; + int i; + +#ifdef CONFIG_RFS_ACCEL + if (adapter->msix_vecs >= 1) { + free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap); + adapter->netdev->rx_cpu_rmap = NULL; + } +#endif /* CONFIG_RFS_ACCEL */ + + for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { + irq = &adapter->irq_tbl[i]; + irq_set_affinity_hint(irq->vector, NULL); + free_irq(irq->vector, irq->data); + } +} + +static void ena_disable_msix(struct ena_adapter *adapter) +{ + if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) + pci_disable_msix(adapter->pdev); + + if (adapter->msix_entries) + vfree(adapter->msix_entries); + adapter->msix_entries = NULL; +} + +static void ena_disable_io_intr_sync(struct ena_adapter *adapter) +{ + int i; + + if (!netif_running(adapter->netdev)) + return; + + for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) + synchronize_irq(adapter->irq_tbl[i].vector); +} + +static void ena_del_napi(struct ena_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_queues; i++) + netif_napi_del(&adapter->ena_napi[i].napi); +} + +static void ena_init_napi(struct ena_adapter *adapter) +{ + struct ena_napi *napi; + int i; + + for (i = 0; i < adapter->num_queues; i++) { + napi = &adapter->ena_napi[i]; + + netif_napi_add(adapter->netdev, + &adapter->ena_napi[i].napi, + ena_io_poll, + ENA_NAPI_BUDGET); + napi->rx_ring = &adapter->rx_ring[i]; + napi->tx_ring = &adapter->tx_ring[i]; + napi->qid = i; + } +} + +static void ena_napi_disable_all(struct ena_adapter *adapter) +{ + int i; + + 
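+ /* napi_disable() does not return until a poll that is already
+ * running has completed, so no ena_io_poll() instance can still be
+ * executing once this loop finishes.
+ */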
for (i = 0; i < adapter->num_queues; i++) + napi_disable(&adapter->ena_napi[i].napi); +} + +static void ena_napi_enable_all(struct ena_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_queues; i++) + napi_enable(&adapter->ena_napi[i].napi); +} + +static void ena_restore_ethtool_params(struct ena_adapter *adapter) +{ + adapter->tx_usecs = 0; + adapter->rx_usecs = 0; + adapter->tx_frames = 1; + adapter->rx_frames = 1; +} + +/* Configure the Rx forwarding */ +static int ena_rss_configure(struct ena_adapter *adapter) +{ + struct ena_com_dev *ena_dev = adapter->ena_dev; + int rc; + + /* In case the RSS table wasn't initialized by probe */ + if (!ena_dev->rss.tbl_log_size) { + rc = ena_rss_init_default(adapter); + if (rc && (rc != -EPERM)) { + netif_err(adapter, ifup, adapter->netdev, + "Failed to init RSS rc: %d\n", rc); + return rc; + } + } + + /* Set indirect table */ + rc = ena_com_indirect_table_set(ena_dev); + if (unlikely(rc && rc != -EPERM)) + return rc; + + /* Configure hash function (if supported) */ + rc = ena_com_set_hash_function(ena_dev); + if (unlikely(rc && (rc != -EPERM))) + return rc; + + /* Configure hash inputs (if supported) */ + rc = ena_com_set_hash_ctrl(ena_dev); + if (unlikely(rc && (rc != -EPERM))) + return rc; + + return 0; +} + +static int ena_up_complete(struct ena_adapter *adapter) +{ + int rc, i; + + rc = ena_rss_configure(adapter); + if (rc) + return rc; + + ena_init_napi(adapter); + + ena_change_mtu(adapter->netdev, adapter->netdev->mtu); + + ena_refill_all_rx_bufs(adapter); + + /* enable transmits */ + netif_tx_start_all_queues(adapter->netdev); + + ena_restore_ethtool_params(adapter); + + ena_napi_enable_all(adapter); + + /* schedule napi in case we had pending packets + * from the last time we disable napi + */ + for (i = 0; i < adapter->num_queues; i++) + napi_schedule(&adapter->ena_napi[i].napi); + + return 0; +} + +static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid) +{ + struct ena_com_create_io_ctx ctx = { 0 }; + struct ena_com_dev *ena_dev; + struct ena_ring *tx_ring; + u32 msix_vector; + u16 ena_qid; + int rc; + + ena_dev = adapter->ena_dev; + + tx_ring = &adapter->tx_ring[qid]; + msix_vector = ENA_IO_IRQ_IDX(qid); + ena_qid = ENA_IO_TXQ_IDX(qid); + + ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; + ctx.qid = ena_qid; + ctx.mem_queue_type = ena_dev->tx_mem_queue_type; + ctx.msix_vector = msix_vector; + ctx.queue_size = adapter->tx_ring_size; + ctx.numa_node = cpu_to_node(tx_ring->cpu); + + rc = ena_com_create_io_queue(ena_dev, &ctx); + if (rc) { + netif_err(adapter, ifup, adapter->netdev, + "Failed to create I/O TX queue num %d rc: %d\n", + qid, rc); + return rc; + } + + rc = ena_com_get_io_handlers(ena_dev, ena_qid, + &tx_ring->ena_com_io_sq, + &tx_ring->ena_com_io_cq); + if (rc) { + netif_err(adapter, ifup, adapter->netdev, + "Failed to get TX queue handlers. 
TX queue num %d rc: %d\n", + qid, rc); + ena_com_destroy_io_queue(ena_dev, ena_qid); + } + + ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node); + return rc; +} + +static int ena_create_all_io_tx_queues(struct ena_adapter *adapter) +{ + struct ena_com_dev *ena_dev = adapter->ena_dev; + int rc, i; + + for (i = 0; i < adapter->num_queues; i++) { + rc = ena_create_io_tx_queue(adapter, i); + if (rc) + goto create_err; + } + + return 0; + +create_err: + while (i--) + ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i)); + + return rc; +} + +static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid) +{ + struct ena_com_dev *ena_dev; + struct ena_com_create_io_ctx ctx = { 0 }; + struct ena_ring *rx_ring; + u32 msix_vector; + u16 ena_qid; + int rc; + + ena_dev = adapter->ena_dev; + + rx_ring = &adapter->rx_ring[qid]; + msix_vector = ENA_IO_IRQ_IDX(qid); + ena_qid = ENA_IO_RXQ_IDX(qid); + + ctx.qid = ena_qid; + ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; + ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; + ctx.msix_vector = msix_vector; + ctx.queue_size = adapter->rx_ring_size; + ctx.numa_node = cpu_to_node(rx_ring->cpu); + + rc = ena_com_create_io_queue(ena_dev, &ctx); + if (rc) { + netif_err(adapter, ifup, adapter->netdev, + "Failed to create I/O RX queue num %d rc: %d\n", + qid, rc); + return rc; + } + + rc = ena_com_get_io_handlers(ena_dev, ena_qid, + &rx_ring->ena_com_io_sq, + &rx_ring->ena_com_io_cq); + if (rc) { + netif_err(adapter, ifup, adapter->netdev, + "Failed to get RX queue handlers. RX queue num %d rc: %d\n", + qid, rc); + ena_com_destroy_io_queue(ena_dev, ena_qid); + } + + ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node); + + return rc; +} + +static int ena_create_all_io_rx_queues(struct ena_adapter *adapter) +{ + struct ena_com_dev *ena_dev = adapter->ena_dev; + int rc, i; + + for (i = 0; i < adapter->num_queues; i++) { + rc = ena_create_io_rx_queue(adapter, i); + if (rc) + goto create_err; + } + + return 0; + +create_err: + while (i--) + ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i)); + + return rc; +} + +static int ena_up(struct ena_adapter *adapter) +{ + int rc; + + netdev_dbg(adapter->netdev, "%s\n", __func__); + + ena_setup_io_intr(adapter); + + rc = ena_request_io_irq(adapter); + if (rc) + goto err_req_irq; + + /* allocate transmit descriptors */ + rc = ena_setup_all_tx_resources(adapter); + if (rc) + goto err_setup_tx; + + /* allocate receive descriptors */ + rc = ena_setup_all_rx_resources(adapter); + if (rc) + goto err_setup_rx; + + /* Create TX queues */ + rc = ena_create_all_io_tx_queues(adapter); + if (rc) + goto err_create_tx_queues; + + /* Create RX queues */ + rc = ena_create_all_io_rx_queues(adapter); + if (rc) + goto err_create_rx_queues; + + rc = ena_up_complete(adapter); + if (rc) + goto err_up; + + if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) + netif_carrier_on(adapter->netdev); + + u64_stats_update_begin(&adapter->syncp); + adapter->dev_stats.interface_up++; + u64_stats_update_end(&adapter->syncp); + + set_bit(ENA_FLAG_DEV_UP, &adapter->flags); + + return rc; + +err_up: + ena_destroy_all_rx_queues(adapter); +err_create_rx_queues: + ena_destroy_all_tx_queues(adapter); +err_create_tx_queues: + ena_free_all_io_rx_resources(adapter); +err_setup_rx: + ena_free_all_io_tx_resources(adapter); +err_setup_tx: + ena_free_io_irq(adapter); +err_req_irq: + + return rc; +} + +static void ena_down(struct ena_adapter *adapter) +{ + netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__); + + 
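+ /* Clear DEV_UP first: ena_io_poll() checks this flag and completes
+ * without re-arming the device interrupt once it is cleared.
+ */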
clear_bit(ENA_FLAG_DEV_UP, &adapter->flags); + + u64_stats_update_begin(&adapter->syncp); + adapter->dev_stats.interface_down++; + u64_stats_update_end(&adapter->syncp); + + /* After this point the napi handler won't enable the tx queue */ + ena_napi_disable_all(adapter); + netif_carrier_off(adapter->netdev); + netif_tx_disable(adapter->netdev); + + /* After destroy the queue there won't be any new interrupts */ + ena_destroy_all_io_queues(adapter); + + ena_disable_io_intr_sync(adapter); + ena_free_io_irq(adapter); + ena_del_napi(adapter); + + ena_free_all_tx_bufs(adapter); + ena_free_all_rx_bufs(adapter); + ena_free_all_io_tx_resources(adapter); + ena_free_all_io_rx_resources(adapter); +} + +/* ena_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + */ +static int ena_open(struct net_device *netdev) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + int rc; + + /* Notify the stack of the actual queue counts. */ + rc = netif_set_real_num_tx_queues(netdev, adapter->num_queues); + if (rc) { + netif_err(adapter, ifup, netdev, "Can't set num tx queues\n"); + return rc; + } + + rc = netif_set_real_num_rx_queues(netdev, adapter->num_queues); + if (rc) { + netif_err(adapter, ifup, netdev, "Can't set num rx queues\n"); + return rc; + } + + rc = ena_up(adapter); + if (rc) + return rc; + + return rc; +} + +/* ena_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
+ */ +static int ena_close(struct net_device *netdev) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + + netif_dbg(adapter, ifdown, netdev, "%s\n", __func__); + + if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) + ena_down(adapter); + + return 0; +} + +static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb) +{ + u32 mss = skb_shinfo(skb)->gso_size; + struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; + u8 l4_protocol = 0; + + if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) { + ena_tx_ctx->l4_csum_enable = 1; + if (mss) { + ena_tx_ctx->tso_enable = 1; + ena_meta->l4_hdr_len = tcp_hdr(skb)->doff; + ena_tx_ctx->l4_csum_partial = 0; + } else { + ena_tx_ctx->tso_enable = 0; + ena_meta->l4_hdr_len = 0; + ena_tx_ctx->l4_csum_partial = 1; + } + + switch (ip_hdr(skb)->version) { + case IPVERSION: + ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; + if (ip_hdr(skb)->frag_off & htons(IP_DF)) + ena_tx_ctx->df = 1; + if (mss) + ena_tx_ctx->l3_csum_enable = 1; + l4_protocol = ip_hdr(skb)->protocol; + break; + case 6: + ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; + l4_protocol = ipv6_hdr(skb)->nexthdr; + break; + default: + break; + } + + if (l4_protocol == IPPROTO_TCP) + ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; + else + ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; + + ena_meta->mss = mss; + ena_meta->l3_hdr_len = skb_network_header_len(skb); + ena_meta->l3_hdr_offset = skb_network_offset(skb); + ena_tx_ctx->meta_valid = 1; + + } else { + ena_tx_ctx->meta_valid = 0; + } +} + +static int ena_check_and_linearize_skb(struct ena_ring *tx_ring, + struct sk_buff *skb) +{ + int num_frags, header_len, rc; + + num_frags = skb_shinfo(skb)->nr_frags; + header_len = skb_headlen(skb); + + if (num_frags < tx_ring->sgl_size) + return 0; + + if ((num_frags == tx_ring->sgl_size) && + (header_len < tx_ring->tx_max_header_size)) + return 0; + + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.linearize++; + u64_stats_update_end(&tx_ring->syncp); + + rc = skb_linearize(skb); + if (unlikely(rc)) { + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.linearize_failed++; + u64_stats_update_end(&tx_ring->syncp); + } + + return rc; +} + +/* Called with netif_tx_lock. 
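+ * In LLQ placement mode (ENA_ADMIN_PLACEMENT_POLICY_DEV) up to
+ * tx_max_header_size bytes of the linear part are handed to the device
+ * as a push header; only the rest of the skb is DMA mapped.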
*/ +static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct ena_adapter *adapter = netdev_priv(dev); + struct ena_tx_buffer *tx_info; + struct ena_com_tx_ctx ena_tx_ctx; + struct ena_ring *tx_ring; + struct netdev_queue *txq; + struct ena_com_buf *ena_buf; + void *push_hdr; + u32 len, last_frag; + u16 next_to_use; + u16 req_id; + u16 push_len; + u16 header_len; + dma_addr_t dma; + int qid, rc, nb_hw_desc; + int i = -1; + + netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb); + /* Determine which tx ring we will be placed on */ + qid = skb_get_queue_mapping(skb); + tx_ring = &adapter->tx_ring[qid]; + txq = netdev_get_tx_queue(dev, qid); + + rc = ena_check_and_linearize_skb(tx_ring, skb); + if (unlikely(rc)) + goto error_drop_packet; + + skb_tx_timestamp(skb); + len = skb_headlen(skb); + + next_to_use = tx_ring->next_to_use; + req_id = tx_ring->free_tx_ids[next_to_use]; + tx_info = &tx_ring->tx_buffer_info[req_id]; + tx_info->num_of_bufs = 0; + + WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id); + ena_buf = tx_info->bufs; + tx_info->skb = skb; + + if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { + /* prepared the push buffer */ + push_len = min_t(u32, len, tx_ring->tx_max_header_size); + header_len = push_len; + push_hdr = skb->data; + } else { + push_len = 0; + header_len = min_t(u32, len, tx_ring->tx_max_header_size); + push_hdr = NULL; + } + + netif_dbg(adapter, tx_queued, dev, + "skb: %p header_buf->vaddr: %p push_len: %d\n", skb, + push_hdr, push_len); + + if (len > push_len) { + dma = dma_map_single(tx_ring->dev, skb->data + push_len, + len - push_len, DMA_TO_DEVICE); + if (dma_mapping_error(tx_ring->dev, dma)) + goto error_report_dma_error; + + ena_buf->paddr = dma; + ena_buf->len = len - push_len; + + ena_buf++; + tx_info->num_of_bufs++; + } + + last_frag = skb_shinfo(skb)->nr_frags; + + for (i = 0; i < last_frag; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + len = skb_frag_size(frag); + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len, + DMA_TO_DEVICE); + if (dma_mapping_error(tx_ring->dev, dma)) + goto error_report_dma_error; + + ena_buf->paddr = dma; + ena_buf->len = len; + ena_buf++; + } + + tx_info->num_of_bufs += last_frag; + + memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx)); + ena_tx_ctx.ena_bufs = tx_info->bufs; + ena_tx_ctx.push_header = push_hdr; + ena_tx_ctx.num_bufs = tx_info->num_of_bufs; + ena_tx_ctx.req_id = req_id; + ena_tx_ctx.header_len = header_len; + + /* set flags and meta data */ + ena_tx_csum(&ena_tx_ctx, skb); + + /* prepare the packet's descriptors to dma engine */ + rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx, + &nb_hw_desc); + + if (unlikely(rc)) { + netif_err(adapter, tx_queued, dev, + "failed to prepare tx bufs\n"); + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.queue_stop++; + tx_ring->tx_stats.prepare_ctx_err++; + u64_stats_update_end(&tx_ring->syncp); + netif_tx_stop_queue(txq); + goto error_unmap_dma; + } + + netdev_tx_sent_queue(txq, skb->len); + + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.cnt++; + tx_ring->tx_stats.bytes += skb->len; + u64_stats_update_end(&tx_ring->syncp); + + tx_info->tx_descs = nb_hw_desc; + tx_info->last_jiffies = jiffies; + + tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, + tx_ring->ring_size); + + /* This WMB is aimed to: + * 1 - perform smp barrier before reading next_to_completion + * 2 - make sure the desc were written before trigger DB + */ + wmb(); + + /* stop 
the queue when no more space available, the packet can have up + * to sgl_size + 2. one for the meta descriptor and one for header + * (if the header is larger than tx_max_header_size). + */ + if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) < + (tx_ring->sgl_size + 2))) { + netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n", + __func__, qid); + + netif_tx_stop_queue(txq); + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.queue_stop++; + u64_stats_update_end(&tx_ring->syncp); + + /* There is a rare condition where this function decide to + * stop the queue but meanwhile clean_tx_irq updates + * next_to_completion and terminates. + * The queue will remain stopped forever. + * To solve this issue this function perform rmb, check + * the wakeup condition and wake up the queue if needed. + */ + smp_rmb(); + + if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq) + > ENA_TX_WAKEUP_THRESH) { + netif_tx_wake_queue(txq); + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.queue_wakeup++; + u64_stats_update_end(&tx_ring->syncp); + } + } + + if (netif_xmit_stopped(txq) || !skb->xmit_more) { + /* trigger the dma engine */ + ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.doorbells++; + u64_stats_update_end(&tx_ring->syncp); + } + + return NETDEV_TX_OK; + +error_report_dma_error: + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.dma_mapping_err++; + u64_stats_update_end(&tx_ring->syncp); + netdev_warn(adapter->netdev, "failed to map skb\n"); + + tx_info->skb = NULL; + +error_unmap_dma: + if (i >= 0) { + /* save value of frag that failed */ + last_frag = i; + + /* start back at beginning and unmap skb */ + tx_info->skb = NULL; + ena_buf = tx_info->bufs; + dma_unmap_single(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), + dma_unmap_len(ena_buf, len), DMA_TO_DEVICE); + + /* unmap remaining mapped pages */ + for (i = 0; i < last_frag; i++) { + ena_buf++; + dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), + dma_unmap_len(ena_buf, len), DMA_TO_DEVICE); + } + } + +error_drop_packet: + + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void ena_netpoll(struct net_device *netdev) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + int i; + + for (i = 0; i < adapter->num_queues; i++) + napi_schedule(&adapter->ena_napi[i].napi); +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + +static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) +{ + u16 qid; + /* we suspect that this is good for in--kernel network services that + * want to loop incoming skb rx to tx in normal user generated traffic, + * most probably we will not get to this + */ + if (skb_rx_queue_recorded(skb)) + qid = skb_get_rx_queue(skb); + else + qid = fallback(dev, skb); + + return qid; +} + +static void ena_config_host_info(struct ena_com_dev *ena_dev) +{ + struct ena_admin_host_info *host_info; + int rc; + + /* Allocate only the host info */ + rc = ena_com_allocate_host_info(ena_dev); + if (rc) { + pr_err("Cannot allocate host info\n"); + return; + } + + host_info = ena_dev->host_attr.host_info; + + host_info->os_type = ENA_ADMIN_OS_LINUX; + host_info->kernel_ver = LINUX_VERSION_CODE; + strncpy(host_info->kernel_ver_str, utsname()->version, + sizeof(host_info->kernel_ver_str) - 1); + host_info->os_dist = 0; + strncpy(host_info->os_dist_str, utsname()->release, + sizeof(host_info->os_dist_str) - 1); + 
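+ /* Pack the driver version into the single 32-bit field the admin
+ * interface expects: major in the low bits, minor and sub-minor at
+ * their respective shifts.
+ */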
host_info->driver_version = + (DRV_MODULE_VER_MAJOR) | + (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | + (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); + + rc = ena_com_set_host_attributes(ena_dev); + if (rc) { + if (rc == -EPERM) + pr_warn("Cannot set host attributes\n"); + else + pr_err("Cannot set host attributes\n"); + + goto err; + } + + return; + +err: + ena_com_delete_host_info(ena_dev); +} + +static void ena_config_debug_area(struct ena_adapter *adapter) +{ + u32 debug_area_size; + int rc, ss_count; + + ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS); + if (ss_count <= 0) { + netif_err(adapter, drv, adapter->netdev, + "SS count is negative\n"); + return; + } + + /* allocate 32 bytes for each string and 64bit for the value */ + debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count; + + rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size); + if (rc) { + pr_err("Cannot allocate debug area\n"); + return; + } + + rc = ena_com_set_host_attributes(adapter->ena_dev); + if (rc) { + if (rc == -EPERM) + netif_warn(adapter, drv, adapter->netdev, + "Cannot set host attributes\n"); + else + netif_err(adapter, drv, adapter->netdev, + "Cannot set host attributes\n"); + goto err; + } + + return; +err: + ena_com_delete_debug_area(adapter->ena_dev); +} + +static struct rtnl_link_stats64 *ena_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + struct ena_admin_basic_stats ena_stats; + int rc; + + if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) + return NULL; + + rc = ena_com_get_dev_basic_stats(adapter->ena_dev, &ena_stats); + if (rc) + return NULL; + + stats->tx_bytes = ((u64)ena_stats.tx_bytes_high << 32) | + ena_stats.tx_bytes_low; + stats->rx_bytes = ((u64)ena_stats.rx_bytes_high << 32) | + ena_stats.rx_bytes_low; + + stats->rx_packets = ((u64)ena_stats.rx_pkts_high << 32) | + ena_stats.rx_pkts_low; + stats->tx_packets = ((u64)ena_stats.tx_pkts_high << 32) | + ena_stats.tx_pkts_low; + + stats->rx_dropped = ((u64)ena_stats.rx_drops_high << 32) | + ena_stats.rx_drops_low; + + stats->multicast = 0; + stats->collisions = 0; + + stats->rx_length_errors = 0; + stats->rx_crc_errors = 0; + stats->rx_frame_errors = 0; + stats->rx_fifo_errors = 0; + stats->rx_missed_errors = 0; + stats->tx_window_errors = 0; + + stats->rx_errors = 0; + stats->tx_errors = 0; + + return stats; +} + +static const struct net_device_ops ena_netdev_ops = { + .ndo_open = ena_open, + .ndo_stop = ena_close, + .ndo_start_xmit = ena_start_xmit, + .ndo_select_queue = ena_select_queue, + .ndo_get_stats64 = ena_get_stats64, + .ndo_tx_timeout = ena_tx_timeout, + .ndo_change_mtu = ena_change_mtu, + .ndo_set_mac_address = NULL, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ena_netpoll, +#endif /* CONFIG_NET_POLL_CONTROLLER */ +}; + +static void ena_device_io_suspend(struct work_struct *work) +{ + struct ena_adapter *adapter = + container_of(work, struct ena_adapter, suspend_io_task); + struct net_device *netdev = adapter->netdev; + + /* ena_napi_disable_all disables only the IO handling. + * We are still subject to AENQ keep alive watchdog. 
+ */ + u64_stats_update_begin(&adapter->syncp); + adapter->dev_stats.io_suspend++; + u64_stats_update_begin(&adapter->syncp); + ena_napi_disable_all(adapter); + netif_tx_lock(netdev); + netif_device_detach(netdev); + netif_tx_unlock(netdev); +} + +static void ena_device_io_resume(struct work_struct *work) +{ + struct ena_adapter *adapter = + container_of(work, struct ena_adapter, resume_io_task); + struct net_device *netdev = adapter->netdev; + + u64_stats_update_begin(&adapter->syncp); + adapter->dev_stats.io_resume++; + u64_stats_update_end(&adapter->syncp); + + netif_device_attach(netdev); + ena_napi_enable_all(adapter); +} + +static int ena_device_validate_params(struct ena_adapter *adapter, + struct ena_com_dev_get_features_ctx *get_feat_ctx) +{ + struct net_device *netdev = adapter->netdev; + int rc; + + rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr, + adapter->mac_addr); + if (!rc) { + netif_err(adapter, drv, netdev, + "Error, mac address are different\n"); + return -EINVAL; + } + + if ((get_feat_ctx->max_queues.max_cq_num < adapter->num_queues) || + (get_feat_ctx->max_queues.max_sq_num < adapter->num_queues)) { + netif_err(adapter, drv, netdev, + "Error, device doesn't support enough queues\n"); + return -EINVAL; + } + + if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) { + netif_err(adapter, drv, netdev, + "Error, device max mtu is smaller than netdev MTU\n"); + return -EINVAL; + } + + return 0; +} + +static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev, + struct ena_com_dev_get_features_ctx *get_feat_ctx, + bool *wd_state) +{ + struct device *dev = &pdev->dev; + bool readless_supported; + u32 aenq_groups; + int dma_width; + int rc; + + rc = ena_com_mmio_reg_read_request_init(ena_dev); + if (rc) { + dev_err(dev, "failed to init mmio read less\n"); + return rc; + } + + /* The PCIe configuration space revision id indicate if mmio reg + * read is disabled + */ + readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ); + ena_com_set_mmio_read_mode(ena_dev, readless_supported); + + rc = ena_com_dev_reset(ena_dev); + if (rc) { + dev_err(dev, "Can not reset device\n"); + goto err_mmio_read_less; + } + + rc = ena_com_validate_version(ena_dev); + if (rc) { + dev_err(dev, "device version is too low\n"); + goto err_mmio_read_less; + } + + dma_width = ena_com_get_dma_width(ena_dev); + if (dma_width < 0) { + dev_err(dev, "Invalid dma width value %d", dma_width); + rc = dma_width; + goto err_mmio_read_less; + } + + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width)); + if (rc) { + dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc); + goto err_mmio_read_less; + } + + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width)); + if (rc) { + dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n", + rc); + goto err_mmio_read_less; + } + + /* ENA admin level init */ + rc = ena_com_admin_init(ena_dev, &aenq_handlers, true); + if (rc) { + dev_err(dev, + "Can not initialize ena admin queue with device\n"); + goto err_mmio_read_less; + } + + /* To enable the msix interrupts the driver needs to know the number + * of queues. 
So the driver uses polling mode to retrieve this + * information + */ + ena_com_set_admin_polling_mode(ena_dev, true); + + /* Get Device Attributes*/ + rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); + if (rc) { + dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc); + goto err_admin_init; + } + + /* Try to turn all the available aenq groups */ + aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | + BIT(ENA_ADMIN_FATAL_ERROR) | + BIT(ENA_ADMIN_WARNING) | + BIT(ENA_ADMIN_NOTIFICATION) | + BIT(ENA_ADMIN_KEEP_ALIVE); + + aenq_groups &= get_feat_ctx->aenq.supported_groups; + + rc = ena_com_set_aenq_config(ena_dev, aenq_groups); + if (rc) { + dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc); + goto err_admin_init; + } + + *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); + + ena_config_host_info(ena_dev); + + return 0; + +err_admin_init: + ena_com_admin_destroy(ena_dev); +err_mmio_read_less: + ena_com_mmio_reg_read_request_destroy(ena_dev); + + return rc; +} + +static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter, + int io_vectors) +{ + struct ena_com_dev *ena_dev = adapter->ena_dev; + struct device *dev = &adapter->pdev->dev; + int rc; + + rc = ena_enable_msix(adapter, io_vectors); + if (rc) { + dev_err(dev, "Can not reserve msix vectors\n"); + return rc; + } + + ena_setup_mgmnt_intr(adapter); + + rc = ena_request_mgmnt_irq(adapter); + if (rc) { + dev_err(dev, "Can not setup management interrupts\n"); + goto err_disable_msix; + } + + ena_com_set_admin_polling_mode(ena_dev, false); + + ena_com_admin_aenq_enable(ena_dev); + + return 0; + +err_disable_msix: + ena_disable_msix(adapter); + + return rc; +} + +static void ena_fw_reset_device(struct work_struct *work) +{ + struct ena_com_dev_get_features_ctx get_feat_ctx; + struct ena_adapter *adapter = + container_of(work, struct ena_adapter, reset_task); + struct net_device *netdev = adapter->netdev; + struct ena_com_dev *ena_dev = adapter->ena_dev; + struct pci_dev *pdev = adapter->pdev; + bool dev_up, wd_state; + int rc; + + del_timer_sync(&adapter->timer_service); + + rtnl_lock(); + + dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); + ena_com_set_admin_running_state(ena_dev, false); + + /* After calling ena_close the tx queues and the napi + * are disabled so no one can interfere or touch the + * data structures + */ + ena_close(netdev); + + rc = ena_com_dev_reset(ena_dev); + if (rc) { + dev_err(&pdev->dev, "Device reset failed\n"); + goto err; + } + + ena_free_mgmnt_irq(adapter); + + ena_disable_msix(adapter); + + ena_com_abort_admin_commands(ena_dev); + + ena_com_wait_for_abort_completion(ena_dev); + + ena_com_admin_destroy(ena_dev); + + ena_com_mmio_reg_read_request_destroy(ena_dev); + + /* Finish with the destroy part. 
Start the init part */ + + rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state); + if (rc) { + dev_err(&pdev->dev, "Can not initialize device\n"); + goto err; + } + adapter->wd_state = wd_state; + + rc = ena_device_validate_params(adapter, &get_feat_ctx); + if (rc) { + dev_err(&pdev->dev, "Validation of device parameters failed\n"); + goto err_device_destroy; + } + + rc = ena_enable_msix_and_set_admin_interrupts(adapter, + adapter->num_queues); + if (rc) { + dev_err(&pdev->dev, "Enable MSI-X failed\n"); + goto err_device_destroy; + } + /* If the interface was up before the reset bring it up */ + if (dev_up) { + rc = ena_up(adapter); + if (rc) { + dev_err(&pdev->dev, "Failed to create I/O queues\n"); + goto err_disable_msix; + } + } + + mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); + + rtnl_unlock(); + + dev_err(&pdev->dev, "Device reset completed successfully\n"); + + return; +err_disable_msix: + ena_free_mgmnt_irq(adapter); + ena_disable_msix(adapter); +err_device_destroy: + ena_com_admin_destroy(ena_dev); +err: + rtnl_unlock(); + + dev_err(&pdev->dev, + "Reset attempt failed. Can not reset the device\n"); +} + +static void check_for_missing_tx_completions(struct ena_adapter *adapter) +{ + struct ena_tx_buffer *tx_buf; + unsigned long last_jiffies; + struct ena_ring *tx_ring; + int i, j, budget; + u32 missed_tx; + + /* Make sure the driver doesn't turn the device in other process */ + smp_rmb(); + + if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) + return; + + budget = ENA_MONITORED_TX_QUEUES; + + for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) { + tx_ring = &adapter->tx_ring[i]; + + for (j = 0; j < tx_ring->ring_size; j++) { + tx_buf = &tx_ring->tx_buffer_info[j]; + last_jiffies = tx_buf->last_jiffies; + if (unlikely(last_jiffies && time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) { + netif_notice(adapter, tx_err, adapter->netdev, + "Found a Tx that wasn't completed on time, qid %d, index %d.\n", + tx_ring->qid, j); + + u64_stats_update_begin(&tx_ring->syncp); + missed_tx = tx_ring->tx_stats.missing_tx_comp++; + u64_stats_update_end(&tx_ring->syncp); + + /* Clear last jiffies so the lost buffer won't + * be counted twice. + */ + tx_buf->last_jiffies = 0; + + if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) { + netif_err(adapter, tx_err, adapter->netdev, + "The number of lost tx completion is above the threshold (%d > %d). 
Reset the device\n", + missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS); + set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); + } + } + } + + budget--; + if (!budget) + break; + } + + adapter->last_monitored_tx_qid = i % adapter->num_queues; +} + +/* Check for keep alive expiration */ +static void check_for_missing_keep_alive(struct ena_adapter *adapter) +{ + unsigned long keep_alive_expired; + + if (!adapter->wd_state) + return; + + keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies + + ENA_DEVICE_KALIVE_TIMEOUT); + if (unlikely(time_is_before_jiffies(keep_alive_expired))) { + netif_err(adapter, drv, adapter->netdev, + "Keep alive watchdog timeout.\n"); + u64_stats_update_begin(&adapter->syncp); + adapter->dev_stats.wd_expired++; + u64_stats_update_end(&adapter->syncp); + set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); + } +} + +static void check_for_admin_com_state(struct ena_adapter *adapter) +{ + if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) { + netif_err(adapter, drv, adapter->netdev, + "ENA admin queue is not in running state!\n"); + u64_stats_update_begin(&adapter->syncp); + adapter->dev_stats.admin_q_pause++; + u64_stats_update_end(&adapter->syncp); + set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); + } +} + +static void ena_update_host_info(struct ena_admin_host_info *host_info, + struct net_device *netdev) +{ + host_info->supported_network_features[0] = + netdev->features & GENMASK_ULL(31, 0); + host_info->supported_network_features[1] = + (netdev->features & GENMASK_ULL(63, 32)) >> 32; +} + +static void ena_timer_service(unsigned long data) +{ + struct ena_adapter *adapter = (struct ena_adapter *)data; + u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr; + struct ena_admin_host_info *host_info = + adapter->ena_dev->host_attr.host_info; + + check_for_missing_keep_alive(adapter); + + check_for_admin_com_state(adapter); + + check_for_missing_tx_completions(adapter); + + if (debug_area) + ena_dump_stats_to_buf(adapter, debug_area); + + if (host_info) + ena_update_host_info(host_info, adapter->netdev); + + if (unlikely(test_and_clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { + netif_err(adapter, drv, adapter->netdev, + "Trigger reset is on\n"); + ena_dump_stats_to_dmesg(adapter); + queue_work(ena_wq, &adapter->reset_task); + return; + } + + /* Reset the timer */ + mod_timer(&adapter->timer_service, jiffies + HZ); +} + +static int ena_calc_io_queue_num(struct pci_dev *pdev, + struct ena_com_dev *ena_dev, + struct ena_com_dev_get_features_ctx *get_feat_ctx) +{ + int io_sq_num, io_queue_num; + + /* In case of LLQ use the llq number in the get feature cmd */ + if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { + io_sq_num = get_feat_ctx->max_queues.max_llq_num; + + if (io_sq_num == 0) { + dev_err(&pdev->dev, + "Trying to use LLQ but llq_num is 0. 
Fall back into regular queues\n"); + + ena_dev->tx_mem_queue_type = + ENA_ADMIN_PLACEMENT_POLICY_HOST; + io_sq_num = get_feat_ctx->max_queues.max_sq_num; + } + } else { + io_sq_num = get_feat_ctx->max_queues.max_sq_num; + } + + io_queue_num = min_t(int, num_possible_cpus(), ENA_MAX_NUM_IO_QUEUES); + io_queue_num = min_t(int, io_queue_num, io_sq_num); + io_queue_num = min_t(int, io_queue_num, + get_feat_ctx->max_queues.max_cq_num); + /* 1 IRQ for for mgmnt and 1 IRQs for each IO direction */ + io_queue_num = min_t(int, io_queue_num, pci_msix_vec_count(pdev) - 1); + if (unlikely(!io_queue_num)) { + dev_err(&pdev->dev, "The device doesn't have io queues\n"); + return -EFAULT; + } + + return io_queue_num; +} + +static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev, + struct ena_com_dev_get_features_ctx *get_feat_ctx) +{ + bool has_mem_bar; + + has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR); + + /* Enable push mode if device supports LLQ */ + if (has_mem_bar && (get_feat_ctx->max_queues.max_llq_num > 0)) + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV; + else + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; +} + +static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat, + struct net_device *netdev) +{ + netdev_features_t dev_features = 0; + + /* Set offload features */ + if (feat->offload.tx & + ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) + dev_features |= NETIF_F_IP_CSUM; + + if (feat->offload.tx & + ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK) + dev_features |= NETIF_F_IPV6_CSUM; + + if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) + dev_features |= NETIF_F_TSO; + + if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) + dev_features |= NETIF_F_TSO6; + + if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK) + dev_features |= NETIF_F_TSO_ECN; + + if (feat->offload.rx_supported & + ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) + dev_features |= NETIF_F_RXCSUM; + + if (feat->offload.rx_supported & + ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) + dev_features |= NETIF_F_RXCSUM; + + netdev->features = + dev_features | + NETIF_F_SG | + NETIF_F_NTUPLE | + NETIF_F_RXHASH | + NETIF_F_HIGHDMA; + + netdev->hw_features |= netdev->features; + netdev->vlan_features |= netdev->features; +} + +static void ena_set_conf_feat_params(struct ena_adapter *adapter, + struct ena_com_dev_get_features_ctx *feat) +{ + struct net_device *netdev = adapter->netdev; + + /* Copy mac address */ + if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) { + eth_hw_addr_random(netdev); + ether_addr_copy(adapter->mac_addr, netdev->dev_addr); + } else { + ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr); + ether_addr_copy(netdev->dev_addr, adapter->mac_addr); + } + + /* Set offload features */ + ena_set_dev_offloads(feat, netdev); + + adapter->max_mtu = feat->dev_attr.max_mtu; +} + +static int ena_rss_init_default(struct ena_adapter *adapter) +{ + struct ena_com_dev *ena_dev = adapter->ena_dev; + struct device *dev = &adapter->pdev->dev; + int rc, i; + u32 val; + + rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); + if (unlikely(rc)) { + dev_err(dev, "Cannot init indirect table\n"); + goto err_rss_init; + } + + for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { + val = ethtool_rxfh_indir_default(i, adapter->num_queues); + rc = ena_com_indirect_table_fill_entry(ena_dev, i, + ENA_IO_RXQ_IDX(val)); + if (unlikely(rc && (rc != -EPERM))) { 
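+ /* -EPERM is tolerated here and by the other RSS setup calls
+ * (the device simply doesn't support the feature); any other
+ * error aborts the default RSS configuration.
+ */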
+ dev_err(dev, "Cannot fill indirect table\n"); + goto err_fill_indir; + } + } + + rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL, + ENA_HASH_KEY_SIZE, 0xFFFFFFFF); + if (unlikely(rc && (rc != -EPERM))) { + dev_err(dev, "Cannot fill hash function\n"); + goto err_fill_indir; + } + + rc = ena_com_set_default_hash_ctrl(ena_dev); + if (unlikely(rc && (rc != -EPERM))) { + dev_err(dev, "Cannot fill hash control\n"); + goto err_fill_indir; + } + + return 0; + +err_fill_indir: + ena_com_rss_destroy(ena_dev); +err_rss_init: + + return rc; +} + +static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev) +{ + int release_bars; + + release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; + pci_release_selected_regions(pdev, release_bars); +} + +static int ena_calc_queue_size(struct pci_dev *pdev, + struct ena_com_dev *ena_dev, + u16 *max_tx_sgl_size, + u16 *max_rx_sgl_size, + struct ena_com_dev_get_features_ctx *get_feat_ctx) +{ + u32 queue_size = ENA_DEFAULT_RING_SIZE; + + queue_size = min_t(u32, queue_size, + get_feat_ctx->max_queues.max_cq_depth); + queue_size = min_t(u32, queue_size, + get_feat_ctx->max_queues.max_sq_depth); + + if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) + queue_size = min_t(u32, queue_size, + get_feat_ctx->max_queues.max_llq_depth); + + queue_size = rounddown_pow_of_two(queue_size); + + if (unlikely(!queue_size)) { + dev_err(&pdev->dev, "Invalid queue size\n"); + return -EFAULT; + } + + *max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, + get_feat_ctx->max_queues.max_packet_tx_descs); + *max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, + get_feat_ctx->max_queues.max_packet_rx_descs); + + return queue_size; +} + +/* ena_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in ena_pci_tbl + * + * Returns 0 on success, negative on failure + * + * ena_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. 
+ */ +static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct ena_com_dev_get_features_ctx get_feat_ctx; + static int version_printed; + struct net_device *netdev; + struct ena_adapter *adapter; + struct ena_com_dev *ena_dev = NULL; + static int adapters_found; + int io_queue_num, bars, rc; + int queue_size; + u16 tx_sgl_size = 0; + u16 rx_sgl_size = 0; + bool wd_state; + + dev_dbg(&pdev->dev, "%s\n", __func__); + + if (version_printed++ == 0) + dev_info(&pdev->dev, "%s", version); + + rc = pci_enable_device_mem(pdev); + if (rc) { + dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n"); + return rc; + } + + pci_set_master(pdev); + + ena_dev = vzalloc(sizeof(*ena_dev)); + if (!ena_dev) { + rc = -ENOMEM; + goto err_disable_device; + } + + bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; + rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME); + if (rc) { + dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n", + rc); + goto err_free_ena_dev; + } + + ena_dev->reg_bar = ioremap(pci_resource_start(pdev, ENA_REG_BAR), + pci_resource_len(pdev, ENA_REG_BAR)); + if (!ena_dev->reg_bar) { + dev_err(&pdev->dev, "failed to remap regs bar\n"); + rc = -EFAULT; + goto err_free_region; + } + + ena_dev->dmadev = &pdev->dev; + + rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state); + if (rc) { + dev_err(&pdev->dev, "ena device init failed\n"); + if (rc == -ETIME) + rc = -EPROBE_DEFER; + goto err_free_region; + } + + ena_set_push_mode(pdev, ena_dev, &get_feat_ctx); + + if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { + ena_dev->mem_bar = ioremap_wc(pci_resource_start(pdev, ENA_MEM_BAR), + pci_resource_len(pdev, ENA_MEM_BAR)); + if (!ena_dev->mem_bar) { + rc = -EFAULT; + goto err_device_destroy; + } + } + + /* initial Tx interrupt delay, Assumes 1 usec granularity. + * Updated during device initialization with the real granularity + */ + ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS; + io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx); + queue_size = ena_calc_queue_size(pdev, ena_dev, &tx_sgl_size, + &rx_sgl_size, &get_feat_ctx); + if ((queue_size <= 0) || (io_queue_num <= 0)) { + rc = -EFAULT; + goto err_device_destroy; + } + + dev_info(&pdev->dev, "creating %d io queues. 
queue size: %d\n", + io_queue_num, queue_size); + + /* dev zeroed in init_etherdev */ + netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num); + if (!netdev) { + dev_err(&pdev->dev, "alloc_etherdev_mq failed\n"); + rc = -ENOMEM; + goto err_device_destroy; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + adapter = netdev_priv(netdev); + pci_set_drvdata(pdev, adapter); + + adapter->ena_dev = ena_dev; + adapter->netdev = netdev; + adapter->pdev = pdev; + + ena_set_conf_feat_params(adapter, &get_feat_ctx); + + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + adapter->tx_ring_size = queue_size; + adapter->rx_ring_size = queue_size; + + adapter->max_tx_sgl_size = tx_sgl_size; + adapter->max_rx_sgl_size = rx_sgl_size; + + adapter->num_queues = io_queue_num; + adapter->last_monitored_tx_qid = 0; + + adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK; + adapter->wd_state = wd_state; + + snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found); + + rc = ena_com_init_interrupt_moderation(adapter->ena_dev); + if (rc) { + dev_err(&pdev->dev, + "Failed to query interrupt moderation feature\n"); + goto err_netdev_destroy; + } + ena_init_io_rings(adapter); + + netdev->netdev_ops = &ena_netdev_ops; + netdev->watchdog_timeo = TX_TIMEOUT; + ena_set_ethtool_ops(netdev); + + netdev->priv_flags |= IFF_UNICAST_FLT; + + u64_stats_init(&adapter->syncp); + + rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num); + if (rc) { + dev_err(&pdev->dev, + "Failed to enable and set the admin interrupts\n"); + goto err_worker_destroy; + } + rc = ena_rss_init_default(adapter); + if (rc && (rc != -EPERM)) { + dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc); + goto err_free_msix; + } + + ena_config_debug_area(adapter); + + memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len); + + netif_carrier_off(netdev); + + rc = register_netdev(netdev); + if (rc) { + dev_err(&pdev->dev, "Cannot register net device\n"); + goto err_rss; + } + + INIT_WORK(&adapter->suspend_io_task, ena_device_io_suspend); + INIT_WORK(&adapter->resume_io_task, ena_device_io_resume); + INIT_WORK(&adapter->reset_task, ena_fw_reset_device); + + adapter->last_keep_alive_jiffies = jiffies; + + init_timer(&adapter->timer_service); + adapter->timer_service.expires = round_jiffies(jiffies + HZ); + adapter->timer_service.function = ena_timer_service; + adapter->timer_service.data = (unsigned long)adapter; + + add_timer(&adapter->timer_service); + + dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n", + DEVICE_NAME, (long)pci_resource_start(pdev, 0), + netdev->dev_addr, io_queue_num); + + set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); + + adapters_found++; + + return 0; + +err_rss: + ena_com_delete_debug_area(ena_dev); + ena_com_rss_destroy(ena_dev); +err_free_msix: + ena_com_dev_reset(ena_dev); + ena_free_mgmnt_irq(adapter); + ena_disable_msix(adapter); +err_worker_destroy: + ena_com_destroy_interrupt_moderation(ena_dev); + del_timer(&adapter->timer_service); + cancel_work_sync(&adapter->suspend_io_task); + cancel_work_sync(&adapter->resume_io_task); +err_netdev_destroy: + free_netdev(netdev); +err_device_destroy: + ena_com_delete_host_info(ena_dev); + ena_com_admin_destroy(ena_dev); +err_free_region: + ena_release_bars(ena_dev, pdev); +err_free_ena_dev: + vfree(ena_dev); +err_disable_device: + pci_disable_device(pdev); + return rc; +} + +/*****************************************************************************/ +static int ena_sriov_configure(struct pci_dev *dev, 
int numvfs) +{ + int rc; + + if (numvfs > 0) { + rc = pci_enable_sriov(dev, numvfs); + if (rc != 0) { + dev_err(&dev->dev, + "pci_enable_sriov failed to enable: %d vfs with the error: %d\n", + numvfs, rc); + return rc; + } + + return numvfs; + } + + if (numvfs == 0) { + pci_disable_sriov(dev); + return 0; + } + + return -EINVAL; +} + +/*****************************************************************************/ +/*****************************************************************************/ + +/* ena_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * ena_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. + */ +static void ena_remove(struct pci_dev *pdev) +{ + struct ena_adapter *adapter = pci_get_drvdata(pdev); + struct ena_com_dev *ena_dev; + struct net_device *netdev; + + if (!adapter) + /* This device didn't load properly and it's resources + * already released, nothing to do + */ + return; + + ena_dev = adapter->ena_dev; + netdev = adapter->netdev; + +#ifdef CONFIG_RFS_ACCEL + if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) { + free_irq_cpu_rmap(netdev->rx_cpu_rmap); + netdev->rx_cpu_rmap = NULL; + } +#endif /* CONFIG_RFS_ACCEL */ + + unregister_netdev(netdev); + del_timer_sync(&adapter->timer_service); + + cancel_work_sync(&adapter->reset_task); + + cancel_work_sync(&adapter->suspend_io_task); + + cancel_work_sync(&adapter->resume_io_task); + + ena_com_dev_reset(ena_dev); + + ena_free_mgmnt_irq(adapter); + + ena_disable_msix(adapter); + + free_netdev(netdev); + + ena_com_mmio_reg_read_request_destroy(ena_dev); + + ena_com_abort_admin_commands(ena_dev); + + ena_com_wait_for_abort_completion(ena_dev); + + ena_com_admin_destroy(ena_dev); + + ena_com_rss_destroy(ena_dev); + + ena_com_delete_debug_area(ena_dev); + + ena_com_delete_host_info(ena_dev); + + ena_release_bars(ena_dev, pdev); + + pci_disable_device(pdev); + + ena_com_destroy_interrupt_moderation(ena_dev); + + vfree(ena_dev); +} + +static struct pci_driver ena_pci_driver = { + .name = DRV_MODULE_NAME, + .id_table = ena_pci_tbl, + .probe = ena_probe, + .remove = ena_remove, + .sriov_configure = ena_sriov_configure, +}; + +static int __init ena_init(void) +{ + pr_info("%s", version); + + ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME); + if (!ena_wq) { + pr_err("Failed to create workqueue\n"); + return -ENOMEM; + } + + return pci_register_driver(&ena_pci_driver); +} + +static void __exit ena_cleanup(void) +{ + pci_unregister_driver(&ena_pci_driver); + + if (ena_wq) { + destroy_workqueue(ena_wq); + ena_wq = NULL; + } +} + +/****************************************************************************** + ******************************** AENQ Handlers ******************************* + *****************************************************************************/ +/* ena_update_on_link_change: + * Notify the network interface about the change in link status + */ +static void ena_update_on_link_change(void *adapter_data, + struct ena_admin_aenq_entry *aenq_e) +{ + struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; + struct ena_admin_aenq_link_change_desc *aenq_desc = + (struct ena_admin_aenq_link_change_desc *)aenq_e; + int status = aenq_desc->flags & + ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK; + + if (status) { + netdev_dbg(adapter->netdev, "%s\n", __func__); + set_bit(ENA_FLAG_LINK_UP, &adapter->flags); + netif_carrier_on(adapter->netdev); + } else { + clear_bit(ENA_FLAG_LINK_UP, &adapter->flags); + 
netif_carrier_off(adapter->netdev); + } +} + +static void ena_keep_alive_wd(void *adapter_data, + struct ena_admin_aenq_entry *aenq_e) +{ + struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; + + adapter->last_keep_alive_jiffies = jiffies; +} + +static void ena_notification(void *adapter_data, + struct ena_admin_aenq_entry *aenq_e) +{ + struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; + + WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION, + "Invalid group(%x) expected %x\n", + aenq_e->aenq_common_desc.group, + ENA_ADMIN_NOTIFICATION); + + switch (aenq_e->aenq_common_desc.syndrom) { + case ENA_ADMIN_SUSPEND: + /* Suspend just the IO queues. + * We deliberately don't suspend admin so the timer and + * the keep_alive events should remain. + */ + queue_work(ena_wq, &adapter->suspend_io_task); + break; + case ENA_ADMIN_RESUME: + queue_work(ena_wq, &adapter->resume_io_task); + break; + default: + netif_err(adapter, drv, adapter->netdev, + "Invalid aenq notification link state %d\n", + aenq_e->aenq_common_desc.syndrom); + } +} + +/* This handler will called for unknown event group or unimplemented handlers*/ +static void unimplemented_aenq_handler(void *data, + struct ena_admin_aenq_entry *aenq_e) +{ + struct ena_adapter *adapter = (struct ena_adapter *)data; + + netif_err(adapter, drv, adapter->netdev, + "Unknown event was received or event with unimplemented handler\n"); +} + +static struct ena_aenq_handlers aenq_handlers = { + .handlers = { + [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change, + [ENA_ADMIN_NOTIFICATION] = ena_notification, + [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd, + }, + .unimplemented_handler = unimplemented_aenq_handler +}; + +module_init(ena_init); +module_exit(ena_cleanup); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h new file mode 100644 index 000000000000..69d7e9ed5bc8 --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -0,0 +1,324 @@ +/* + * Copyright 2015 Amazon.com, Inc. or its affiliates. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef ENA_H +#define ENA_H + +#include <linux/bitops.h> +#include <linux/etherdevice.h> +#include <linux/inetdevice.h> +#include <linux/interrupt.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> + +#include "ena_com.h" +#include "ena_eth_com.h" + +#define DRV_MODULE_VER_MAJOR 1 +#define DRV_MODULE_VER_MINOR 0 +#define DRV_MODULE_VER_SUBMINOR 2 + +#define DRV_MODULE_NAME "ena" +#ifndef DRV_MODULE_VERSION +#define DRV_MODULE_VERSION \ + __stringify(DRV_MODULE_VER_MAJOR) "." \ + __stringify(DRV_MODULE_VER_MINOR) "." \ + __stringify(DRV_MODULE_VER_SUBMINOR) +#endif + +#define DEVICE_NAME "Elastic Network Adapter (ENA)" + +/* 1 for AENQ + ADMIN */ +#define ENA_MAX_MSIX_VEC(io_queues) (1 + (io_queues)) + +#define ENA_REG_BAR 0 +#define ENA_MEM_BAR 2 +#define ENA_BAR_MASK (BIT(ENA_REG_BAR) | BIT(ENA_MEM_BAR)) + +#define ENA_DEFAULT_RING_SIZE (1024) + +#define ENA_TX_WAKEUP_THRESH (MAX_SKB_FRAGS + 2) +#define ENA_DEFAULT_RX_COPYBREAK (128 - NET_IP_ALIGN) + +/* limit the buffer size to 600 bytes to handle MTU changes from very + * small to very large, in which case the number of buffers per packet + * could exceed ENA_PKT_MAX_BUFS + */ +#define ENA_DEFAULT_MIN_RX_BUFF_ALLOC_SIZE 600 + +#define ENA_MIN_MTU 128 + +#define ENA_NAME_MAX_LEN 20 +#define ENA_IRQNAME_SIZE 40 + +#define ENA_PKT_MAX_BUFS 19 + +#define ENA_RX_RSS_TABLE_LOG_SIZE 7 +#define ENA_RX_RSS_TABLE_SIZE (1 << ENA_RX_RSS_TABLE_LOG_SIZE) + +#define ENA_HASH_KEY_SIZE 40 + +/* The number of tx packet completions that will be handled each NAPI poll + * cycle is ring_size / ENA_TX_POLL_BUDGET_DIVIDER. + */ +#define ENA_TX_POLL_BUDGET_DIVIDER 4 + +/* Refill Rx queue when number of available descriptors is below + * QUEUE_SIZE / ENA_RX_REFILL_THRESH_DIVIDER + */ +#define ENA_RX_REFILL_THRESH_DIVIDER 8 + +/* Number of queues to check for missing queues per timer service */ +#define ENA_MONITORED_TX_QUEUES 4 +/* Max timeout packets before device reset */ +#define MAX_NUM_OF_TIMEOUTED_PACKETS 32 + +#define ENA_TX_RING_IDX_NEXT(idx, ring_size) (((idx) + 1) & ((ring_size) - 1)) + +#define ENA_RX_RING_IDX_NEXT(idx, ring_size) (((idx) + 1) & ((ring_size) - 1)) +#define ENA_RX_RING_IDX_ADD(idx, n, ring_size) \ + (((idx) + (n)) & ((ring_size) - 1)) + +#define ENA_IO_TXQ_IDX(q) (2 * (q)) +#define ENA_IO_RXQ_IDX(q) (2 * (q) + 1) + +#define ENA_MGMNT_IRQ_IDX 0 +#define ENA_IO_IRQ_FIRST_IDX 1 +#define ENA_IO_IRQ_IDX(q) (ENA_IO_IRQ_FIRST_IDX + (q)) + +/* ENA device should send keep alive msg every 1 sec. + * We wait for 3 sec just to be on the safe side. 
+ */ +#define ENA_DEVICE_KALIVE_TIMEOUT (3 * HZ) + +#define ENA_MMIO_DISABLE_REG_READ BIT(0) + +struct ena_irq { + irq_handler_t handler; + void *data; + int cpu; + u32 vector; + cpumask_t affinity_hint_mask; + char name[ENA_IRQNAME_SIZE]; +}; + +struct ena_napi { + struct napi_struct napi ____cacheline_aligned; + struct ena_ring *tx_ring; + struct ena_ring *rx_ring; + u32 qid; +}; + +struct ena_tx_buffer { + struct sk_buff *skb; + /* num of ena desc for this specific skb + * (includes data desc and metadata desc) + */ + u32 tx_descs; + /* num of buffers used by this skb */ + u32 num_of_bufs; + /* Save the last jiffies to detect missing tx packets */ + unsigned long last_jiffies; + struct ena_com_buf bufs[ENA_PKT_MAX_BUFS]; +} ____cacheline_aligned; + +struct ena_rx_buffer { + struct sk_buff *skb; + struct page *page; + u32 page_offset; + struct ena_com_buf ena_buf; +} ____cacheline_aligned; + +struct ena_stats_tx { + u64 cnt; + u64 bytes; + u64 queue_stop; + u64 prepare_ctx_err; + u64 queue_wakeup; + u64 dma_mapping_err; + u64 linearize; + u64 linearize_failed; + u64 napi_comp; + u64 tx_poll; + u64 doorbells; + u64 missing_tx_comp; + u64 bad_req_id; +}; + +struct ena_stats_rx { + u64 cnt; + u64 bytes; + u64 refil_partial; + u64 bad_csum; + u64 page_alloc_fail; + u64 skb_alloc_fail; + u64 dma_mapping_err; + u64 bad_desc_num; + u64 rx_copybreak_pkt; +}; + +struct ena_ring { + /* Holds the empty requests for TX out of order completions */ + u16 *free_tx_ids; + union { + struct ena_tx_buffer *tx_buffer_info; + struct ena_rx_buffer *rx_buffer_info; + }; + + /* cache ptr to avoid using the adapter */ + struct device *dev; + struct pci_dev *pdev; + struct napi_struct *napi; + struct net_device *netdev; + struct ena_com_dev *ena_dev; + struct ena_adapter *adapter; + struct ena_com_io_cq *ena_com_io_cq; + struct ena_com_io_sq *ena_com_io_sq; + + u16 next_to_use; + u16 next_to_clean; + u16 rx_copybreak; + u16 qid; + u16 mtu; + u16 sgl_size; + + /* The maximum header length the device can handle */ + u8 tx_max_header_size; + + /* cpu for TPH */ + int cpu; + /* number of tx/rx_buffer_info's entries */ + int ring_size; + + enum ena_admin_placement_policy_type tx_mem_queue_type; + + struct ena_com_rx_buf_info ena_bufs[ENA_PKT_MAX_BUFS]; + u32 smoothed_interval; + u32 per_napi_packets; + u32 per_napi_bytes; + enum ena_intr_moder_level moder_tbl_idx; + struct u64_stats_sync syncp; + union { + struct ena_stats_tx tx_stats; + struct ena_stats_rx rx_stats; + }; +} ____cacheline_aligned; + +struct ena_stats_dev { + u64 tx_timeout; + u64 io_suspend; + u64 io_resume; + u64 wd_expired; + u64 interface_up; + u64 interface_down; + u64 admin_q_pause; +}; + +enum ena_flags_t { + ENA_FLAG_DEVICE_RUNNING, + ENA_FLAG_DEV_UP, + ENA_FLAG_LINK_UP, + ENA_FLAG_MSIX_ENABLED, + ENA_FLAG_TRIGGER_RESET +}; + +/* adapter specific private data structure */ +struct ena_adapter { + struct ena_com_dev *ena_dev; + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + /* rx packets that shorter that this len will be copied to the skb + * header + */ + u32 rx_copybreak; + u32 max_mtu; + + int num_queues; + + struct msix_entry *msix_entries; + int msix_vecs; + + u32 tx_usecs, rx_usecs; /* interrupt moderation */ + u32 tx_frames, rx_frames; /* interrupt moderation */ + + u32 tx_ring_size; + u32 rx_ring_size; + + u32 msg_enable; + + u16 max_tx_sgl_size; + u16 max_rx_sgl_size; + + u8 mac_addr[ETH_ALEN]; + + char name[ENA_NAME_MAX_LEN]; + + unsigned long flags; + /* TX */ + struct ena_ring 
tx_ring[ENA_MAX_NUM_IO_QUEUES] + ____cacheline_aligned_in_smp; + + /* RX */ + struct ena_ring rx_ring[ENA_MAX_NUM_IO_QUEUES] + ____cacheline_aligned_in_smp; + + struct ena_napi ena_napi[ENA_MAX_NUM_IO_QUEUES]; + + struct ena_irq irq_tbl[ENA_MAX_MSIX_VEC(ENA_MAX_NUM_IO_QUEUES)]; + + /* timer service */ + struct work_struct reset_task; + struct work_struct suspend_io_task; + struct work_struct resume_io_task; + struct timer_list timer_service; + + bool wd_state; + unsigned long last_keep_alive_jiffies; + + struct u64_stats_sync syncp; + struct ena_stats_dev dev_stats; + + /* last queue index that was checked for uncompleted tx packets */ + u32 last_monitored_tx_qid; +}; + +void ena_set_ethtool_ops(struct net_device *netdev); + +void ena_dump_stats_to_dmesg(struct ena_adapter *adapter); + +void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf); + +int ena_get_sset_count(struct net_device *netdev, int sset); + +#endif /* !(ENA_H) */ diff --git a/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h b/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h new file mode 100644 index 000000000000..f80d2a47fa94 --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h @@ -0,0 +1,67 @@ +/* + * Copyright 2015 Amazon.com, Inc. or its affiliates. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef ENA_PCI_ID_TBL_H_ +#define ENA_PCI_ID_TBL_H_ + +#ifndef PCI_VENDOR_ID_AMAZON +#define PCI_VENDOR_ID_AMAZON 0x1d0f +#endif + +#ifndef PCI_DEV_ID_ENA_PF +#define PCI_DEV_ID_ENA_PF 0x0ec2 +#endif + +#ifndef PCI_DEV_ID_ENA_LLQ_PF +#define PCI_DEV_ID_ENA_LLQ_PF 0x1ec2 +#endif + +#ifndef PCI_DEV_ID_ENA_VF +#define PCI_DEV_ID_ENA_VF 0xec20 +#endif + +#ifndef PCI_DEV_ID_ENA_LLQ_VF +#define PCI_DEV_ID_ENA_LLQ_VF 0xec21 +#endif + +#define ENA_PCI_ID_TABLE_ENTRY(devid) \ + {PCI_DEVICE(PCI_VENDOR_ID_AMAZON, devid)}, + +static const struct pci_device_id ena_pci_tbl[] = { + ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_PF) + ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_LLQ_PF) + ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_VF) + ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_LLQ_VF) + { } +}; + +#endif /* ENA_PCI_ID_TBL_H_ */ diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h new file mode 100644 index 000000000000..26097a2b6030 --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h @@ -0,0 +1,133 @@ +/* + * Copyright 2015 - 2016 Amazon.com, Inc. or its affiliates. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef _ENA_REGS_H_ +#define _ENA_REGS_H_ + +/* ena_registers offsets */ +#define ENA_REGS_VERSION_OFF 0x0 +#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4 +#define ENA_REGS_CAPS_OFF 0x8 +#define ENA_REGS_CAPS_EXT_OFF 0xc +#define ENA_REGS_AQ_BASE_LO_OFF 0x10 +#define ENA_REGS_AQ_BASE_HI_OFF 0x14 +#define ENA_REGS_AQ_CAPS_OFF 0x18 +#define ENA_REGS_ACQ_BASE_LO_OFF 0x20 +#define ENA_REGS_ACQ_BASE_HI_OFF 0x24 +#define ENA_REGS_ACQ_CAPS_OFF 0x28 +#define ENA_REGS_AQ_DB_OFF 0x2c +#define ENA_REGS_ACQ_TAIL_OFF 0x30 +#define ENA_REGS_AENQ_CAPS_OFF 0x34 +#define ENA_REGS_AENQ_BASE_LO_OFF 0x38 +#define ENA_REGS_AENQ_BASE_HI_OFF 0x3c +#define ENA_REGS_AENQ_HEAD_DB_OFF 0x40 +#define ENA_REGS_AENQ_TAIL_OFF 0x44 +#define ENA_REGS_INTR_MASK_OFF 0x4c +#define ENA_REGS_DEV_CTL_OFF 0x54 +#define ENA_REGS_DEV_STS_OFF 0x58 +#define ENA_REGS_MMIO_REG_READ_OFF 0x5c +#define ENA_REGS_MMIO_RESP_LO_OFF 0x60 +#define ENA_REGS_MMIO_RESP_HI_OFF 0x64 +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68 + +/* version register */ +#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff +#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8 +#define ENA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00 + +/* controller_version register */ +#define ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff +#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8 +#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00 +#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16 +#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000 +#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24 +#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000 + +/* caps register */ +#define ENA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1 +#define ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1 +#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e +#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8 +#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00 + +/* aq_caps register */ +#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff +#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16 +#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000 + +/* acq_caps register */ +#define ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff +#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16 +#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xffff0000 + +/* aenq_caps register */ +#define ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff +#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16 +#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xffff0000 + +/* dev_ctl register */ +#define ENA_REGS_DEV_CTL_DEV_RESET_MASK 0x1 +#define ENA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1 +#define ENA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2 +#define ENA_REGS_DEV_CTL_QUIESCENT_SHIFT 2 +#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4 +#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3 +#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8 + +/* dev_sts register */ +#define ENA_REGS_DEV_STS_READY_MASK 0x1 +#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1 +#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2 +#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2 +#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4 +#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3 +#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8 +#define ENA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4 +#define ENA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10 +#define ENA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5 +#define ENA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20 +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT 6 +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK 0x40 +#define 
ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT 7 +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK 0x80 + +/* mmio_reg_read register */ +#define ENA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff +#define ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16 +#define ENA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000 + +/* rss_ind_entry_update register */ +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_INDEX_MASK 0xffff +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16 +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000 + +#endif /*_ENA_REGS_H_ */ diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index 3eee3201b58f..9de078819aa6 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -861,9 +861,15 @@ static int xgbe_resume(struct device *dev) pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER; XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl); - if (netif_running(netdev)) + if (netif_running(netdev)) { ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT); + /* Schedule a restart in case the link or phy state changed + * while we were powered down. + */ + schedule_work(&pdata->restart_work); + } + DBGPR("<--xgbe_resume\n"); return ret; diff --git a/drivers/net/ethernet/apm/xgene/Kconfig b/drivers/net/ethernet/apm/xgene/Kconfig index 300e3b5c54e0..afccb033177b 100644 --- a/drivers/net/ethernet/apm/xgene/Kconfig +++ b/drivers/net/ethernet/apm/xgene/Kconfig @@ -4,6 +4,7 @@ config NET_XGENE depends on ARCH_XGENE || COMPILE_TEST select PHYLIB select MDIO_XGENE + select GPIOLIB help This is the Ethernet driver for the on-chip ethernet interface on the APM X-Gene SoC. diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c index 472c0fb3f4c4..23d72af83d82 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c @@ -32,12 +32,19 @@ static void xgene_cle_sband_to_hw(u8 frag, enum xgene_cle_prot_version ver, SET_VAL(SB_HDRLEN, len); } -static void xgene_cle_idt_to_hw(u32 dstqid, u32 fpsel, +static void xgene_cle_idt_to_hw(struct xgene_enet_pdata *pdata, + u32 dstqid, u32 fpsel, u32 nfpsel, u32 *idt_reg) { - *idt_reg = SET_VAL(IDT_DSTQID, dstqid) | - SET_VAL(IDT_FPSEL, fpsel) | - SET_VAL(IDT_NFPSEL, nfpsel); + if (pdata->enet_id == XGENE_ENET1) { + *idt_reg = SET_VAL(IDT_DSTQID, dstqid) | + SET_VAL(IDT_FPSEL1, fpsel) | + SET_VAL(IDT_NFPSEL1, nfpsel); + } else { + *idt_reg = SET_VAL(IDT_DSTQID, dstqid) | + SET_VAL(IDT_FPSEL, fpsel) | + SET_VAL(IDT_NFPSEL, nfpsel); + } } static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata *pdata, @@ -344,7 +351,7 @@ static int xgene_cle_set_rss_idt(struct xgene_enet_pdata *pdata) nfpsel = 0; idt_reg = 0; - xgene_cle_idt_to_hw(dstqid, fpsel, nfpsel, &idt_reg); + xgene_cle_idt_to_hw(pdata, dstqid, fpsel, nfpsel, &idt_reg); ret = xgene_cle_dram_wr(&pdata->cle, &idt_reg, 1, i, RSS_IDT, CLE_CMD_WR); if (ret) diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h index 33c5f6b25824..9ac9f8e145ec 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h @@ -196,9 +196,13 @@ enum xgene_cle_ptree_dbptrs { #define IDT_DSTQID_POS 0 #define IDT_DSTQID_LEN 12 #define IDT_FPSEL_POS 12 -#define IDT_FPSEL_LEN 4 -#define IDT_NFPSEL_POS 16 -#define IDT_NFPSEL_LEN 4 +#define IDT_FPSEL_LEN 5 +#define IDT_NFPSEL_POS 17 +#define IDT_NFPSEL_LEN 5 +#define IDT_FPSEL1_POS 12 +#define 
IDT_FPSEL1_LEN 4 +#define IDT_NFPSEL1_POS 16 +#define IDT_NFPSEL1_LEN 4 struct xgene_cle_ptree_branch { bool valid; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 18bb9556dd00..321fb197621e 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -761,18 +761,18 @@ int xgene_enet_phy_connect(struct net_device *ndev) if (dev->of_node) { for (i = 0 ; i < 2; i++) { np = of_parse_phandle(dev->of_node, "phy-handle", i); - if (np) - break; - } - if (!np) { - netdev_dbg(ndev, "No phy-handle found in DT\n"); - return -ENODEV; + if (!np) + continue; + + phy_dev = of_phy_connect(ndev, np, + &xgene_enet_adjust_link, + 0, pdata->phy_mode); + of_node_put(np); + if (phy_dev) + break; } - phy_dev = of_phy_connect(ndev, np, &xgene_enet_adjust_link, - 0, pdata->phy_mode); - of_node_put(np); if (!phy_dev) { netdev_err(ndev, "Could not connect to PHY\n"); return -ENODEV; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h index 179a44dceb29..8a8d05500894 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h @@ -124,6 +124,12 @@ enum xgene_enet_rm { #define MAC_READ_REG_OFFSET 0x0c #define MAC_COMMAND_DONE_REG_OFFSET 0x10 +#define PCS_ADDR_REG_OFFSET 0x00 +#define PCS_COMMAND_REG_OFFSET 0x04 +#define PCS_WRITE_REG_OFFSET 0x08 +#define PCS_READ_REG_OFFSET 0x0c +#define PCS_COMMAND_DONE_REG_OFFSET 0x10 + #define MII_MGMT_CONFIG_ADDR 0x20 #define MII_MGMT_COMMAND_ADDR 0x24 #define MII_MGMT_ADDRESS_ADDR 0x28 diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index d1d6b5eeb613..b8b9495e6da6 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -19,6 +19,7 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. 
*/ +#include <linux/gpio.h> #include "xgene_enet_main.h" #include "xgene_enet_hw.h" #include "xgene_enet_sgmac.h" @@ -72,7 +73,6 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool, skb = netdev_alloc_skb_ip_align(ndev, len); if (unlikely(!skb)) return -ENOMEM; - buf_pool->rx_skb[tail] = skb; dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE); if (dma_mapping_error(dev, dma_addr)) { @@ -81,6 +81,8 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool, return -EINVAL; } + buf_pool->rx_skb[tail] = skb; + raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) | SET_VAL(BUFDATALEN, bufdatalen) | SET_BIT(COHERENT)); @@ -102,12 +104,21 @@ static u8 xgene_enet_hdr_len(const void *data) static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool) { + struct device *dev = ndev_to_dev(buf_pool->ndev); + struct xgene_enet_raw_desc16 *raw_desc; + dma_addr_t dma_addr; int i; /* Free up the buffers held by hardware */ for (i = 0; i < buf_pool->slots; i++) { - if (buf_pool->rx_skb[i]) + if (buf_pool->rx_skb[i]) { dev_kfree_skb_any(buf_pool->rx_skb[i]); + + raw_desc = &buf_pool->raw_desc16[i]; + dma_addr = GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)); + dma_unmap_single(dev, dma_addr, XGENE_ENET_MAX_MTU, + DMA_FROM_DEVICE); + } } } @@ -452,7 +463,6 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring, struct xgene_enet_raw_desc *raw_desc) { struct net_device *ndev; - struct xgene_enet_pdata *pdata; struct device *dev; struct xgene_enet_desc_ring *buf_pool; u32 datalen, skb_index; @@ -461,7 +471,6 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring, int ret = 0; ndev = rx_ring->ndev; - pdata = netdev_priv(ndev); dev = ndev_to_dev(rx_ring->ndev); buf_pool = rx_ring->buf_pool; @@ -1312,6 +1321,18 @@ static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata) return 0; } +static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata) +{ + struct device *dev = &pdata->pdev->dev; + + if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) + return; + + pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN); + if (IS_ERR(pdata->sfp_rdy)) + pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN); +} + static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) { struct platform_device *pdev; @@ -1401,6 +1422,8 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) if (ret) return ret; + xgene_enet_gpiod_get(pdata); + pdata->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(pdata->clk)) { /* Firmware may have set up the clock already. 
*/ @@ -1425,6 +1448,7 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) } else { pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET; pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET; + pdata->pcs_addr = base_addr + BLOCK_PCS_OFFSET; } pdata->rx_buff_cnt = NUM_PKT_BUF; @@ -1454,10 +1478,8 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata) buf_pool = pdata->rx_ring[i]->buf_pool; xgene_enet_init_bufpool(buf_pool); ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt); - if (ret) { - xgene_enet_delete_desc_rings(pdata); - return ret; - } + if (ret) + goto err; } dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]); @@ -1474,7 +1496,7 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata) ret = pdata->cle_ops->cle_init(pdata); if (ret) { netdev_err(ndev, "Preclass Tree init error\n"); - return ret; + goto err; } } else { pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id); @@ -1484,6 +1506,10 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata) pdata->mac_ops->init(pdata); return ret; + +err: + xgene_enet_delete_desc_rings(pdata); + return ret; } static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata) @@ -1631,8 +1657,8 @@ static int xgene_enet_probe(struct platform_device *pdev) } #endif if (!pdata->enet_id) { - free_netdev(ndev); - return -ENODEV; + ret = -ENODEV; + goto err; } ret = xgene_enet_get_resources(pdata); @@ -1655,7 +1681,7 @@ static int xgene_enet_probe(struct platform_device *pdev) ret = xgene_enet_init_hw(pdata); if (ret) - goto err_netdev; + goto err; link_state = pdata->mac_ops->link_state; if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { @@ -1665,21 +1691,32 @@ static int xgene_enet_probe(struct platform_device *pdev) ret = xgene_enet_mdio_config(pdata); else INIT_DELAYED_WORK(&pdata->link_work, link_state); + + if (ret) + goto err1; } - if (ret) - goto err; xgene_enet_napi_add(pdata); ret = register_netdev(ndev); if (ret) { netdev_err(ndev, "Failed to register netdev\n"); - goto err; + goto err2; } return 0; -err_netdev: - unregister_netdev(ndev); +err2: + /* + * If necessary, free_netdev() will call netif_napi_del() and undo + * the effects of xgene_enet_napi_add()'s calls to netif_napi_add(). 
+ */ + + if (pdata->mdio_driver) + xgene_enet_phy_disconnect(pdata); + else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) + xgene_enet_mdio_remove(pdata); +err1: + xgene_enet_delete_desc_rings(pdata); err: free_netdev(ndev); return ret; @@ -1688,11 +1725,9 @@ err: static int xgene_enet_remove(struct platform_device *pdev) { struct xgene_enet_pdata *pdata; - const struct xgene_mac_ops *mac_ops; struct net_device *ndev; pdata = platform_get_drvdata(pdev); - mac_ops = pdata->mac_ops; ndev = pdata->ndev; rtnl_lock(); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h index 217546e5714a..b339fc1e8841 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h @@ -196,6 +196,7 @@ struct xgene_enet_pdata { void __iomem *mcx_mac_addr; void __iomem *mcx_mac_csr_addr; void __iomem *base_addr; + void __iomem *pcs_addr; void __iomem *ring_csr_addr; void __iomem *ring_cmd_addr; int phy_mode; @@ -216,6 +217,7 @@ struct xgene_enet_pdata { u8 tx_delay; u8 rx_delay; bool mdio_driver; + struct gpio_desc *sfp_rdy; }; struct xgene_indirect_ctl { diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c index 9c6ad0dce00f..279ee27004f7 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c @@ -18,6 +18,8 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ +#include <linux/of_gpio.h> +#include <linux/gpio.h> #include "xgene_enet_main.h" #include "xgene_enet_hw.h" #include "xgene_enet_xgmac.h" @@ -84,6 +86,21 @@ static void xgene_enet_wr_mac(struct xgene_enet_pdata *pdata, wr_addr); } +static void xgene_enet_wr_pcs(struct xgene_enet_pdata *pdata, + u32 wr_addr, u32 wr_data) +{ + void __iomem *addr, *wr, *cmd, *cmd_done; + + addr = pdata->pcs_addr + PCS_ADDR_REG_OFFSET; + wr = pdata->pcs_addr + PCS_WRITE_REG_OFFSET; + cmd = pdata->pcs_addr + PCS_COMMAND_REG_OFFSET; + cmd_done = pdata->pcs_addr + PCS_COMMAND_DONE_REG_OFFSET; + + if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data)) + netdev_err(pdata->ndev, "PCS write failed, addr: %04x\n", + wr_addr); +} + static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata, u32 offset, u32 *val) { @@ -122,6 +139,7 @@ static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd, return true; } + static void xgene_enet_rd_mac(struct xgene_enet_pdata *pdata, u32 rd_addr, u32 *rd_data) { @@ -137,6 +155,25 @@ static void xgene_enet_rd_mac(struct xgene_enet_pdata *pdata, rd_addr); } +static bool xgene_enet_rd_pcs(struct xgene_enet_pdata *pdata, + u32 rd_addr, u32 *rd_data) +{ + void __iomem *addr, *rd, *cmd, *cmd_done; + bool success; + + addr = pdata->pcs_addr + PCS_ADDR_REG_OFFSET; + rd = pdata->pcs_addr + PCS_READ_REG_OFFSET; + cmd = pdata->pcs_addr + PCS_COMMAND_REG_OFFSET; + cmd_done = pdata->pcs_addr + PCS_COMMAND_DONE_REG_OFFSET; + + success = xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data); + if (!success) + netdev_err(pdata->ndev, "PCS read failed, addr: %04x\n", + rd_addr); + + return success; +} + static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata) { struct net_device *ndev = pdata->ndev; @@ -171,6 +208,17 @@ static void xgene_xgmac_reset(struct xgene_enet_pdata *pdata) xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_0, 0); } +static void xgene_pcs_reset(struct xgene_enet_pdata *pdata) +{ + u32 data; + + if (!xgene_enet_rd_pcs(pdata, PCS_CONTROL_1, 
&data)) + return; + + xgene_enet_wr_pcs(pdata, PCS_CONTROL_1, data | PCS_CTRL_PCS_RST); + xgene_enet_wr_pcs(pdata, PCS_CONTROL_1, data & ~PCS_CTRL_PCS_RST); +} + static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata) { u32 addr0, addr1; @@ -216,12 +264,12 @@ static void xgene_xgmac_init(struct xgene_enet_pdata *pdata) data |= CFG_RSIF_FPBUFF_TIMEOUT_EN; xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, data); - xgene_enet_wr_csr(pdata, XG_CFG_BYPASS_ADDR, RESUME_TX); - xgene_enet_wr_csr(pdata, XGENET_RX_DV_GATE_REG_0_ADDR, 0); xgene_enet_rd_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, &data); data |= BIT(12); xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, data); xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x82); + xgene_enet_wr_csr(pdata, XGENET_RX_DV_GATE_REG_0_ADDR, 0); + xgene_enet_wr_csr(pdata, XG_CFG_BYPASS_ADDR, RESUME_TX); } static void xgene_xgmac_rx_enable(struct xgene_enet_pdata *pdata) @@ -359,14 +407,17 @@ static void xgene_enet_link_state(struct work_struct *work) { struct xgene_enet_pdata *pdata = container_of(to_delayed_work(work), struct xgene_enet_pdata, link_work); + struct gpio_desc *sfp_rdy = pdata->sfp_rdy; struct net_device *ndev = pdata->ndev; u32 link_status, poll_interval; link_status = xgene_enet_link_status(pdata); + if (link_status && !IS_ERR(sfp_rdy) && !gpiod_get_value(sfp_rdy)) + link_status = 0; + if (link_status) { if (!netif_carrier_ok(ndev)) { netif_carrier_on(ndev); - xgene_xgmac_init(pdata); xgene_xgmac_rx_enable(pdata); xgene_xgmac_tx_enable(pdata); netdev_info(ndev, "Link is Up - 10Gbps\n"); @@ -380,6 +431,8 @@ static void xgene_enet_link_state(struct work_struct *work) netdev_info(ndev, "Link is Down\n"); } poll_interval = PHY_POLL_LINK_OFF; + + xgene_pcs_reset(pdata); } schedule_delayed_work(&pdata->link_work, poll_interval); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h index f1ea485f916b..360ccbd95566 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h @@ -24,6 +24,7 @@ #define X2_BLOCK_ETH_MAC_CSR_OFFSET 0x3000 #define BLOCK_AXG_MAC_OFFSET 0x0800 #define BLOCK_AXG_MAC_CSR_OFFSET 0x2000 +#define BLOCK_PCS_OFFSET 0x3800 #define XGENET_CONFIG_REG_ADDR 0x20 #define XGENET_SRST_ADDR 0x00 @@ -72,6 +73,9 @@ #define XG_MCX_ICM_CONFIG0_REG_0_ADDR 0x00e0 #define XG_MCX_ICM_CONFIG2_REG_0_ADDR 0x00e8 +#define PCS_CONTROL_1 0x0000 +#define PCS_CTRL_PCS_RST BIT(15) + extern const struct xgene_mac_ops xgene_xgmac_ops; extern const struct xgene_port_ops xgene_xgport_ops; diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 4eb17daefc4f..d29a4f3102d6 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -993,6 +993,18 @@ static void alx_reset(struct work_struct *work) rtnl_unlock(); } +static int alx_tpd_req(struct sk_buff *skb) +{ + int num; + + num = skb_shinfo(skb)->nr_frags + 1; + /* we need one extra descriptor for LSOv2 */ + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) + num++; + + return num; +} + static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first) { u8 cso, css; @@ -1012,6 +1024,45 @@ static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first) return 0; } +static int alx_tso(struct sk_buff *skb, struct alx_txd *first) +{ + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if 
(err < 0) + return err; + + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + 0, IPPROTO_TCP, 0); + first->word1 |= 1 << TPD_IPV4_SHIFT; + } else if (skb_is_gso_v6(skb)) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + /* LSOv2: the first TPD only provides the packet length */ + first->adrl.l.pkt_len = skb->len; + first->word1 |= 1 << TPD_LSO_V2_SHIFT; + } + + first->word1 |= 1 << TPD_LSO_EN_SHIFT; + first->word1 |= (skb_transport_offset(skb) & + TPD_L4HDROFFSET_MASK) << TPD_L4HDROFFSET_SHIFT; + first->word1 |= (skb_shinfo(skb)->gso_size & + TPD_MSS_MASK) << TPD_MSS_SHIFT; + return 1; +} + static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb) { struct alx_tx_queue *txq = &alx->txq; @@ -1022,6 +1073,16 @@ static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb) first_tpd = &txq->tpd[txq->write_idx]; tpd = first_tpd; + if (tpd->word1 & (1 << TPD_LSO_V2_SHIFT)) { + if (++txq->write_idx == alx->tx_ringsz) + txq->write_idx = 0; + + tpd = &txq->tpd[txq->write_idx]; + tpd->len = first_tpd->len; + tpd->vlan_tag = first_tpd->vlan_tag; + tpd->word1 = first_tpd->word1; + } + maplen = skb_headlen(skb); dma = dma_map_single(&alx->hw.pdev->dev, skb->data, maplen, DMA_TO_DEVICE); @@ -1082,9 +1143,9 @@ static netdev_tx_t alx_start_xmit(struct sk_buff *skb, struct alx_priv *alx = netdev_priv(netdev); struct alx_tx_queue *txq = &alx->txq; struct alx_txd *first; - int tpdreq = skb_shinfo(skb)->nr_frags + 1; + int tso; - if (alx_tpd_avail(alx) < tpdreq) { + if (alx_tpd_avail(alx) < alx_tpd_req(skb)) { netif_stop_queue(alx->dev); goto drop; } @@ -1092,7 +1153,10 @@ static netdev_tx_t alx_start_xmit(struct sk_buff *skb, first = &txq->tpd[txq->write_idx]; memset(first, 0, sizeof(*first)); - if (alx_tx_csum(skb, first)) + tso = alx_tso(skb, first); + if (tso < 0) + goto drop; + else if (!tso && alx_tx_csum(skb, first)) goto drop; if (alx_map_tx_skb(alx, skb) < 0) @@ -1351,7 +1415,10 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } - netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; + netdev->hw_features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6; if (alx_get_perm_macaddr(hw, hw->perm_addr)) { dev_warn(&pdev->dev, diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index b2d30863caeb..2059911014db 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -58,8 +58,8 @@ BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET); static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \ u32 mask) \ { \ - intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \ priv->irq##which##_mask &= ~(mask); \ + intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \ } \ static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \ u32 mask) \ diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c index 625235db644f..c16ec3a51876 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c @@ -92,6 +92,7 @@ MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl); /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */ static int bgmac_probe(struct bcma_device *core) { + struct bcma_chipinfo *ci = 
&core->bus->chipinfo; struct ssb_sprom *sprom = &core->bus->sprom; struct mii_bus *mii_bus; struct bgmac *bgmac; @@ -157,7 +158,8 @@ static int bgmac_probe(struct bcma_device *core) dev_info(bgmac->dev, "Found PHY addr: %d%s\n", bgmac->phyaddr, bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : ""); - if (!bgmac_is_bcm4707_family(core)) { + if (!bgmac_is_bcm4707_family(core) && + !(ci->id == BCMA_CHIP_ID_BCM53573 && core->core_unit == 1)) { mii_bus = bcma_mdio_mii_register(core, bgmac->phyaddr); if (IS_ERR(mii_bus)) { err = PTR_ERR(mii_bus); @@ -230,6 +232,21 @@ static int bgmac_probe(struct bcma_device *core) bgmac->feature_flags |= BGMAC_FEAT_NO_RESET; bgmac->feature_flags |= BGMAC_FEAT_FORCE_SPEED_2500; break; + case BCMA_CHIP_ID_BCM53573: + bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; + bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK; + if (ci->pkg == BCMA_PKG_ID_BCM47189) + bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED; + if (core->core_unit == 0) { + bgmac->feature_flags |= BGMAC_FEAT_CC4_IF_SW_TYPE; + if (ci->pkg == BCMA_PKG_ID_BCM47189) + bgmac->feature_flags |= + BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII; + } else if (core->core_unit == 1) { + bgmac->feature_flags |= BGMAC_FEAT_IRQ_ID_OOB_6; + bgmac->feature_flags |= BGMAC_FEAT_CC7_IF_TYPE_RGMII; + } + break; default: bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK; diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index c4751ece76f6..6ea0e5ff1e44 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -932,7 +932,8 @@ static void bgmac_chip_reset(struct bgmac *bgmac) et_swtype <<= 4; sw_type = et_swtype; } else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_EPHYRMII) { - sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII; + sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RMII | + BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII; } else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_RGMII) { sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII | BGMAC_CHIPCTL_1_SW_TYPE_RGMII; @@ -940,6 +941,27 @@ static void bgmac_chip_reset(struct bgmac *bgmac) bgmac_cco_ctl_maskset(bgmac, 1, ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK | BGMAC_CHIPCTL_1_SW_TYPE_MASK), sw_type); + } else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE) { + u32 sw_type = BGMAC_CHIPCTL_4_IF_TYPE_MII | + BGMAC_CHIPCTL_4_SW_TYPE_EPHY; + u8 et_swtype = 0; + char buf[4]; + + if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) { + if (kstrtou8(buf, 0, &et_swtype)) + dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n", + buf); + sw_type = (et_swtype & 0x0f) << 12; + } else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII) { + sw_type = BGMAC_CHIPCTL_4_IF_TYPE_RGMII | + BGMAC_CHIPCTL_4_SW_TYPE_RGMII; + } + bgmac_cco_ctl_maskset(bgmac, 4, ~(BGMAC_CHIPCTL_4_IF_TYPE_MASK | + BGMAC_CHIPCTL_4_SW_TYPE_MASK), + sw_type); + } else if (bgmac->feature_flags & BGMAC_FEAT_CC7_IF_TYPE_RGMII) { + bgmac_cco_ctl_maskset(bgmac, 7, ~BGMAC_CHIPCTL_7_IF_TYPE_MASK, + BGMAC_CHIPCTL_7_IF_TYPE_RGMII); } if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw) @@ -1467,6 +1489,10 @@ int bgmac_enet_probe(struct bgmac *info) */ bgmac_clk_enable(bgmac, 0); + /* This seems to be fixing IRQ by assigning OOB #6 to the core */ + if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6) + bgmac_idm_write(bgmac, BCMA_OOB_SEL_OUT_A30, 0x86); + bgmac_chip_reset(bgmac); err = bgmac_dma_alloc(bgmac); diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index 24a250267b88..80836b4c9f38 100644 --- 
a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -369,6 +369,21 @@ #define BGMAC_CHIPCTL_1_SW_TYPE_RGMII 0x000000C0 #define BGMAC_CHIPCTL_1_RXC_DLL_BYPASS 0x00010000 +#define BGMAC_CHIPCTL_4_IF_TYPE_MASK 0x00003000 +#define BGMAC_CHIPCTL_4_IF_TYPE_RMII 0x00000000 +#define BGMAC_CHIPCTL_4_IF_TYPE_MII 0x00001000 +#define BGMAC_CHIPCTL_4_IF_TYPE_RGMII 0x00002000 +#define BGMAC_CHIPCTL_4_SW_TYPE_MASK 0x0000C000 +#define BGMAC_CHIPCTL_4_SW_TYPE_EPHY 0x00000000 +#define BGMAC_CHIPCTL_4_SW_TYPE_EPHYMII 0x00004000 +#define BGMAC_CHIPCTL_4_SW_TYPE_EPHYRMII 0x00008000 +#define BGMAC_CHIPCTL_4_SW_TYPE_RGMII 0x0000C000 + +#define BGMAC_CHIPCTL_7_IF_TYPE_MASK 0x000000C0 +#define BGMAC_CHIPCTL_7_IF_TYPE_RMII 0x00000000 +#define BGMAC_CHIPCTL_7_IF_TYPE_MII 0x00000040 +#define BGMAC_CHIPCTL_7_IF_TYPE_RGMII 0x00000080 + #define BGMAC_WEIGHT 64 #define ETHER_MAX_LEN 1518 @@ -390,6 +405,10 @@ #define BGMAC_FEAT_NO_CLR_MIB BIT(13) #define BGMAC_FEAT_FORCE_SPEED_2500 BIT(14) #define BGMAC_FEAT_CMDCFG_SR_REV4 BIT(15) +#define BGMAC_FEAT_IRQ_ID_OOB_6 BIT(16) +#define BGMAC_FEAT_CC4_IF_SW_TYPE BIT(17) +#define BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII BIT(18) +#define BGMAC_FEAT_CC7_IF_TYPE_RGMII BIT(19) struct bgmac_slot_info { union { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 97e892511666..de2d32690394 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -12560,8 +12560,10 @@ static int bnx2x_init_mcast_macs_list(struct bnx2x *bp, kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC); struct netdev_hw_addr *ha; - if (!mc_mac) + if (!mc_mac) { + BNX2X_ERR("Failed to allocate mc MAC list\n"); return -ENOMEM; + } INIT_LIST_HEAD(&p->mcast_list); @@ -12632,7 +12634,7 @@ static int bnx2x_set_uc_list(struct bnx2x *bp) BNX2X_UC_LIST_MAC, &ramrod_flags); } -static int bnx2x_set_mc_list(struct bnx2x *bp) +static int bnx2x_set_mc_list_e1x(struct bnx2x *bp) { struct net_device *dev = bp->dev; struct bnx2x_mcast_ramrod_params rparam = {NULL}; @@ -12650,11 +12652,8 @@ static int bnx2x_set_mc_list(struct bnx2x *bp) /* then, configure a new MACs list */ if (netdev_mc_count(dev)) { rc = bnx2x_init_mcast_macs_list(bp, &rparam); - if (rc) { - BNX2X_ERR("Failed to create multicast MACs list: %d\n", - rc); + if (rc) return rc; - } /* Now add the new MACs */ rc = bnx2x_config_mcast(bp, &rparam, @@ -12669,6 +12668,42 @@ static int bnx2x_set_mc_list(struct bnx2x *bp) return rc; } +static int bnx2x_set_mc_list(struct bnx2x *bp) +{ + struct bnx2x_mcast_ramrod_params rparam = {NULL}; + struct net_device *dev = bp->dev; + int rc = 0; + + /* On older adapters, we need to flush and re-add filters */ + if (CHIP_IS_E1x(bp)) + return bnx2x_set_mc_list_e1x(bp); + + rparam.mcast_obj = &bp->mcast_obj; + + if (netdev_mc_count(dev)) { + rc = bnx2x_init_mcast_macs_list(bp, &rparam); + if (rc) + return rc; + + /* Override the curently configured set of mc filters */ + rc = bnx2x_config_mcast(bp, &rparam, + BNX2X_MCAST_CMD_SET); + if (rc < 0) + BNX2X_ERR("Failed to set a new multicast configuration: %d\n", + rc); + + bnx2x_free_mcast_macs_list(&rparam); + } else { + /* If no mc addresses are required, flush the configuration */ + rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); + if (rc) + BNX2X_ERR("Failed to clear multicast configuration %d\n", + rc); + } + + return rc; +} + /* If bp->state is OPEN, should be called with netif_addr_lock_bh() */ static void 
bnx2x_set_rx_mode(struct net_device *dev) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index ff702a707a91..d468380c2a23 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -2600,6 +2600,12 @@ struct bnx2x_mcast_mac_elem { u8 pad[2]; /* For a natural alignment of the following buffer */ }; +struct bnx2x_mcast_bin_elem { + struct list_head link; + int bin; + int type; /* BNX2X_MCAST_CMD_SET_{ADD, DEL} */ +}; + struct bnx2x_pending_mcast_cmd { struct list_head link; int type; /* BNX2X_MCAST_CMD_X */ @@ -2609,6 +2615,11 @@ struct bnx2x_pending_mcast_cmd { int next_bin; /* Needed for RESTORE flow with aprox match */ } data; + bool set_convert; /* in case type == BNX2X_MCAST_CMD_SET, this is set + * when macs_head had been converted to a list of + * bnx2x_mcast_bin_elem. + */ + bool done; /* set to true, when the command has been handled, * practically used in 57712 handling only, where one pending * command may be handled in a few operations. As long as for @@ -2636,15 +2647,30 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, struct bnx2x_pending_mcast_cmd *new_cmd; struct bnx2x_mcast_mac_elem *cur_mac = NULL; struct bnx2x_mcast_list_elem *pos; - int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ? - p->mcast_list_len : 0); + int macs_list_len = 0, macs_list_len_size; + + /* When adding MACs we'll need to store their values */ + if (cmd == BNX2X_MCAST_CMD_ADD || cmd == BNX2X_MCAST_CMD_SET) + macs_list_len = p->mcast_list_len; /* If the command is empty ("handle pending commands only"), break */ if (!p->mcast_list_len) return 0; - total_sz = sizeof(*new_cmd) + - macs_list_len * sizeof(struct bnx2x_mcast_mac_elem); + /* For a set command, we need to allocate sufficient memory for all + * the bins, since we can't analyze at this point how much memory would + * be required. 
+ */ + macs_list_len_size = macs_list_len * + sizeof(struct bnx2x_mcast_mac_elem); + if (cmd == BNX2X_MCAST_CMD_SET) { + int bin_size = BNX2X_MCAST_BINS_NUM * + sizeof(struct bnx2x_mcast_bin_elem); + + if (bin_size > macs_list_len_size) + macs_list_len_size = bin_size; + } + total_sz = sizeof(*new_cmd) + macs_list_len_size; /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */ new_cmd = kzalloc(total_sz, GFP_ATOMIC); @@ -2662,6 +2688,7 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, switch (cmd) { case BNX2X_MCAST_CMD_ADD: + case BNX2X_MCAST_CMD_SET: cur_mac = (struct bnx2x_mcast_mac_elem *) ((u8 *)new_cmd + sizeof(*new_cmd)); @@ -2771,7 +2798,8 @@ static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp, u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o); int bin; - if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) + if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE) || + (cmd == BNX2X_MCAST_CMD_SET_ADD)) rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD; data->rules[idx].cmd_general_data |= rx_tx_add_flag; @@ -2797,6 +2825,16 @@ static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp, bin = cfg_data->bin; break; + case BNX2X_MCAST_CMD_SET_ADD: + bin = cfg_data->bin; + BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin); + break; + + case BNX2X_MCAST_CMD_SET_DEL: + bin = cfg_data->bin; + BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, bin); + break; + default: BNX2X_ERR("Unknown command: %d\n", cmd); return; @@ -2932,6 +2970,102 @@ static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp, cmd_pos->data.next_bin++; } +static void +bnx2x_mcast_hdl_pending_set_e2_convert(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, + struct bnx2x_pending_mcast_cmd *cmd_pos) +{ + u64 cur[BNX2X_MCAST_VEC_SZ], req[BNX2X_MCAST_VEC_SZ]; + struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n; + struct bnx2x_mcast_bin_elem *p_item; + int i, cnt = 0, mac_cnt = 0; + + memset(req, 0, sizeof(u64) * BNX2X_MCAST_VEC_SZ); + memcpy(cur, o->registry.aprox_match.vec, + sizeof(u64) * BNX2X_MCAST_VEC_SZ); + + /* Fill `current' with the required set of bins to configure */ + list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head, + link) { + int bin = bnx2x_mcast_bin_from_mac(pmac_pos->mac); + + DP(BNX2X_MSG_SP, "Set contains %pM mcast MAC\n", + pmac_pos->mac); + + BIT_VEC64_SET_BIT(req, bin); + list_del(&pmac_pos->link); + mac_cnt++; + } + + /* We no longer have use for the MACs; Need to re-use memory for + * a list that will be used to configure bins. + */ + cmd_pos->set_convert = true; + p_item = (struct bnx2x_mcast_bin_elem *)(cmd_pos + 1); + INIT_LIST_HEAD(&cmd_pos->data.macs_head); + + for (i = 0; i < BNX2X_MCAST_BINS_NUM; i++) { + bool b_current = !!BIT_VEC64_TEST_BIT(cur, i); + bool b_required = !!BIT_VEC64_TEST_BIT(req, i); + + if (b_current == b_required) + continue; + + p_item->bin = i; + p_item->type = b_required ? BNX2X_MCAST_CMD_SET_ADD + : BNX2X_MCAST_CMD_SET_DEL; + list_add_tail(&p_item->link , &cmd_pos->data.macs_head); + p_item++; + cnt++; + } + + /* We now definitely know how many commands are hiding here. + * Also need to correct the disruption we've added to guarantee this + * would be enqueued. 
+ */ + o->total_pending_num -= (o->max_cmd_len + mac_cnt); + o->total_pending_num += cnt; + + DP(BNX2X_MSG_SP, "o->total_pending_num=%d\n", o->total_pending_num); +} + +static void +bnx2x_mcast_hdl_pending_set_e2(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, + struct bnx2x_pending_mcast_cmd *cmd_pos, + int *cnt) +{ + union bnx2x_mcast_config_data cfg_data = {NULL}; + struct bnx2x_mcast_bin_elem *p_item, *p_item_n; + + /* This is actually a 2-part scheme - it starts by converting the MACs + * into a list of bins to be added/removed, and correcting the numbers + * on the object. this is now allowed, as we're now sure that all + * previous configured requests have already applied. + * The second part is actually adding rules for the newly introduced + * entries [like all the rest of the hdl_pending functions]. + */ + if (!cmd_pos->set_convert) + bnx2x_mcast_hdl_pending_set_e2_convert(bp, o, cmd_pos); + + list_for_each_entry_safe(p_item, p_item_n, &cmd_pos->data.macs_head, + link) { + cfg_data.bin = (u8)p_item->bin; + o->set_one_rule(bp, o, *cnt, &cfg_data, p_item->type); + (*cnt)++; + + list_del(&p_item->link); + + /* Break if we reached the maximum number of rules. */ + if (*cnt >= o->max_cmd_len) + break; + } + + /* if no more MACs to configure - we are done */ + if (list_empty(&cmd_pos->data.macs_head)) + cmd_pos->done = true; +} + static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p) { @@ -2955,6 +3089,10 @@ static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp, &cnt); break; + case BNX2X_MCAST_CMD_SET: + bnx2x_mcast_hdl_pending_set_e2(bp, o, cmd_pos, &cnt); + break; + default: BNX2X_ERR("Unknown command: %d\n", cmd_pos->type); return -EINVAL; @@ -3095,6 +3233,19 @@ static int bnx2x_mcast_validate_e2(struct bnx2x *bp, o->set_registry_size(o, reg_sz + p->mcast_list_len); break; + case BNX2X_MCAST_CMD_SET: + /* We can only learn how many commands would actually be used + * when this is being configured. So for now, simply guarantee + * the command will be enqueued [to refrain from adding logic + * that handles this and THEN learns it needs several ramrods]. + * Just like for ADD/Cont, the mcast_list_len might be an over + * estimation; or even more so, since we don't take into + * account the possibility of removal of existing bins. + */ + o->set_registry_size(o, reg_sz + p->mcast_list_len); + o->total_pending_num += o->max_cmd_len; + break; + default: BNX2X_ERR("Unknown command: %d\n", cmd); return -EINVAL; @@ -3108,12 +3259,16 @@ static int bnx2x_mcast_validate_e2(struct bnx2x *bp, static void bnx2x_mcast_revert_e2(struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p, - int old_num_bins) + int old_num_bins, + enum bnx2x_mcast_cmd cmd) { struct bnx2x_mcast_obj *o = p->mcast_obj; o->set_registry_size(o, old_num_bins); o->total_pending_num -= p->mcast_list_len; + + if (cmd == BNX2X_MCAST_CMD_SET) + o->total_pending_num -= o->max_cmd_len; } /** @@ -3223,9 +3378,11 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp, bnx2x_mcast_refresh_registry_e2(bp, o); /* If CLEAR_ONLY was requested - don't send a ramrod and clear - * RAMROD_PENDING status immediately. + * RAMROD_PENDING status immediately. due to the SET option, it's also + * possible that after evaluating the differences there's no need for + * a ramrod. In that case, we can skip it as well. 
*/ - if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { + if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags) || !cnt) { raw->clear_pending(raw); return 0; } else { @@ -3253,6 +3410,11 @@ static int bnx2x_mcast_validate_e1h(struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p, enum bnx2x_mcast_cmd cmd) { + if (cmd == BNX2X_MCAST_CMD_SET) { + BNX2X_ERR("Can't use `set' command on e1h!\n"); + return -EINVAL; + } + /* Mark, that there is a work to do */ if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE)) p->mcast_list_len = 1; @@ -3262,7 +3424,8 @@ static int bnx2x_mcast_validate_e1h(struct bnx2x *bp, static void bnx2x_mcast_revert_e1h(struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p, - int old_num_bins) + int old_num_bins, + enum bnx2x_mcast_cmd cmd) { /* Do nothing */ } @@ -3372,6 +3535,11 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp, struct bnx2x_mcast_obj *o = p->mcast_obj; int reg_sz = o->get_registry_size(o); + if (cmd == BNX2X_MCAST_CMD_SET) { + BNX2X_ERR("Can't use `set' command on e1!\n"); + return -EINVAL; + } + switch (cmd) { /* DEL command deletes all currently configured MACs */ case BNX2X_MCAST_CMD_DEL: @@ -3422,7 +3590,8 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp, static void bnx2x_mcast_revert_e1(struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p, - int old_num_macs) + int old_num_macs, + enum bnx2x_mcast_cmd cmd) { struct bnx2x_mcast_obj *o = p->mcast_obj; @@ -3816,7 +3985,7 @@ error_exit2: r->clear_pending(r); error_exit1: - o->revert(bp, p, old_reg_size); + o->revert(bp, p, old_reg_size, cmd); return rc; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 4048fc594cce..0bf2fd470819 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -536,6 +536,15 @@ enum bnx2x_mcast_cmd { BNX2X_MCAST_CMD_CONT, BNX2X_MCAST_CMD_DEL, BNX2X_MCAST_CMD_RESTORE, + + /* Following this, multicast configuration should equal to approx + * the set of MACs provided [i.e., remove all else]. 
+ * The two sub-commands are used internally to decide whether a given + * bin is to be added or removed + */ + BNX2X_MCAST_CMD_SET, + BNX2X_MCAST_CMD_SET_ADD, + BNX2X_MCAST_CMD_SET_DEL, }; struct bnx2x_mcast_obj { @@ -635,7 +644,8 @@ struct bnx2x_mcast_obj { */ void (*revert)(struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p, - int old_num_bins); + int old_num_bins, + enum bnx2x_mcast_cmd cmd); int (*get_registry_size)(struct bnx2x_mcast_obj *o); void (*set_registry_size)(struct bnx2x_mcast_obj *o, int n); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 632daff117d3..6c586b045d1d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -573,17 +573,6 @@ int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, } } - /* clear existing mcasts */ - mcast.mcast_list_len = vf->mcast_list_len; - vf->mcast_list_len = mc_num; - rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL); - if (rc) { - BNX2X_ERR("Failed to remove multicasts\n"); - kfree(mc); - return rc; - } - - /* update mcast list on the ramrod params */ if (mc_num) { INIT_LIST_HEAD(&mcast.mcast_list); for (i = 0; i < mc_num; i++) { @@ -594,12 +583,18 @@ int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, /* add new mcasts */ mcast.mcast_list_len = mc_num; - rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD); + rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_SET); if (rc) - BNX2X_ERR("Faled to add multicasts\n"); - kfree(mc); + BNX2X_ERR("Faled to set multicasts\n"); + } else { + /* clear existing mcasts */ + rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL); + if (rc) + BNX2X_ERR("Failed to remove multicasts\n"); } + kfree(mc); + return rc; } @@ -1583,7 +1578,6 @@ int bnx2x_iov_nic_init(struct bnx2x *bp) * It needs to be initialized here so that it can be safely * handled by a subsequent FLR flow. 
*/ - vf->mcast_list_len = 0; bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF, 0xFF, 0xFF, 0xFF, bnx2x_vf_sp(bp, vf, mcast_rdata), diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index 670a581ffabc..7a6d406f4c11 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -195,7 +195,6 @@ struct bnx2x_virtf { int leading_rss; /* MCAST object */ - int mcast_list_len; struct bnx2x_mcast_obj mcast_obj; /* RSS configuration object */ diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 771cc267f217..f9df4b5ae90e 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -54,9 +54,7 @@ MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1," * Global variables */ static u32 bnad_rxqs_per_cq = 2; -static u32 bna_id; -static struct mutex bnad_list_mutex; -static LIST_HEAD(bnad_list); +static atomic_t bna_id; static const u8 bnad_bcast_addr[] __aligned(2) = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; @@ -76,23 +74,6 @@ do { \ (_res_info)->res_u.mem_info.len = (_size); \ } while (0) -static void -bnad_add_to_list(struct bnad *bnad) -{ - mutex_lock(&bnad_list_mutex); - list_add_tail(&bnad->list_entry, &bnad_list); - bnad->id = bna_id++; - mutex_unlock(&bnad_list_mutex); -} - -static void -bnad_remove_from_list(struct bnad *bnad) -{ - mutex_lock(&bnad_list_mutex); - list_del(&bnad->list_entry); - mutex_unlock(&bnad_list_mutex); -} - /* * Reinitialize completions in CQ, once Rx is taken down */ @@ -3573,14 +3554,12 @@ bnad_lock_init(struct bnad *bnad) { spin_lock_init(&bnad->bna_lock); mutex_init(&bnad->conf_mutex); - mutex_init(&bnad_list_mutex); } static void bnad_lock_uninit(struct bnad *bnad) { mutex_destroy(&bnad->conf_mutex); - mutex_destroy(&bnad_list_mutex); } /* PCI Initialization */ @@ -3653,7 +3632,7 @@ bnad_pci_probe(struct pci_dev *pdev, } bnad = netdev_priv(netdev); bnad_lock_init(bnad); - bnad_add_to_list(bnad); + bnad->id = atomic_inc_return(&bna_id) - 1; mutex_lock(&bnad->conf_mutex); /* @@ -3807,7 +3786,6 @@ pci_uninit: bnad_pci_uninit(pdev); unlock_mutex: mutex_unlock(&bnad->conf_mutex); - bnad_remove_from_list(bnad); bnad_lock_uninit(bnad); free_netdev(netdev); return err; @@ -3845,7 +3823,6 @@ bnad_pci_remove(struct pci_dev *pdev) bnad_disable_msix(bnad); bnad_pci_uninit(pdev); mutex_unlock(&bnad->conf_mutex); - bnad_remove_from_list(bnad); bnad_lock_uninit(bnad); /* Remove the debugfs node for this bnad */ kfree(bnad->regdata); diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h index f4ed816b93ee..46f7b842b39c 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.h +++ b/drivers/net/ethernet/brocade/bna/bnad.h @@ -288,7 +288,6 @@ struct bnad_rx_unmap_q { struct bnad { struct net_device *netdev; u32 id; - struct list_head list_entry; /* Data path */ struct bnad_tx_info tx_info[BNAD_MAX_TX]; diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 89c0cfa9719f..32568392b9f9 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -541,6 +541,14 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb) } } +static inline void macb_set_addr(struct macb_dma_desc *desc, dma_addr_t addr) +{ + desc->addr = (u32)addr; +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + desc->addrh = (u32)(addr >> 32); +#endif +} + static void 
macb_tx_error_task(struct work_struct *work) { struct macb_queue *queue = container_of(work, struct macb_queue, @@ -621,14 +629,17 @@ static void macb_tx_error_task(struct work_struct *work) /* Set end of TX queue */ desc = macb_tx_desc(queue, 0); - desc->addr = 0; + macb_set_addr(desc, 0); desc->ctrl = MACB_BIT(TX_USED); /* Make descriptor updates visible to hardware */ wmb(); /* Reinitialize the TX desc queue */ - queue_writel(queue, TBQP, queue->tx_ring_dma); + queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma)); +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32)); +#endif /* Make TX ring reflect state of hardware */ queue->tx_head = 0; queue->tx_tail = 0; @@ -750,7 +761,7 @@ static void gem_rx_refill(struct macb *bp) if (entry == RX_RING_SIZE - 1) paddr |= MACB_BIT(RX_WRAP); - bp->rx_ring[entry].addr = paddr; + macb_set_addr(&(bp->rx_ring[entry]), paddr); bp->rx_ring[entry].ctrl = 0; /* properly align Ethernet header */ @@ -798,7 +809,9 @@ static int gem_rx(struct macb *bp, int budget) int count = 0; while (count < budget) { - u32 addr, ctrl; + u32 ctrl; + dma_addr_t addr; + bool rxused; entry = macb_rx_ring_wrap(bp->rx_tail); desc = &bp->rx_ring[entry]; @@ -806,10 +819,14 @@ static int gem_rx(struct macb *bp, int budget) /* Make hw descriptor updates visible to CPU */ rmb(); - addr = desc->addr; + rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false; + addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + addr |= ((u64)(desc->addrh) << 32); +#endif ctrl = desc->ctrl; - if (!(addr & MACB_BIT(RX_USED))) + if (!rxused) break; bp->rx_tail++; @@ -835,7 +852,6 @@ static int gem_rx(struct macb *bp, int budget) netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); skb_put(skb, len); - addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr)); dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, DMA_FROM_DEVICE); @@ -1299,7 +1315,7 @@ static unsigned int macb_tx_map(struct macb *bp, ctrl |= MACB_BIT(TX_WRAP); /* Set TX buffer descriptor */ - desc->addr = tx_skb->mapping; + macb_set_addr(desc, tx_skb->mapping); /* desc->addr must be visible to hardware before clearing * 'TX_USED' bit in desc->ctrl. 
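The receive path now open-codes the reverse of macb_set_addr() in gem_rx() above (and in gem_free_rx_buffers() further down). A hedged sketch of that read side as a helper, using only the descriptor fields and bitfield macros from this patch (the helper name is illustrative):

static inline dma_addr_t macb_get_rx_addr(struct macb_dma_desc *desc)
{
	dma_addr_t addr;

	/* Low word, with the RX_USED/RX_WRAP control bits masked off */
	addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	/* Upper 32 bits live in the extended descriptor word */
	addr |= ((u64)(desc->addrh) << 32);
#endif
	return addr;
}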
*/ @@ -1422,6 +1438,9 @@ static void gem_free_rx_buffers(struct macb *bp) desc = &bp->rx_ring[i]; addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + addr |= ((u64)(desc->addrh) << 32); +#endif dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, DMA_FROM_DEVICE); dev_kfree_skb_any(skb); @@ -1547,7 +1566,7 @@ static void gem_init_rings(struct macb *bp) for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { for (i = 0; i < TX_RING_SIZE; i++) { - queue->tx_ring[i].addr = 0; + macb_set_addr(&(queue->tx_ring[i]), 0); queue->tx_ring[i].ctrl = MACB_BIT(TX_USED); } queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); @@ -1694,6 +1713,10 @@ static void macb_configure_dma(struct macb *bp) dmacfg |= GEM_BIT(TXCOEN); else dmacfg &= ~GEM_BIT(TXCOEN); + +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + dmacfg |= GEM_BIT(ADDR64); +#endif netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", dmacfg); gem_writel(bp, DMACFG, dmacfg); @@ -1739,9 +1762,15 @@ static void macb_init_hw(struct macb *bp) macb_configure_dma(bp); /* Initialize TX and RX buffers */ - macb_writel(bp, RBQP, bp->rx_ring_dma); + macb_writel(bp, RBQP, (u32)(bp->rx_ring_dma)); +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + macb_writel(bp, RBQPH, (u32)(bp->rx_ring_dma >> 32)); +#endif for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { - queue_writel(queue, TBQP, queue->tx_ring_dma); + queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma)); +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32)); +#endif /* Enable interrupts */ queue_writel(queue, IER, @@ -2303,7 +2332,8 @@ static void macb_probe_queues(void __iomem *mem, } static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, - struct clk **hclk, struct clk **tx_clk) + struct clk **hclk, struct clk **tx_clk, + struct clk **rx_clk) { int err; @@ -2325,6 +2355,10 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, if (IS_ERR(*tx_clk)) *tx_clk = NULL; + *rx_clk = devm_clk_get(&pdev->dev, "rx_clk"); + if (IS_ERR(*rx_clk)) + *rx_clk = NULL; + err = clk_prepare_enable(*pclk); if (err) { dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err); @@ -2343,8 +2377,17 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, goto err_disable_hclk; } + err = clk_prepare_enable(*rx_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err); + goto err_disable_txclk; + } + return 0; +err_disable_txclk: + clk_disable_unprepare(*tx_clk); + err_disable_hclk: clk_disable_unprepare(*hclk); @@ -2379,6 +2422,9 @@ static int macb_init(struct platform_device *pdev) queue->IDR = GEM_IDR(hw_q - 1); queue->IMR = GEM_IMR(hw_q - 1); queue->TBQP = GEM_TBQP(hw_q - 1); +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + queue->TBQPH = GEM_TBQPH(hw_q -1); +#endif } else { /* queue0 uses legacy registers */ queue->ISR = MACB_ISR; @@ -2386,6 +2432,9 @@ static int macb_init(struct platform_device *pdev) queue->IDR = MACB_IDR; queue->IMR = MACB_IMR; queue->TBQP = MACB_TBQP; +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + queue->TBQPH = MACB_TBQPH; +#endif } /* get irq: here we use the linux queue index, not the hardware @@ -2728,12 +2777,14 @@ static const struct net_device_ops at91ether_netdev_ops = { }; static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk, - struct clk **hclk, struct clk **tx_clk) + struct clk **hclk, struct clk **tx_clk, + struct clk **rx_clk) { int err; *hclk = NULL; *tx_clk = NULL; + *rx_clk = NULL; 
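The optional rx_clk handling above leans on the common clock framework treating a NULL clk as a no-op: if the clock is absent, devm_clk_get() fails, the pointer is cleared, and the later clk_prepare_enable()/clk_disable_unprepare() calls succeed silently. A minimal sketch of the pattern (illustrative helper name):

static int example_get_optional_rx_clk(struct platform_device *pdev,
				       struct clk **rx_clk)
{
	*rx_clk = devm_clk_get(&pdev->dev, "rx_clk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;		/* optional: absence is not an error */

	/* clk_prepare_enable(NULL) returns 0, so no extra check is needed */
	return clk_prepare_enable(*rx_clk);
}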
*pclk = devm_clk_get(&pdev->dev, "ether_clk"); if (IS_ERR(*pclk)) @@ -2857,13 +2908,13 @@ MODULE_DEVICE_TABLE(of, macb_dt_ids); static int macb_probe(struct platform_device *pdev) { int (*clk_init)(struct platform_device *, struct clk **, - struct clk **, struct clk **) + struct clk **, struct clk **, struct clk **) = macb_clk_init; int (*init)(struct platform_device *) = macb_init; struct device_node *np = pdev->dev.of_node; struct device_node *phy_node; const struct macb_config *macb_config = NULL; - struct clk *pclk, *hclk = NULL, *tx_clk = NULL; + struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL; unsigned int queue_mask, num_queues; struct macb_platform_data *pdata; bool native_io; @@ -2891,7 +2942,7 @@ static int macb_probe(struct platform_device *pdev) } } - err = clk_init(pdev, &pclk, &hclk, &tx_clk); + err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk); if (err) return err; @@ -2927,6 +2978,7 @@ static int macb_probe(struct platform_device *pdev) bp->pclk = pclk; bp->hclk = hclk; bp->tx_clk = tx_clk; + bp->rx_clk = rx_clk; if (macb_config) bp->jumbo_max_len = macb_config->jumbo_max_len; @@ -2935,6 +2987,11 @@ static int macb_probe(struct platform_device *pdev) bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + if (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1)) > GEM_DBW32) + dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); +#endif + spin_lock_init(&bp->lock); /* setup capabilities */ @@ -2945,7 +3002,7 @@ static int macb_probe(struct platform_device *pdev) dev->irq = platform_get_irq(pdev, 0); if (dev->irq < 0) { err = dev->irq; - goto err_disable_clocks; + goto err_out_free_netdev; } mac = of_get_mac_address(np); @@ -3020,6 +3077,7 @@ err_disable_clocks: clk_disable_unprepare(tx_clk); clk_disable_unprepare(hclk); clk_disable_unprepare(pclk); + clk_disable_unprepare(rx_clk); return err; } @@ -3046,6 +3104,7 @@ static int macb_remove(struct platform_device *pdev) clk_disable_unprepare(bp->tx_clk); clk_disable_unprepare(bp->hclk); clk_disable_unprepare(bp->pclk); + clk_disable_unprepare(bp->rx_clk); free_netdev(dev); } @@ -3069,6 +3128,7 @@ static int __maybe_unused macb_suspend(struct device *dev) clk_disable_unprepare(bp->tx_clk); clk_disable_unprepare(bp->hclk); clk_disable_unprepare(bp->pclk); + clk_disable_unprepare(bp->rx_clk); } return 0; @@ -3088,6 +3148,7 @@ static int __maybe_unused macb_resume(struct device *dev) clk_prepare_enable(bp->pclk); clk_prepare_enable(bp->hclk); clk_prepare_enable(bp->tx_clk); + clk_prepare_enable(bp->rx_clk); } netif_device_attach(netdev); diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index b6fcf10621b6..8bed4b52fef5 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -66,6 +66,8 @@ #define MACB_USRIO 0x00c0 #define MACB_WOL 0x00c4 #define MACB_MID 0x00fc +#define MACB_TBQPH 0x04C8 +#define MACB_RBQPH 0x04D4 /* GEM register offsets. 
*/ #define GEM_NCFGR 0x0004 /* Network Config */ @@ -139,6 +141,7 @@ #define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2)) #define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2)) +#define GEM_TBQPH(hw_q) (0x04C8) #define GEM_RBQP(hw_q) (0x0480 + ((hw_q) << 2)) #define GEM_IER(hw_q) (0x0600 + ((hw_q) << 2)) #define GEM_IDR(hw_q) (0x0620 + ((hw_q) << 2)) @@ -249,6 +252,8 @@ #define GEM_RXBS_SIZE 8 #define GEM_DDRP_OFFSET 24 /* disc_when_no_ahb */ #define GEM_DDRP_SIZE 1 +#define GEM_ADDR64_OFFSET 30 /* Address bus width - 64b or 32b */ +#define GEM_ADDR64_SIZE 1 /* Bitfields in NSR */ @@ -474,6 +479,10 @@ struct macb_dma_desc { u32 addr; u32 ctrl; +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + u32 addrh; + u32 resvd; +#endif }; /* DMA descriptor bitfields */ @@ -763,7 +772,8 @@ struct macb_config { u32 caps; unsigned int dma_burst_length; int (*clk_init)(struct platform_device *pdev, struct clk **pclk, - struct clk **hclk, struct clk **tx_clk); + struct clk **hclk, struct clk **tx_clk, + struct clk **rx_clk); int (*init)(struct platform_device *pdev); int jumbo_max_len; }; @@ -777,6 +787,7 @@ struct macb_queue { unsigned int IDR; unsigned int IMR; unsigned int TBQP; + unsigned int TBQPH; unsigned int tx_head, tx_tail; struct macb_dma_desc *tx_ring; @@ -809,6 +820,7 @@ struct macb { struct clk *pclk; struct clk *hclk; struct clk *tx_clk; + struct clk *rx_clk; struct net_device *dev; struct napi_struct napi; struct net_device_stats stats; diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index 0ef232d3331e..e1b78b500309 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@ -36,10 +36,20 @@ config THUNDER_NIC_BGX depends on 64BIT select PHYLIB select MDIO_THUNDER + select THUNDER_NIC_RGX ---help--- This driver supports programming and controlling of MAC interface from NIC physical function driver. +config THUNDER_NIC_RGX + tristate "Thunder MAC interface driver (RGX)" + depends on 64BIT + select PHYLIB + select MDIO_THUNDER + ---help--- + This driver supports configuring XCV block of RGX interface + present on CN81XX chip. 
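Taken together, the macb.h additions above (the addrh/resvd descriptor words, the TBQPH/RBQPH registers and the ADDR64 DMA-config bit) only take effect once the device is allowed to see buffer addresses above 4 GB, which is what the 44-bit DMA mask in the macb_probe() hunk enables. A commented restatement of that probe-time check, assuming the existing DCFG1 bus-width helpers in the driver:

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	/* DCFG1.DBWDEF above GEM_DBW32 means the GEM was synthesised with
	 * a wide data bus, so allow up to 44-bit streaming DMA addresses.
	 */
	if (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1)) > GEM_DBW32)
		dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
#endif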
+ config LIQUIDIO tristate "Cavium LiquidIO support" depends on 64BIT diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 20d6942edf40..f659a95ffc94 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -3190,8 +3190,8 @@ static int liquidio_vlan_rx_kill_vid(struct net_device *netdev, * OCTNET_CMD_RXCSUM_DISABLE * @returns SUCCESS or FAILURE */ -int liquidio_set_rxcsum_command(struct net_device *netdev, int command, - u8 rx_cmd) +static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, + u8 rx_cmd) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; diff --git a/drivers/net/ethernet/cavium/thunder/Makefile b/drivers/net/ethernet/cavium/thunder/Makefile index 5c4615ccaa14..6b4d4add7353 100644 --- a/drivers/net/ethernet/cavium/thunder/Makefile +++ b/drivers/net/ethernet/cavium/thunder/Makefile @@ -2,6 +2,7 @@ # Makefile for Cavium's Thunder ethernet device # +obj-$(CONFIG_THUNDER_NIC_RGX) += thunder_xcv.o obj-$(CONFIG_THUNDER_NIC_BGX) += thunder_bgx.o obj-$(CONFIG_THUNDER_NIC_PF) += nicpf.o obj-$(CONFIG_THUNDER_NIC_VF) += nicvf.o diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index 83025bb4737c..dd63f961827a 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -20,6 +20,17 @@ #define PCI_DEVICE_ID_THUNDER_NIC_VF 0xA034 #define PCI_DEVICE_ID_THUNDER_BGX 0xA026 +/* Subsystem device IDs */ +#define PCI_SUBSYS_DEVID_88XX_NIC_PF 0xA11E +#define PCI_SUBSYS_DEVID_81XX_NIC_PF 0xA21E +#define PCI_SUBSYS_DEVID_83XX_NIC_PF 0xA31E + +#define PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF 0xA11E +#define PCI_SUBSYS_DEVID_88XX_NIC_VF 0xA134 +#define PCI_SUBSYS_DEVID_81XX_NIC_VF 0xA234 +#define PCI_SUBSYS_DEVID_83XX_NIC_VF 0xA334 + + /* PCI BAR nos */ #define PCI_CFG_REG_BAR_NUM 0 #define PCI_MSIX_REG_BAR_NUM 4 @@ -41,40 +52,8 @@ /* Max pkinds */ #define NIC_MAX_PKIND 16 -/* Rx Channels */ -/* Receive channel configuration in TNS bypass mode - * Below is configuration in TNS bypass mode - * BGX0-LMAC0-CHAN0 - VNIC CHAN0 - * BGX0-LMAC1-CHAN0 - VNIC CHAN16 - * ... - * BGX1-LMAC0-CHAN0 - VNIC CHAN128 - * ... 
- * BGX1-LMAC3-CHAN0 - VNIC CHAN174 - */ -#define NIC_INTF_COUNT 2 /* Interfaces btw VNIC and TNS/BGX */ -#define NIC_CHANS_PER_INF 128 -#define NIC_MAX_CHANS (NIC_INTF_COUNT * NIC_CHANS_PER_INF) -#define NIC_CPI_COUNT 2048 /* No of channel parse indices */ - -/* TNS bypass mode: 1-1 mapping between VNIC and BGX:LMAC */ -#define NIC_MAX_BGX MAX_BGX_PER_CN88XX -#define NIC_CPI_PER_BGX (NIC_CPI_COUNT / NIC_MAX_BGX) -#define NIC_MAX_CPI_PER_LMAC 64 /* Max when CPI_ALG is IP diffserv */ -#define NIC_RSSI_PER_BGX (NIC_RSSI_COUNT / NIC_MAX_BGX) - -/* Tx scheduling */ -#define NIC_MAX_TL4 1024 -#define NIC_MAX_TL4_SHAPERS 256 /* 1 shaper for 4 TL4s */ -#define NIC_MAX_TL3 256 -#define NIC_MAX_TL3_SHAPERS 64 /* 1 shaper for 4 TL3s */ -#define NIC_MAX_TL2 64 -#define NIC_MAX_TL2_SHAPERS 2 /* 1 shaper for 32 TL2s */ -#define NIC_MAX_TL1 2 - -/* TNS bypass mode */ -#define NIC_TL2_PER_BGX 32 -#define NIC_TL4_PER_BGX (NIC_MAX_TL4 / NIC_MAX_BGX) -#define NIC_TL4_PER_LMAC (NIC_MAX_TL4 / NIC_CHANS_PER_INF) +/* Max when CPI_ALG is IP diffserv */ +#define NIC_MAX_CPI_PER_LMAC 64 /* NIC VF Interrupts */ #define NICVF_INTR_CQ 0 @@ -148,7 +127,6 @@ struct nicvf_cq_poll { struct napi_struct napi; }; -#define NIC_RSSI_COUNT 4096 /* Total no of RSS indices */ #define NIC_MAX_RSS_HASH_BITS 8 #define NIC_MAX_RSS_IDR_TBL_SIZE (1 << NIC_MAX_RSS_HASH_BITS) #define RSS_HASH_KEY_SIZE 5 /* 320 bit key */ @@ -273,6 +251,7 @@ struct nicvf { struct net_device *netdev; struct pci_dev *pdev; void __iomem *reg_base; +#define MAX_QUEUES_PER_QSET 8 struct queue_set *qs; struct nicvf_cq_poll *napi[8]; u8 vf_id; @@ -368,6 +347,7 @@ struct nicvf { #define NIC_MBOX_MSG_PNICVF_PTR 0x14 /* Get primary qset nicvf ptr */ #define NIC_MBOX_MSG_SNICVF_PTR 0x15 /* Send sqet nicvf ptr to PVF */ #define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */ +#define NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17 /* Reset statistics counters */ #define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */ #define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */ @@ -484,6 +464,31 @@ struct set_loopback { bool enable; }; +/* Reset statistics counters */ +struct reset_stat_cfg { + u8 msg; + /* Bitmap to select NIC_PF_VNIC(vf_id)_RX_STAT(0..13) */ + u16 rx_stat_mask; + /* Bitmap to select NIC_PF_VNIC(vf_id)_TX_STAT(0..4) */ + u8 tx_stat_mask; + /* Bitmap to select NIC_PF_QS(0..127)_RQ(0..7)_STAT(0..1) + * bit14, bit15 NIC_PF_QS(vf_id)_RQ7_STAT(0..1) + * bit12, bit13 NIC_PF_QS(vf_id)_RQ6_STAT(0..1) + * .. + * bit2, bit3 NIC_PF_QS(vf_id)_RQ1_STAT(0..1) + * bit0, bit1 NIC_PF_QS(vf_id)_RQ0_STAT(0..1) + */ + u16 rq_stat_mask; + /* Bitmap to select NIC_PF_QS(0..127)_SQ(0..7)_STAT(0..1) + * bit14, bit15 NIC_PF_QS(vf_id)_SQ7_STAT(0..1) + * bit12, bit13 NIC_PF_QS(vf_id)_SQ6_STAT(0..1) + * .. 
+ * bit2, bit3 NIC_PF_QS(vf_id)_SQ1_STAT(0..1) + * bit0, bit1 NIC_PF_QS(vf_id)_SQ0_STAT(0..1) + */ + u16 sq_stat_mask; +}; + /* 128 bit shared memory between PF and each VF */ union nic_mbx { struct { u8 msg; } msg; @@ -501,6 +506,7 @@ union nic_mbx { struct sqs_alloc sqs_alloc; struct nicvf_ptr nicvf; struct set_loopback lbk; + struct reset_stat_cfg reset_stat; }; #define NIC_NODE_ID_MASK 0x03 @@ -514,7 +520,14 @@ static inline int nic_get_node_id(struct pci_dev *pdev) static inline bool pass1_silicon(struct pci_dev *pdev) { - return pdev->revision < 8; + return (pdev->revision < 8) && + (pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF); +} + +static inline bool pass2_silicon(struct pci_dev *pdev) +{ + return (pdev->revision >= 8) && + (pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF); } int nicvf_set_real_num_queues(struct net_device *netdev, diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 16ed20357c5c..25618d203931 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -20,8 +20,25 @@ #define DRV_NAME "thunder-nic" #define DRV_VERSION "1.0" +struct hw_info { + u8 bgx_cnt; + u8 chans_per_lmac; + u8 chans_per_bgx; /* Rx/Tx chans */ + u8 chans_per_rgx; + u8 chans_per_lbk; + u16 cpi_cnt; + u16 rssi_cnt; + u16 rss_ind_tbl_size; + u16 tl4_cnt; + u16 tl3_cnt; + u8 tl2_cnt; + u8 tl1_cnt; + bool tl1_per_bgx; /* TL1 per BGX or per LMAC */ +}; + struct nicpf { struct pci_dev *pdev; + struct hw_info *hw; u8 node; unsigned int flags; u8 num_vf_en; /* No of VF enabled */ @@ -36,22 +53,22 @@ struct nicpf { #define NIC_SET_VF_LMAC_MAP(bgx, lmac) (((bgx & 0xF) << 4) | (lmac & 0xF)) #define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF) #define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF) - u8 vf_lmac_map[MAX_LMAC]; + u8 *vf_lmac_map; struct delayed_work dwork; struct workqueue_struct *check_link; - u8 link[MAX_LMAC]; - u8 duplex[MAX_LMAC]; - u32 speed[MAX_LMAC]; + u8 *link; + u8 *duplex; + u32 *speed; u16 cpi_base[MAX_NUM_VFS_SUPPORTED]; u16 rssi_base[MAX_NUM_VFS_SUPPORTED]; - u16 rss_ind_tbl_size; bool mbx_lock[MAX_NUM_VFS_SUPPORTED]; /* MSI-X */ bool msix_enabled; u8 num_vec; - struct msix_entry msix_entries[NIC_PF_MSIX_VECTORS]; + struct msix_entry *msix_entries; bool irq_allocated[NIC_PF_MSIX_VECTORS]; + char irq_name[NIC_PF_MSIX_VECTORS][20]; }; /* Supported devices */ @@ -89,9 +106,22 @@ static u64 nic_reg_read(struct nicpf *nic, u64 offset) /* PF -> VF mailbox communication APIs */ static void nic_enable_mbx_intr(struct nicpf *nic) { - /* Enable mailbox interrupt for all 128 VFs */ - nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, ~0ull); - nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64), ~0ull); + int vf_cnt = pci_sriov_get_totalvfs(nic->pdev); + +#define INTR_MASK(vfs) ((vfs < 64) ? 
(BIT_ULL(vfs) - 1) : (~0ull)) + + /* Clear it, to avoid spurious interrupts (if any) */ + nic_reg_write(nic, NIC_PF_MAILBOX_INT, INTR_MASK(vf_cnt)); + + /* Enable mailbox interrupt for all VFs */ + nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, INTR_MASK(vf_cnt)); + /* One mailbox intr enable reg per 64 VFs */ + if (vf_cnt > 64) { + nic_reg_write(nic, NIC_PF_MAILBOX_INT + sizeof(u64), + INTR_MASK(vf_cnt - 64)); + nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64), + INTR_MASK(vf_cnt - 64)); + } } static void nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg) @@ -144,7 +174,7 @@ static void nic_mbx_send_ready(struct nicpf *nic, int vf) mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE; - if (vf < MAX_LMAC) { + if (vf < nic->num_vf_en) { bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); @@ -155,7 +185,7 @@ static void nic_mbx_send_ready(struct nicpf *nic, int vf) mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en) ? true : false; mbx.nic_cfg.node_id = nic->node; - mbx.nic_cfg.loopback_supported = vf < MAX_LMAC; + mbx.nic_cfg.loopback_supported = vf < nic->num_vf_en; nic_send_msg_to_vf(nic, vf, &mbx); } @@ -248,14 +278,22 @@ static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf) /* Set minimum transmit packet size */ static void nic_set_tx_pkt_pad(struct nicpf *nic, int size) { - int lmac; + int lmac, max_lmac; + u16 sdevid; u64 lmac_cfg; /* Max value that can be set is 60 */ if (size > 60) size = 60; - for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) { + pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid); + /* 81xx's RGX has only one LMAC */ + if (sdevid == PCI_SUBSYS_DEVID_81XX_NIC_PF) + max_lmac = ((nic->hw->bgx_cnt - 1) * MAX_LMAC_PER_BGX) + 1; + else + max_lmac = nic->hw->bgx_cnt * MAX_LMAC_PER_BGX; + + for (lmac = 0; lmac < max_lmac; lmac++) { lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3)); lmac_cfg &= ~(0xF << 2); lmac_cfg |= ((size / 4) << 2); @@ -275,7 +313,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic) nic->num_vf_en = 0; - for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) { + for (bgx = 0; bgx < nic->hw->bgx_cnt; bgx++) { if (!(bgx_map & (1 << bgx))) continue; lmac_cnt = bgx_get_lmac_count(nic->node, bgx); @@ -295,28 +333,125 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic) nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8), lmac_credit); + + /* On CN81XX there are only 8 VFs but max possible no of + * interfaces are 9. 
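The INTR_MASK() macro above scales the mailbox enable bits to the actual VF count instead of unconditionally writing ~0 to both registers. A small worked illustration, given an int vf_cnt from pci_sriov_get_totalvfs() and the VF counts mentioned in comments elsewhere in this patch (128 on 88xx, 8 on 81xx):

	u64 lo = INTR_MASK(vf_cnt);				/* first 64 VFs  */
	u64 hi = vf_cnt > 64 ? INTR_MASK(vf_cnt - 64) : 0;	/* remaining VFs */

	/* vf_cnt ==   8 (81xx): lo == 0x00000000000000ff, hi == 0      */
	/* vf_cnt == 128 (88xx): lo == ~0ull,              hi == ~0ull  */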
+ */ + if (nic->num_vf_en >= pci_sriov_get_totalvfs(nic->pdev)) { + nic->num_vf_en = pci_sriov_get_totalvfs(nic->pdev); + break; + } } } +static void nic_free_lmacmem(struct nicpf *nic) +{ + kfree(nic->vf_lmac_map); + kfree(nic->link); + kfree(nic->duplex); + kfree(nic->speed); +} + +static int nic_get_hw_info(struct nicpf *nic) +{ + u8 max_lmac; + u16 sdevid; + struct hw_info *hw = nic->hw; + + pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid); + + switch (sdevid) { + case PCI_SUBSYS_DEVID_88XX_NIC_PF: + hw->bgx_cnt = MAX_BGX_PER_CN88XX; + hw->chans_per_lmac = 16; + hw->chans_per_bgx = 128; + hw->cpi_cnt = 2048; + hw->rssi_cnt = 4096; + hw->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE; + hw->tl3_cnt = 256; + hw->tl2_cnt = 64; + hw->tl1_cnt = 2; + hw->tl1_per_bgx = true; + break; + case PCI_SUBSYS_DEVID_81XX_NIC_PF: + hw->bgx_cnt = MAX_BGX_PER_CN81XX; + hw->chans_per_lmac = 8; + hw->chans_per_bgx = 32; + hw->chans_per_rgx = 8; + hw->chans_per_lbk = 24; + hw->cpi_cnt = 512; + hw->rssi_cnt = 256; + hw->rss_ind_tbl_size = 32; /* Max RSSI / Max interfaces */ + hw->tl3_cnt = 64; + hw->tl2_cnt = 16; + hw->tl1_cnt = 10; + hw->tl1_per_bgx = false; + break; + case PCI_SUBSYS_DEVID_83XX_NIC_PF: + hw->bgx_cnt = MAX_BGX_PER_CN83XX; + hw->chans_per_lmac = 8; + hw->chans_per_bgx = 32; + hw->chans_per_lbk = 64; + hw->cpi_cnt = 2048; + hw->rssi_cnt = 1024; + hw->rss_ind_tbl_size = 64; /* Max RSSI / Max interfaces */ + hw->tl3_cnt = 256; + hw->tl2_cnt = 64; + hw->tl1_cnt = 18; + hw->tl1_per_bgx = false; + break; + } + hw->tl4_cnt = MAX_QUEUES_PER_QSET * pci_sriov_get_totalvfs(nic->pdev); + + /* Allocate memory for LMAC tracking elements */ + max_lmac = hw->bgx_cnt * MAX_LMAC_PER_BGX; + nic->vf_lmac_map = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL); + if (!nic->vf_lmac_map) + goto error; + nic->link = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL); + if (!nic->link) + goto error; + nic->duplex = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL); + if (!nic->duplex) + goto error; + nic->speed = kmalloc_array(max_lmac, sizeof(u32), GFP_KERNEL); + if (!nic->speed) + goto error; + return 0; + +error: + nic_free_lmacmem(nic); + return -ENOMEM; +} + #define BGX0_BLOCK 8 #define BGX1_BLOCK 9 -static void nic_init_hw(struct nicpf *nic) +static int nic_init_hw(struct nicpf *nic) { - int i; + int i, err; u64 cqm_cfg; + /* Get HW capability info */ + err = nic_get_hw_info(nic); + if (err) + return err; + /* Enable NIC HW block */ nic_reg_write(nic, NIC_PF_CFG, 0x3); /* Enable backpressure */ nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03); - /* Disable TNS mode on both interfaces */ - nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG, - (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK); - nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8), - (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK); + /* TNS and TNS bypass modes are present only on 88xx */ + if (nic->pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF) { + /* Disable TNS mode on both interfaces */ + nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG, + (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK); + nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8), + (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK); + } + nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG, (1ULL << 63) | BGX0_BLOCK); nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8), @@ -346,11 +481,14 @@ static void nic_init_hw(struct nicpf *nic) cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG); if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL) nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL); + + return 0; } /* Channel parse 
index configuration */ static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg) { + struct hw_info *hw = nic->hw; u32 vnic, bgx, lmac, chan; u32 padd, cpi_count = 0; u64 cpi_base, cpi, rssi_base, rssi; @@ -360,9 +498,9 @@ static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg) bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]); lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]); - chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF); - cpi_base = (lmac * NIC_MAX_CPI_PER_LMAC) + (bgx * NIC_CPI_PER_BGX); - rssi_base = (lmac * nic->rss_ind_tbl_size) + (bgx * NIC_RSSI_PER_BGX); + chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx); + cpi_base = vnic * NIC_MAX_CPI_PER_LMAC; + rssi_base = vnic * hw->rss_ind_tbl_size; /* Rx channel configuration */ nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3), @@ -434,7 +572,7 @@ static void nic_send_rss_size(struct nicpf *nic, int vf) msg = (u64 *)&mbx; mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE; - mbx.rss_size.ind_tbl_size = nic->rss_ind_tbl_size; + mbx.rss_size.ind_tbl_size = nic->hw->rss_ind_tbl_size; nic_send_msg_to_vf(nic, vf, &mbx); } @@ -481,7 +619,7 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg) /* 4 level transmit side scheduler configutation * for TNS bypass mode * - * Sample configuration for SQ0 + * Sample configuration for SQ0 on 88xx * VNIC0-SQ0 -> TL4(0) -> TL3[0] -> TL2[0] -> TL1[0] -> BGX0 * VNIC1-SQ0 -> TL4(8) -> TL3[2] -> TL2[0] -> TL1[0] -> BGX0 * VNIC2-SQ0 -> TL4(16) -> TL3[4] -> TL2[1] -> TL1[0] -> BGX0 @@ -494,6 +632,7 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg) static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, struct sq_cfg_msg *sq) { + struct hw_info *hw = nic->hw; u32 bgx, lmac, chan; u32 tl2, tl3, tl4; u32 rr_quantum; @@ -512,21 +651,28 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, /* 24 bytes for FCS, IPG and preamble */ rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4); - if (!sq->sqs_mode) { - tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX); - } else { - for (svf = 0; svf < MAX_SQS_PER_VF; svf++) { - if (nic->vf_sqs[pqs_vnic][svf] == vnic) - break; + /* For 88xx 0-511 TL4 transmits via BGX0 and + * 512-1023 TL4s transmit via BGX1. + */ + if (hw->tl1_per_bgx) { + tl4 = bgx * (hw->tl4_cnt / hw->bgx_cnt); + if (!sq->sqs_mode) { + tl4 += (lmac * MAX_QUEUES_PER_QSET); + } else { + for (svf = 0; svf < MAX_SQS_PER_VF; svf++) { + if (nic->vf_sqs[pqs_vnic][svf] == vnic) + break; + } + tl4 += (MAX_LMAC_PER_BGX * MAX_QUEUES_PER_QSET); + tl4 += (lmac * MAX_QUEUES_PER_QSET * MAX_SQS_PER_VF); + tl4 += (svf * MAX_QUEUES_PER_QSET); } - tl4 = (MAX_LMAC_PER_BGX * NIC_TL4_PER_LMAC); - tl4 += (lmac * NIC_TL4_PER_LMAC * MAX_SQS_PER_VF); - tl4 += (svf * NIC_TL4_PER_LMAC); - tl4 += (bgx * NIC_TL4_PER_BGX); + } else { + tl4 = (vnic * MAX_QUEUES_PER_QSET); } tl4 += sq_idx; - tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3); + tl3 = tl4 / (hw->tl4_cnt / hw->tl3_cnt); nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 | ((u64)vnic << NIC_QS_ID_SHIFT) | ((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4); @@ -534,8 +680,19 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, ((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum); nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum); - chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF); - nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan); + + /* On 88xx 0-127 channels are for BGX0 and + * 127-255 channels for BGX1. 
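As a worked example of the TL4 selection above (assuming the usual 128 total VFs on an 88xx part, so tl4_cnt = 8 * 128 = 1024 and tl4_cnt / bgx_cnt = 512):

	/* 88xx, primary Qset on BGX1/LMAC2, send queue 3:
	 *	tl4 = 1 * 512 + 2 * MAX_QUEUES_PER_QSET + 3 = 531
	 *
	 * 81xx/83xx, VNIC5, send queue 3:
	 *	tl4 = 5 * MAX_QUEUES_PER_QSET + 3 = 43
	 *
	 * followed in both cases by tl3 = tl4 / (tl4_cnt / tl3_cnt).
	 */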
+ * + * On 81xx/83xx TL3_CHAN reg should be configured with channel + * within LMAC i.e 0-7 and not the actual channel number like on 88xx + */ + chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx); + if (hw->tl1_per_bgx) + nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan); + else + nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), 0); + /* Enable backpressure on the channel */ nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1); @@ -544,6 +701,16 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum); /* No priorities as of now */ nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00); + + /* Unlike 88xx where TL2s 0-31 transmits to TL1 '0' and rest to TL1 '1' + * on 81xx/83xx TL2 needs to be configured to transmit to one of the + * possible LMACs. + * + * This register doesn't exist on 88xx. + */ + if (!hw->tl1_per_bgx) + nic_reg_write(nic, NIC_PF_TL2_LMAC | (tl2 << 3), + lmac + (bgx * MAX_LMAC_PER_BGX)); } /* Send primary nicvf pointer to secondary QS's VF */ @@ -615,7 +782,7 @@ static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk) { int bgx_idx, lmac_idx; - if (lbk->vf_id > MAX_LMAC) + if (lbk->vf_id >= nic->num_vf_en) return -1; bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]); @@ -626,6 +793,67 @@ static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk) return 0; } +/* Reset statistics counters */ +static int nic_reset_stat_counters(struct nicpf *nic, + int vf, struct reset_stat_cfg *cfg) +{ + int i, stat, qnum; + u64 reg_addr; + + for (i = 0; i < RX_STATS_ENUM_LAST; i++) { + if (cfg->rx_stat_mask & BIT(i)) { + reg_addr = NIC_PF_VNIC_0_127_RX_STAT_0_13 | + (vf << NIC_QS_ID_SHIFT) | + (i << 3); + nic_reg_write(nic, reg_addr, 0); + } + } + + for (i = 0; i < TX_STATS_ENUM_LAST; i++) { + if (cfg->tx_stat_mask & BIT(i)) { + reg_addr = NIC_PF_VNIC_0_127_TX_STAT_0_4 | + (vf << NIC_QS_ID_SHIFT) | + (i << 3); + nic_reg_write(nic, reg_addr, 0); + } + } + + for (i = 0; i <= 15; i++) { + qnum = i >> 1; + stat = i & 1 ? 
1 : 0; + reg_addr = (vf << NIC_QS_ID_SHIFT) | + (qnum << NIC_Q_NUM_SHIFT) | (stat << 3); + if (cfg->rq_stat_mask & BIT(i)) { + reg_addr |= NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1; + nic_reg_write(nic, reg_addr, 0); + } + if (cfg->sq_stat_mask & BIT(i)) { + reg_addr |= NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1; + nic_reg_write(nic, reg_addr, 0); + } + } + return 0; +} + +static void nic_enable_tunnel_parsing(struct nicpf *nic, int vf) +{ + u64 prot_def = (IPV6_PROT << 32) | (IPV4_PROT << 16) | ET_PROT; + u64 vxlan_prot_def = (IPV6_PROT_DEF << 32) | + (IPV4_PROT_DEF) << 16 | ET_PROT_DEF; + + /* Configure tunnel parsing parameters */ + nic_reg_write(nic, NIC_PF_RX_GENEVE_DEF, + (1ULL << 63 | UDP_GENEVE_PORT_NUM)); + nic_reg_write(nic, NIC_PF_RX_GENEVE_PROT_DEF, + ((7ULL << 61) | prot_def)); + nic_reg_write(nic, NIC_PF_RX_NVGRE_PROT_DEF, + ((7ULL << 61) | prot_def)); + nic_reg_write(nic, NIC_PF_RX_VXLAN_DEF_0_1, + ((1ULL << 63) | UDP_VXLAN_PORT_NUM)); + nic_reg_write(nic, NIC_PF_RX_VXLAN_PROT_DEF, + ((0xfULL << 60) | vxlan_prot_def)); +} + static void nic_enable_vf(struct nicpf *nic, int vf, bool enable) { int bgx, lmac; @@ -664,18 +892,17 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) mbx_addr += sizeof(u64); } - dev_dbg(&nic->pdev->dev, "%s: Mailbox msg %d from VF%d\n", + dev_dbg(&nic->pdev->dev, "%s: Mailbox msg 0x%02x from VF%d\n", __func__, mbx.msg.msg, vf); switch (mbx.msg.msg) { case NIC_MBOX_MSG_READY: nic_mbx_send_ready(nic, vf); - if (vf < MAX_LMAC) { + if (vf < nic->num_vf_en) { nic->link[vf] = 0; nic->duplex[vf] = 0; nic->speed[vf] = 0; } - ret = 1; - break; + goto unlock; case NIC_MBOX_MSG_QS_CFG: reg_addr = NIC_PF_QSET_0_127_CFG | (mbx.qs.num << NIC_QS_ID_SHIFT); @@ -693,6 +920,15 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) (mbx.rq.qs_num << NIC_QS_ID_SHIFT) | (mbx.rq.rq_num << NIC_Q_NUM_SHIFT); nic_reg_write(nic, reg_addr, mbx.rq.cfg); + /* Enable CQE_RX2_S extension in CQE_RX descriptor. + * This gets appended by default on 81xx/83xx chips, + * for consistency enabling the same on 88xx pass2 + * where this is introduced. 
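The statistic masks consumed by nic_reset_stat_counters() above pack two bits per queue (STAT0 and STAT1), as documented in struct reset_stat_cfg earlier in this patch. A minimal VF-side sketch of a partial reset request, assuming only the mailbox types defined here (the nicvf_queues.c hunk later in this patch sends the all-RQs 0xFFFF case):

static void example_reset_rq0_rq1_stats(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.reset_stat.msg          = NIC_MBOX_MSG_RESET_STAT_COUNTER;
	mbx.reset_stat.rq_stat_mask = 0x000F;	/* RQ0 and RQ1, STAT0 + STAT1 */
	mbx.reset_stat.sq_stat_mask = 0x0003;	/* SQ0 only, both counters */

	nicvf_send_msg_to_pf(nic, &mbx);
}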
+ */ + if (pass2_silicon(nic->pdev)) + nic_reg_write(nic, NIC_PF_RX_CFG, 0x01); + if (!pass1_silicon(nic->pdev)) + nic_enable_tunnel_parsing(nic, vf); break; case NIC_MBOX_MSG_RQ_BP_CFG: reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG | @@ -717,8 +953,10 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq); break; case NIC_MBOX_MSG_SET_MAC: - if (vf >= nic->num_vf_en) + if (vf >= nic->num_vf_en) { + ret = -1; /* NACK */ break; + } lmac = mbx.mac.vf_id; bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]); lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]); @@ -767,25 +1005,38 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) case NIC_MBOX_MSG_LOOPBACK: ret = nic_config_loopback(nic, &mbx.lbk); break; + case NIC_MBOX_MSG_RESET_STAT_COUNTER: + ret = nic_reset_stat_counters(nic, vf, &mbx.reset_stat); + break; default: dev_err(&nic->pdev->dev, "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg); break; } - if (!ret) + if (!ret) { nic_mbx_send_ack(nic, vf); - else if (mbx.msg.msg != NIC_MBOX_MSG_READY) + } else if (mbx.msg.msg != NIC_MBOX_MSG_READY) { + dev_err(&nic->pdev->dev, "NACK for MBOX 0x%02x from VF %d\n", + mbx.msg.msg, vf); nic_mbx_send_nack(nic, vf); + } unlock: nic->mbx_lock[vf] = false; } -static void nic_mbx_intr_handler (struct nicpf *nic, int mbx) +static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq) { + struct nicpf *nic = (struct nicpf *)nic_irq; + int mbx; u64 intr; u8 vf, vf_per_mbx_reg = 64; + if (irq == nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector) + mbx = 0; + else + mbx = 1; + intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3)); dev_dbg(&nic->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr); for (vf = 0; vf < vf_per_mbx_reg; vf++) { @@ -797,23 +1048,6 @@ static void nic_mbx_intr_handler (struct nicpf *nic, int mbx) nic_clear_mbx_intr(nic, vf, mbx); } } -} - -static irqreturn_t nic_mbx0_intr_handler (int irq, void *nic_irq) -{ - struct nicpf *nic = (struct nicpf *)nic_irq; - - nic_mbx_intr_handler(nic, 0); - - return IRQ_HANDLED; -} - -static irqreturn_t nic_mbx1_intr_handler (int irq, void *nic_irq) -{ - struct nicpf *nic = (struct nicpf *)nic_irq; - - nic_mbx_intr_handler(nic, 1); - return IRQ_HANDLED; } @@ -821,7 +1055,13 @@ static int nic_enable_msix(struct nicpf *nic) { int i, ret; - nic->num_vec = NIC_PF_MSIX_VECTORS; + nic->num_vec = pci_msix_vec_count(nic->pdev); + + nic->msix_entries = kmalloc_array(nic->num_vec, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!nic->msix_entries) + return -ENOMEM; for (i = 0; i < nic->num_vec; i++) nic->msix_entries[i].entry = i; @@ -829,8 +1069,9 @@ static int nic_enable_msix(struct nicpf *nic) ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec); if (ret) { dev_err(&nic->pdev->dev, - "Request for #%d msix vectors failed\n", - nic->num_vec); + "Request for #%d msix vectors failed, returned %d\n", + nic->num_vec, ret); + kfree(nic->msix_entries); return ret; } @@ -842,6 +1083,7 @@ static void nic_disable_msix(struct nicpf *nic) { if (nic->msix_enabled) { pci_disable_msix(nic->pdev); + kfree(nic->msix_entries); nic->msix_enabled = 0; nic->num_vec = 0; } @@ -860,27 +1102,26 @@ static void nic_free_all_interrupts(struct nicpf *nic) static int nic_register_interrupts(struct nicpf *nic) { - int ret; + int i, ret; /* Enable MSI-X */ ret = nic_enable_msix(nic); if (ret) return ret; - /* Register mailbox interrupt handlers */ - ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector, - nic_mbx0_intr_handler, 0, 
"NIC Mbox0", nic); - if (ret) - goto fail; - - nic->irq_allocated[NIC_PF_INTR_ID_MBOX0] = true; + /* Register mailbox interrupt handler */ + for (i = NIC_PF_INTR_ID_MBOX0; i < nic->num_vec; i++) { + sprintf(nic->irq_name[i], + "NICPF Mbox%d", (i - NIC_PF_INTR_ID_MBOX0)); - ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX1].vector, - nic_mbx1_intr_handler, 0, "NIC Mbox1", nic); - if (ret) - goto fail; + ret = request_irq(nic->msix_entries[i].vector, + nic_mbx_intr_handler, 0, + nic->irq_name[i], nic); + if (ret) + goto fail; - nic->irq_allocated[NIC_PF_INTR_ID_MBOX1] = true; + nic->irq_allocated[i] = true; + } /* Enable mailbox interrupt */ nic_enable_mbx_intr(nic); @@ -889,6 +1130,7 @@ static int nic_register_interrupts(struct nicpf *nic) fail: dev_err(&nic->pdev->dev, "Request irq failed\n"); nic_free_all_interrupts(nic); + nic_disable_msix(nic); return ret; } @@ -903,6 +1145,12 @@ static int nic_num_sqs_en(struct nicpf *nic, int vf_en) int pos, sqs_per_vf = MAX_SQS_PER_VF_SINGLE_NODE; u16 total_vf; + /* Secondary Qsets are needed only if CPU count is + * morethan MAX_QUEUES_PER_QSET. + */ + if (num_online_cpus() <= MAX_QUEUES_PER_QSET) + return 0; + /* Check if its a multi-node environment */ if (nr_node_ids > 1) sqs_per_vf = MAX_SQS_PER_VF; @@ -1008,6 +1256,12 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!nic) return -ENOMEM; + nic->hw = devm_kzalloc(dev, sizeof(struct hw_info), GFP_KERNEL); + if (!nic->hw) { + devm_kfree(dev, nic); + return -ENOMEM; + } + pci_set_drvdata(pdev, nic); nic->pdev = pdev; @@ -1047,13 +1301,12 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) nic->node = nic_get_node_id(pdev); - nic_set_lmac_vf_mapping(nic); - /* Initialize hardware */ - nic_init_hw(nic); + err = nic_init_hw(nic); + if (err) + goto err_release_regions; - /* Set RSS TBL size for each VF */ - nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE; + nic_set_lmac_vf_mapping(nic); /* Register interrupts */ err = nic_register_interrupts(nic); @@ -1086,6 +1339,9 @@ err_unregister_interrupts: err_release_regions: pci_release_regions(pdev); err_disable_device: + nic_free_lmacmem(nic); + devm_kfree(dev, nic->hw); + devm_kfree(dev, nic); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); return err; @@ -1106,6 +1362,11 @@ static void nic_remove(struct pci_dev *pdev) nic_unregister_interrupts(nic); pci_release_regions(pdev); + + nic_free_lmacmem(nic); + devm_kfree(&pdev->dev, nic->hw); + devm_kfree(&pdev->dev, nic); + pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h index fab35a593898..edf779f5a227 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_reg.h +++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h @@ -36,6 +36,20 @@ #define NIC_PF_MAILBOX_ENA_W1C (0x0450) #define NIC_PF_MAILBOX_ENA_W1S (0x0470) #define NIC_PF_RX_ETYPE_0_7 (0x0500) +#define NIC_PF_RX_GENEVE_DEF (0x0580) +#define UDP_GENEVE_PORT_NUM 0x17C1ULL +#define NIC_PF_RX_GENEVE_PROT_DEF (0x0588) +#define IPV6_PROT 0x86DDULL +#define IPV4_PROT 0x800ULL +#define ET_PROT 0x6558ULL +#define NIC_PF_RX_NVGRE_PROT_DEF (0x0598) +#define NIC_PF_RX_VXLAN_DEF_0_1 (0x05A0) +#define UDP_VXLAN_PORT_NUM 0x12B5 +#define NIC_PF_RX_VXLAN_PROT_DEF (0x05B0) +#define IPV6_PROT_DEF 0x2ULL +#define IPV4_PROT_DEF 0x1ULL +#define ET_PROT_DEF 0x3ULL +#define NIC_PF_RX_CFG (0x05D0) #define NIC_PF_PKIND_0_15_CFG (0x0600) #define NIC_PF_ECC0_FLIP0 (0x1000) #define NIC_PF_ECC1_FLIP0 
(0x1008) @@ -103,6 +117,7 @@ #define NIC_PF_SW_SYNC_RX_DONE (0x490008) #define NIC_PF_TL2_0_63_CFG (0x500000) #define NIC_PF_TL2_0_63_PRI (0x520000) +#define NIC_PF_TL2_LMAC (0x540000) #define NIC_PF_TL2_0_63_SH_STATUS (0x580000) #define NIC_PF_TL3A_0_63_CFG (0x5F0000) #define NIC_PF_TL3_0_255_CFG (0x600000) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index a19e73f11d73..06c014edf762 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -29,10 +29,20 @@ static const struct pci_device_id nicvf_id_table[] = { { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_VF, - PCI_VENDOR_ID_CAVIUM, 0xA134) }, + PCI_VENDOR_ID_CAVIUM, + PCI_SUBSYS_DEVID_88XX_NIC_VF) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF, - PCI_VENDOR_ID_CAVIUM, 0xA11E) }, + PCI_VENDOR_ID_CAVIUM, + PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, + PCI_DEVICE_ID_THUNDER_NIC_VF, + PCI_VENDOR_ID_CAVIUM, + PCI_SUBSYS_DEVID_81XX_NIC_VF) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, + PCI_DEVICE_ID_THUNDER_NIC_VF, + PCI_VENDOR_ID_CAVIUM, + PCI_SUBSYS_DEVID_83XX_NIC_VF) }, { 0, } /* end of table */ }; @@ -134,15 +144,19 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx) /* Wait for previous message to be acked, timeout 2sec */ while (!nic->pf_acked) { - if (nic->pf_nacked) + if (nic->pf_nacked) { + netdev_err(nic->netdev, + "PF NACK to mbox msg 0x%02x from VF%d\n", + (mbx->msg.msg & 0xFF), nic->vf_id); return -EINVAL; + } msleep(sleep); if (nic->pf_acked) break; timeout -= sleep; if (!timeout) { netdev_err(nic->netdev, - "PF didn't ack to mbox msg %d from VF%d\n", + "PF didn't ACK to mbox msg 0x%02x from VF%d\n", (mbx->msg.msg & 0xFF), nic->vf_id); return -EBUSY; } @@ -352,13 +366,7 @@ static int nicvf_rss_init(struct nicvf *nic) rss->enable = true; - /* Using the HW reset value for now */ - rss->key[0] = 0xFEED0BADFEED0BADULL; - rss->key[1] = 0xFEED0BADFEED0BADULL; - rss->key[2] = 0xFEED0BADFEED0BADULL; - rss->key[3] = 0xFEED0BADFEED0BADULL; - rss->key[4] = 0xFEED0BADFEED0BADULL; - + netdev_rss_key_fill(rss->key, RSS_HASH_KEY_SIZE * sizeof(u64)); nicvf_set_rss_key(nic); rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA; @@ -507,7 +515,8 @@ static int nicvf_init_resources(struct nicvf *nic) static void nicvf_snd_pkt_handler(struct net_device *netdev, struct cmp_queue *cq, - struct cqe_send_t *cqe_tx, int cqe_type) + struct cqe_send_t *cqe_tx, + int cqe_type, int budget) { struct sk_buff *skb = NULL; struct nicvf *nic = netdev_priv(netdev); @@ -531,7 +540,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev, if (skb) { nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); prefetch(skb); - dev_consume_skb_any(skb); + napi_consume_skb(skb, budget); sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL; } else { /* In case of HW TSO, HW sends a CQE for each segment of a TSO @@ -686,7 +695,8 @@ loop: break; case CQE_TYPE_SEND: nicvf_snd_pkt_handler(netdev, cq, - (void *)cq_desc, CQE_TYPE_SEND); + (void *)cq_desc, CQE_TYPE_SEND, + budget); tx_done++; break; case CQE_TYPE_INVALID: @@ -928,16 +938,19 @@ static int nicvf_register_interrupts(struct nicvf *nic) int vector; for_each_cq_irq(irq) - sprintf(nic->irq_name[irq], "NICVF%d CQ%d", - nic->vf_id, irq); + sprintf(nic->irq_name[irq], "%s-rxtx-%d", + nic->pnicvf->netdev->name, + nicvf_netdev_qidx(nic, irq)); for_each_sq_irq(irq) - sprintf(nic->irq_name[irq], "NICVF%d 
SQ%d", - nic->vf_id, irq - NICVF_INTR_ID_SQ); + sprintf(nic->irq_name[irq], "%s-sq-%d", + nic->pnicvf->netdev->name, + nicvf_netdev_qidx(nic, irq - NICVF_INTR_ID_SQ)); for_each_rbdr_irq(irq) - sprintf(nic->irq_name[irq], "NICVF%d RBDR%d", - nic->vf_id, irq - NICVF_INTR_ID_RBDR); + sprintf(nic->irq_name[irq], "%s-rbdr-%d", + nic->pnicvf->netdev->name, + nic->sqs_mode ? (nic->sqs_id + 1) : 0); /* Register CQ interrupts */ for (irq = 0; irq < nic->qs->cq_cnt; irq++) { @@ -961,8 +974,9 @@ static int nicvf_register_interrupts(struct nicvf *nic) } /* Register QS error interrupt */ - sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR], - "NICVF%d Qset error", nic->vf_id); + sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR], "%s-qset-err-%d", + nic->pnicvf->netdev->name, + nic->sqs_mode ? (nic->sqs_id + 1) : 0); irq = NICVF_INTR_ID_QS_ERR; ret = request_irq(nic->msix_entries[irq].vector, nicvf_qs_err_intr_handler, @@ -1191,7 +1205,7 @@ int nicvf_open(struct net_device *netdev) } /* Check if we got MAC address from PF or else generate a radom MAC */ - if (is_zero_ether_addr(netdev->dev_addr)) { + if (!nic->sqs_mode && is_zero_ether_addr(netdev->dev_addr)) { eth_hw_addr_random(netdev); nicvf_hw_set_mac_addr(nic, netdev); } @@ -1527,14 +1541,13 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_release_regions; } - qcount = MAX_CMP_QUEUES_PER_QS; + qcount = netif_get_num_default_rss_queues(); /* Restrict multiqset support only for host bound VFs */ if (pdev->is_virtfn) { /* Set max number of queues per VF */ - qcount = roundup(num_online_cpus(), MAX_CMP_QUEUES_PER_QS); - qcount = min(qcount, - (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS); + qcount = min_t(int, num_online_cpus(), + (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS); } netdev = alloc_etherdev_mqs(sizeof(struct nicvf), qcount, qcount); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 0ff8e60deccb..7d90856c9783 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -479,6 +479,16 @@ void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features) NIC_QSET_RQ_GEN_CFG, 0, rq_cfg); } +static void nicvf_reset_rcv_queue_stats(struct nicvf *nic) +{ + union nic_mbx mbx = {}; + + /* Reset all RXQ's stats */ + mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER; + mbx.reset_stat.rq_stat_mask = 0xFFFF; + nicvf_send_msg_to_pf(nic, &mbx); +} + /* Configures receive queue */ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx, bool enable) @@ -762,10 +772,10 @@ int nicvf_set_qset_resources(struct nicvf *nic) nic->qs = qs; /* Set count of each queue */ - qs->rbdr_cnt = RBDR_CNT; - qs->rq_cnt = RCV_QUEUE_CNT; - qs->sq_cnt = SND_QUEUE_CNT; - qs->cq_cnt = CMP_QUEUE_CNT; + qs->rbdr_cnt = DEFAULT_RBDR_CNT; + qs->rq_cnt = min_t(u8, MAX_RCV_QUEUES_PER_QS, num_online_cpus()); + qs->sq_cnt = min_t(u8, MAX_SND_QUEUES_PER_QS, num_online_cpus()); + qs->cq_cnt = max_t(u8, qs->rq_cnt, qs->sq_cnt); /* Set queue lengths */ qs->rbdr_len = RCV_BUF_COUNT; @@ -812,6 +822,11 @@ int nicvf_config_data_transfer(struct nicvf *nic, bool enable) nicvf_free_resources(nic); } + /* Reset RXQ's stats. + * SQ's stats will get reset automatically once SQ is reset. 
+ */ + nicvf_reset_rcv_queue_stats(nic); + return 0; } @@ -1184,13 +1199,23 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx) int frag; int payload_len = 0; struct sk_buff *skb = NULL; - struct sk_buff *skb_frag = NULL; - struct sk_buff *prev_frag = NULL; + struct page *page; + int offset; u16 *rb_lens = NULL; u64 *rb_ptrs = NULL; rb_lens = (void *)cqe_rx + (3 * sizeof(u64)); - rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64)); + /* Except 88xx pass1 on all other chips CQE_RX2_S is added to + * CQE_RX at word6, hence buffer pointers move by word + * + * Use existing 'hw_tso' flag which will be set for all chips + * except 88xx pass1 instead of a additional cache line + * access (or miss) by using pci dev's revision. + */ + if (!nic->hw_tso) + rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64)); + else + rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64)); netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n", __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz); @@ -1208,22 +1233,10 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx) skb_put(skb, payload_len); } else { /* Add fragments */ - skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs, - payload_len); - if (!skb_frag) { - dev_kfree_skb(skb); - return NULL; - } - - if (!skb_shinfo(skb)->frag_list) - skb_shinfo(skb)->frag_list = skb_frag; - else - prev_frag->next = skb_frag; - - prev_frag = skb_frag; - skb->len += payload_len; - skb->data_len += payload_len; - skb_frag->len = payload_len; + page = virt_to_page(phys_to_virt(*rb_ptrs)); + offset = phys_to_virt(*rb_ptrs) - page_address(page); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + offset, payload_len, RCV_FRAG_LEN); } /* Next buffer pointer */ rb_ptrs++; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index 6673e1133523..869f3386028b 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h @@ -57,10 +57,7 @@ #define CMP_QUEUE_SIZE6 6ULL /* 64K entries */ /* Default queue count per QS, its lengths and threshold values */ -#define RBDR_CNT 1 -#define RCV_QUEUE_CNT 8 -#define SND_QUEUE_CNT 8 -#define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */ +#define DEFAULT_RBDR_CNT 1 #define SND_QSIZE SND_QUEUE_SIZE2 #define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10)) diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 63a39ac97d53..8bbaedbb7b94 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -28,6 +28,9 @@ struct lmac { struct bgx *bgx; int dmac; u8 mac[ETH_ALEN]; + u8 lmac_type; + u8 lane_to_sds; + bool use_training; bool link_up; int lmacid; /* ID within BGX */ int lmacid_bd; /* ID on board */ @@ -43,14 +46,13 @@ struct lmac { struct bgx { u8 bgx_id; - u8 qlm_mode; struct lmac lmac[MAX_LMAC_PER_BGX]; int lmac_count; - int lmac_type; - int lane_to_sds; - int use_training; + u8 max_lmac; void __iomem *reg_base; struct pci_dev *pdev; + bool is_dlm; + bool is_rgx; }; static struct bgx *bgx_vnic[MAX_BGX_THUNDER]; @@ -61,6 +63,7 @@ static int bgx_xaui_check_link(struct lmac *lmac); /* Supported devices */ static const struct pci_device_id bgx_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) }, + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_RGX) }, { 0, } /* end of table */ }; @@ -124,8 +127,8 @@ unsigned bgx_get_map(int node) 
int i; unsigned map = 0; - for (i = 0; i < MAX_BGX_PER_CN88XX; i++) { - if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i]) + for (i = 0; i < MAX_BGX_PER_NODE; i++) { + if (bgx_vnic[(node * MAX_BGX_PER_NODE) + i]) map |= (1 << i); } @@ -138,7 +141,7 @@ int bgx_get_lmac_count(int node, int bgx_idx) { struct bgx *bgx; - bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; if (bgx) return bgx->lmac_count; @@ -153,7 +156,7 @@ void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status) struct bgx *bgx; struct lmac *lmac; - bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; if (!bgx) return; @@ -166,7 +169,7 @@ EXPORT_SYMBOL(bgx_get_lmac_link_state); const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid) { - struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; if (bgx) return bgx->lmac[lmacid].mac; @@ -177,7 +180,7 @@ EXPORT_SYMBOL(bgx_get_lmac_mac); void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac) { - struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; if (!bgx) return; @@ -188,11 +191,13 @@ EXPORT_SYMBOL(bgx_set_lmac_mac); void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable) { - struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; + struct lmac *lmac; u64 cfg; if (!bgx) return; + lmac = &bgx->lmac[lmacid]; cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); if (enable) @@ -200,6 +205,9 @@ void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable) else cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN); bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg); + + if (bgx->is_rgx) + xcv_setup_link(enable ? 
lmac->link_up : 0, lmac->last_speed); } EXPORT_SYMBOL(bgx_lmac_rx_tx_enable); @@ -266,9 +274,12 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac) port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG); - /* renable lmac */ + /* Re-enable lmac */ cmr_cfg |= CMR_EN; bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg); + + if (bgx->is_rgx && (cmr_cfg & (CMR_PKT_RX_EN | CMR_PKT_TX_EN))) + xcv_setup_link(lmac->link_up, lmac->last_speed); } static void bgx_lmac_handler(struct net_device *netdev) @@ -314,7 +325,7 @@ u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx) { struct bgx *bgx; - bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; if (!bgx) return 0; @@ -328,7 +339,7 @@ u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx) { struct bgx *bgx; - bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; if (!bgx) return 0; @@ -356,7 +367,7 @@ void bgx_lmac_internal_loopback(int node, int bgx_idx, struct lmac *lmac; u64 cfg; - bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; if (!bgx) return; @@ -379,8 +390,9 @@ void bgx_lmac_internal_loopback(int node, int bgx_idx, } EXPORT_SYMBOL(bgx_lmac_internal_loopback); -static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid) +static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac) { + int lmacid = lmac->lmacid; u64 cfg; bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30); @@ -409,18 +421,29 @@ static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid) cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN); bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg); - if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS, - PCS_MRX_STATUS_AN_CPT, false)) { - dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n"); - return -1; + if (lmac->lmac_type == BGX_MODE_QSGMII) { + /* Disable disparity check for QSGMII */ + cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL); + cfg &= ~PCS_MISC_CTL_DISP_EN; + bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, cfg); + return 0; + } + + if (lmac->lmac_type == BGX_MODE_SGMII) { + if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS, + PCS_MRX_STATUS_AN_CPT, false)) { + dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n"); + return -1; + } } return 0; } -static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type) +static int bgx_lmac_xaui_init(struct bgx *bgx, struct lmac *lmac) { u64 cfg; + int lmacid = lmac->lmacid; /* Reset SPU */ bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET); @@ -436,12 +459,14 @@ static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type) bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER); /* Set interleaved running disparity for RXAUI */ - if (bgx->lmac_type != BGX_MODE_RXAUI) - bgx_reg_modify(bgx, lmacid, - BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS); - else + if (lmac->lmac_type == BGX_MODE_RXAUI) bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, - SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP); + SPU_MISC_CTL_INTLV_RDISP); + + /* Clear receive packet disable */ + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL); + cfg &= ~SPU_MISC_CTL_RX_DIS; + bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg); /* clear all interrupts */ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT); @@ -451,7 +476,7 @@ static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type) cfg = bgx_reg_read(bgx, lmacid, 
BGX_SPUX_INT); bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg); - if (bgx->use_training) { + if (lmac->use_training) { bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00); bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00); bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00); @@ -474,9 +499,9 @@ static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type) bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg); cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV); - if (bgx->lmac_type == BGX_MODE_10G_KR) + if (lmac->lmac_type == BGX_MODE_10G_KR) cfg |= (1 << 23); - else if (bgx->lmac_type == BGX_MODE_40G_KR) + else if (lmac->lmac_type == BGX_MODE_40G_KR) cfg |= (1 << 24); else cfg &= ~((1 << 23) | (1 << 24)); @@ -511,11 +536,10 @@ static int bgx_xaui_check_link(struct lmac *lmac) { struct bgx *bgx = lmac->bgx; int lmacid = lmac->lmacid; - int lmac_type = bgx->lmac_type; + int lmac_type = lmac->lmac_type; u64 cfg; - bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS); - if (bgx->use_training) { + if (lmac->use_training) { cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT); if (!(cfg & (1ull << 13))) { cfg = (1ull << 13) | (1ull << 14); @@ -556,7 +580,7 @@ static int bgx_xaui_check_link(struct lmac *lmac) BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT); if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) { dev_err(&bgx->pdev->dev, "Receive fault, retry training\n"); - if (bgx->use_training) { + if (lmac->use_training) { cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT); if (!(cfg & (1ull << 13))) { cfg = (1ull << 13) | (1ull << 14); @@ -584,11 +608,6 @@ static int bgx_xaui_check_link(struct lmac *lmac) return -1; } - /* Clear receive packet disable */ - cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL); - cfg &= ~SPU_MISC_CTL_RX_DIS; - bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg); - /* Check for MAC RX faults */ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL); /* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */ @@ -599,7 +618,7 @@ static int bgx_xaui_check_link(struct lmac *lmac) /* Rx local/remote fault seen. 
* Do lmac reinit to see if condition recovers */ - bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type); + bgx_lmac_xaui_init(bgx, lmac); return -1; } @@ -623,7 +642,7 @@ static void bgx_poll_for_link(struct work_struct *work) if ((spu_link & SPU_STATUS1_RCV_LNK) && !(smu_link & SMU_RX_CTL_STATUS)) { lmac->link_up = 1; - if (lmac->bgx->lmac_type == BGX_MODE_XLAUI) + if (lmac->lmac_type == BGX_MODE_XLAUI) lmac->last_speed = 40000; else lmac->last_speed = 10000; @@ -649,6 +668,16 @@ static void bgx_poll_for_link(struct work_struct *work) queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2); } +static int phy_interface_mode(u8 lmac_type) +{ + if (lmac_type == BGX_MODE_QSGMII) + return PHY_INTERFACE_MODE_QSGMII; + if (lmac_type == BGX_MODE_RGMII) + return PHY_INTERFACE_MODE_RGMII; + + return PHY_INTERFACE_MODE_SGMII; +} + static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) { struct lmac *lmac; @@ -657,13 +686,15 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) lmac = &bgx->lmac[lmacid]; lmac->bgx = bgx; - if (bgx->lmac_type == BGX_MODE_SGMII) { + if ((lmac->lmac_type == BGX_MODE_SGMII) || + (lmac->lmac_type == BGX_MODE_QSGMII) || + (lmac->lmac_type == BGX_MODE_RGMII)) { lmac->is_sgmii = 1; - if (bgx_lmac_sgmii_init(bgx, lmacid)) + if (bgx_lmac_sgmii_init(bgx, lmac)) return -1; } else { lmac->is_sgmii = 0; - if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type)) + if (bgx_lmac_xaui_init(bgx, lmac)) return -1; } @@ -685,10 +716,10 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) /* Restore default cfg, incase low level firmware changed it */ bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03); - if ((bgx->lmac_type != BGX_MODE_XFI) && - (bgx->lmac_type != BGX_MODE_XLAUI) && - (bgx->lmac_type != BGX_MODE_40G_KR) && - (bgx->lmac_type != BGX_MODE_10G_KR)) { + if ((lmac->lmac_type != BGX_MODE_XFI) && + (lmac->lmac_type != BGX_MODE_XLAUI) && + (lmac->lmac_type != BGX_MODE_40G_KR) && + (lmac->lmac_type != BGX_MODE_10G_KR)) { if (!lmac->phydev) return -ENODEV; @@ -696,7 +727,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) if (phy_connect_direct(&lmac->netdev, lmac->phydev, bgx_lmac_handler, - PHY_INTERFACE_MODE_SGMII)) + phy_interface_mode(lmac->lmac_type))) return -ENODEV; phy_start_aneg(lmac->phydev); @@ -753,76 +784,19 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid) bgx_flush_dmac_addrs(bgx, lmacid); - if ((bgx->lmac_type != BGX_MODE_XFI) && - (bgx->lmac_type != BGX_MODE_XLAUI) && - (bgx->lmac_type != BGX_MODE_40G_KR) && - (bgx->lmac_type != BGX_MODE_10G_KR) && lmac->phydev) + if ((lmac->lmac_type != BGX_MODE_XFI) && + (lmac->lmac_type != BGX_MODE_XLAUI) && + (lmac->lmac_type != BGX_MODE_40G_KR) && + (lmac->lmac_type != BGX_MODE_10G_KR) && lmac->phydev) phy_disconnect(lmac->phydev); lmac->phydev = NULL; } -static void bgx_set_num_ports(struct bgx *bgx) -{ - u64 lmac_count; - - switch (bgx->qlm_mode) { - case QLM_MODE_SGMII: - bgx->lmac_count = 4; - bgx->lmac_type = BGX_MODE_SGMII; - bgx->lane_to_sds = 0; - break; - case QLM_MODE_XAUI_1X4: - bgx->lmac_count = 1; - bgx->lmac_type = BGX_MODE_XAUI; - bgx->lane_to_sds = 0xE4; - break; - case QLM_MODE_RXAUI_2X2: - bgx->lmac_count = 2; - bgx->lmac_type = BGX_MODE_RXAUI; - bgx->lane_to_sds = 0xE4; - break; - case QLM_MODE_XFI_4X1: - bgx->lmac_count = 4; - bgx->lmac_type = BGX_MODE_XFI; - bgx->lane_to_sds = 0; - break; - case QLM_MODE_XLAUI_1X4: - bgx->lmac_count = 1; - bgx->lmac_type = BGX_MODE_XLAUI; - bgx->lane_to_sds = 0xE4; - break; - case QLM_MODE_10G_KR_4X1: - bgx->lmac_count = 4; - bgx->lmac_type = 
BGX_MODE_10G_KR; - bgx->lane_to_sds = 0; - bgx->use_training = 1; - break; - case QLM_MODE_40G_KR4_1X4: - bgx->lmac_count = 1; - bgx->lmac_type = BGX_MODE_40G_KR; - bgx->lane_to_sds = 0xE4; - bgx->use_training = 1; - break; - default: - bgx->lmac_count = 0; - break; - } - - /* Check if low level firmware has programmed LMAC count - * based on board type, if yes consider that otherwise - * the default static values - */ - lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7; - if (lmac_count != 4) - bgx->lmac_count = lmac_count; -} - static void bgx_init_hw(struct bgx *bgx) { int i; - - bgx_set_num_ports(bgx); + struct lmac *lmac; bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP); if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS)) @@ -830,17 +804,9 @@ static void bgx_init_hw(struct bgx *bgx) /* Set lmac type and lane2serdes mapping */ for (i = 0; i < bgx->lmac_count; i++) { - if (bgx->lmac_type == BGX_MODE_RXAUI) { - if (i) - bgx->lane_to_sds = 0x0e; - else - bgx->lane_to_sds = 0x04; - bgx_reg_write(bgx, i, BGX_CMRX_CFG, - (bgx->lmac_type << 8) | bgx->lane_to_sds); - continue; - } + lmac = &bgx->lmac[i]; bgx_reg_write(bgx, i, BGX_CMRX_CFG, - (bgx->lmac_type << 8) | (bgx->lane_to_sds + i)); + (lmac->lmac_type << 8) | lmac->lane_to_sds); bgx->lmac[i].lmacid_bd = lmac_count; lmac_count++; } @@ -863,55 +829,212 @@ static void bgx_init_hw(struct bgx *bgx) bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00); } -static void bgx_get_qlm_mode(struct bgx *bgx) +static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac) +{ + return (u8)(bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG) & 0xFF); +} + +static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid) { struct device *dev = &bgx->pdev->dev; - int lmac_type; - int train_en; + struct lmac *lmac; + char str[20]; + u8 dlm; - /* Read LMAC0 type to figure out QLM mode - * This is configured by low level firmware - */ - lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG); - lmac_type = (lmac_type >> 8) & 0x07; + if (lmacid > bgx->max_lmac) + return; - train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) & - SPU_PMD_CRTL_TRAIN_EN; + lmac = &bgx->lmac[lmacid]; + dlm = (lmacid / 2) + (bgx->bgx_id * 2); + if (!bgx->is_dlm) + sprintf(str, "BGX%d QLM mode", bgx->bgx_id); + else + sprintf(str, "BGX%d DLM%d mode", bgx->bgx_id, dlm); - switch (lmac_type) { + switch (lmac->lmac_type) { case BGX_MODE_SGMII: - bgx->qlm_mode = QLM_MODE_SGMII; - dev_info(dev, "BGX%d QLM mode: SGMII\n", bgx->bgx_id); + dev_info(dev, "%s: SGMII\n", (char *)str); break; case BGX_MODE_XAUI: - bgx->qlm_mode = QLM_MODE_XAUI_1X4; - dev_info(dev, "BGX%d QLM mode: XAUI\n", bgx->bgx_id); + dev_info(dev, "%s: XAUI\n", (char *)str); break; case BGX_MODE_RXAUI: - bgx->qlm_mode = QLM_MODE_RXAUI_2X2; - dev_info(dev, "BGX%d QLM mode: RXAUI\n", bgx->bgx_id); + dev_info(dev, "%s: RXAUI\n", (char *)str); break; case BGX_MODE_XFI: - if (!train_en) { - bgx->qlm_mode = QLM_MODE_XFI_4X1; - dev_info(dev, "BGX%d QLM mode: XFI\n", bgx->bgx_id); - } else { - bgx->qlm_mode = QLM_MODE_10G_KR_4X1; - dev_info(dev, "BGX%d QLM mode: 10G_KR\n", bgx->bgx_id); - } + if (!lmac->use_training) + dev_info(dev, "%s: XFI\n", (char *)str); + else + dev_info(dev, "%s: 10G_KR\n", (char *)str); break; case BGX_MODE_XLAUI: - if (!train_en) { - bgx->qlm_mode = QLM_MODE_XLAUI_1X4; - dev_info(dev, "BGX%d QLM mode: XLAUI\n", bgx->bgx_id); - } else { - bgx->qlm_mode = QLM_MODE_40G_KR4_1X4; - dev_info(dev, "BGX%d QLM mode: 40G_KR4\n", bgx->bgx_id); - } + if (!lmac->use_training) + dev_info(dev, "%s: 
XLAUI\n", (char *)str); + else + dev_info(dev, "%s: 40G_KR4\n", (char *)str); + break; + case BGX_MODE_QSGMII: + if ((lmacid == 0) && + (bgx_get_lane2sds_cfg(bgx, lmac) != lmacid)) + return; + if ((lmacid == 2) && + (bgx_get_lane2sds_cfg(bgx, lmac) == lmacid)) + return; + dev_info(dev, "%s: QSGMII\n", (char *)str); + break; + case BGX_MODE_RGMII: + dev_info(dev, "%s: RGMII\n", (char *)str); + break; + case BGX_MODE_INVALID: + /* Nothing to do */ + break; + } +} + +static void lmac_set_lane2sds(struct bgx *bgx, struct lmac *lmac) +{ + switch (lmac->lmac_type) { + case BGX_MODE_SGMII: + case BGX_MODE_XFI: + lmac->lane_to_sds = lmac->lmacid; + break; + case BGX_MODE_XAUI: + case BGX_MODE_XLAUI: + case BGX_MODE_RGMII: + lmac->lane_to_sds = 0xE4; + break; + case BGX_MODE_RXAUI: + lmac->lane_to_sds = (lmac->lmacid) ? 0xE : 0x4; + break; + case BGX_MODE_QSGMII: + /* There is no way to determine if DLM0/2 is QSGMII or + * DLM1/3 is configured to QSGMII as bootloader will + * configure all LMACs, so take whatever is configured + * by low level firmware. + */ + lmac->lane_to_sds = bgx_get_lane2sds_cfg(bgx, lmac); break; default: - bgx->qlm_mode = QLM_MODE_SGMII; - dev_info(dev, "BGX%d QLM default mode: SGMII\n", bgx->bgx_id); + lmac->lane_to_sds = 0; + break; + } +} + +static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid) +{ + if ((lmac->lmac_type != BGX_MODE_10G_KR) && + (lmac->lmac_type != BGX_MODE_40G_KR)) { + lmac->use_training = 0; + return; + } + + lmac->use_training = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL) & + SPU_PMD_CRTL_TRAIN_EN; +} + +static void bgx_set_lmac_config(struct bgx *bgx, u8 idx) +{ + struct lmac *lmac; + struct lmac *olmac; + u64 cmr_cfg; + u8 lmac_type; + u8 lane_to_sds; + + lmac = &bgx->lmac[idx]; + + if (!bgx->is_dlm || bgx->is_rgx) { + /* Read LMAC0 type to figure out QLM mode + * This is configured by low level firmware + */ + cmr_cfg = bgx_reg_read(bgx, 0, BGX_CMRX_CFG); + lmac->lmac_type = (cmr_cfg >> 8) & 0x07; + if (bgx->is_rgx) + lmac->lmac_type = BGX_MODE_RGMII; + lmac_set_training(bgx, lmac, 0); + lmac_set_lane2sds(bgx, lmac); + return; + } + + /* On 81xx BGX can be split across 2 DLMs + * firmware programs lmac_type of LMAC0 and LMAC2 + */ + if ((idx == 0) || (idx == 2)) { + cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG); + lmac_type = (u8)((cmr_cfg >> 8) & 0x07); + lane_to_sds = (u8)(cmr_cfg & 0xFF); + /* Check if config is not reset value */ + if ((lmac_type == 0) && (lane_to_sds == 0xE4)) + lmac->lmac_type = BGX_MODE_INVALID; + else + lmac->lmac_type = lmac_type; + lmac_set_training(bgx, lmac, lmac->lmacid); + lmac_set_lane2sds(bgx, lmac); + + /* Set LMAC type of other lmac on same DLM i.e LMAC 1/3 */ + olmac = &bgx->lmac[idx + 1]; + olmac->lmac_type = lmac->lmac_type; + lmac_set_training(bgx, olmac, olmac->lmacid); + lmac_set_lane2sds(bgx, olmac); + } +} + +static bool is_dlm0_in_bgx_mode(struct bgx *bgx) +{ + struct lmac *lmac; + + if (!bgx->is_dlm) + return true; + + lmac = &bgx->lmac[0]; + if (lmac->lmac_type == BGX_MODE_INVALID) + return false; + + return true; +} + +static void bgx_get_qlm_mode(struct bgx *bgx) +{ + struct lmac *lmac; + struct lmac *lmac01; + struct lmac *lmac23; + u8 idx; + + /* Init all LMAC's type to invalid */ + for (idx = 0; idx < bgx->max_lmac; idx++) { + lmac = &bgx->lmac[idx]; + lmac->lmacid = idx; + lmac->lmac_type = BGX_MODE_INVALID; + lmac->use_training = false; + } + + /* It is assumed that low level firmware sets this value */ + bgx->lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7; + 
if (bgx->lmac_count > bgx->max_lmac) + bgx->lmac_count = bgx->max_lmac; + + for (idx = 0; idx < bgx->max_lmac; idx++) + bgx_set_lmac_config(bgx, idx); + + if (!bgx->is_dlm || bgx->is_rgx) { + bgx_print_qlm_mode(bgx, 0); + return; + } + + if (bgx->lmac_count) { + bgx_print_qlm_mode(bgx, 0); + bgx_print_qlm_mode(bgx, 2); + } + + /* If DLM0 is not in BGX mode then LMAC0/1 have + * to be configured with serdes lanes of DLM1 + */ + if (is_dlm0_in_bgx_mode(bgx) || (bgx->lmac_count > 2)) + return; + for (idx = 0; idx < bgx->lmac_count; idx++) { + lmac01 = &bgx->lmac[idx]; + lmac23 = &bgx->lmac[idx + 2]; + lmac01->lmac_type = lmac23->lmac_type; + lmac01->lane_to_sds = lmac23->lane_to_sds; } } @@ -1042,7 +1165,7 @@ static int bgx_init_of_phy(struct bgx *bgx) } lmac++; - if (lmac == MAX_LMAC_PER_BGX) { + if (lmac == bgx->max_lmac) { of_node_put(node); break; } @@ -1087,6 +1210,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct device *dev = &pdev->dev; struct bgx *bgx = NULL; u8 lmac; + u16 sdevid; bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL); if (!bgx) @@ -1115,10 +1239,30 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = -ENOMEM; goto err_release_regions; } - bgx->bgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1; - bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_CN88XX; - bgx_vnic[bgx->bgx_id] = bgx; + pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid); + if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) { + bgx->bgx_id = + (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1; + bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE; + bgx->max_lmac = MAX_LMAC_PER_BGX; + bgx_vnic[bgx->bgx_id] = bgx; + } else { + bgx->is_rgx = true; + bgx->max_lmac = 1; + bgx->bgx_id = MAX_BGX_PER_CN81XX - 1; + bgx_vnic[bgx->bgx_id] = bgx; + xcv_init_hw(); + } + + /* On 81xx all are DLMs and on 83xx there are 3 BGX QLMs and one + * BGX i.e BGX2 can be split across 2 DLMs. 
+ */ + pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid); + if ((sdevid == PCI_SUBSYS_DEVID_81XX_BGX) || + ((sdevid == PCI_SUBSYS_DEVID_83XX_BGX) && (bgx->bgx_id == 2))) + bgx->is_dlm = true; + bgx_get_qlm_mode(bgx); err = bgx_init_phy(bgx); @@ -1133,6 +1277,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) { dev_err(dev, "BGX%d failed to enable lmac%d\n", bgx->bgx_id, lmac); + while (lmac) + bgx_lmac_disable(bgx, --lmac); goto err_enable; } } diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h index 42010d2e5ddf..d59c71e4a000 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h @@ -9,8 +9,20 @@ #ifndef THUNDER_BGX_H #define THUNDER_BGX_H -#define MAX_BGX_THUNDER 8 /* Max 4 nodes, 2 per node */ +/* PCI device ID */ +#define PCI_DEVICE_ID_THUNDER_BGX 0xA026 +#define PCI_DEVICE_ID_THUNDER_RGX 0xA054 + +/* Subsystem device IDs */ +#define PCI_SUBSYS_DEVID_88XX_BGX 0xA126 +#define PCI_SUBSYS_DEVID_81XX_BGX 0xA226 +#define PCI_SUBSYS_DEVID_83XX_BGX 0xA326 + +#define MAX_BGX_THUNDER 8 /* Max 2 nodes, 4 per node */ #define MAX_BGX_PER_CN88XX 2 +#define MAX_BGX_PER_CN81XX 3 /* 2 BGXs + 1 RGX */ +#define MAX_BGX_PER_CN83XX 4 +#define MAX_BGX_PER_NODE 4 #define MAX_LMAC_PER_BGX 4 #define MAX_BGX_CHANS_PER_LMAC 16 #define MAX_DMAC_PER_LMAC 8 @@ -18,8 +30,6 @@ #define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2 -#define MAX_LMAC (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX) - /* Registers */ #define BGX_CMRX_CFG 0x00 #define CMR_PKT_TX_EN BIT_ULL(13) @@ -136,6 +146,7 @@ #define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020 #define BGX_GMP_PCS_SGM_AN_ADV 0x30068 #define BGX_GMP_PCS_MISCX_CTL 0x30078 +#define PCS_MISC_CTL_DISP_EN BIT_ULL(13) #define PCS_MISC_CTL_GMX_ENO BIT_ULL(11) #define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full #define BGX_GMP_GMI_PRTX_CFG 0x38020 @@ -194,6 +205,9 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac); void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status); void bgx_lmac_internal_loopback(int node, int bgx_idx, int lmac_idx, bool enable); +void xcv_init_hw(void); +void xcv_setup_link(bool link_up, int link_speed); + u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx); u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx); #define BGX_RX_STATS_COUNT 11 @@ -213,16 +227,9 @@ enum LMAC_TYPE { BGX_MODE_XLAUI = 4, /* 4 lanes, 10.3125 Gbaud */ BGX_MODE_10G_KR = 3,/* 1 lane, 10.3125 Gbaud */ BGX_MODE_40G_KR = 4,/* 4 lanes, 10.3125 Gbaud */ -}; - -enum qlm_mode { - QLM_MODE_SGMII, /* SGMII, each lane independent */ - QLM_MODE_XAUI_1X4, /* 1 XAUI or DXAUI, 4 lanes */ - QLM_MODE_RXAUI_2X2, /* 2 RXAUI, 2 lanes each */ - QLM_MODE_XFI_4X1, /* 4 XFI, 1 lane each */ - QLM_MODE_XLAUI_1X4, /* 1 XLAUI, 4 lanes each */ - QLM_MODE_10G_KR_4X1, /* 4 10GBASE-KR, 1 lane each */ - QLM_MODE_40G_KR4_1X4, /* 1 40GBASE-KR4, 4 lanes each */ + BGX_MODE_RGMII = 5, + BGX_MODE_QSGMII = 6, + BGX_MODE_INVALID = 7, }; #endif /* THUNDER_BGX_H */ diff --git a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c new file mode 100644 index 000000000000..67befedef709 --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c @@ -0,0 +1,235 @@ +/* + * Copyright (C) 2016 Cavium, Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + */ + +#include <linux/acpi.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/phy.h> +#include <linux/of.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> + +#include "nic.h" +#include "thunder_bgx.h" + +#define DRV_NAME "thunder-xcv" +#define DRV_VERSION "1.0" + +/* Register offsets */ +#define XCV_RESET 0x00 +#define PORT_EN BIT_ULL(63) +#define CLK_RESET BIT_ULL(15) +#define DLL_RESET BIT_ULL(11) +#define COMP_EN BIT_ULL(7) +#define TX_PKT_RESET BIT_ULL(3) +#define TX_DATA_RESET BIT_ULL(2) +#define RX_PKT_RESET BIT_ULL(1) +#define RX_DATA_RESET BIT_ULL(0) +#define XCV_DLL_CTL 0x10 +#define CLKRX_BYP BIT_ULL(23) +#define CLKTX_BYP BIT_ULL(15) +#define XCV_COMP_CTL 0x20 +#define DRV_BYP BIT_ULL(63) +#define XCV_CTL 0x30 +#define XCV_INT 0x40 +#define XCV_INT_W1S 0x48 +#define XCV_INT_ENA_W1C 0x50 +#define XCV_INT_ENA_W1S 0x58 +#define XCV_INBND_STATUS 0x80 +#define XCV_BATCH_CRD_RET 0x100 + +struct xcv { + void __iomem *reg_base; + struct pci_dev *pdev; +}; + +static struct xcv *xcv; + +/* Supported devices */ +static const struct pci_device_id xcv_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xA056) }, + { 0, } /* end of table */ +}; + +MODULE_AUTHOR("Cavium Inc"); +MODULE_DESCRIPTION("Cavium Thunder RGX/XCV Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(DRV_VERSION); +MODULE_DEVICE_TABLE(pci, xcv_id_table); + +void xcv_init_hw(void) +{ + u64 cfg; + + /* Take DLL out of reset */ + cfg = readq_relaxed(xcv->reg_base + XCV_RESET); + cfg &= ~DLL_RESET; + writeq_relaxed(cfg, xcv->reg_base + XCV_RESET); + + /* Take clock tree out of reset */ + cfg = readq_relaxed(xcv->reg_base + XCV_RESET); + cfg &= ~CLK_RESET; + writeq_relaxed(cfg, xcv->reg_base + XCV_RESET); + /* Wait for DLL to lock */ + msleep(1); + + /* Configure DLL - enable or bypass + * TX no bypass, RX bypass + */ + cfg = readq_relaxed(xcv->reg_base + XCV_DLL_CTL); + cfg &= ~0xFF03; + cfg |= CLKRX_BYP; + writeq_relaxed(cfg, xcv->reg_base + XCV_DLL_CTL); + + /* Enable compensation controller and force the + * write to be visible to HW by reading back. 
+ */ + cfg = readq_relaxed(xcv->reg_base + XCV_RESET); + cfg |= COMP_EN; + writeq_relaxed(cfg, xcv->reg_base + XCV_RESET); + readq_relaxed(xcv->reg_base + XCV_RESET); + /* Wait for compensation state machine to lock */ + msleep(10); + + /* enable the XCV block */ + cfg = readq_relaxed(xcv->reg_base + XCV_RESET); + cfg |= PORT_EN; + writeq_relaxed(cfg, xcv->reg_base + XCV_RESET); + + cfg = readq_relaxed(xcv->reg_base + XCV_RESET); + cfg |= CLK_RESET; + writeq_relaxed(cfg, xcv->reg_base + XCV_RESET); +} +EXPORT_SYMBOL(xcv_init_hw); + +void xcv_setup_link(bool link_up, int link_speed) +{ + u64 cfg; + int speed = 2; + + if (!xcv) { + dev_err(&xcv->pdev->dev, + "XCV init not done, probe may have failed\n"); + return; + } + + if (link_speed == 100) + speed = 1; + else if (link_speed == 10) + speed = 0; + + if (link_up) { + /* set operating speed */ + cfg = readq_relaxed(xcv->reg_base + XCV_CTL); + cfg &= ~0x03; + cfg |= speed; + writeq_relaxed(cfg, xcv->reg_base + XCV_CTL); + + /* Reset datapaths */ + cfg = readq_relaxed(xcv->reg_base + XCV_RESET); + cfg |= TX_DATA_RESET | RX_DATA_RESET; + writeq_relaxed(cfg, xcv->reg_base + XCV_RESET); + + /* Enable the packet flow */ + cfg = readq_relaxed(xcv->reg_base + XCV_RESET); + cfg |= TX_PKT_RESET | RX_PKT_RESET; + writeq_relaxed(cfg, xcv->reg_base + XCV_RESET); + + /* Return credits to RGX */ + writeq_relaxed(0x01, xcv->reg_base + XCV_BATCH_CRD_RET); + } else { + /* Disable packet flow */ + cfg = readq_relaxed(xcv->reg_base + XCV_RESET); + cfg &= ~(TX_PKT_RESET | RX_PKT_RESET); + writeq_relaxed(cfg, xcv->reg_base + XCV_RESET); + readq_relaxed(xcv->reg_base + XCV_RESET); + } +} +EXPORT_SYMBOL(xcv_setup_link); + +static int xcv_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int err; + struct device *dev = &pdev->dev; + + xcv = devm_kzalloc(dev, sizeof(struct xcv), GFP_KERNEL); + if (!xcv) + return -ENOMEM; + xcv->pdev = pdev; + + pci_set_drvdata(pdev, xcv); + + err = pci_enable_device(pdev); + if (err) { + dev_err(dev, "Failed to enable PCI device\n"); + goto err_kfree; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(dev, "PCI request regions failed 0x%x\n", err); + goto err_disable_device; + } + + /* MAP configuration registers */ + xcv->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); + if (!xcv->reg_base) { + dev_err(dev, "XCV: Cannot map CSR memory space, aborting\n"); + err = -ENOMEM; + goto err_release_regions; + } + + return 0; + +err_release_regions: + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); +err_kfree: + devm_kfree(dev, xcv); + xcv = NULL; + return err; +} + +static void xcv_remove(struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + + if (xcv) { + devm_kfree(dev, xcv); + xcv = NULL; + } + + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static struct pci_driver xcv_driver = { + .name = DRV_NAME, + .id_table = xcv_id_table, + .probe = xcv_probe, + .remove = xcv_remove, +}; + +static int __init xcv_init_module(void) +{ + pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION); + + return pci_register_driver(&xcv_driver); +} + +static void __exit xcv_cleanup_module(void) +{ + pci_unregister_driver(&xcv_driver); +} + +module_init(xcv_init_module); +module_exit(xcv_cleanup_module); diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile index ace0ab98d0f1..246129650967 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/Makefile +++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile @@ -4,7 +4,7 @@ 
obj-$(CONFIG_CHELSIO_T4) += cxgb4.o -cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o +cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o cxgb4_uld.o sched.o cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 2e2aa9fec9bb..3f7b33aa5ec5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1,7 +1,7 @@ /* * This file is part of the Chelsio T4 Ethernet driver for Linux. * - * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. + * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -53,6 +53,8 @@ #include "cxgb4_uld.h" #define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) +extern struct list_head adapter_list; +extern struct mutex uld_mutex; enum { MAX_NPORTS = 4, /* max # of ports */ @@ -338,12 +340,14 @@ struct adapter_params { enum chip_type chip; /* chip code */ struct arch_specific_params arch; /* chip specific params */ unsigned char offload; + unsigned char crypto; /* HW capability for crypto */ unsigned char bypass; unsigned int ofldq_wr_cred; bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */ + unsigned int nsched_cls; /* number of traffic classes */ unsigned int max_ordird_qp; /* Max read depth per RDMA QP */ unsigned int max_ird_adapter; /* Max read depth per adapter */ }; @@ -403,7 +407,6 @@ struct fw_info { struct fw_hdr fw_hdr; }; - struct trace_params { u32 data[TRACE_LEN / 4]; u32 mask[TRACE_LEN / 4]; @@ -493,6 +496,7 @@ struct port_info { #endif /* CONFIG_CHELSIO_T4_FCOE */ bool rxtstamp; /* Enable TS */ struct hwtstamp_config tstamp_config; + struct sched_table *sched_tbl; }; struct dentry; @@ -510,6 +514,10 @@ enum { /* adapter flags */ FW_OFLD_CONN = (1 << 9), }; +enum { + ULP_CRYPTO_LOOKASIDE = 1 << 0, +}; + struct rx_sw_desc; struct sge_fl { /* SGE free-buffer queue state */ @@ -680,6 +688,16 @@ struct sge_ctrl_txq { /* state for an SGE control Tx queue */ u8 full; /* the Tx ring is full */ } ____cacheline_aligned_in_smp; +struct sge_uld_rxq_info { + char name[IFNAMSIZ]; /* name of ULD driver */ + struct sge_ofld_rxq *uldrxq; /* Rxq's for ULD */ + u16 *msix_tbl; /* msix_tbl for uld */ + u16 *rspq_id; /* response queue id's of rxq */ + u16 nrxq; /* # of ingress uld queues */ + u16 nciq; /* # of completion queues */ + u8 uld; /* uld type */ +}; + struct sge { struct sge_eth_txq ethtxq[MAX_ETH_QSETS]; struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS]; @@ -691,6 +709,7 @@ struct sge { struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES]; struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS]; struct sge_rspq fw_evtq ____cacheline_aligned_in_smp; + struct sge_uld_rxq_info **uld_rxq_info; struct sge_rspq intrq ____cacheline_aligned_in_smp; spinlock_t intrq_lock; @@ -702,6 +721,7 @@ struct sge { u16 niscsitq; /* # of available iSCST Rx queues */ u16 rdmaqs; /* # of available RDMA Rx queues */ u16 rdmaciqs; /* # of available RDMA concentrator IQs */ + u16 nqs_per_uld; /* # of Rx queues per ULD */ u16 iscsi_rxq[MAX_OFLD_QSETS]; u16 iscsit_rxq[MAX_ISCSIT_QUEUES]; u16 rdma_rxq[MAX_RDMA_QUEUES]; @@ -757,6 +777,17 @@ struct hash_mac_addr { u8 addr[ETH_ALEN]; }; +struct uld_msix_bmap { + unsigned long 
*msix_bmap; + unsigned int mapsize; + spinlock_t lock; /* lock for acquiring bitmap */ +}; + +struct uld_msix_info { + unsigned short vec; + char desc[IFNAMSIZ + 10]; +}; + struct adapter { void __iomem *regs; void __iomem *bar2; @@ -767,6 +798,7 @@ struct adapter { unsigned int mbox; unsigned int pf; unsigned int flags; + unsigned int adap_idx; enum chip_type chip; int msg_enable; @@ -779,6 +811,9 @@ struct adapter { unsigned short vec; char desc[IFNAMSIZ + 10]; } msix_info[MAX_INGQ + 1]; + struct uld_msix_info *msix_info_ulds; /* msix info for uld's */ + struct uld_msix_bmap msix_bmap_ulds; /* msix bitmap for all uld */ + unsigned int msi_idx; struct doorbell_stats db_stats; struct sge sge; @@ -793,7 +828,9 @@ struct adapter { unsigned int clipt_start; unsigned int clipt_end; struct clip_tbl *clipt; + struct cxgb4_pci_uld_info *uld; void *uld_handle[CXGB4_ULD_MAX]; + unsigned int num_uld; struct list_head list_node; struct list_head rcu_node; struct list_head mac_hlist; /* list of MAC addresses in MPS Hash */ @@ -824,6 +861,55 @@ struct adapter { spinlock_t win0_lock ____cacheline_aligned_in_smp; }; +/* Support for "sched-class" command to allow a TX Scheduling Class to be + * programmed with various parameters. + */ +struct ch_sched_params { + s8 type; /* packet or flow */ + union { + struct { + s8 level; /* scheduler hierarchy level */ + s8 mode; /* per-class or per-flow */ + s8 rateunit; /* bit or packet rate */ + s8 ratemode; /* %port relative or kbps absolute */ + s8 channel; /* scheduler channel [0..N] */ + s8 class; /* scheduler class [0..N] */ + s32 minrate; /* minimum rate */ + s32 maxrate; /* maximum rate */ + s16 weight; /* percent weight */ + s16 pktsize; /* average packet size */ + } params; + } u; +}; + +enum { + SCHED_CLASS_TYPE_PACKET = 0, /* class type */ +}; + +enum { + SCHED_CLASS_LEVEL_CL_RL = 0, /* class rate limiter */ +}; + +enum { + SCHED_CLASS_MODE_CLASS = 0, /* per-class scheduling */ +}; + +enum { + SCHED_CLASS_RATEUNIT_BITS = 0, /* bit rate scheduling */ +}; + +enum { + SCHED_CLASS_RATEMODE_ABS = 1, /* Kb/s */ +}; + +/* Support for "sched_queue" command to allow one or more NIC TX Queues + * to be bound to a TX Scheduling Class. 
+ */ +struct ch_sched_queue { + s8 queue; /* queue index */ + s8 class; /* class index */ +}; + /* Defined bit width of user definable filter tuples */ #define ETHTYPE_BITWIDTH 16 @@ -952,6 +1038,11 @@ static inline int is_offload(const struct adapter *adap) return adap->params.offload; } +static inline int is_pci_uld(const struct adapter *adap) +{ + return adap->params.crypto; +} + static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr) { return readl(adap->regs + reg_addr); @@ -1185,8 +1276,6 @@ int t4_sge_init(struct adapter *adap); void t4_sge_start(struct adapter *adap); void t4_sge_stop(struct adapter *adap); int cxgb_busy_poll(struct napi_struct *napi); -int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us, - unsigned int cnt); void cxgb4_set_ethtool_ops(struct net_device *netdev); int cxgb4_write_rss(const struct port_info *pi, const u16 *queues); extern int dbfifo_int_thresh; @@ -1289,6 +1378,18 @@ static inline int hash_mac_addr(const u8 *addr) return a & 0x3f; } +int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us, + unsigned int cnt); +static inline void init_rspq(struct adapter *adap, struct sge_rspq *q, + unsigned int us, unsigned int cnt, + unsigned int size, unsigned int iqe_size) +{ + q->adap = adap; + cxgb4_set_rspq_intr_params(q, us, cnt); + q->iqe_len = iqe_size; + q->size = size; +} + void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, unsigned int data_reg, const u32 *vals, unsigned int nregs, unsigned int start_idx); @@ -1514,6 +1615,9 @@ void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp, int filter_index, int *enabled); int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr, u32 val); +int t4_sched_params(struct adapter *adapter, int type, int level, int mode, + int rateunit, int ratemode, int channel, int class, + int minrate, int maxrate, int weight, int pktsize); void t4_sge_decode_idma_state(struct adapter *adapter, int state); void t4_free_mem(void *addr); void t4_idma_monitor_init(struct adapter *adapter, @@ -1521,4 +1625,9 @@ void t4_idma_monitor_init(struct adapter *adapter, void t4_idma_monitor(struct adapter *adapter, struct sge_idma_monitor_state *idma, int hz, int ticks); +int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf, + unsigned int naddr, u8 *addr); +void uld_mem_free(struct adapter *adap); +int uld_mem_alloc(struct adapter *adap); +void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl); #endif /* __CXGB4_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index c762a8c8c954..44019bdd526d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -1,7 +1,7 @@ /* * This file is part of the Chelsio T4 Ethernet driver for Linux. * - * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. + * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -76,6 +76,7 @@ #include "cxgb4_debugfs.h" #include "clip_tbl.h" #include "l2t.h" +#include "sched.h" char cxgb4_driver_name[] = KBUILD_MODNAME; @@ -223,8 +224,8 @@ MODULE_PARM_DESC(select_queue, static struct dentry *cxgb4_debugfs_root; -static LIST_HEAD(adapter_list); -static DEFINE_MUTEX(uld_mutex); +LIST_HEAD(adapter_list); +DEFINE_MUTEX(uld_mutex); /* Adapter list to be accessed from atomic context */ static LIST_HEAD(adap_rcu_list); static DEFINE_SPINLOCK(adap_rcu_lock); @@ -1066,20 +1067,20 @@ static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q, */ static int setup_sge_queues(struct adapter *adap) { - int err, msi_idx, i, j; + int err, i, j; struct sge *s = &adap->sge; bitmap_zero(s->starving_fl, s->egr_sz); bitmap_zero(s->txq_maperr, s->egr_sz); if (adap->flags & USING_MSIX) - msi_idx = 1; /* vector 0 is for non-queue interrupts */ + adap->msi_idx = 1; /* vector 0 is for non-queue interrupts */ else { err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0, NULL, NULL, NULL, -1); if (err) return err; - msi_idx = -((int)s->intrq.abs_id + 1); + adap->msi_idx = -((int)s->intrq.abs_id + 1); } /* NOTE: If you add/delete any Ingress/Egress Queue allocations in here, @@ -1096,7 +1097,7 @@ static int setup_sge_queues(struct adapter *adap) * new/deleted queues. */ err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], - msi_idx, NULL, fwevtq_handler, NULL, -1); + adap->msi_idx, NULL, fwevtq_handler, NULL, -1); if (err) { freeout: t4_free_sge_resources(adap); return err; @@ -1109,10 +1110,10 @@ freeout: t4_free_sge_resources(adap); struct sge_eth_txq *t = &s->ethtxq[pi->first_qset]; for (j = 0; j < pi->nqsets; j++, q++) { - if (msi_idx > 0) - msi_idx++; + if (adap->msi_idx > 0) + adap->msi_idx++; err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, - msi_idx, &q->fl, + adap->msi_idx, &q->fl, t4_ethrx_handler, NULL, t4_get_mps_bg_map(adap, @@ -1141,11 +1142,11 @@ freeout: t4_free_sge_resources(adap); } #define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids, lro) do { \ - err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids, lro); \ + err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, adap->msi_idx, ids, lro); \ if (err) \ goto freeout; \ - if (msi_idx > 0) \ - msi_idx += nq; \ + if (adap->msi_idx > 0) \ + adap->msi_idx += nq; \ } while (0) ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq, false); @@ -2565,6 +2566,12 @@ static void detach_ulds(struct adapter *adap) CXGB4_STATE_DETACH); adap->uld_handle[i] = NULL; } + for (i = 0; i < CXGB4_PCI_ULD_MAX; i++) + if (adap->uld && adap->uld[i].handle) { + adap->uld[i].state_change(adap->uld[i].handle, + CXGB4_STATE_DETACH); + adap->uld[i].handle = NULL; + } if (netevent_registered && list_empty(&adapter_list)) { unregister_netevent_notifier(&cxgb4_netevent_nb); netevent_registered = false; @@ -2584,6 +2591,10 @@ static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state) for (i = 0; i < CXGB4_ULD_MAX; i++) if (adap->uld_handle[i]) ulds[i].state_change(adap->uld_handle[i], new_state); + for (i = 0; i < CXGB4_PCI_ULD_MAX; i++) + if (adap->uld && adap->uld[i].handle) + adap->uld[i].state_change(adap->uld[i].handle, + new_state); mutex_unlock(&uld_mutex); } @@ -2922,7 +2933,6 @@ EXPORT_SYMBOL(cxgb4_create_server_filter); int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid, unsigned int queue, bool ipv6) { - int ret; struct filter_entry *f; struct adapter *adap; @@ -2936,11 +2946,7 @@ int 
cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid, /* Unlock the filter */ f->locked = 0; - ret = delete_filter(adap, stid); - if (ret) - return ret; - - return 0; + return delete_filter(adap, stid); } EXPORT_SYMBOL(cxgb4_remove_server_filter); @@ -3078,6 +3084,35 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu) return ret; } +#ifdef CONFIG_PCI_IOV +static int dummy_open(struct net_device *dev) +{ + /* Turn carrier off since we don't have to transmit anything on this + * interface. + */ + netif_carrier_off(dev); + return 0; +} + +static int cxgb_set_vf_mac(struct net_device *dev, int vf, u8 *mac) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + + /* verify MAC addr is valid */ + if (!is_valid_ether_addr(mac)) { + dev_err(pi->adapter->pdev_dev, + "Invalid Ethernet address %pM for VF %d\n", + mac, vf); + return -EINVAL; + } + + dev_info(pi->adapter->pdev_dev, + "Setting MAC %pM on VF %d\n", mac, vf); + return t4_set_vf_mac_acl(adap, vf + 1, 1, mac); +} +#endif + static int cxgb_set_mac_addr(struct net_device *dev, void *p) { int ret; @@ -3114,6 +3149,87 @@ static void cxgb_netpoll(struct net_device *dev) } #endif +static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + struct sched_class *e; + struct ch_sched_params p; + struct ch_sched_queue qe; + u32 req_rate; + int err = 0; + + if (!can_sched(dev)) + return -ENOTSUPP; + + if (index < 0 || index > pi->nqsets - 1) + return -EINVAL; + + if (!(adap->flags & FULL_INIT_DONE)) { + dev_err(adap->pdev_dev, + "Failed to rate limit on queue %d. Link Down?\n", + index); + return -EINVAL; + } + + /* Convert from Mbps to Kbps */ + req_rate = rate << 10; + + /* Max rate is 10 Gbps */ + if (req_rate >= SCHED_MAX_RATE_KBPS) { + dev_err(adap->pdev_dev, + "Invalid rate %u Mbps, Max rate is %u Gbps\n", + rate, SCHED_MAX_RATE_KBPS); + return -ERANGE; + } + + /* First unbind the queue from any existing class */ + memset(&qe, 0, sizeof(qe)); + qe.queue = index; + qe.class = SCHED_CLS_NONE; + + err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE); + if (err) { + dev_err(adap->pdev_dev, + "Unbinding Queue %d on port %d fail. Err: %d\n", + index, pi->port_id, err); + return err; + } + + /* Queue already unbound */ + if (!req_rate) + return 0; + + /* Fetch any available unused or matching scheduling class */ + memset(&p, 0, sizeof(p)); + p.type = SCHED_CLASS_TYPE_PACKET; + p.u.params.level = SCHED_CLASS_LEVEL_CL_RL; + p.u.params.mode = SCHED_CLASS_MODE_CLASS; + p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS; + p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS; + p.u.params.channel = pi->tx_chan; + p.u.params.class = SCHED_CLS_NONE; + p.u.params.minrate = 0; + p.u.params.maxrate = req_rate; + p.u.params.weight = 0; + p.u.params.pktsize = dev->mtu; + + e = cxgb4_sched_class_alloc(dev, &p); + if (!e) + return -ENOMEM; + + /* Bind the queue to a scheduling class */ + memset(&qe, 0, sizeof(qe)); + qe.queue = index; + qe.class = e->idx; + + err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE); + if (err) + dev_err(adap->pdev_dev, + "Queue rate limiting failed. 
Err: %d\n", err); + return err; +} + static const struct net_device_ops cxgb4_netdev_ops = { .ndo_open = cxgb_open, .ndo_stop = cxgb_close, @@ -3136,7 +3252,29 @@ static const struct net_device_ops cxgb4_netdev_ops = { #ifdef CONFIG_NET_RX_BUSY_POLL .ndo_busy_poll = cxgb_busy_poll, #endif + .ndo_set_tx_maxrate = cxgb_set_tx_maxrate, +}; + +#ifdef CONFIG_PCI_IOV +static const struct net_device_ops cxgb4_mgmt_netdev_ops = { + .ndo_open = dummy_open, + .ndo_set_vf_mac = cxgb_set_vf_mac, +}; +#endif + +static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct adapter *adapter = netdev2adap(dev); + strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver)); + strlcpy(info->version, cxgb4_driver_version, + sizeof(info->version)); + strlcpy(info->bus_info, pci_name(adapter->pdev), + sizeof(info->bus_info)); +} + +static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = { + .get_drvinfo = get_drvinfo, }; void t4_fatal_err(struct adapter *adap) @@ -3979,6 +4117,12 @@ static int adap_init0(struct adapter *adap) adap->clipt_start = val[0]; adap->clipt_end = val[1]; + /* We don't yet have a PARAMs calls to retrieve the number of Traffic + * Classes supported by the hardware/firmware so we hard code it here + * for now. + */ + adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16; + /* query params related to active filter region */ params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START); params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END); @@ -4130,6 +4274,11 @@ static int adap_init0(struct adapter *adap) adap->vres.iscsi.start = val[0]; adap->vres.iscsi.size = val[1] - val[0] + 1; } + if (caps_cmd.cryptocaps) { + /* Should query params here...TODO */ + adap->params.crypto |= ULP_CRYPTO_LOOKASIDE; + adap->num_uld += 1; + } #undef FW_PARAM_PFVF #undef FW_PARAM_DEV @@ -4311,16 +4460,6 @@ static inline bool is_x_10g_port(const struct link_config *lc) (lc->supported & FW_PORT_CAP_SPEED_40G) != 0; } -static inline void init_rspq(struct adapter *adap, struct sge_rspq *q, - unsigned int us, unsigned int cnt, - unsigned int size, unsigned int iqe_size) -{ - q->adap = adap; - cxgb4_set_rspq_intr_params(q, us, cnt); - q->iqe_len = iqe_size; - q->size = size; -} - /* * Perform default configuration of DMA queues depending on the number and type * of ports we found and the number of available CPUs. Most settings can be @@ -4337,8 +4476,12 @@ static void cfg_queues(struct adapter *adap) /* Reduce memory usage in kdump environment, disable all offload. 
*/ - if (is_kdump_kernel()) + if (is_kdump_kernel()) { adap->params.offload = 0; + adap->params.crypto = 0; + } else if (adap->num_uld && uld_mem_alloc(adap)) { + adap->params.crypto = 0; + } for_each_port(adap, i) n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); @@ -4498,23 +4641,58 @@ static void reduce_ethqs(struct adapter *adap, int n) } } +static int get_msix_info(struct adapter *adap) +{ + struct uld_msix_info *msix_info; + int max_ingq = (MAX_OFLD_QSETS * adap->num_uld); + + msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL); + if (!msix_info) + return -ENOMEM; + + adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq), + sizeof(long), GFP_KERNEL); + if (!adap->msix_bmap_ulds.msix_bmap) { + kfree(msix_info); + return -ENOMEM; + } + spin_lock_init(&adap->msix_bmap_ulds.lock); + adap->msix_info_ulds = msix_info; + return 0; +} + +static void free_msix_info(struct adapter *adap) +{ + if (!adap->num_uld) + return; + + kfree(adap->msix_info_ulds); + kfree(adap->msix_bmap_ulds.msix_bmap); +} + /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */ #define EXTRA_VECS 2 static int enable_msix(struct adapter *adap) { - int ofld_need = 0; - int i, want, need, allocated; + int ofld_need = 0, uld_need = 0; + int i, j, want, need, allocated; struct sge *s = &adap->sge; unsigned int nchan = adap->params.nports; struct msix_entry *entries; + int max_ingq = MAX_INGQ; - entries = kmalloc(sizeof(*entries) * (MAX_INGQ + 1), + max_ingq += (MAX_OFLD_QSETS * adap->num_uld); + entries = kmalloc(sizeof(*entries) * (max_ingq + 1), GFP_KERNEL); if (!entries) return -ENOMEM; - for (i = 0; i < MAX_INGQ + 1; ++i) + /* map for msix */ + if (is_pci_uld(adap) && get_msix_info(adap)) + adap->params.crypto = 0; + + for (i = 0; i < max_ingq + 1; ++i) entries[i].entry = i; want = s->max_ethqsets + EXTRA_VECS; @@ -4527,13 +4705,17 @@ static int enable_msix(struct adapter *adap) else ofld_need = 4 * nchan; } + if (is_pci_uld(adap)) { + want += netif_get_num_default_rss_queues() * nchan; + uld_need = nchan; + } #ifdef CONFIG_CHELSIO_T4_DCB /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for * each port. */ - need = 8 * adap->params.nports + EXTRA_VECS + ofld_need; + need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need; #else - need = adap->params.nports + EXTRA_VECS + ofld_need; + need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need; #endif allocated = pci_enable_msix_range(adap->pdev, entries, need, want); if (allocated < 0) { @@ -4547,12 +4729,20 @@ static int enable_msix(struct adapter *adap) * Every group gets its minimum requirement and NIC gets top * priority for leftovers. 
*/ - i = allocated - EXTRA_VECS - ofld_need; + i = allocated - EXTRA_VECS - ofld_need - uld_need; if (i < s->max_ethqsets) { s->max_ethqsets = i; if (i < s->ethqsets) reduce_ethqs(adap, i); } + if (is_pci_uld(adap)) { + if (allocated < want) + s->nqs_per_uld = nchan; + else + s->nqs_per_uld = netif_get_num_default_rss_queues() * + nchan; + } + if (is_offload(adap)) { if (allocated < want) { s->rdmaqs = nchan; @@ -4564,16 +4754,24 @@ static int enable_msix(struct adapter *adap) /* leftovers go to OFLD */ i = allocated - EXTRA_VECS - s->max_ethqsets - - s->rdmaqs - s->rdmaciqs - s->niscsitq; + s->rdmaqs - s->rdmaciqs - s->niscsitq; + if (is_pci_uld(adap)) + i -= s->nqs_per_uld * adap->num_uld; s->iscsiqsets = (i / nchan) * nchan; /* round down */ } - for (i = 0; i < allocated; ++i) + + for (i = 0; i < (allocated - (s->nqs_per_uld * adap->num_uld)); ++i) adap->msix_info[i].vec = entries[i].vector; + if (is_pci_uld(adap)) { + for (j = 0 ; i < allocated; ++i, j++) + adap->msix_info_ulds[j].vec = entries[i].vector; + adap->msix_bmap_ulds.mapsize = j; + } dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, " - "nic %d iscsi %d rdma cpl %d rdma ciq %d\n", + "nic %d iscsi %d rdma cpl %d rdma ciq %d uld %d\n", allocated, s->max_ethqsets, s->iscsiqsets, s->rdmaqs, - s->rdmaciqs); + s->rdmaciqs, s->nqs_per_uld); kfree(entries); return 0; @@ -4783,6 +4981,7 @@ static void free_some_resources(struct adapter *adapter) unsigned int i; t4_free_mem(adapter->l2t); + t4_cleanup_sched(adapter); t4_free_mem(adapter->tids.tid_tab); kfree(adapter->sge.egr_map); kfree(adapter->sge.ingr_map); @@ -4834,21 +5033,59 @@ static int get_chip_type(struct pci_dev *pdev, u32 pl_rev) } #ifdef CONFIG_PCI_IOV +static void dummy_setup(struct net_device *dev) +{ + dev->type = ARPHRD_NONE; + dev->mtu = 0; + dev->hard_header_len = 0; + dev->addr_len = 0; + dev->tx_queue_len = 0; + dev->flags |= IFF_NOARP; + dev->priv_flags |= IFF_NO_QUEUE; + + /* Initialize the device structure. 
*/ + dev->netdev_ops = &cxgb4_mgmt_netdev_ops; + dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops; + dev->destructor = free_netdev; +} + +static int config_mgmt_dev(struct pci_dev *pdev) +{ + struct adapter *adap = pci_get_drvdata(pdev); + struct net_device *netdev; + struct port_info *pi; + char name[IFNAMSIZ]; + int err; + + snprintf(name, IFNAMSIZ, "mgmtpf%d%d", adap->adap_idx, adap->pf); + netdev = alloc_netdev(0, name, NET_NAME_UNKNOWN, dummy_setup); + if (!netdev) + return -ENOMEM; + + pi = netdev_priv(netdev); + pi->adapter = adap; + SET_NETDEV_DEV(netdev, &pdev->dev); + + adap->port[0] = netdev; + + err = register_netdev(adap->port[0]); + if (err) { + pr_info("Unable to register VF mgmt netdev %s\n", name); + free_netdev(adap->port[0]); + adap->port[0] = NULL; + return err; + } + return 0; +} + static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) { + struct adapter *adap = pci_get_drvdata(pdev); int err = 0; int current_vfs = pci_num_vf(pdev); u32 pcie_fw; - void __iomem *regs; - regs = pci_ioremap_bar(pdev, 0); - if (!regs) { - dev_err(&pdev->dev, "cannot map device registers\n"); - return -ENOMEM; - } - - pcie_fw = readl(regs + PCIE_FW_A); - iounmap(regs); + pcie_fw = readl(adap->regs + PCIE_FW_A); /* Check if cxgb4 is the MASTER and fw is initialized */ if (!(pcie_fw & PCIE_FW_INIT_F) || !(pcie_fw & PCIE_FW_MASTER_VLD_F) || @@ -4875,6 +5112,10 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) */ if (!num_vfs) { pci_disable_sriov(pdev); + if (adap->port[0]) { + unregister_netdev(adap->port[0]); + adap->port[0] = NULL; + } return num_vfs; } @@ -4882,6 +5123,10 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) err = pci_enable_sriov(pdev, num_vfs); if (err) return err; + + err = config_mgmt_dev(pdev); + if (err) + return err; } return num_vfs; } @@ -4893,9 +5138,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) struct port_info *pi; bool highdma = false; struct adapter *adapter = NULL; + struct net_device *netdev; void __iomem *regs; u32 whoami, pl_rev; enum chip_type chip; + static int adap_idx = 1; printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); @@ -4930,7 +5177,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ? 
SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami); if (func != ent->driver_data) { +#ifndef CONFIG_PCI_IOV iounmap(regs); +#endif pci_disable_device(pdev); pci_save_state(pdev); /* to restore SR-IOV later */ goto sriov; @@ -4962,6 +5211,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) err = -ENOMEM; goto out_unmap_bar0; } + adap_idx++; adapter->workq = create_singlethread_workqueue("cxgb4"); if (!adapter->workq) { @@ -5048,8 +5298,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) T6_STATMODE_V(0))); for_each_port(adapter, i) { - struct net_device *netdev; - netdev = alloc_etherdev_mq(sizeof(struct port_info), MAX_ETH_QSETS); if (!netdev) { @@ -5143,6 +5391,16 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } } #endif + + for_each_port(adapter, i) { + pi = adap2pinfo(adapter, i); + pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls); + if (!pi->sched_tbl) + dev_warn(&pdev->dev, + "could not activate scheduling on port %d\n", + i); + } + if (is_offload(adapter) && tid_init(&adapter->tids) < 0) { dev_warn(&pdev->dev, "could not allocate TID table, " "continuing\n"); @@ -5168,8 +5426,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* See what interrupts we'll be using */ if (msi > 1 && enable_msix(adapter) == 0) adapter->flags |= USING_MSIX; - else if (msi > 0 && pci_enable_msi(pdev) == 0) + else if (msi > 0 && pci_enable_msi(pdev) == 0) { adapter->flags |= USING_MSI; + if (msi > 1) + free_msix_info(adapter); + } /* check for PCI Express bandwidth capabiltites */ cxgb4_check_pcie_caps(adapter); @@ -5217,6 +5478,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) attach_ulds(adapter); print_adapter_info(adapter); + return 0; sriov: #ifdef CONFIG_PCI_IOV @@ -5230,11 +5492,48 @@ sriov: "instantiated %u virtual functions\n", num_vf[func]); } -#endif + + adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); + if (!adapter) { + err = -ENOMEM; + goto free_pci_region; + } + + adapter->pdev = pdev; + adapter->pdev_dev = &pdev->dev; + adapter->name = pci_name(pdev); + adapter->mbox = func; + adapter->pf = func; + adapter->regs = regs; + adapter->adap_idx = adap_idx; + adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) + + (sizeof(struct mbox_cmd) * + T4_OS_LOG_MBOX_CMDS), + GFP_KERNEL); + if (!adapter->mbox_log) { + err = -ENOMEM; + goto free_adapter; + } + pci_set_drvdata(pdev, adapter); + return 0; + + free_adapter: + kfree(adapter); + free_pci_region: + iounmap(regs); + pci_disable_sriov(pdev); + pci_release_regions(pdev); + return err; +#else return 0; +#endif out_free_dev: free_some_resources(adapter); + if (adapter->flags & USING_MSIX) + free_msix_info(adapter); + if (adapter->num_uld) + uld_mem_free(adapter); out_unmap_bar: if (!is_t4(adapter->params.chip)) iounmap(adapter->bar2); @@ -5258,12 +5557,12 @@ static void remove_one(struct pci_dev *pdev) { struct adapter *adapter = pci_get_drvdata(pdev); -#ifdef CONFIG_PCI_IOV - pci_disable_sriov(pdev); - -#endif + if (!adapter) { + pci_release_regions(pdev); + return; + } - if (adapter) { + if (adapter->pf == 4) { int i; /* Tear down per-adapter Work Queue first since it can contain @@ -5296,6 +5595,10 @@ static void remove_one(struct pci_dev *pdev) if (adapter->flags & FULL_INIT_DONE) cxgb_down(adapter); + if (adapter->flags & USING_MSIX) + free_msix_info(adapter); + if (adapter->num_uld) + uld_mem_free(adapter); free_some_resources(adapter); #if IS_ENABLED(CONFIG_IPV6) t4_cleanup_clip_tbl(adapter); @@ 
-5312,8 +5615,17 @@ static void remove_one(struct pci_dev *pdev) kfree(adapter->mbox_log); synchronize_rcu(); kfree(adapter); - } else + } +#ifdef CONFIG_PCI_IOV + else { + if (adapter->port[0]) + unregister_netdev(adapter->port[0]); + iounmap(adapter->regs); + kfree(adapter); + pci_disable_sriov(pdev); pci_release_regions(pdev); + } +#endif } static struct pci_driver cxgb4_driver = { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c new file mode 100644 index 000000000000..5d402bace6c1 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -0,0 +1,554 @@ +/* + * cxgb4_uld.c:Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management + * + * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Written by: Atul Gupta (atul.gupta@chelsio.com) + * Written by: Hariprasad Shenai (hariprasad@chelsio.com) + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/debugfs.h> +#include <linux/export.h> +#include <linux/list.h> +#include <linux/skbuff.h> +#include <linux/pci.h> + +#include "cxgb4.h" +#include "cxgb4_uld.h" +#include "t4_regs.h" +#include "t4fw_api.h" +#include "t4_msg.h" + +#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++) + +static int get_msix_idx_from_bmap(struct adapter *adap) +{ + struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds; + unsigned long flags; + unsigned int msix_idx; + + spin_lock_irqsave(&bmap->lock, flags); + msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize); + if (msix_idx < bmap->mapsize) { + __set_bit(msix_idx, bmap->msix_bmap); + } else { + spin_unlock_irqrestore(&bmap->lock, flags); + return -ENOSPC; + } + + spin_unlock_irqrestore(&bmap->lock, flags); + return msix_idx; +} + +static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx) +{ + struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds; + unsigned long flags; + + spin_lock_irqsave(&bmap->lock, flags); + __clear_bit(msix_idx, bmap->msix_bmap); + spin_unlock_irqrestore(&bmap->lock, flags); +} + +static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *gl) +{ + struct adapter *adap = q->adap; + struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq); + int ret; + + /* FW can send CPLs encapsulated in a CPL_FW4_MSG */ + if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG && + ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL) + rsp += 2; + + if (q->flush_handler) + ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle, + rsp, gl, &q->lro_mgr, + &q->napi); + else + ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle, + rsp, gl); + + if (ret) { + rxq->stats.nomem++; + return -1; + } + + if (!gl) + rxq->stats.imm++; + else if (gl == CXGB4_MSG_AN) + rxq->stats.an++; + else + rxq->stats.pkts++; + return 0; +} + +static int alloc_uld_rxqs(struct adapter *adap, + struct sge_uld_rxq_info *rxq_info, + unsigned int nq, unsigned int offset, bool lro) +{ + struct sge *s = &adap->sge; + struct sge_ofld_rxq *q = rxq_info->uldrxq + offset; + unsigned short *ids = rxq_info->rspq_id + offset; + unsigned int per_chan = nq / adap->params.nports; + unsigned int msi_idx, bmap_idx; + int i, err; + + if (adap->flags & USING_MSIX) + msi_idx = 1; + else + msi_idx = -((int)s->intrq.abs_id + 1); + + for (i = 0; i < nq; i++, q++) { + if (msi_idx >= 0) { + bmap_idx = get_msix_idx_from_bmap(adap); + adap->msi_idx++; + } + err = t4_sge_alloc_rxq(adap, &q->rspq, false, + adap->port[i / per_chan], + adap->msi_idx, + q->fl.size ? &q->fl : NULL, + uldrx_handler, + NULL, + 0); + if (err) + goto freeout; + if (msi_idx >= 0) + rxq_info->msix_tbl[i + offset] = bmap_idx; + memset(&q->stats, 0, sizeof(q->stats)); + if (ids) + ids[i] = q->rspq.abs_id; + } + return 0; +freeout: + q = rxq_info->uldrxq + offset; + for ( ; i; i--, q++) { + if (q->rspq.desc) + free_rspq_fl(adap, &q->rspq, + q->fl.size ? &q->fl : NULL); + adap->msi_idx--; + } + + /* We need to free rxq also in case of ciq allocation failure */ + if (offset) { + q = rxq_info->uldrxq + offset; + for ( ; i; i--, q++) { + if (q->rspq.desc) + free_rspq_fl(adap, &q->rspq, + q->fl.size ? 
&q->fl : NULL); + adap->msi_idx--; + } + } + return err; +} + +int setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro) +{ + struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; + + if (adap->flags & USING_MSIX) { + rxq_info->msix_tbl = kzalloc(rxq_info->nrxq + rxq_info->nciq, + GFP_KERNEL); + if (!rxq_info->msix_tbl) + return -ENOMEM; + } + + return !(!alloc_uld_rxqs(adap, rxq_info, rxq_info->nrxq, 0, lro) && + !alloc_uld_rxqs(adap, rxq_info, rxq_info->nciq, + rxq_info->nrxq, lro)); +} + +static void t4_free_uld_rxqs(struct adapter *adap, int n, + struct sge_ofld_rxq *q) +{ + for ( ; n; n--, q++) { + if (q->rspq.desc) + free_rspq_fl(adap, &q->rspq, + q->fl.size ? &q->fl : NULL); + adap->msi_idx--; + } +} + +void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type) +{ + struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; + + if (rxq_info->nciq) + t4_free_uld_rxqs(adap, rxq_info->nciq, + rxq_info->uldrxq + rxq_info->nrxq); + t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq); + if (adap->flags & USING_MSIX) + kfree(rxq_info->msix_tbl); +} + +int cfg_queues_uld(struct adapter *adap, unsigned int uld_type, + const struct cxgb4_pci_uld_info *uld_info) +{ + struct sge *s = &adap->sge; + struct sge_uld_rxq_info *rxq_info; + int i, nrxq; + + rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL); + if (!rxq_info) + return -ENOMEM; + + if (uld_info->nrxq > s->nqs_per_uld) + rxq_info->nrxq = s->nqs_per_uld; + else + rxq_info->nrxq = uld_info->nrxq; + if (!uld_info->nciq) + rxq_info->nciq = 0; + else if (uld_info->nciq && uld_info->nciq > s->nqs_per_uld) + rxq_info->nciq = s->nqs_per_uld; + else + rxq_info->nciq = uld_info->nciq; + + nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */ + rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq), + GFP_KERNEL); + if (!rxq_info->uldrxq) { + kfree(rxq_info); + return -ENOMEM; + } + + rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL); + if (!rxq_info->uldrxq) { + kfree(rxq_info->uldrxq); + kfree(rxq_info); + return -ENOMEM; + } + + for (i = 0; i < rxq_info->nrxq; i++) { + struct sge_ofld_rxq *r = &rxq_info->uldrxq[i]; + + init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64); + r->rspq.uld = uld_type; + r->fl.size = 72; + } + + for (i = rxq_info->nrxq; i < nrxq; i++) { + struct sge_ofld_rxq *r = &rxq_info->uldrxq[i]; + + init_rspq(adap, &r->rspq, 5, 1, uld_info->ciq_size, 64); + r->rspq.uld = uld_type; + r->fl.size = 72; + } + + memcpy(rxq_info->name, uld_info->name, IFNAMSIZ); + adap->sge.uld_rxq_info[uld_type] = rxq_info; + + return 0; +} + +void free_queues_uld(struct adapter *adap, unsigned int uld_type) +{ + struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; + + kfree(rxq_info->rspq_id); + kfree(rxq_info->uldrxq); + kfree(rxq_info); +} + +int request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type) +{ + struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; + int idx, bmap_idx, err = 0; + + for_each_uldrxq(rxq_info, idx) { + bmap_idx = rxq_info->msix_tbl[idx]; + err = request_irq(adap->msix_info_ulds[bmap_idx].vec, + t4_sge_intr_msix, 0, + adap->msix_info_ulds[bmap_idx].desc, + &rxq_info->uldrxq[idx].rspq); + if (err) + goto unwind; + } + return 0; +unwind: + while (--idx >= 0) { + bmap_idx = rxq_info->msix_tbl[idx]; + free_msix_idx_in_bmap(adap, bmap_idx); + free_irq(adap->msix_info_ulds[bmap_idx].vec, + &rxq_info->uldrxq[idx].rspq); + } + return err; +} + +void free_msix_queue_irqs_uld(struct 
adapter *adap, unsigned int uld_type) +{ + struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; + int idx; + + for_each_uldrxq(rxq_info, idx) { + unsigned int bmap_idx = rxq_info->msix_tbl[idx]; + + free_msix_idx_in_bmap(adap, bmap_idx); + free_irq(adap->msix_info_ulds[bmap_idx].vec, + &rxq_info->uldrxq[idx].rspq); + } +} + +void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type) +{ + struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; + int n = sizeof(adap->msix_info_ulds[0].desc); + int idx; + + for_each_uldrxq(rxq_info, idx) { + unsigned int bmap_idx = rxq_info->msix_tbl[idx]; + + snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d", + adap->port[0]->name, rxq_info->name, idx); + } +} + +static void enable_rx(struct adapter *adap, struct sge_rspq *q) +{ + if (!q) + return; + + if (q->handler) { + cxgb_busy_poll_init_lock(q); + napi_enable(&q->napi); + } + /* 0-increment GTS to start the timer and enable interrupts */ + t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), + SEINTARM_V(q->intr_params) | + INGRESSQID_V(q->cntxt_id)); +} + +static void quiesce_rx(struct adapter *adap, struct sge_rspq *q) +{ + if (q && q->handler) { + napi_disable(&q->napi); + local_bh_disable(); + while (!cxgb_poll_lock_napi(q)) + mdelay(1); + local_bh_enable(); + } +} + +void enable_rx_uld(struct adapter *adap, unsigned int uld_type) +{ + struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; + int idx; + + for_each_uldrxq(rxq_info, idx) + enable_rx(adap, &rxq_info->uldrxq[idx].rspq); +} + +void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type) +{ + struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; + int idx; + + for_each_uldrxq(rxq_info, idx) + quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq); +} + +static void uld_queue_init(struct adapter *adap, unsigned int uld_type, + struct cxgb4_lld_info *lli) +{ + struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; + + lli->rxq_ids = rxq_info->rspq_id; + lli->nrxq = rxq_info->nrxq; + lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq; + lli->nciq = rxq_info->nciq; +} + +int uld_mem_alloc(struct adapter *adap) +{ + struct sge *s = &adap->sge; + + adap->uld = kcalloc(adap->num_uld, sizeof(*adap->uld), GFP_KERNEL); + if (!adap->uld) + return -ENOMEM; + + s->uld_rxq_info = kzalloc(adap->num_uld * + sizeof(struct sge_uld_rxq_info *), + GFP_KERNEL); + if (!s->uld_rxq_info) + goto err_uld; + + return 0; +err_uld: + kfree(adap->uld); + return -ENOMEM; +} + +void uld_mem_free(struct adapter *adap) +{ + struct sge *s = &adap->sge; + + kfree(s->uld_rxq_info); + kfree(adap->uld); +} + +static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld) +{ + int i; + + lld->pdev = adap->pdev; + lld->pf = adap->pf; + lld->l2t = adap->l2t; + lld->tids = &adap->tids; + lld->ports = adap->port; + lld->vr = &adap->vres; + lld->mtus = adap->params.mtus; + lld->ntxq = adap->sge.iscsiqsets; + lld->nchan = adap->params.nports; + lld->nports = adap->params.nports; + lld->wr_cred = adap->params.ofldq_wr_cred; + lld->adapter_type = adap->params.chip; + lld->cclk_ps = 1000000000 / adap->params.vpd.cclk; + lld->udb_density = 1 << adap->params.sge.eq_qpp; + lld->ucq_density = 1 << adap->params.sge.iq_qpp; + lld->filt_mode = adap->params.tp.vlan_pri_map; + /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ + for (i = 0; i < NCHAN; i++) + lld->tx_modq[i] = i; + lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A); + lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A); + lld->fw_vers = 
adap->params.fw_vers; + lld->dbfifo_int_thresh = dbfifo_int_thresh; + lld->sge_ingpadboundary = adap->sge.fl_align; + lld->sge_egrstatuspagesize = adap->sge.stat_len; + lld->sge_pktshift = adap->sge.pktshift; + lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN; + lld->max_ordird_qp = adap->params.max_ordird_qp; + lld->max_ird_adapter = adap->params.max_ird_adapter; + lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl; + lld->nodeid = dev_to_node(adap->pdev_dev); +} + +static void uld_attach(struct adapter *adap, unsigned int uld) +{ + void *handle; + struct cxgb4_lld_info lli; + + uld_init(adap, &lli); + uld_queue_init(adap, uld, &lli); + + handle = adap->uld[uld].add(&lli); + if (IS_ERR(handle)) { + dev_warn(adap->pdev_dev, + "could not attach to the %s driver, error %ld\n", + adap->uld[uld].name, PTR_ERR(handle)); + return; + } + + adap->uld[uld].handle = handle; + + if (adap->flags & FULL_INIT_DONE) + adap->uld[uld].state_change(handle, CXGB4_STATE_UP); +} + +int cxgb4_register_pci_uld(enum cxgb4_pci_uld type, + struct cxgb4_pci_uld_info *p) +{ + int ret = 0; + struct adapter *adap; + + if (type >= CXGB4_PCI_ULD_MAX) + return -EINVAL; + + mutex_lock(&uld_mutex); + list_for_each_entry(adap, &adapter_list, list_node) { + if (!is_pci_uld(adap)) + continue; + ret = cfg_queues_uld(adap, type, p); + if (ret) + goto out; + ret = setup_sge_queues_uld(adap, type, p->lro); + if (ret) + goto free_queues; + if (adap->flags & USING_MSIX) { + name_msix_vecs_uld(adap, type); + ret = request_msix_queue_irqs_uld(adap, type); + if (ret) + goto free_rxq; + } + if (adap->flags & FULL_INIT_DONE) + enable_rx_uld(adap, type); + if (adap->uld[type].add) { + ret = -EBUSY; + goto free_irq; + } + adap->uld[type] = *p; + uld_attach(adap, type); + } + mutex_unlock(&uld_mutex); + return 0; + +free_irq: + if (adap->flags & USING_MSIX) + free_msix_queue_irqs_uld(adap, type); +free_rxq: + free_sge_queues_uld(adap, type); +free_queues: + free_queues_uld(adap, type); +out: + mutex_unlock(&uld_mutex); + return ret; +} +EXPORT_SYMBOL(cxgb4_register_pci_uld); + +int cxgb4_unregister_pci_uld(enum cxgb4_pci_uld type) +{ + struct adapter *adap; + + if (type >= CXGB4_PCI_ULD_MAX) + return -EINVAL; + + mutex_lock(&uld_mutex); + list_for_each_entry(adap, &adapter_list, list_node) { + if (!is_pci_uld(adap)) + continue; + adap->uld[type].handle = NULL; + adap->uld[type].add = NULL; + if (adap->flags & FULL_INIT_DONE) + quiesce_rx_uld(adap, type); + if (adap->flags & USING_MSIX) + free_msix_queue_irqs_uld(adap, type); + free_sge_queues_uld(adap, type); + free_queues_uld(adap, type); + } + mutex_unlock(&uld_mutex); + + return 0; +} +EXPORT_SYMBOL(cxgb4_unregister_pci_uld); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index f3c58aaa932d..ab4037222f8d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -32,8 +32,8 @@ * SOFTWARE. 
*/ -#ifndef __CXGB4_OFLD_H -#define __CXGB4_OFLD_H +#ifndef __CXGB4_ULD_H +#define __CXGB4_ULD_H #include <linux/cache.h> #include <linux/spinlock.h> @@ -296,8 +296,36 @@ struct cxgb4_uld_info { void (*lro_flush)(struct t4_lro_mgr *); }; +enum cxgb4_pci_uld { + CXGB4_PCI_ULD1, + CXGB4_PCI_ULD_MAX +}; + +struct cxgb4_pci_uld_info { + const char *name; + bool lro; + void *handle; + unsigned int nrxq; + unsigned int nciq; + unsigned int rxq_size; + unsigned int ciq_size; + void *(*add)(const struct cxgb4_lld_info *p); + int (*rx_handler)(void *handle, const __be64 *rsp, + const struct pkt_gl *gl); + int (*state_change)(void *handle, enum cxgb4_state new_state); + int (*control)(void *handle, enum cxgb4_control control, ...); + int (*lro_rx_handler)(void *handle, const __be64 *rsp, + const struct pkt_gl *gl, + struct t4_lro_mgr *lro_mgr, + struct napi_struct *napi); + void (*lro_flush)(struct t4_lro_mgr *); +}; + int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p); int cxgb4_unregister_uld(enum cxgb4_uld type); +int cxgb4_register_pci_uld(enum cxgb4_pci_uld type, + struct cxgb4_pci_uld_info *p); +int cxgb4_unregister_pci_uld(enum cxgb4_pci_uld type); int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb); unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo); unsigned int cxgb4_port_chan(const struct net_device *dev); @@ -330,4 +358,4 @@ int cxgb4_bar2_sge_qregs(struct net_device *dev, u64 *pbar2_qoffset, unsigned int *pbar2_qid); -#endif /* !__CXGB4_OFLD_H */ +#endif /* !__CXGB4_ULD_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c new file mode 100644 index 000000000000..539de764bbd3 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c @@ -0,0 +1,556 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/module.h> +#include <linux/netdevice.h> + +#include "cxgb4.h" +#include "sched.h" + +/* Spinlock must be held by caller */ +static int t4_sched_class_fw_cmd(struct port_info *pi, + struct ch_sched_params *p, + enum sched_fw_ops op) +{ + struct adapter *adap = pi->adapter; + struct sched_table *s = pi->sched_tbl; + struct sched_class *e; + int err = 0; + + e = &s->tab[p->u.params.class]; + switch (op) { + case SCHED_FW_OP_ADD: + err = t4_sched_params(adap, p->type, + p->u.params.level, p->u.params.mode, + p->u.params.rateunit, + p->u.params.ratemode, + p->u.params.channel, e->idx, + p->u.params.minrate, p->u.params.maxrate, + p->u.params.weight, p->u.params.pktsize); + break; + default: + err = -ENOTSUPP; + break; + } + + return err; +} + +/* Spinlock must be held by caller */ +static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg, + enum sched_bind_type type, bool bind) +{ + struct adapter *adap = pi->adapter; + u32 fw_mnem, fw_class, fw_param; + unsigned int pf = adap->pf; + unsigned int vf = 0; + int err = 0; + + switch (type) { + case SCHED_QUEUE: { + struct sched_queue_entry *qe; + + qe = (struct sched_queue_entry *)arg; + + /* Create a template for the FW_PARAMS_CMD mnemonic and + * value (TX Scheduling Class in this case). + */ + fw_mnem = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | + FW_PARAMS_PARAM_X_V( + FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH)); + fw_class = bind ? qe->param.class : FW_SCHED_CLS_NONE; + fw_param = (fw_mnem | FW_PARAMS_PARAM_YZ_V(qe->cntxt_id)); + + pf = adap->pf; + vf = 0; + break; + } + default: + err = -ENOTSUPP; + goto out; + } + + err = t4_set_params(adap, adap->mbox, pf, vf, 1, &fw_param, &fw_class); + +out: + return err; +} + +static struct sched_class *t4_sched_queue_lookup(struct port_info *pi, + const unsigned int qid, + int *index) +{ + struct sched_table *s = pi->sched_tbl; + struct sched_class *e, *end; + struct sched_class *found = NULL; + int i; + + /* Look for a class with matching bound queue parameters */ + end = &s->tab[s->sched_size]; + for (e = &s->tab[0]; e != end; ++e) { + struct sched_queue_entry *qe; + + i = 0; + if (e->state == SCHED_STATE_UNUSED) + continue; + + list_for_each_entry(qe, &e->queue_list, list) { + if (qe->cntxt_id == qid) { + found = e; + if (index) + *index = i; + break; + } + i++; + } + + if (found) + break; + } + + return found; +} + +static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p) +{ + struct adapter *adap = pi->adapter; + struct sched_class *e; + struct sched_queue_entry *qe = NULL; + struct sge_eth_txq *txq; + unsigned int qid; + int index = -1; + int err = 0; + + if (p->queue < 0 || p->queue >= pi->nqsets) + return -ERANGE; + + txq = &adap->sge.ethtxq[pi->first_qset + p->queue]; + qid = txq->q.cntxt_id; + + /* Find the existing class that the queue is bound to */ + e = t4_sched_queue_lookup(pi, qid, &index); + if (e && index >= 0) { + int i = 0; + + spin_lock(&e->lock); + list_for_each_entry(qe, &e->queue_list, list) { + if (i == index) + break; + i++; + } + err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, + false); + if (err) { + spin_unlock(&e->lock); + goto out; + } + + list_del(&qe->list); + t4_free_mem(qe); + if (atomic_dec_and_test(&e->refcnt)) { + e->state = SCHED_STATE_UNUSED; + memset(&e->info, 0, sizeof(e->info)); + } + spin_unlock(&e->lock); + } +out: + return err; +} + +static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p) +{ + struct adapter *adap = pi->adapter; + struct sched_table *s = pi->sched_tbl; + 
struct sched_class *e; + struct sched_queue_entry *qe = NULL; + struct sge_eth_txq *txq; + unsigned int qid; + int err = 0; + + if (p->queue < 0 || p->queue >= pi->nqsets) + return -ERANGE; + + qe = t4_alloc_mem(sizeof(struct sched_queue_entry)); + if (!qe) + return -ENOMEM; + + txq = &adap->sge.ethtxq[pi->first_qset + p->queue]; + qid = txq->q.cntxt_id; + + /* Unbind queue from any existing class */ + err = t4_sched_queue_unbind(pi, p); + if (err) + goto out; + + /* Bind queue to specified class */ + memset(qe, 0, sizeof(*qe)); + qe->cntxt_id = qid; + memcpy(&qe->param, p, sizeof(qe->param)); + + e = &s->tab[qe->param.class]; + spin_lock(&e->lock); + err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true); + if (err) { + t4_free_mem(qe); + spin_unlock(&e->lock); + goto out; + } + + list_add_tail(&qe->list, &e->queue_list); + atomic_inc(&e->refcnt); + spin_unlock(&e->lock); +out: + return err; +} + +static void t4_sched_class_unbind_all(struct port_info *pi, + struct sched_class *e, + enum sched_bind_type type) +{ + if (!e) + return; + + switch (type) { + case SCHED_QUEUE: { + struct sched_queue_entry *qe; + + list_for_each_entry(qe, &e->queue_list, list) + t4_sched_queue_unbind(pi, &qe->param); + break; + } + default: + break; + } +} + +static int t4_sched_class_bind_unbind_op(struct port_info *pi, void *arg, + enum sched_bind_type type, bool bind) +{ + int err = 0; + + if (!arg) + return -EINVAL; + + switch (type) { + case SCHED_QUEUE: { + struct ch_sched_queue *qe = (struct ch_sched_queue *)arg; + + if (bind) + err = t4_sched_queue_bind(pi, qe); + else + err = t4_sched_queue_unbind(pi, qe); + break; + } + default: + err = -ENOTSUPP; + break; + } + + return err; +} + +/** + * cxgb4_sched_class_bind - Bind an entity to a scheduling class + * @dev: net_device pointer + * @arg: Entity opaque data + * @type: Entity type (Queue) + * + * Binds an entity (queue) to a scheduling class. If the entity + * is bound to another class, it will be unbound from the other class + * and bound to the class specified in @arg. + */ +int cxgb4_sched_class_bind(struct net_device *dev, void *arg, + enum sched_bind_type type) +{ + struct port_info *pi = netdev2pinfo(dev); + struct sched_table *s; + int err = 0; + u8 class_id; + + if (!can_sched(dev)) + return -ENOTSUPP; + + if (!arg) + return -EINVAL; + + switch (type) { + case SCHED_QUEUE: { + struct ch_sched_queue *qe = (struct ch_sched_queue *)arg; + + class_id = qe->class; + break; + } + default: + return -ENOTSUPP; + } + + if (!valid_class_id(dev, class_id)) + return -EINVAL; + + if (class_id == SCHED_CLS_NONE) + return -ENOTSUPP; + + s = pi->sched_tbl; + write_lock(&s->rw_lock); + err = t4_sched_class_bind_unbind_op(pi, arg, type, true); + write_unlock(&s->rw_lock); + + return err; +} + +/** + * cxgb4_sched_class_unbind - Unbind an entity from a scheduling class + * @dev: net_device pointer + * @arg: Entity opaque data + * @type: Entity type (Queue) + * + * Unbinds an entity (queue) from a scheduling class. 
+ */ +int cxgb4_sched_class_unbind(struct net_device *dev, void *arg, + enum sched_bind_type type) +{ + struct port_info *pi = netdev2pinfo(dev); + struct sched_table *s; + int err = 0; + u8 class_id; + + if (!can_sched(dev)) + return -ENOTSUPP; + + if (!arg) + return -EINVAL; + + switch (type) { + case SCHED_QUEUE: { + struct ch_sched_queue *qe = (struct ch_sched_queue *)arg; + + class_id = qe->class; + break; + } + default: + return -ENOTSUPP; + } + + if (!valid_class_id(dev, class_id)) + return -EINVAL; + + s = pi->sched_tbl; + write_lock(&s->rw_lock); + err = t4_sched_class_bind_unbind_op(pi, arg, type, false); + write_unlock(&s->rw_lock); + + return err; +} + +/* If @p is NULL, fetch any available unused class */ +static struct sched_class *t4_sched_class_lookup(struct port_info *pi, + const struct ch_sched_params *p) +{ + struct sched_table *s = pi->sched_tbl; + struct sched_class *e, *end; + struct sched_class *found = NULL; + + if (!p) { + /* Get any available unused class */ + end = &s->tab[s->sched_size]; + for (e = &s->tab[0]; e != end; ++e) { + if (e->state == SCHED_STATE_UNUSED) { + found = e; + break; + } + } + } else { + /* Look for a class with matching scheduling parameters */ + struct ch_sched_params info; + struct ch_sched_params tp; + + memset(&info, 0, sizeof(info)); + memset(&tp, 0, sizeof(tp)); + + memcpy(&tp, p, sizeof(tp)); + /* Don't try to match class parameter */ + tp.u.params.class = SCHED_CLS_NONE; + + end = &s->tab[s->sched_size]; + for (e = &s->tab[0]; e != end; ++e) { + if (e->state == SCHED_STATE_UNUSED) + continue; + + memset(&info, 0, sizeof(info)); + memcpy(&info, &e->info, sizeof(info)); + /* Don't try to match class parameter */ + info.u.params.class = SCHED_CLS_NONE; + + if ((info.type == tp.type) && + (!memcmp(&info.u.params, &tp.u.params, + sizeof(info.u.params)))) { + found = e; + break; + } + } + } + + return found; +} + +static struct sched_class *t4_sched_class_alloc(struct port_info *pi, + struct ch_sched_params *p) +{ + struct sched_table *s = pi->sched_tbl; + struct sched_class *e; + u8 class_id; + int err; + + if (!p) + return NULL; + + class_id = p->u.params.class; + + /* Only accept search for existing class with matching params + * or allocation of new class with specified params + */ + if (class_id != SCHED_CLS_NONE) + return NULL; + + write_lock(&s->rw_lock); + /* See if there's an exisiting class with same + * requested sched params + */ + e = t4_sched_class_lookup(pi, p); + if (!e) { + struct ch_sched_params np; + + /* Fetch any available unused class */ + e = t4_sched_class_lookup(pi, NULL); + if (!e) + goto out; + + memset(&np, 0, sizeof(np)); + memcpy(&np, p, sizeof(np)); + np.u.params.class = e->idx; + + spin_lock(&e->lock); + /* New class */ + err = t4_sched_class_fw_cmd(pi, &np, SCHED_FW_OP_ADD); + if (err) { + spin_unlock(&e->lock); + e = NULL; + goto out; + } + memcpy(&e->info, &np, sizeof(e->info)); + atomic_set(&e->refcnt, 0); + e->state = SCHED_STATE_ACTIVE; + spin_unlock(&e->lock); + } + +out: + write_unlock(&s->rw_lock); + return e; +} + +/** + * cxgb4_sched_class_alloc - allocate a scheduling class + * @dev: net_device pointer + * @p: new scheduling class to create. + * + * Returns pointer to the scheduling class created. If @p is NULL, then + * it allocates and returns any available unused scheduling class. If a + * scheduling class with matching @p is found, then the matching class is + * returned. 
+ */ +struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev, + struct ch_sched_params *p) +{ + struct port_info *pi = netdev2pinfo(dev); + u8 class_id; + + if (!can_sched(dev)) + return NULL; + + class_id = p->u.params.class; + if (!valid_class_id(dev, class_id)) + return NULL; + + return t4_sched_class_alloc(pi, p); +} + +static void t4_sched_class_free(struct port_info *pi, struct sched_class *e) +{ + t4_sched_class_unbind_all(pi, e, SCHED_QUEUE); +} + +struct sched_table *t4_init_sched(unsigned int sched_size) +{ + struct sched_table *s; + unsigned int i; + + s = t4_alloc_mem(sizeof(*s) + sched_size * sizeof(struct sched_class)); + if (!s) + return NULL; + + s->sched_size = sched_size; + rwlock_init(&s->rw_lock); + + for (i = 0; i < s->sched_size; i++) { + memset(&s->tab[i], 0, sizeof(struct sched_class)); + s->tab[i].idx = i; + s->tab[i].state = SCHED_STATE_UNUSED; + INIT_LIST_HEAD(&s->tab[i].queue_list); + spin_lock_init(&s->tab[i].lock); + atomic_set(&s->tab[i].refcnt, 0); + } + return s; +} + +void t4_cleanup_sched(struct adapter *adap) +{ + struct sched_table *s; + unsigned int i; + + for_each_port(adap, i) { + struct port_info *pi = netdev2pinfo(adap->port[i]); + + s = pi->sched_tbl; + for (i = 0; i < s->sched_size; i++) { + struct sched_class *e; + + write_lock(&s->rw_lock); + e = &s->tab[i]; + if (e->state == SCHED_STATE_ACTIVE) + t4_sched_class_free(pi, e); + write_unlock(&s->rw_lock); + } + t4_free_mem(s); + } +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h new file mode 100644 index 000000000000..77b2b3fd9021 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h @@ -0,0 +1,110 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __CXGB4_SCHED_H +#define __CXGB4_SCHED_H + +#include <linux/spinlock.h> +#include <linux/atomic.h> + +#define SCHED_CLS_NONE 0xff + +#define FW_SCHED_CLS_NONE 0xffffffff + +/* Max rate that can be set to a scheduling class is 10 Gbps */ +#define SCHED_MAX_RATE_KBPS 10000000U + +enum { + SCHED_STATE_ACTIVE, + SCHED_STATE_UNUSED, +}; + +enum sched_fw_ops { + SCHED_FW_OP_ADD, +}; + +enum sched_bind_type { + SCHED_QUEUE, +}; + +struct sched_queue_entry { + struct list_head list; + unsigned int cntxt_id; + struct ch_sched_queue param; +}; + +struct sched_class { + u8 state; + u8 idx; + struct ch_sched_params info; + struct list_head queue_list; + spinlock_t lock; /* Per class lock */ + atomic_t refcnt; +}; + +struct sched_table { /* per port scheduling table */ + u8 sched_size; + rwlock_t rw_lock; /* Table lock */ + struct sched_class tab[0]; +}; + +static inline bool can_sched(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + + return !pi->sched_tbl ? false : true; +} + +static inline bool valid_class_id(struct net_device *dev, u8 class_id) +{ + struct port_info *pi = netdev2pinfo(dev); + + if ((class_id > pi->sched_tbl->sched_size - 1) && + (class_id != SCHED_CLS_NONE)) + return false; + + return true; +} + +int cxgb4_sched_class_bind(struct net_device *dev, void *arg, + enum sched_bind_type type); +int cxgb4_sched_class_unbind(struct net_device *dev, void *arg, + enum sched_bind_type type); + +struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev, + struct ch_sched_params *p); + +struct sched_table *t4_init_sched(unsigned int size); +void t4_cleanup_sched(struct adapter *adap); +#endif /* __CXGB4_SCHED_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index ad3552df0545..9a607dbc6ca8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2928,8 +2928,8 @@ static void free_txq(struct adapter *adap, struct sge_txq *q) q->desc = NULL; } -static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, - struct sge_fl *fl) +void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, + struct sge_fl *fl) { struct sge *s = &adap->sge; unsigned int fl_id = fl ? fl->cntxt_id : 0xffff; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index dc92c80a75f4..de451ee2ba75 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -1,7 +1,7 @@ /* * This file is part of the Chelsio T4 Ethernet driver for Linux. * - * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. + * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -8264,3 +8264,73 @@ void t4_idma_monitor(struct adapter *adapter, t4_sge_decode_idma_state(adapter, idma->idma_state[i]); } } + +/** + * t4_set_vf_mac - Set MAC address for the specified VF + * @adapter: The adapter + * @vf: one of the VFs instantiated by the specified PF + * @naddr: the number of MAC addresses + * @addr: the MAC address(es) to be set to the specified VF + */ +int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf, + unsigned int naddr, u8 *addr) +{ + struct fw_acl_mac_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_MAC_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_ACL_MAC_CMD_PFN_V(adapter->pf) | + FW_ACL_MAC_CMD_VFN_V(vf)); + + /* Note: Do not enable the ACL */ + cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd)); + cmd.nmac = naddr; + + switch (adapter->pf) { + case 3: + memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3)); + break; + case 2: + memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2)); + break; + case 1: + memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1)); + break; + case 0: + memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0)); + break; + } + + return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd); +} + +int t4_sched_params(struct adapter *adapter, int type, int level, int mode, + int rateunit, int ratemode, int channel, int class, + int minrate, int maxrate, int weight, int pktsize) +{ + struct fw_sched_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_SCHED_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + + cmd.u.params.sc = FW_SCHED_SC_PARAMS; + cmd.u.params.type = type; + cmd.u.params.level = level; + cmd.u.params.mode = mode; + cmd.u.params.ch = channel; + cmd.u.params.cl = class; + cmd.u.params.unit = rateunit; + cmd.u.params.rate = ratemode; + cmd.u.params.min = cpu_to_be32(minrate); + cmd.u.params.max = cpu_to_be32(maxrate); + cmd.u.params.weight = cpu_to_be16(weight); + cmd.u.params.pktsize = cpu_to_be16(pktsize); + + return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd), + NULL, 1); +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index e0ebe1378cb2..fba3b2ad382d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -61,6 +61,7 @@ enum { CPL_ABORT_REQ_RSS = 0x2B, CPL_ABORT_RPL_RSS = 0x2D, + CPL_RX_PHYS_ADDR = 0x30, CPL_CLOSE_CON_RPL = 0x32, CPL_ISCSI_HDR = 0x33, CPL_RDMA_CQE = 0x35, @@ -83,6 +84,10 @@ enum { CPL_PASS_OPEN_REQ6 = 0x81, CPL_ACT_OPEN_REQ6 = 0x83, + CPL_TX_TLS_PDU = 0x88, + CPL_TX_SEC_PDU = 0x8A, + CPL_TX_TLS_ACK = 0x8B, + CPL_RDMA_TERMINATE = 0xA2, CPL_RDMA_WRITE = 0xA4, CPL_SGE_EGR_UPDATE = 0xA5, @@ -94,6 +99,8 @@ enum { CPL_FW4_PLD = 0xC1, CPL_FW4_ACK = 0xC3, + CPL_RX_PHYS_DSGL = 0xD0, + CPL_FW6_MSG = 0xE0, CPL_FW6_PLD = 0xE1, CPL_TX_PKT_LSO = 0xED, @@ -1362,6 +1369,15 @@ struct ulptx_idata { __be32 len; }; +struct ulp_txpkt { + __be32 cmd_dest; + __be32 len; +}; + +#define ULPTX_CMD_S 24 +#define ULPTX_CMD_M 0xFF +#define ULPTX_CMD_V(x) ((x) << ULPTX_CMD_S) + #define ULPTX_NSGE_S 0 #define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S) @@ -1369,6 +1385,22 @@ struct ulptx_idata { #define ULPTX_MORE_V(x) ((x) << ULPTX_MORE_S) #define ULPTX_MORE_F ULPTX_MORE_V(1U) +#define ULP_TXPKT_DEST_S 16 +#define ULP_TXPKT_DEST_M 0x3 +#define ULP_TXPKT_DEST_V(x) ((x) << ULP_TXPKT_DEST_S) + +#define ULP_TXPKT_FID_S 4 
+#define ULP_TXPKT_FID_M 0x7ff +#define ULP_TXPKT_FID_V(x) ((x) << ULP_TXPKT_FID_S) + +#define ULP_TXPKT_RO_S 3 +#define ULP_TXPKT_RO_V(x) ((x) << ULP_TXPKT_RO_S) +#define ULP_TXPKT_RO_F ULP_TXPKT_RO_V(1U) + +#define ULP_TX_SC_MORE_S 23 +#define ULP_TX_SC_MORE_V(x) ((x) << ULP_TX_SC_MORE_S) +#define ULP_TX_SC_MORE_F ULP_TX_SC_MORE_V(1U) + struct ulp_mem_io { WR_HDR; __be32 cmd; @@ -1406,4 +1438,409 @@ struct ulp_mem_io { #define ULP_MEMIO_DATA_LEN_S 0 #define ULP_MEMIO_DATA_LEN_V(x) ((x) << ULP_MEMIO_DATA_LEN_S) +#define ULPTX_NSGE_S 0 +#define ULPTX_NSGE_M 0xFFFF +#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S) +#define ULPTX_NSGE_G(x) (((x) >> ULPTX_NSGE_S) & ULPTX_NSGE_M) + +struct ulptx_sc_memrd { + __be32 cmd_to_len; + __be32 addr; +}; + +#define ULP_TXPKT_DATAMODIFY_S 23 +#define ULP_TXPKT_DATAMODIFY_M 0x1 +#define ULP_TXPKT_DATAMODIFY_V(x) ((x) << ULP_TXPKT_DATAMODIFY_S) +#define ULP_TXPKT_DATAMODIFY_G(x) \ + (((x) >> ULP_TXPKT_DATAMODIFY_S) & ULP_TXPKT_DATAMODIFY__M) +#define ULP_TXPKT_DATAMODIFY_F ULP_TXPKT_DATAMODIFY_V(1U) + +#define ULP_TXPKT_CHANNELID_S 22 +#define ULP_TXPKT_CHANNELID_M 0x1 +#define ULP_TXPKT_CHANNELID_V(x) ((x) << ULP_TXPKT_CHANNELID_S) +#define ULP_TXPKT_CHANNELID_G(x) \ + (((x) >> ULP_TXPKT_CHANNELID_S) & ULP_TXPKT_CHANNELID_M) +#define ULP_TXPKT_CHANNELID_F ULP_TXPKT_CHANNELID_V(1U) + +#define SCMD_SEQ_NO_CTRL_S 29 +#define SCMD_SEQ_NO_CTRL_M 0x3 +#define SCMD_SEQ_NO_CTRL_V(x) ((x) << SCMD_SEQ_NO_CTRL_S) +#define SCMD_SEQ_NO_CTRL_G(x) \ + (((x) >> SCMD_SEQ_NO_CTRL_S) & SCMD_SEQ_NO_CTRL_M) + +/* StsFieldPrsnt- Status field at the end of the TLS PDU */ +#define SCMD_STATUS_PRESENT_S 28 +#define SCMD_STATUS_PRESENT_M 0x1 +#define SCMD_STATUS_PRESENT_V(x) ((x) << SCMD_STATUS_PRESENT_S) +#define SCMD_STATUS_PRESENT_G(x) \ + (((x) >> SCMD_STATUS_PRESENT_S) & SCMD_STATUS_PRESENT_M) +#define SCMD_STATUS_PRESENT_F SCMD_STATUS_PRESENT_V(1U) + +/* ProtoVersion - Protocol Version 0: 1.2, 1:1.1, 2:DTLS, 3:Generic, + * 3-15: Reserved. + */ +#define SCMD_PROTO_VERSION_S 24 +#define SCMD_PROTO_VERSION_M 0xf +#define SCMD_PROTO_VERSION_V(x) ((x) << SCMD_PROTO_VERSION_S) +#define SCMD_PROTO_VERSION_G(x) \ + (((x) >> SCMD_PROTO_VERSION_S) & SCMD_PROTO_VERSION_M) + +/* EncDecCtrl - Encryption/Decryption Control. 0: Encrypt, 1: Decrypt */ +#define SCMD_ENC_DEC_CTRL_S 23 +#define SCMD_ENC_DEC_CTRL_M 0x1 +#define SCMD_ENC_DEC_CTRL_V(x) ((x) << SCMD_ENC_DEC_CTRL_S) +#define SCMD_ENC_DEC_CTRL_G(x) \ + (((x) >> SCMD_ENC_DEC_CTRL_S) & SCMD_ENC_DEC_CTRL_M) +#define SCMD_ENC_DEC_CTRL_F SCMD_ENC_DEC_CTRL_V(1U) + +/* CipherAuthSeqCtrl - Cipher Authentication Sequence Control. */ +#define SCMD_CIPH_AUTH_SEQ_CTRL_S 22 +#define SCMD_CIPH_AUTH_SEQ_CTRL_M 0x1 +#define SCMD_CIPH_AUTH_SEQ_CTRL_V(x) \ + ((x) << SCMD_CIPH_AUTH_SEQ_CTRL_S) +#define SCMD_CIPH_AUTH_SEQ_CTRL_G(x) \ + (((x) >> SCMD_CIPH_AUTH_SEQ_CTRL_S) & SCMD_CIPH_AUTH_SEQ_CTRL_M) +#define SCMD_CIPH_AUTH_SEQ_CTRL_F SCMD_CIPH_AUTH_SEQ_CTRL_V(1U) + +/* CiphMode - Cipher Mode. 0: NOP, 1:AES-CBC, 2:AES-GCM, 3:AES-CTR, + * 4:Generic-AES, 5-15: Reserved. + */ +#define SCMD_CIPH_MODE_S 18 +#define SCMD_CIPH_MODE_M 0xf +#define SCMD_CIPH_MODE_V(x) ((x) << SCMD_CIPH_MODE_S) +#define SCMD_CIPH_MODE_G(x) \ + (((x) >> SCMD_CIPH_MODE_S) & SCMD_CIPH_MODE_M) + +/* AuthMode - Auth Mode. 
0: NOP, 1:SHA1, 2:SHA2-224, 3:SHA2-256 + * 4-15: Reserved + */ +#define SCMD_AUTH_MODE_S 14 +#define SCMD_AUTH_MODE_M 0xf +#define SCMD_AUTH_MODE_V(x) ((x) << SCMD_AUTH_MODE_S) +#define SCMD_AUTH_MODE_G(x) \ + (((x) >> SCMD_AUTH_MODE_S) & SCMD_AUTH_MODE_M) + +/* HmacCtrl - HMAC Control. 0:NOP, 1:No truncation, 2:Support HMAC Truncation + * per RFC 4366, 3:IPSec 96 bits, 4-7:Reserved + */ +#define SCMD_HMAC_CTRL_S 11 +#define SCMD_HMAC_CTRL_M 0x7 +#define SCMD_HMAC_CTRL_V(x) ((x) << SCMD_HMAC_CTRL_S) +#define SCMD_HMAC_CTRL_G(x) \ + (((x) >> SCMD_HMAC_CTRL_S) & SCMD_HMAC_CTRL_M) + +/* IvSize - IV size in units of 2 bytes */ +#define SCMD_IV_SIZE_S 7 +#define SCMD_IV_SIZE_M 0xf +#define SCMD_IV_SIZE_V(x) ((x) << SCMD_IV_SIZE_S) +#define SCMD_IV_SIZE_G(x) \ + (((x) >> SCMD_IV_SIZE_S) & SCMD_IV_SIZE_M) + +/* NumIVs - Number of IVs */ +#define SCMD_NUM_IVS_S 0 +#define SCMD_NUM_IVS_M 0x7f +#define SCMD_NUM_IVS_V(x) ((x) << SCMD_NUM_IVS_S) +#define SCMD_NUM_IVS_G(x) \ + (((x) >> SCMD_NUM_IVS_S) & SCMD_NUM_IVS_M) + +/* EnbDbgId - If this is enabled upper 20 (63:44) bits if SeqNumber + * (below) are used as Cid (connection id for debug status), these + * bits are padded to zero for forming the 64 bit + * sequence number for TLS + */ +#define SCMD_ENB_DBGID_S 31 +#define SCMD_ENB_DBGID_M 0x1 +#define SCMD_ENB_DBGID_V(x) ((x) << SCMD_ENB_DBGID_S) +#define SCMD_ENB_DBGID_G(x) \ + (((x) >> SCMD_ENB_DBGID_S) & SCMD_ENB_DBGID_M) + +/* IV generation in SW. */ +#define SCMD_IV_GEN_CTRL_S 30 +#define SCMD_IV_GEN_CTRL_M 0x1 +#define SCMD_IV_GEN_CTRL_V(x) ((x) << SCMD_IV_GEN_CTRL_S) +#define SCMD_IV_GEN_CTRL_G(x) \ + (((x) >> SCMD_IV_GEN_CTRL_S) & SCMD_IV_GEN_CTRL_M) +#define SCMD_IV_GEN_CTRL_F SCMD_IV_GEN_CTRL_V(1U) + +/* More frags */ +#define SCMD_MORE_FRAGS_S 20 +#define SCMD_MORE_FRAGS_M 0x1 +#define SCMD_MORE_FRAGS_V(x) ((x) << SCMD_MORE_FRAGS_S) +#define SCMD_MORE_FRAGS_G(x) (((x) >> SCMD_MORE_FRAGS_S) & SCMD_MORE_FRAGS_M) + +/*last frag */ +#define SCMD_LAST_FRAG_S 19 +#define SCMD_LAST_FRAG_M 0x1 +#define SCMD_LAST_FRAG_V(x) ((x) << SCMD_LAST_FRAG_S) +#define SCMD_LAST_FRAG_G(x) (((x) >> SCMD_LAST_FRAG_S) & SCMD_LAST_FRAG_M) + +/* TlsCompPdu */ +#define SCMD_TLS_COMPPDU_S 18 +#define SCMD_TLS_COMPPDU_M 0x1 +#define SCMD_TLS_COMPPDU_V(x) ((x) << SCMD_TLS_COMPPDU_S) +#define SCMD_TLS_COMPPDU_G(x) (((x) >> SCMD_TLS_COMPPDU_S) & SCMD_TLS_COMPPDU_M) + +/* KeyCntxtInline - Key context inline after the scmd OR PayloadOnly*/ +#define SCMD_KEY_CTX_INLINE_S 17 +#define SCMD_KEY_CTX_INLINE_M 0x1 +#define SCMD_KEY_CTX_INLINE_V(x) ((x) << SCMD_KEY_CTX_INLINE_S) +#define SCMD_KEY_CTX_INLINE_G(x) \ + (((x) >> SCMD_KEY_CTX_INLINE_S) & SCMD_KEY_CTX_INLINE_M) +#define SCMD_KEY_CTX_INLINE_F SCMD_KEY_CTX_INLINE_V(1U) + +/* TLSFragEnable - 0: Host created TLS PDUs, 1: TLS Framgmentation in ASIC */ +#define SCMD_TLS_FRAG_ENABLE_S 16 +#define SCMD_TLS_FRAG_ENABLE_M 0x1 +#define SCMD_TLS_FRAG_ENABLE_V(x) ((x) << SCMD_TLS_FRAG_ENABLE_S) +#define SCMD_TLS_FRAG_ENABLE_G(x) \ + (((x) >> SCMD_TLS_FRAG_ENABLE_S) & SCMD_TLS_FRAG_ENABLE_M) +#define SCMD_TLS_FRAG_ENABLE_F SCMD_TLS_FRAG_ENABLE_V(1U) + +/* MacOnly - Only send the MAC and discard PDU. This is valid for hash only + * modes, in this case TLS_TX will drop the PDU and only + * send back the MAC bytes. 
+ */ +#define SCMD_MAC_ONLY_S 15 +#define SCMD_MAC_ONLY_M 0x1 +#define SCMD_MAC_ONLY_V(x) ((x) << SCMD_MAC_ONLY_S) +#define SCMD_MAC_ONLY_G(x) \ + (((x) >> SCMD_MAC_ONLY_S) & SCMD_MAC_ONLY_M) +#define SCMD_MAC_ONLY_F SCMD_MAC_ONLY_V(1U) + +/* AadIVDrop - Drop the AAD and IV fields. Useful in protocols + * which have complex AAD and IV formations Eg:AES-CCM + */ +#define SCMD_AADIVDROP_S 14 +#define SCMD_AADIVDROP_M 0x1 +#define SCMD_AADIVDROP_V(x) ((x) << SCMD_AADIVDROP_S) +#define SCMD_AADIVDROP_G(x) \ + (((x) >> SCMD_AADIVDROP_S) & SCMD_AADIVDROP_M) +#define SCMD_AADIVDROP_F SCMD_AADIVDROP_V(1U) + +/* HdrLength - Length of all headers excluding TLS header + * present before start of crypto PDU/payload. + */ +#define SCMD_HDR_LEN_S 0 +#define SCMD_HDR_LEN_M 0x3fff +#define SCMD_HDR_LEN_V(x) ((x) << SCMD_HDR_LEN_S) +#define SCMD_HDR_LEN_G(x) \ + (((x) >> SCMD_HDR_LEN_S) & SCMD_HDR_LEN_M) + +struct cpl_tx_sec_pdu { + __be32 op_ivinsrtofst; + __be32 pldlen; + __be32 aadstart_cipherstop_hi; + __be32 cipherstop_lo_authinsert; + __be32 seqno_numivs; + __be32 ivgen_hdrlen; + __be64 scmd1; +}; + +#define CPL_TX_SEC_PDU_OPCODE_S 24 +#define CPL_TX_SEC_PDU_OPCODE_M 0xff +#define CPL_TX_SEC_PDU_OPCODE_V(x) ((x) << CPL_TX_SEC_PDU_OPCODE_S) +#define CPL_TX_SEC_PDU_OPCODE_G(x) \ + (((x) >> CPL_TX_SEC_PDU_OPCODE_S) & CPL_TX_SEC_PDU_OPCODE_M) + +/* RX Channel Id */ +#define CPL_TX_SEC_PDU_RXCHID_S 22 +#define CPL_TX_SEC_PDU_RXCHID_M 0x1 +#define CPL_TX_SEC_PDU_RXCHID_V(x) ((x) << CPL_TX_SEC_PDU_RXCHID_S) +#define CPL_TX_SEC_PDU_RXCHID_G(x) \ + (((x) >> CPL_TX_SEC_PDU_RXCHID_S) & CPL_TX_SEC_PDU_RXCHID_M) +#define CPL_TX_SEC_PDU_RXCHID_F CPL_TX_SEC_PDU_RXCHID_V(1U) + +/* Ack Follows */ +#define CPL_TX_SEC_PDU_ACKFOLLOWS_S 21 +#define CPL_TX_SEC_PDU_ACKFOLLOWS_M 0x1 +#define CPL_TX_SEC_PDU_ACKFOLLOWS_V(x) ((x) << CPL_TX_SEC_PDU_ACKFOLLOWS_S) +#define CPL_TX_SEC_PDU_ACKFOLLOWS_G(x) \ + (((x) >> CPL_TX_SEC_PDU_ACKFOLLOWS_S) & CPL_TX_SEC_PDU_ACKFOLLOWS_M) +#define CPL_TX_SEC_PDU_ACKFOLLOWS_F CPL_TX_SEC_PDU_ACKFOLLOWS_V(1U) + +/* Loopback bit in cpl_tx_sec_pdu */ +#define CPL_TX_SEC_PDU_ULPTXLPBK_S 20 +#define CPL_TX_SEC_PDU_ULPTXLPBK_M 0x1 +#define CPL_TX_SEC_PDU_ULPTXLPBK_V(x) ((x) << CPL_TX_SEC_PDU_ULPTXLPBK_S) +#define CPL_TX_SEC_PDU_ULPTXLPBK_G(x) \ + (((x) >> CPL_TX_SEC_PDU_ULPTXLPBK_S) & CPL_TX_SEC_PDU_ULPTXLPBK_M) +#define CPL_TX_SEC_PDU_ULPTXLPBK_F CPL_TX_SEC_PDU_ULPTXLPBK_V(1U) + +/* Length of cpl header encapsulated */ +#define CPL_TX_SEC_PDU_CPLLEN_S 16 +#define CPL_TX_SEC_PDU_CPLLEN_M 0xf +#define CPL_TX_SEC_PDU_CPLLEN_V(x) ((x) << CPL_TX_SEC_PDU_CPLLEN_S) +#define CPL_TX_SEC_PDU_CPLLEN_G(x) \ + (((x) >> CPL_TX_SEC_PDU_CPLLEN_S) & CPL_TX_SEC_PDU_CPLLEN_M) + +/* PlaceHolder */ +#define CPL_TX_SEC_PDU_PLACEHOLDER_S 10 +#define CPL_TX_SEC_PDU_PLACEHOLDER_M 0x1 +#define CPL_TX_SEC_PDU_PLACEHOLDER_V(x) ((x) << CPL_TX_SEC_PDU_PLACEHOLDER_S) +#define CPL_TX_SEC_PDU_PLACEHOLDER_G(x) \ + (((x) >> CPL_TX_SEC_PDU_PLACEHOLDER_S) & \ + CPL_TX_SEC_PDU_PLACEHOLDER_M) + +/* IvInsrtOffset: Insertion location for IV */ +#define CPL_TX_SEC_PDU_IVINSRTOFST_S 0 +#define CPL_TX_SEC_PDU_IVINSRTOFST_M 0x3ff +#define CPL_TX_SEC_PDU_IVINSRTOFST_V(x) ((x) << CPL_TX_SEC_PDU_IVINSRTOFST_S) +#define CPL_TX_SEC_PDU_IVINSRTOFST_G(x) \ + (((x) >> CPL_TX_SEC_PDU_IVINSRTOFST_S) & \ + CPL_TX_SEC_PDU_IVINSRTOFST_M) + +/* AadStartOffset: Offset in bytes for AAD start from + * the first byte following the pkt headers (0-255 bytes) + */ +#define CPL_TX_SEC_PDU_AADSTART_S 24 +#define CPL_TX_SEC_PDU_AADSTART_M 0xff +#define 
CPL_TX_SEC_PDU_AADSTART_V(x) ((x) << CPL_TX_SEC_PDU_AADSTART_S) +#define CPL_TX_SEC_PDU_AADSTART_G(x) \ + (((x) >> CPL_TX_SEC_PDU_AADSTART_S) & \ + CPL_TX_SEC_PDU_AADSTART_M) + +/* AadStopOffset: offset in bytes for AAD stop/end from the first byte following + * the pkt headers (0-511 bytes) + */ +#define CPL_TX_SEC_PDU_AADSTOP_S 15 +#define CPL_TX_SEC_PDU_AADSTOP_M 0x1ff +#define CPL_TX_SEC_PDU_AADSTOP_V(x) ((x) << CPL_TX_SEC_PDU_AADSTOP_S) +#define CPL_TX_SEC_PDU_AADSTOP_G(x) \ + (((x) >> CPL_TX_SEC_PDU_AADSTOP_S) & CPL_TX_SEC_PDU_AADSTOP_M) + +/* CipherStartOffset: offset in bytes for encryption/decryption start from the + * first byte following the pkt headers (0-1023 bytes) + */ +#define CPL_TX_SEC_PDU_CIPHERSTART_S 5 +#define CPL_TX_SEC_PDU_CIPHERSTART_M 0x3ff +#define CPL_TX_SEC_PDU_CIPHERSTART_V(x) ((x) << CPL_TX_SEC_PDU_CIPHERSTART_S) +#define CPL_TX_SEC_PDU_CIPHERSTART_G(x) \ + (((x) >> CPL_TX_SEC_PDU_CIPHERSTART_S) & \ + CPL_TX_SEC_PDU_CIPHERSTART_M) + +/* CipherStopOffset: offset in bytes for encryption/decryption end + * from end of the payload of this command (0-511 bytes) + */ +#define CPL_TX_SEC_PDU_CIPHERSTOP_HI_S 0 +#define CPL_TX_SEC_PDU_CIPHERSTOP_HI_M 0x1f +#define CPL_TX_SEC_PDU_CIPHERSTOP_HI_V(x) \ + ((x) << CPL_TX_SEC_PDU_CIPHERSTOP_HI_S) +#define CPL_TX_SEC_PDU_CIPHERSTOP_HI_G(x) \ + (((x) >> CPL_TX_SEC_PDU_CIPHERSTOP_HI_S) & \ + CPL_TX_SEC_PDU_CIPHERSTOP_HI_M) + +#define CPL_TX_SEC_PDU_CIPHERSTOP_LO_S 28 +#define CPL_TX_SEC_PDU_CIPHERSTOP_LO_M 0xf +#define CPL_TX_SEC_PDU_CIPHERSTOP_LO_V(x) \ + ((x) << CPL_TX_SEC_PDU_CIPHERSTOP_LO_S) +#define CPL_TX_SEC_PDU_CIPHERSTOP_LO_G(x) \ + (((x) >> CPL_TX_SEC_PDU_CIPHERSTOP_LO_S) & \ + CPL_TX_SEC_PDU_CIPHERSTOP_LO_M) + +/* AuthStartOffset: offset in bytes for authentication start from + * the first byte following the pkt headers (0-1023) + */ +#define CPL_TX_SEC_PDU_AUTHSTART_S 18 +#define CPL_TX_SEC_PDU_AUTHSTART_M 0x3ff +#define CPL_TX_SEC_PDU_AUTHSTART_V(x) ((x) << CPL_TX_SEC_PDU_AUTHSTART_S) +#define CPL_TX_SEC_PDU_AUTHSTART_G(x) \ + (((x) >> CPL_TX_SEC_PDU_AUTHSTART_S) & \ + CPL_TX_SEC_PDU_AUTHSTART_M) + +/* AuthStopOffset: offset in bytes for authentication + * end from end of the payload of this command (0-511 Bytes) + */ +#define CPL_TX_SEC_PDU_AUTHSTOP_S 9 +#define CPL_TX_SEC_PDU_AUTHSTOP_M 0x1ff +#define CPL_TX_SEC_PDU_AUTHSTOP_V(x) ((x) << CPL_TX_SEC_PDU_AUTHSTOP_S) +#define CPL_TX_SEC_PDU_AUTHSTOP_G(x) \ + (((x) >> CPL_TX_SEC_PDU_AUTHSTOP_S) & \ + CPL_TX_SEC_PDU_AUTHSTOP_M) + +/* AuthInsrtOffset: offset in bytes for authentication insertion + * from end of the payload of this command (0-511 bytes) + */ +#define CPL_TX_SEC_PDU_AUTHINSERT_S 0 +#define CPL_TX_SEC_PDU_AUTHINSERT_M 0x1ff +#define CPL_TX_SEC_PDU_AUTHINSERT_V(x) ((x) << CPL_TX_SEC_PDU_AUTHINSERT_S) +#define CPL_TX_SEC_PDU_AUTHINSERT_G(x) \ + (((x) >> CPL_TX_SEC_PDU_AUTHINSERT_S) & \ + CPL_TX_SEC_PDU_AUTHINSERT_M) + +struct cpl_rx_phys_dsgl { + __be32 op_to_tid; + __be32 pcirlxorder_to_noofsgentr; + struct rss_header rss_hdr_int; +}; + +#define CPL_RX_PHYS_DSGL_OPCODE_S 24 +#define CPL_RX_PHYS_DSGL_OPCODE_M 0xff +#define CPL_RX_PHYS_DSGL_OPCODE_V(x) ((x) << CPL_RX_PHYS_DSGL_OPCODE_S) +#define CPL_RX_PHYS_DSGL_OPCODE_G(x) \ + (((x) >> CPL_RX_PHYS_DSGL_OPCODE_S) & CPL_RX_PHYS_DSGL_OPCODE_M) + +#define CPL_RX_PHYS_DSGL_ISRDMA_S 23 +#define CPL_RX_PHYS_DSGL_ISRDMA_M 0x1 +#define CPL_RX_PHYS_DSGL_ISRDMA_V(x) ((x) << CPL_RX_PHYS_DSGL_ISRDMA_S) +#define CPL_RX_PHYS_DSGL_ISRDMA_G(x) \ + (((x) >> CPL_RX_PHYS_DSGL_ISRDMA_S) & CPL_RX_PHYS_DSGL_ISRDMA_M) +#define 
CPL_RX_PHYS_DSGL_ISRDMA_F CPL_RX_PHYS_DSGL_ISRDMA_V(1U) + +#define CPL_RX_PHYS_DSGL_RSVD1_S 20 +#define CPL_RX_PHYS_DSGL_RSVD1_M 0x7 +#define CPL_RX_PHYS_DSGL_RSVD1_V(x) ((x) << CPL_RX_PHYS_DSGL_RSVD1_S) +#define CPL_RX_PHYS_DSGL_RSVD1_G(x) \ + (((x) >> CPL_RX_PHYS_DSGL_RSVD1_S) & \ + CPL_RX_PHYS_DSGL_RSVD1_M) + +#define CPL_RX_PHYS_DSGL_PCIRLXORDER_S 31 +#define CPL_RX_PHYS_DSGL_PCIRLXORDER_M 0x1 +#define CPL_RX_PHYS_DSGL_PCIRLXORDER_V(x) \ + ((x) << CPL_RX_PHYS_DSGL_PCIRLXORDER_S) +#define CPL_RX_PHYS_DSGL_PCIRLXORDER_G(x) \ + (((x) >> CPL_RX_PHYS_DSGL_PCIRLXORDER_S) & \ + CPL_RX_PHYS_DSGL_PCIRLXORDER_M) +#define CPL_RX_PHYS_DSGL_PCIRLXORDER_F CPL_RX_PHYS_DSGL_PCIRLXORDER_V(1U) + +#define CPL_RX_PHYS_DSGL_PCINOSNOOP_S 30 +#define CPL_RX_PHYS_DSGL_PCINOSNOOP_M 0x1 +#define CPL_RX_PHYS_DSGL_PCINOSNOOP_V(x) \ + ((x) << CPL_RX_PHYS_DSGL_PCINOSNOOP_S) +#define CPL_RX_PHYS_DSGL_PCINOSNOOP_G(x) \ + (((x) >> CPL_RX_PHYS_DSGL_PCINOSNOOP_S) & \ + CPL_RX_PHYS_DSGL_PCINOSNOOP_M) + +#define CPL_RX_PHYS_DSGL_PCINOSNOOP_F CPL_RX_PHYS_DSGL_PCINOSNOOP_V(1U) + +#define CPL_RX_PHYS_DSGL_PCITPHNTENB_S 29 +#define CPL_RX_PHYS_DSGL_PCITPHNTENB_M 0x1 +#define CPL_RX_PHYS_DSGL_PCITPHNTENB_V(x) \ + ((x) << CPL_RX_PHYS_DSGL_PCITPHNTENB_S) +#define CPL_RX_PHYS_DSGL_PCITPHNTENB_G(x) \ + (((x) >> CPL_RX_PHYS_DSGL_PCITPHNTENB_S) & \ + CPL_RX_PHYS_DSGL_PCITPHNTENB_M) +#define CPL_RX_PHYS_DSGL_PCITPHNTENB_F CPL_RX_PHYS_DSGL_PCITPHNTENB_V(1U) + +#define CPL_RX_PHYS_DSGL_PCITPHNT_S 27 +#define CPL_RX_PHYS_DSGL_PCITPHNT_M 0x3 +#define CPL_RX_PHYS_DSGL_PCITPHNT_V(x) ((x) << CPL_RX_PHYS_DSGL_PCITPHNT_S) +#define CPL_RX_PHYS_DSGL_PCITPHNT_G(x) \ + (((x) >> CPL_RX_PHYS_DSGL_PCITPHNT_S) & \ + CPL_RX_PHYS_DSGL_PCITPHNT_M) + +#define CPL_RX_PHYS_DSGL_DCAID_S 16 +#define CPL_RX_PHYS_DSGL_DCAID_M 0x7ff +#define CPL_RX_PHYS_DSGL_DCAID_V(x) ((x) << CPL_RX_PHYS_DSGL_DCAID_S) +#define CPL_RX_PHYS_DSGL_DCAID_G(x) \ + (((x) >> CPL_RX_PHYS_DSGL_DCAID_S) & \ + CPL_RX_PHYS_DSGL_DCAID_M) + +#define CPL_RX_PHYS_DSGL_NOOFSGENTR_S 0 +#define CPL_RX_PHYS_DSGL_NOOFSGENTR_M 0xffff +#define CPL_RX_PHYS_DSGL_NOOFSGENTR_V(x) \ + ((x) << CPL_RX_PHYS_DSGL_NOOFSGENTR_S) +#define CPL_RX_PHYS_DSGL_NOOFSGENTR_G(x) \ + (((x) >> CPL_RX_PHYS_DSGL_NOOFSGENTR_S) & \ + CPL_RX_PHYS_DSGL_NOOFSGENTR_M) + #endif /* __T4_MSG_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index a89b30720e38..ffe4bf4b96da 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -1,7 +1,7 @@ /* * This file is part of the Chelsio T4 Ethernet driver for Linux. * - * Copyright (c) 2009-2014 Chelsio Communications, Inc. All rights reserved. + * Copyright (c) 2009-2016 Chelsio Communications, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -102,6 +102,7 @@ enum fw_wr_opcodes { FW_RI_FR_NSMR_WR = 0x19, FW_RI_INV_LSTAG_WR = 0x1a, FW_ISCSI_TX_DATA_WR = 0x45, + FW_CRYPTO_LOOKASIDE_WR = 0X6d, FW_LASTC2E_WR = 0x70 }; @@ -680,6 +681,7 @@ enum fw_cmd_opcodes { FW_RSS_IND_TBL_CMD = 0x20, FW_RSS_GLB_CONFIG_CMD = 0x22, FW_RSS_VI_CONFIG_CMD = 0x23, + FW_SCHED_CMD = 0x24, FW_DEVLOG_CMD = 0x25, FW_CLIP_CMD = 0x28, FW_LASTC2E_CMD = 0x40, @@ -1060,7 +1062,7 @@ struct fw_caps_config_cmd { __be16 niccaps; __be16 ofldcaps; __be16 rdmacaps; - __be16 r4; + __be16 cryptocaps; __be16 iscsicaps; __be16 fcoecaps; __be32 cfcsum; @@ -2961,6 +2963,41 @@ struct fw_rss_vi_config_cmd { #define FW_RSS_VI_CONFIG_CMD_UDPEN_V(x) ((x) << FW_RSS_VI_CONFIG_CMD_UDPEN_S) #define FW_RSS_VI_CONFIG_CMD_UDPEN_F FW_RSS_VI_CONFIG_CMD_UDPEN_V(1U) +enum fw_sched_sc { + FW_SCHED_SC_PARAMS = 1, +}; + +struct fw_sched_cmd { + __be32 op_to_write; + __be32 retval_len16; + union fw_sched { + struct fw_sched_config { + __u8 sc; + __u8 type; + __u8 minmaxen; + __u8 r3[5]; + __u8 nclasses[4]; + __be32 r4; + } config; + struct fw_sched_params { + __u8 sc; + __u8 type; + __u8 level; + __u8 mode; + __u8 unit; + __u8 rate; + __u8 ch; + __u8 cl; + __be32 min; + __be32 max; + __be16 weight; + __be16 pktsize; + __be16 burstsize; + __be16 r4; + } params; + } u; +}; + struct fw_clip_cmd { __be32 op_to_write; __be32 alloc_to_len16; @@ -3249,4 +3286,127 @@ struct fw_devlog_cmd { #define PCIE_FW_PF_DEVLOG_MEMTYPE_G(x) \ (((x) >> PCIE_FW_PF_DEVLOG_MEMTYPE_S) & PCIE_FW_PF_DEVLOG_MEMTYPE_M) +#define MAX_IMM_OFLD_TX_DATA_WR_LEN (0xff + sizeof(struct fw_ofld_tx_data_wr)) + +struct fw_crypto_lookaside_wr { + __be32 op_to_cctx_size; + __be32 len16_pkd; + __be32 session_id; + __be32 rx_chid_to_rx_q_id; + __be32 key_addr; + __be32 pld_size_hash_size; + __be64 cookie; +}; + +#define FW_CRYPTO_LOOKASIDE_WR_OPCODE_S 24 +#define FW_CRYPTO_LOOKASIDE_WR_OPCODE_M 0xff +#define FW_CRYPTO_LOOKASIDE_WR_OPCODE_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_OPCODE_S) +#define FW_CRYPTO_LOOKASIDE_WR_OPCODE_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_OPCODE_S) & \ + FW_CRYPTO_LOOKASIDE_WR_OPCODE_M) + +#define FW_CRYPTO_LOOKASIDE_WR_COMPL_S 23 +#define FW_CRYPTO_LOOKASIDE_WR_COMPL_M 0x1 +#define FW_CRYPTO_LOOKASIDE_WR_COMPL_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_COMPL_S) +#define FW_CRYPTO_LOOKASIDE_WR_COMPL_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_COMPL_S) & \ + FW_CRYPTO_LOOKASIDE_WR_COMPL_M) +#define FW_CRYPTO_LOOKASIDE_WR_COMPL_F FW_CRYPTO_LOOKASIDE_WR_COMPL_V(1U) + +#define FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_S 15 +#define FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_M 0xff +#define FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_S) +#define FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_S) & \ + FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_M) + +#define FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_S 5 +#define FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_M 0x3 +#define FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_S) +#define FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_S) & \ + FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_M) + +#define FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_S 0 +#define FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_M 0x1f +#define FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_S) +#define FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_S) & \ + FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_M) + +#define FW_CRYPTO_LOOKASIDE_WR_LEN16_S 0 
+#define FW_CRYPTO_LOOKASIDE_WR_LEN16_M 0xff +#define FW_CRYPTO_LOOKASIDE_WR_LEN16_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_LEN16_S) +#define FW_CRYPTO_LOOKASIDE_WR_LEN16_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_LEN16_S) & \ + FW_CRYPTO_LOOKASIDE_WR_LEN16_M) + +#define FW_CRYPTO_LOOKASIDE_WR_RX_CHID_S 29 +#define FW_CRYPTO_LOOKASIDE_WR_RX_CHID_M 0x3 +#define FW_CRYPTO_LOOKASIDE_WR_RX_CHID_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_RX_CHID_S) +#define FW_CRYPTO_LOOKASIDE_WR_RX_CHID_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_RX_CHID_S) & \ + FW_CRYPTO_LOOKASIDE_WR_RX_CHID_M) + +#define FW_CRYPTO_LOOKASIDE_WR_LCB_S 27 +#define FW_CRYPTO_LOOKASIDE_WR_LCB_M 0x3 +#define FW_CRYPTO_LOOKASIDE_WR_LCB_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_LCB_S) +#define FW_CRYPTO_LOOKASIDE_WR_LCB_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_LCB_S) & FW_CRYPTO_LOOKASIDE_WR_LCB_M) + +#define FW_CRYPTO_LOOKASIDE_WR_PHASH_S 25 +#define FW_CRYPTO_LOOKASIDE_WR_PHASH_M 0x3 +#define FW_CRYPTO_LOOKASIDE_WR_PHASH_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_PHASH_S) +#define FW_CRYPTO_LOOKASIDE_WR_PHASH_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_PHASH_S) & \ + FW_CRYPTO_LOOKASIDE_WR_PHASH_M) + +#define FW_CRYPTO_LOOKASIDE_WR_IV_S 23 +#define FW_CRYPTO_LOOKASIDE_WR_IV_M 0x3 +#define FW_CRYPTO_LOOKASIDE_WR_IV_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_IV_S) +#define FW_CRYPTO_LOOKASIDE_WR_IV_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_IV_S) & FW_CRYPTO_LOOKASIDE_WR_IV_M) + +#define FW_CRYPTO_LOOKASIDE_WR_TX_CH_S 10 +#define FW_CRYPTO_LOOKASIDE_WR_TX_CH_M 0x3 +#define FW_CRYPTO_LOOKASIDE_WR_TX_CH_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_TX_CH_S) +#define FW_CRYPTO_LOOKASIDE_WR_TX_CH_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_TX_CH_S) & \ + FW_CRYPTO_LOOKASIDE_WR_TX_CH_M) + +#define FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_S 0 +#define FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_M 0x3ff +#define FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_S) +#define FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_S) & \ + FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_M) + +#define FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_S 24 +#define FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_M 0xff +#define FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_S) +#define FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_S) & \ + FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_M) + +#define FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_S 17 +#define FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_M 0x7f +#define FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_S) +#define FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_S) & \ + FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_M) + #endif /* _T4FW_INTERFACE_H_ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index e116bb8d1729..100b2cc064a3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -2378,7 +2378,7 @@ static void size_nports_qsets(struct adapter *adapter) */ pmask_nports = hweight32(adapter->params.vfres.pmask); if (pmask_nports < adapter->params.nports) { - dev_warn(adapter->pdev_dev, "only using %d of %d provissioned" + dev_warn(adapter->pdev_dev, "only using %d of %d provisioned" " virtual interfaces; limited by Port Access Rights" " mask %#x\n", pmask_nports, adapter->params.nports, adapter->params.vfres.pmask); @@ -2777,6 +2777,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, struct adapter 
*adapter; struct port_info *pi; struct net_device *netdev; + unsigned int pf; /* * Print our driver banner the first time we're called to initialize a @@ -2903,8 +2904,11 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, * Allocate our "adapter ports" and stitch everything together. */ pmask = adapter->params.vfres.pmask; + pf = t4vf_get_pf_from_vf(adapter); for_each_port(adapter, pidx) { int port_id, viid; + u8 mac[ETH_ALEN]; + unsigned int naddr = 1; /* * We simplistically allocate our virtual interfaces @@ -2975,6 +2979,26 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, pidx); goto err_free_dev; } + + err = t4vf_get_vf_mac_acl(adapter, pf, &naddr, mac); + if (err) { + dev_err(&pdev->dev, + "unable to determine MAC ACL address, " + "continuing anyway.. (status %d)\n", err); + } else if (naddr && adapter->params.vfres.nvi == 1) { + struct sockaddr addr; + + ether_addr_copy(addr.sa_data, mac); + err = cxgb4vf_set_mac_addr(netdev, &addr); + if (err) { + dev_err(&pdev->dev, + "unable to set MAC address %pM\n", + mac); + goto err_free_dev; + } + dev_info(&pdev->dev, + "Using assigned MAC ACL: %pM\n", mac); + } } /* See what interrupts we'll be using. If we've been configured to diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index 8ee541431e8b..8067424ad4a8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -347,6 +347,7 @@ int t4vf_bar2_sge_qregs(struct adapter *adapter, u64 *pbar2_qoffset, unsigned int *pbar2_qid); +unsigned int t4vf_get_pf_from_vf(struct adapter *); int t4vf_get_sge_params(struct adapter *); int t4vf_get_vpd_params(struct adapter *); int t4vf_get_dev_params(struct adapter *); @@ -381,5 +382,7 @@ int t4vf_eth_eq_free(struct adapter *, unsigned int); int t4vf_handle_fw_rpl(struct adapter *, const __be64 *); int t4vf_prep_adapter(struct adapter *); +int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf, + unsigned int *naddr, u8 *addr); #endif /* __T4VF_COMMON_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 427bfa71388b..879f4c52b3d5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -639,6 +639,15 @@ int t4vf_bar2_sge_qregs(struct adapter *adapter, return 0; } +unsigned int t4vf_get_pf_from_vf(struct adapter *adapter) +{ + u32 whoami; + + whoami = t4_read_reg(adapter, T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A); + return (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ? + SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami)); +} + /** * t4vf_get_sge_params - retrieve adapter Scatter gather Engine parameters * @adapter: the adapter @@ -716,7 +725,6 @@ int t4vf_get_sge_params(struct adapter *adapter) * read. */ if (!is_t4(adapter->params.chip)) { - u32 whoami; unsigned int pf, s_hps, s_qpp; params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | @@ -740,11 +748,7 @@ int t4vf_get_sge_params(struct adapter *adapter) * register we just read. Do it once here so other code in * the driver can just use it. */ - whoami = t4_read_reg(adapter, - T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A); - pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ? 
- SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami); - + pf = t4vf_get_pf_from_vf(adapter); s_hps = (HOSTPAGESIZEPF0_S + (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf); sge_params->sge_vf_hps = @@ -1807,3 +1811,50 @@ int t4vf_prep_adapter(struct adapter *adapter) return 0; } + +/** + * t4vf_get_vf_mac_acl - Get the MAC address to be set to + * the VI of this VF. + * @adapter: The adapter + * @pf: The pf associated with vf + * @naddr: the number of ACL MAC addresses returned in addr + * @addr: Placeholder for MAC addresses + * + * Find the MAC address to be set to the VF's VI. The requested MAC address + * is from the host OS via callback in the PF driver. + */ +int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf, + unsigned int *naddr, u8 *addr) +{ + struct fw_acl_mac_cmd cmd; + int ret; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_MAC_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); + cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd)); + ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &cmd); + if (ret) + return ret; + + if (cmd.nmac < *naddr) + *naddr = cmd.nmac; + + switch (pf) { + case 3: + memcpy(addr, cmd.macaddr3, sizeof(cmd.macaddr3)); + break; + case 2: + memcpy(addr, cmd.macaddr2, sizeof(cmd.macaddr2)); + break; + case 1: + memcpy(addr, cmd.macaddr1, sizeof(cmd.macaddr1)); + break; + case 0: + memcpy(addr, cmd.macaddr0, sizeof(cmd.macaddr0)); + break; + } + + return ret; +} diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index f0e9e2ef62a0..6620fc861c47 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -1966,7 +1966,7 @@ SetMulticastFilter(struct net_device *dev) } else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */ netdev_for_each_mc_addr(ha, dev) { crc = ether_crc_le(ETH_ALEN, ha->addr); - hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */ + hashcode = crc & DE4X5_HASH_BITS; /* hashcode is 9 LSb of CRC */ byte = hashcode >> 3; /* bit[3-8] -> byte in filter */ bit = 1 << (hashcode & 0x07);/* bit[0-2] -> bit in byte */ @@ -5043,7 +5043,7 @@ build_setup_frame(struct net_device *dev, int mode) *(pa + i) = dev->dev_addr[i]; /* Host address */ if (i & 0x01) pa += 2; } - *(lp->setup_frame + (HASH_TABLE_LEN >> 3) - 3) = 0x80; + *(lp->setup_frame + (DE4X5_HASH_TABLE_LEN >> 3) - 3) = 0x80; } else { for (i=0; i<ETH_ALEN; i++) { /* Host address */ *(pa + (i&1)) = dev->dev_addr[i]; diff --git a/drivers/net/ethernet/dec/tulip/de4x5.h b/drivers/net/ethernet/dec/tulip/de4x5.h index ec756eba397b..1bfdc9b117f6 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.h +++ b/drivers/net/ethernet/dec/tulip/de4x5.h @@ -860,8 +860,8 @@ #define PCI 0 #define EISA 1 -#define HASH_TABLE_LEN 512 /* Bits */ -#define HASH_BITS 0x01ff /* 9 LS bits */ +#define DE4X5_HASH_TABLE_LEN 512 /* Bits */ +#define DE4X5_HASH_BITS 0x01ff /* 9 LS bits */ #define SETUP_FRAME_LEN 192 /* Bytes */ #define IMPERF_PA_OFFSET 156 /* Bytes */ diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 4555e041ef69..86780b5c40ef 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -508,6 +508,10 @@ struct be_wrb_params { u16 lso_mss; /* MSS for LSO */ }; +struct be_eth_addr { + unsigned char mac[ETH_ALEN]; +}; + struct be_adapter { struct pci_dev *pdev; struct net_device *netdev; @@ -523,7 +527,7 @@ struct be_adapter { struct be_dma_mem mbox_mem_alloced; struct be_mcc_obj mcc_obj; - spinlock_t 
mcc_lock; /* For serializing mcc cmds to BE card */ + struct mutex mcc_lock; /* For serializing mcc cmds to BE card */ spinlock_t mcc_cq_lock; u16 cfg_num_rx_irqs; /* configured via set-channels */ @@ -570,9 +574,15 @@ struct be_adapter { int if_handle; /* Used to configure filtering */ u32 if_flags; /* Interface filtering flags */ u32 *pmac_id; /* MAC addr handle used by BE card */ + struct be_eth_addr *uc_list;/* list of uc-addrs programmed (not perm) */ u32 uc_macs; /* Count of secondary UC MAC programmed */ + struct be_eth_addr *mc_list;/* list of mcast addrs programmed */ + u32 mc_count; unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)]; u16 vlans_added; + bool update_uc_list; + bool update_mc_list; + struct mutex rx_filter_lock;/* For protecting vids[] & mc/uc_list[] */ u32 beacon_state; /* for set_phys_id */ @@ -626,6 +636,15 @@ struct be_adapter { u8 phy_state; /* state of sfp optics (functional, faulted, etc.,) */ }; +/* Used for defered FW config cmds. Add fields to this struct as reqd */ +struct be_cmd_work { + struct work_struct work; + struct be_adapter *adapter; + union { + __be16 vxlan_port; + } info; +}; + #define be_physfn(adapter) (!adapter->virtfn) #define be_virtfn(adapter) (adapter->virtfn) #define sriov_enabled(adapter) (adapter->flags & \ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 2cc11756859f..fa11a5a8c354 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -571,7 +571,7 @@ int be_process_mcc(struct be_adapter *adapter) /* Wait till no more pending mcc requests are present */ static int be_mcc_wait_compl(struct be_adapter *adapter) { -#define mcc_timeout 120000 /* 12s timeout */ +#define mcc_timeout 12000 /* 12s timeout */ int i, status = 0; struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; @@ -585,7 +585,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter) if (atomic_read(&mcc_obj->q.used) == 0) break; - udelay(100); + usleep_range(500, 1000); } if (i == mcc_timeout) { dev_err(&adapter->pdev->dev, "FW not responding\n"); @@ -863,7 +863,7 @@ static bool use_mcc(struct be_adapter *adapter) static int be_cmd_lock(struct be_adapter *adapter) { if (use_mcc(adapter)) { - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); return 0; } else { return mutex_lock_interruptible(&adapter->mbox_lock); @@ -874,7 +874,7 @@ static int be_cmd_lock(struct be_adapter *adapter) static void be_cmd_unlock(struct be_adapter *adapter) { if (use_mcc(adapter)) - spin_unlock_bh(&adapter->mcc_lock); + return mutex_unlock(&adapter->mcc_lock); else return mutex_unlock(&adapter->mbox_lock); } @@ -1044,7 +1044,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, struct be_cmd_req_mac_query *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1073,7 +1073,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, } err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -1085,7 +1085,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, struct be_cmd_req_pmac_add *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1110,7 +1110,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, } err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); if (status == 
MCC_STATUS_UNAUTHORIZED_REQUEST) status = -EPERM; @@ -1128,7 +1128,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom) if (pmac_id == -1) return 0; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1148,7 +1148,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom) status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -1411,7 +1411,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter, struct be_dma_mem *q_mem = &rxq->dma_mem; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1441,7 +1441,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter, } err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -1505,7 +1505,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q) struct be_cmd_req_q_destroy *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1522,7 +1522,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q) q->created = false; err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -1590,7 +1590,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) struct be_cmd_req_hdr *hdr; int status = 0; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1618,7 +1618,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) adapter->stats_cmd_sent = true; err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -1634,7 +1634,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter, CMD_SUBSYSTEM_ETH)) return -EPERM; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1657,7 +1657,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter, adapter->stats_cmd_sent = true; err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -1694,7 +1694,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed, struct be_cmd_req_link_status *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); if (link_status) *link_status = LINK_DOWN; @@ -1733,7 +1733,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed, } err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -1744,7 +1744,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter) struct be_cmd_req_get_cntl_addnl_attribs *req; int status = 0; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1759,7 +1759,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter) status = be_mcc_notify(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -1808,7 +1808,7 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf) if (!get_fat_cmd.va) return -ENOMEM; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); while (total_size) { buf_size = min(total_size, (u32)60*1024); @@ -1848,7 +1848,7 @@ int 
be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf) err: dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size, get_fat_cmd.va, get_fat_cmd.dma); - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -1859,7 +1859,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter) struct be_cmd_req_get_fw_version *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1882,7 +1882,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter) sizeof(adapter->fw_on_flash)); } err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -1896,7 +1896,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter, struct be_cmd_req_modify_eq_delay *req; int status = 0, i; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1919,7 +1919,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter, status = be_mcc_notify(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -1946,7 +1946,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, struct be_cmd_req_vlan_config *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1968,7 +1968,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -1979,7 +1979,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) struct be_cmd_req_rx_filter *req = mem->va; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1996,8 +1996,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) req->if_flags = (value == ON) ? 
req->if_flags_mask : 0; if (flags & BE_IF_FLAGS_MULTICAST) { - struct netdev_hw_addr *ha; - int i = 0; + int i; /* Reset mcast promisc mode if already set by setting mask * and not setting flags field @@ -2005,14 +2004,15 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) req->if_flags_mask |= cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS & be_if_cap_flags(adapter)); - req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev)); - netdev_for_each_mc_addr(ha, adapter->netdev) - memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN); + req->mcast_num = cpu_to_le32(adapter->mc_count); + for (i = 0; i < adapter->mc_count; i++) + ether_addr_copy(req->mcast_mac[i].byte, + adapter->mc_list[i].mac); } status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -2043,7 +2043,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc) CMD_SUBSYSTEM_COMMON)) return -EPERM; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2063,7 +2063,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc) status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED) return -EOPNOTSUPP; @@ -2082,7 +2082,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc) CMD_SUBSYSTEM_COMMON)) return -EPERM; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2105,7 +2105,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc) } err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -2186,7 +2186,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS)) return 0; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2211,7 +2211,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -2223,7 +2223,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, struct be_cmd_req_enable_disable_beacon *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2244,7 +2244,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -2255,7 +2255,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state) struct be_cmd_req_get_beacon_state *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2279,7 +2279,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state) } err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -2303,7 +2303,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, return -ENOMEM; } - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2325,7 +2325,7 @@ int 
be_cmd_read_port_transceiver_data(struct be_adapter *adapter, memcpy(data, resp->page_data, PAGE_DATA_LEN); } err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); return status; } @@ -2342,7 +2342,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter, void *ctxt = NULL; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); adapter->flash_status = 0; wrb = wrb_from_mccq(adapter); @@ -2384,7 +2384,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter, if (status) goto err_unlock; - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); if (!wait_for_completion_timeout(&adapter->et_cmd_compl, msecs_to_jiffies(60000))) @@ -2403,7 +2403,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter, return status; err_unlock: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -2457,7 +2457,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter, struct be_mcc_wrb *wrb; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2475,7 +2475,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter, status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -2488,7 +2488,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, struct lancer_cmd_resp_read_object *resp; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2522,7 +2522,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, } err_unlock: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -2534,7 +2534,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_cmd_write_flashrom *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); adapter->flash_status = 0; wrb = wrb_from_mccq(adapter); @@ -2559,7 +2559,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter, if (status) goto err_unlock; - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); if (!wait_for_completion_timeout(&adapter->et_cmd_compl, msecs_to_jiffies(40000))) @@ -2570,7 +2570,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter, return status; err_unlock: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -2581,7 +2581,7 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, struct be_mcc_wrb *wrb; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2608,7 +2608,7 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, memcpy(flashed_crc, req->crc, 4); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -3192,7 +3192,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, struct be_cmd_req_acpi_wol_magic_config *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3209,7 +3209,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + 
mutex_unlock(&adapter->mcc_lock); return status; } @@ -3224,7 +3224,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, CMD_SUBSYSTEM_LOWLEVEL)) return -EPERM; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3247,7 +3247,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, if (status) goto err_unlock; - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); if (!wait_for_completion_timeout(&adapter->et_cmd_compl, msecs_to_jiffies(SET_LB_MODE_TIMEOUT))) @@ -3256,7 +3256,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, return status; err_unlock: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -3273,7 +3273,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, CMD_SUBSYSTEM_LOWLEVEL)) return -EPERM; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3299,7 +3299,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, if (status) goto err; - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); wait_for_completion(&adapter->et_cmd_compl); resp = embedded_payload(wrb); @@ -3307,7 +3307,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, return status; err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -3323,7 +3323,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, CMD_SUBSYSTEM_LOWLEVEL)) return -EPERM; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3357,7 +3357,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, } err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -3368,7 +3368,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter, struct be_cmd_req_seeprom_read *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3384,7 +3384,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter, status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -3399,7 +3399,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter) CMD_SUBSYSTEM_COMMON)) return -EPERM; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3444,7 +3444,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter) } dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -3454,7 +3454,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain) struct be_cmd_req_set_qos *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3474,7 +3474,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain) status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -3581,7 +3581,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, struct be_cmd_req_get_fn_privileges *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); 
if (!wrb) { @@ -3613,7 +3613,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, } err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -3625,7 +3625,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges, struct be_cmd_req_set_fn_privileges *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3645,7 +3645,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges, status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -3677,7 +3677,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, return -ENOMEM; } - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3741,7 +3741,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, } out: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size, get_mac_list_cmd.va, get_mac_list_cmd.dma); return status; @@ -3801,7 +3801,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, if (!cmd.va) return -ENOMEM; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3823,7 +3823,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, err: dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -3859,7 +3859,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, CMD_SUBSYSTEM_COMMON)) return -EPERM; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3900,7 +3900,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -3914,7 +3914,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, int status; u16 vid; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3961,7 +3961,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, } err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -4156,7 +4156,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, struct be_cmd_req_set_ext_fat_caps *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -4172,7 +4172,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -4650,7 +4650,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op) if (iface == 0xFFFFFFFF) return -1; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -4667,7 +4667,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op) status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -4701,7 +4701,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, 
struct be_vf_cfg *vf_cfg, struct be_cmd_resp_get_iface_list *resp; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -4722,7 +4722,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, } err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -4816,7 +4816,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain) if (BEx_chip(adapter)) return 0; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -4834,7 +4834,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain) req->enable = 1; status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -4905,7 +4905,7 @@ int __be_cmd_set_logical_link_config(struct be_adapter *adapter, struct be_cmd_req_set_ll_link *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -4931,7 +4931,7 @@ int __be_cmd_set_logical_link_config(struct be_adapter *adapter, status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -4964,7 +4964,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, struct be_cmd_resp_hdr *resp; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -4987,7 +4987,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length); be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } EXPORT_SYMBOL(be_roce_mcc_cmd); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 874c7539a79d..f7584d4139ff 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -53,6 +53,10 @@ static const struct pci_device_id be_dev_ids[] = { { 0 } }; MODULE_DEVICE_TABLE(pci, be_dev_ids); + +/* Workqueue used by all functions for defering cmd calls to the adapter */ +struct workqueue_struct *be_wq; + /* UE Status Low CSR */ static const char * const ue_status_low_desc[] = { "CEV", @@ -1420,13 +1424,18 @@ static int be_vid_config(struct be_adapter *adapter) u16 num = 0, i = 0; int status = 0; - /* No need to further configure vids if in promiscuous mode */ - if (be_in_all_promisc(adapter)) + /* No need to change the VLAN state if the I/F is in promiscuous */ + if (adapter->netdev->flags & IFF_PROMISC) return 0; if (adapter->vlans_added > be_max_vlans(adapter)) return be_set_vlan_promisc(adapter); + if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) { + status = be_clear_vlan_promisc(adapter); + if (status) + return status; + } /* Construct VLAN Table to give to HW */ for_each_set_bit(i, adapter->vids, VLAN_N_VID) vids[num++] = cpu_to_le16(i); @@ -1439,8 +1448,6 @@ static int be_vid_config(struct be_adapter *adapter) addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES) return be_set_vlan_promisc(adapter); - } else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) { - status = be_clear_vlan_promisc(adapter); } return status; } @@ -1450,46 +1457,45 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid) struct be_adapter 
*adapter = netdev_priv(netdev); int status = 0; + mutex_lock(&adapter->rx_filter_lock); + /* Packets with VID 0 are always received by Lancer by default */ if (lancer_chip(adapter) && vid == 0) - return status; + goto done; if (test_bit(vid, adapter->vids)) - return status; + goto done; set_bit(vid, adapter->vids); adapter->vlans_added++; status = be_vid_config(adapter); - if (status) { - adapter->vlans_added--; - clear_bit(vid, adapter->vids); - } - +done: + mutex_unlock(&adapter->rx_filter_lock); return status; } static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct be_adapter *adapter = netdev_priv(netdev); + int status = 0; + + mutex_lock(&adapter->rx_filter_lock); /* Packets with VID 0 are always received by Lancer by default */ if (lancer_chip(adapter) && vid == 0) - return 0; + goto done; if (!test_bit(vid, adapter->vids)) - return 0; + goto done; clear_bit(vid, adapter->vids); adapter->vlans_added--; - return be_vid_config(adapter); -} - -static void be_clear_all_promisc(struct be_adapter *adapter) -{ - be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF); - adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS; + status = be_vid_config(adapter); +done: + mutex_unlock(&adapter->rx_filter_lock); + return status; } static void be_set_all_promisc(struct be_adapter *adapter) @@ -1510,75 +1516,207 @@ static void be_set_mc_promisc(struct be_adapter *adapter) adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS; } -static void be_set_mc_list(struct be_adapter *adapter) +static void be_set_uc_promisc(struct be_adapter *adapter) { int status; - status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON); + if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) + return; + + status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON); if (!status) - adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS; - else + adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS; +} + +static void be_clear_uc_promisc(struct be_adapter *adapter) +{ + int status; + + if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)) + return; + + status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF); + if (!status) + adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS; +} + +/* The below 2 functions are the callback args for __dev_mc_sync/dev_uc_sync(). + * We use a single callback function for both sync and unsync. We really don't + * add/remove addresses through this callback. But, we use it to detect changes + * to the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode(). 
+ */ +static int be_uc_list_update(struct net_device *netdev, + const unsigned char *addr) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + adapter->update_uc_list = true; + return 0; +} + +static int be_mc_list_update(struct net_device *netdev, + const unsigned char *addr) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + adapter->update_mc_list = true; + return 0; +} + +static void be_set_mc_list(struct be_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct netdev_hw_addr *ha; + bool mc_promisc = false; + int status; + + netif_addr_lock_bh(netdev); + __dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update); + + if (netdev->flags & IFF_PROMISC) { + adapter->update_mc_list = false; + } else if (netdev->flags & IFF_ALLMULTI || + netdev_mc_count(netdev) > be_max_mc(adapter)) { + /* Enable multicast promisc if num configured exceeds + * what we support + */ + mc_promisc = true; + adapter->update_mc_list = false; + } else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) { + /* Update mc-list unconditionally if the iface was previously + * in mc-promisc mode and now is out of that mode. + */ + adapter->update_mc_list = true; + } + + if (adapter->update_mc_list) { + int i = 0; + + /* cache the mc-list in adapter */ + netdev_for_each_mc_addr(ha, netdev) { + ether_addr_copy(adapter->mc_list[i].mac, ha->addr); + i++; + } + adapter->mc_count = netdev_mc_count(netdev); + } + netif_addr_unlock_bh(netdev); + + if (mc_promisc) { be_set_mc_promisc(adapter); + } else if (adapter->update_mc_list) { + status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON); + if (!status) + adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS; + else + be_set_mc_promisc(adapter); + + adapter->update_mc_list = false; + } +} + +static void be_clear_mc_list(struct be_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + __dev_mc_unsync(netdev, NULL); + be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, OFF); + adapter->mc_count = 0; } static void be_set_uc_list(struct be_adapter *adapter) { + struct net_device *netdev = adapter->netdev; struct netdev_hw_addr *ha; - int i = 1; /* First slot is claimed by the Primary MAC */ + bool uc_promisc = false; + int curr_uc_macs = 0, i; - for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) - be_cmd_pmac_del(adapter, adapter->if_handle, - adapter->pmac_id[i], 0); + netif_addr_lock_bh(netdev); + __dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update); - if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) { - be_set_all_promisc(adapter); - return; + if (netdev->flags & IFF_PROMISC) { + adapter->update_uc_list = false; + } else if (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) { + uc_promisc = true; + adapter->update_uc_list = false; + } else if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) { + /* Update uc-list unconditionally if the iface was previously + * in uc-promisc mode and now is out of that mode. 
+ */ + adapter->update_uc_list = true; } - netdev_for_each_uc_addr(ha, adapter->netdev) { - adapter->uc_macs++; /* First slot is for Primary MAC */ - be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle, - &adapter->pmac_id[adapter->uc_macs], 0); + if (adapter->update_uc_list) { + i = 1; /* First slot is claimed by the Primary MAC */ + + /* cache the uc-list in adapter array */ + netdev_for_each_uc_addr(ha, netdev) { + ether_addr_copy(adapter->uc_list[i].mac, ha->addr); + i++; + } + curr_uc_macs = netdev_uc_count(netdev); + } + netif_addr_unlock_bh(netdev); + + if (uc_promisc) { + be_set_uc_promisc(adapter); + } else if (adapter->update_uc_list) { + be_clear_uc_promisc(adapter); + + for (i = 0; i < adapter->uc_macs; i++) + be_cmd_pmac_del(adapter, adapter->if_handle, + adapter->pmac_id[i + 1], 0); + + for (i = 0; i < curr_uc_macs; i++) + be_cmd_pmac_add(adapter, adapter->uc_list[i].mac, + adapter->if_handle, + &adapter->pmac_id[i + 1], 0); + adapter->uc_macs = curr_uc_macs; + adapter->update_uc_list = false; } } static void be_clear_uc_list(struct be_adapter *adapter) { + struct net_device *netdev = adapter->netdev; int i; - for (i = 1; i < (adapter->uc_macs + 1); i++) + __dev_uc_unsync(netdev, NULL); + for (i = 0; i < adapter->uc_macs; i++) be_cmd_pmac_del(adapter, adapter->if_handle, - adapter->pmac_id[i], 0); + adapter->pmac_id[i + 1], 0); adapter->uc_macs = 0; } -static void be_set_rx_mode(struct net_device *netdev) +static void __be_set_rx_mode(struct be_adapter *adapter) { - struct be_adapter *adapter = netdev_priv(netdev); + struct net_device *netdev = adapter->netdev; + + mutex_lock(&adapter->rx_filter_lock); if (netdev->flags & IFF_PROMISC) { - be_set_all_promisc(adapter); - return; + if (!be_in_all_promisc(adapter)) + be_set_all_promisc(adapter); + } else if (be_in_all_promisc(adapter)) { + /* We need to re-program the vlan-list or clear + * vlan-promisc mode (if needed) when the interface + * comes out of promisc mode. + */ + be_vid_config(adapter); } - /* Interface was previously in promiscuous mode; disable it */ - if (be_in_all_promisc(adapter)) { - be_clear_all_promisc(adapter); - if (adapter->vlans_added) - be_vid_config(adapter); - } + be_set_uc_list(adapter); + be_set_mc_list(adapter); - /* Enable multicast promisc if num configured exceeds what we support */ - if (netdev->flags & IFF_ALLMULTI || - netdev_mc_count(netdev) > be_max_mc(adapter)) { - be_set_mc_promisc(adapter); - return; - } + mutex_unlock(&adapter->rx_filter_lock); +} - if (netdev_uc_count(netdev) != adapter->uc_macs) - be_set_uc_list(adapter); +static void be_work_set_rx_mode(struct work_struct *work) +{ + struct be_cmd_work *cmd_work = + container_of(work, struct be_cmd_work, work); - be_set_mc_list(adapter); + __be_set_rx_mode(cmd_work->adapter); + kfree(cmd_work); } static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) @@ -3429,6 +3567,7 @@ static void be_disable_if_filters(struct be_adapter *adapter) adapter->pmac_id[0], 0); be_clear_uc_list(adapter); + be_clear_mc_list(adapter); /* The IFACE flags are enabled in the open path and cleared * in the close path. 
When a VF gets detached from the host and @@ -3462,6 +3601,11 @@ static int be_close(struct net_device *netdev) if (!(adapter->flags & BE_FLAGS_SETUP_DONE)) return 0; + /* Before attempting cleanup ensure all the pending cmds in the + * config_wq have finished execution + */ + flush_workqueue(be_wq); + be_disable_if_filters(adapter); if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { @@ -3586,7 +3730,7 @@ static int be_enable_if_filters(struct be_adapter *adapter) if (adapter->vlans_added) be_vid_config(adapter); - be_set_rx_mode(adapter->netdev); + __be_set_rx_mode(adapter); return 0; } @@ -3860,6 +4004,20 @@ static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs, vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1); } +static void be_if_destroy(struct be_adapter *adapter) +{ + be_cmd_if_destroy(adapter, adapter->if_handle, 0); + + kfree(adapter->pmac_id); + adapter->pmac_id = NULL; + + kfree(adapter->mc_list); + adapter->mc_list = NULL; + + kfree(adapter->uc_list); + adapter->uc_list = NULL; +} + static int be_clear(struct be_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; @@ -3867,6 +4025,8 @@ static int be_clear(struct be_adapter *adapter) be_cancel_worker(adapter); + flush_workqueue(be_wq); + if (sriov_enabled(adapter)) be_vf_clear(adapter); @@ -3884,10 +4044,8 @@ static int be_clear(struct be_adapter *adapter) } be_disable_vxlan_offloads(adapter); - kfree(adapter->pmac_id); - adapter->pmac_id = NULL; - be_cmd_if_destroy(adapter, adapter->if_handle, 0); + be_if_destroy(adapter); be_clear_queues(adapter); @@ -4341,7 +4499,7 @@ static int be_mac_setup(struct be_adapter *adapter) static void be_schedule_worker(struct be_adapter *adapter) { - schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); + queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000)); adapter->flags |= BE_FLAGS_WORKER_SCHEDULED; } @@ -4393,6 +4551,22 @@ static int be_if_create(struct be_adapter *adapter) u32 cap_flags = be_if_cap_flags(adapter); int status; + /* alloc required memory for other filtering fields */ + adapter->pmac_id = kcalloc(be_max_uc(adapter), + sizeof(*adapter->pmac_id), GFP_KERNEL); + if (!adapter->pmac_id) + return -ENOMEM; + + adapter->mc_list = kcalloc(be_max_mc(adapter), + sizeof(*adapter->mc_list), GFP_KERNEL); + if (!adapter->mc_list) + return -ENOMEM; + + adapter->uc_list = kcalloc(be_max_uc(adapter), + sizeof(*adapter->uc_list), GFP_KERNEL); + if (!adapter->uc_list) + return -ENOMEM; + if (adapter->cfg_num_rx_irqs == 1) cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS); @@ -4401,7 +4575,10 @@ static int be_if_create(struct be_adapter *adapter) status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags, &adapter->if_handle, 0); - return status; + if (status) + return status; + + return 0; } int be_update_queues(struct be_adapter *adapter) @@ -4530,11 +4707,6 @@ static int be_setup(struct be_adapter *adapter) if (status) goto err; - adapter->pmac_id = kcalloc(be_max_uc(adapter), - sizeof(*adapter->pmac_id), GFP_KERNEL); - if (!adapter->pmac_id) - return -ENOMEM; - status = be_msix_enable(adapter); if (status) goto err; @@ -4728,6 +4900,23 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 0, 0, nlflags, filter_mask, NULL); } +static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter, + void (*func)(struct work_struct *)) +{ + struct be_cmd_work *work; + + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + dev_err(&adapter->pdev->dev, + "be_work memory allocation failed\n"); + return NULL; + 
} + + INIT_WORK(&work->work, func); + work->adapter = adapter; + return work; +} + /* VxLAN offload Notes: * * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't @@ -4742,23 +4931,19 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, * adds more than one port, disable offloads and don't re-enable them again * until after all the tunnels are removed. */ -static void be_add_vxlan_port(struct net_device *netdev, - struct udp_tunnel_info *ti) +static void be_work_add_vxlan_port(struct work_struct *work) { - struct be_adapter *adapter = netdev_priv(netdev); + struct be_cmd_work *cmd_work = + container_of(work, struct be_cmd_work, work); + struct be_adapter *adapter = cmd_work->adapter; + struct net_device *netdev = adapter->netdev; struct device *dev = &adapter->pdev->dev; - __be16 port = ti->port; + __be16 port = cmd_work->info.vxlan_port; int status; - if (ti->type != UDP_TUNNEL_TYPE_VXLAN) - return; - - if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter)) - return; - if (adapter->vxlan_port == port && adapter->vxlan_port_count) { adapter->vxlan_port_aliases++; - return; + goto done; } if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) { @@ -4770,7 +4955,7 @@ static void be_add_vxlan_port(struct net_device *netdev, } if (adapter->vxlan_port_count++ >= 1) - return; + goto done; status = be_cmd_manage_iface(adapter, adapter->if_handle, OP_CONVERT_NORMAL_TO_TUNNEL); @@ -4795,29 +4980,26 @@ static void be_add_vxlan_port(struct net_device *netdev, dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n", be16_to_cpu(port)); - return; + goto done; err: be_disable_vxlan_offloads(adapter); +done: + kfree(cmd_work); } -static void be_del_vxlan_port(struct net_device *netdev, - struct udp_tunnel_info *ti) +static void be_work_del_vxlan_port(struct work_struct *work) { - struct be_adapter *adapter = netdev_priv(netdev); - __be16 port = ti->port; - - if (ti->type != UDP_TUNNEL_TYPE_VXLAN) - return; - - if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter)) - return; + struct be_cmd_work *cmd_work = + container_of(work, struct be_cmd_work, work); + struct be_adapter *adapter = cmd_work->adapter; + __be16 port = cmd_work->info.vxlan_port; if (adapter->vxlan_port != port) goto done; if (adapter->vxlan_port_aliases) { adapter->vxlan_port_aliases--; - return; + goto out; } be_disable_vxlan_offloads(adapter); @@ -4827,6 +5009,40 @@ static void be_del_vxlan_port(struct net_device *netdev, be16_to_cpu(port)); done: adapter->vxlan_port_count--; +out: + kfree(cmd_work); +} + +static void be_cfg_vxlan_port(struct net_device *netdev, + struct udp_tunnel_info *ti, + void (*func)(struct work_struct *)) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct be_cmd_work *cmd_work; + + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + + if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter)) + return; + + cmd_work = be_alloc_work(adapter, func); + if (cmd_work) { + cmd_work->info.vxlan_port = ti->port; + queue_work(be_wq, &cmd_work->work); + } +} + +static void be_del_vxlan_port(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + be_cfg_vxlan_port(netdev, ti, be_work_del_vxlan_port); +} + +static void be_add_vxlan_port(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + be_cfg_vxlan_port(netdev, ti, be_work_add_vxlan_port); } static netdev_features_t be_features_check(struct sk_buff *skb, @@ -4891,6 +5107,16 @@ static int be_get_phys_port_id(struct net_device *dev, return 0; } +static void 
be_set_rx_mode(struct net_device *dev) +{ + struct be_adapter *adapter = netdev_priv(dev); + struct be_cmd_work *work; + + work = be_alloc_work(adapter, be_work_set_rx_mode); + if (work) + queue_work(be_wq, &work->work); +} + static const struct net_device_ops be_netdev_ops = { .ndo_open = be_open, .ndo_stop = be_close, @@ -5116,7 +5342,7 @@ static void be_worker(struct work_struct *work) reschedule: adapter->work_counter++; - schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); + queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000)); } static void be_unmap_pci_bars(struct be_adapter *adapter) @@ -5256,7 +5482,8 @@ static int be_drv_init(struct be_adapter *adapter) } mutex_init(&adapter->mbox_lock); - spin_lock_init(&adapter->mcc_lock); + mutex_init(&adapter->mcc_lock); + mutex_init(&adapter->rx_filter_lock); spin_lock_init(&adapter->mcc_cq_lock); init_completion(&adapter->et_cmd_compl); @@ -5712,6 +5939,12 @@ static int __init be_init_module(void) pr_info(DRV_NAME " : Use sysfs method to enable VFs\n"); } + be_wq = create_singlethread_workqueue("be_wq"); + if (!be_wq) { + pr_warn(DRV_NAME "workqueue creation failed\n"); + return -1; + } + return pci_register_driver(&be_driver); } module_init(be_init_module); @@ -5719,5 +5952,8 @@ module_init(be_init_module); static void __exit be_exit_module(void) { pci_unregister_driver(&be_driver); + + if (be_wq) + destroy_workqueue(be_wq); } module_exit(be_exit_module); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 01f7e811739b..fb5c63881340 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2887,7 +2887,7 @@ fec_enet_close(struct net_device *ndev) * this kind of feature?). */ -#define HASH_BITS 6 /* #bits in hash */ +#define FEC_HASH_BITS 6 /* #bits in hash */ #define CRC32_POLY 0xEDB88320 static void set_multicast_list(struct net_device *ndev) @@ -2935,10 +2935,10 @@ static void set_multicast_list(struct net_device *ndev) } } - /* only upper 6 bits (HASH_BITS) are used + /* only upper 6 bits (FEC_HASH_BITS) are used * which point to specific bit in he hash registers */ - hash = (crc >> (32 - HASH_BITS)) & 0x3f; + hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f; if (hash > 31) { tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH); diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 5bf1ade28315..186ef8f16c80 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3756,7 +3756,7 @@ static int ucc_geth_probe(struct platform_device* ofdev) return -EINVAL; } if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) { - pr_err("invalid rx-clock propperty\n"); + pr_err("invalid rx-clock property\n"); return -EINVAL; } ug_info->uf_info.rx_clock = *prop; diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c index 7b8fe866f603..e03b30c60dcf 100644 --- a/drivers/net/ethernet/freescale/xgmac_mdio.c +++ b/drivers/net/ethernet/freescale/xgmac_mdio.c @@ -271,11 +271,8 @@ static int xgmac_mdio_probe(struct platform_device *pdev) goto err_ioremap; } - if (of_get_property(pdev->dev.of_node, - "little-endian", NULL)) - priv->is_little_endian = true; - else - priv->is_little_endian = false; + priv->is_little_endian = of_property_read_bool(pdev->dev.of_node, + "little-endian"); ret = of_mdiobus_register(bus, np); if (ret) { diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c 
b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 5c8afe1a5ccb..a834774fdb02 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -684,8 +684,7 @@ hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb, if (!phy || IS_ERR(phy)) return -EIO; - if (mdio->irq) - phy->irq = mdio->irq[addr]; + phy->irq = mdio->irq[addr]; /* All data is now stored in the phy struct; * register it diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index afb5daa3721d..eb448dff7564 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -18,6 +18,7 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> +#include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/vmalloc.h> @@ -115,10 +116,8 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) dsaf_dev->sc_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(dsaf_dev->sc_base)) { - dev_err(dsaf_dev->dev, "subctrl can not map!\n"); + if (IS_ERR(dsaf_dev->sc_base)) return PTR_ERR(dsaf_dev->sc_base); - } res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++); @@ -129,10 +128,8 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) dsaf_dev->sds_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(dsaf_dev->sds_base)) { - dev_err(dsaf_dev->dev, "serdes-ctrl can not map!\n"); + if (IS_ERR(dsaf_dev->sds_base)) return PTR_ERR(dsaf_dev->sds_base); - } } else { dsaf_dev->sub_ctrl = syscon; } @@ -147,10 +144,8 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) } } dsaf_dev->ppe_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(dsaf_dev->ppe_base)) { - dev_err(dsaf_dev->dev, "ppe-base resource can not map!\n"); + if (IS_ERR(dsaf_dev->ppe_base)) return PTR_ERR(dsaf_dev->ppe_base); - } dsaf_dev->ppe_paddr = res->start; if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) { @@ -166,10 +161,8 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) } } dsaf_dev->io_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(dsaf_dev->io_base)) { - dev_err(dsaf_dev->dev, "dsaf-base resource can not map!\n"); + if (IS_ERR(dsaf_dev->io_base)) return PTR_ERR(dsaf_dev->io_base); - } } ret = device_property_read_u32(dsaf_dev->dev, "desc-num", &desc_num); @@ -2781,6 +2774,89 @@ static struct platform_driver g_dsaf_driver = { module_platform_driver(g_dsaf_driver); +/** + * hns_dsaf_roce_reset - reset dsaf and roce + * @dsaf_fwnode: Pointer to framework node for the dasf + * @enable: false - request reset , true - drop reset + * retuen 0 - success , negative -fail + */ +int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool enable) +{ + struct dsaf_device *dsaf_dev; + struct platform_device *pdev; + u32 mp; + u32 sl; + u32 credit; + int i; + const u32 port_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = { + {DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0}, + {DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0}, + {DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0}, + {DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0}, + {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1}, + {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1}, + {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1}, + {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1}, + }; + const u32 sl_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = { + {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0}, + 
{DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1}, + {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2}, + {DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3}, + {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0}, + {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1}, + {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2}, + {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3}, + }; + + if (!is_of_node(dsaf_fwnode)) { + pr_err("hisi_dsaf: Only support DT node!\n"); + return -EINVAL; + } + pdev = of_find_device_by_node(to_of_node(dsaf_fwnode)); + dsaf_dev = dev_get_drvdata(&pdev->dev); + if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { + dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n", + dsaf_dev->ae_dev.name); + return -ENODEV; + } + + if (!enable) { + /* Reset rocee-channels in dsaf and rocee */ + hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK, false); + hns_dsaf_roce_srst(dsaf_dev, false); + } else { + /* Configure dsaf tx roce correspond to port map and sl map */ + mp = dsaf_read_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG); + for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++) + dsaf_set_field(mp, 7 << i * 3, i * 3, + port_map[i][DSAF_ROCE_6PORT_MODE]); + dsaf_set_field(mp, 3 << i * 3, i * 3, 0); + dsaf_write_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG, mp); + + sl = dsaf_read_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG); + for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++) + dsaf_set_field(sl, 3 << i * 2, i * 2, + sl_map[i][DSAF_ROCE_6PORT_MODE]); + dsaf_write_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG, sl); + + /* De-reset rocee-channels in dsaf and rocee */ + hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK, true); + msleep(SRST_TIME_INTERVAL); + hns_dsaf_roce_srst(dsaf_dev, true); + + /* Eanble dsaf channel rocee credit */ + credit = dsaf_read_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG); + dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 0); + dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit); + + dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1); + dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit); + } + return 0; +} +EXPORT_SYMBOL(hns_dsaf_roce_reset); + MODULE_LICENSE("GPL"); MODULE_AUTHOR("Huawei Tech. 
Co., Ltd."); MODULE_DESCRIPTION("HNS DSAF driver"); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index 1daf018d9071..f3681d566ae6 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -43,6 +43,32 @@ struct hns_mac_cb; #define DSAF_PRIO_NR 8 #define DSAF_REG_PER_ZONE 3 +#define DSAF_ROCE_CREDIT_CHN 8 +#define DSAF_ROCE_CHAN_MODE 3 + +enum dsaf_roce_port_mode { + DSAF_ROCE_6PORT_MODE, + DSAF_ROCE_4PORT_MODE, + DSAF_ROCE_2PORT_MODE, + DSAF_ROCE_CHAN_MODE_NUM, +}; + +enum dsaf_roce_port_num { + DSAF_ROCE_PORT_0, + DSAF_ROCE_PORT_1, + DSAF_ROCE_PORT_2, + DSAF_ROCE_PORT_3, + DSAF_ROCE_PORT_4, + DSAF_ROCE_PORT_5, +}; + +enum dsaf_roce_qos_sl { + DSAF_ROCE_SL_0, + DSAF_ROCE_SL_1, + DSAF_ROCE_SL_2, + DSAF_ROCE_SL_3, +}; + #define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset)))) #define HNS_DSAF_IS_DEBUG(dev) (dev->dsaf_mode == DSAF_MODE_DISABLE_SP) @@ -419,6 +445,10 @@ int hns_dsaf_get_mac_entry_by_index( void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb); +void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool enable); + +void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool enable); + int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev); void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index 611b67b6f450..36b9f791cf2f 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -231,6 +231,42 @@ static void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev, dsaf_write_sub(dsaf_dev, reg_addr, reg_val); } +/** + * hns_dsaf_srst_chns - reset dsaf channels + * @dsaf_dev: dsaf device struct pointer + * @msk: xbar channels mask value: + * bit0-5 for xge0-5 + * bit6-11 for ppe0-5 + * bit12-17 for roce0-5 + * bit18-19 for com/dfx + * @enable: false - request reset , true - drop reset + */ +void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool enable) +{ + u32 reg_addr; + + if (!enable) + reg_addr = DSAF_SUB_SC_DSAF_RESET_REQ_REG; + else + reg_addr = DSAF_SUB_SC_DSAF_RESET_DREQ_REG; + + dsaf_write_sub(dsaf_dev, reg_addr, msk); +} + +void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool enable) +{ + if (!enable) { + dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_RESET_REQ_REG, 1); + } else { + dsaf_write_sub(dsaf_dev, + DSAF_SUB_SC_ROCEE_CLK_DIS_REG, 1); + dsaf_write_sub(dsaf_dev, + DSAF_SUB_SC_ROCEE_RESET_DREQ_REG, 1); + msleep(20); + dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_CLK_EN_REG, 1); + } +} + static void hns_dsaf_xge_core_srst_by_port_acpi(struct dsaf_device *dsaf_dev, u32 port, bool dereset) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index 235f74444b1d..13c16ab7be48 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h @@ -77,6 +77,12 @@ #define DSAF_SUB_SC_PPE_RESET_DREQ_REG 0xA4C #define DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG 0xA88 #define DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG 0xA8C +#define DSAF_SUB_SC_DSAF_RESET_REQ_REG 0xAA8 +#define DSAF_SUB_SC_ROCEE_RESET_REQ_REG 0xA50 +#define DSAF_SUB_SC_DSAF_RESET_DREQ_REG 0xAAC +#define DSAF_SUB_SC_ROCEE_CLK_DIS_REG 0x32C +#define DSAF_SUB_SC_ROCEE_RESET_DREQ_REG 0xA54 +#define DSAF_SUB_SC_ROCEE_CLK_EN_REG 0x328 #define 
DSAF_SUB_SC_LIGHT_MODULE_DETECT_EN_REG 0x2060 #define DSAF_SUB_SC_TCAM_MBIST_EN_REG 0x2300 #define DSAF_SUB_SC_DSAF_CLK_ST_REG 0x5300 @@ -133,6 +139,8 @@ #define DSAF_ROCEE_INT_STS_0_REG 0x200 #define DSAFV2_SERDES_LBK_0_REG 0x220 #define DSAF_PAUSE_CFG_REG 0x240 +#define DSAF_ROCE_PORT_MAP_REG 0x2A0 +#define DSAF_ROCE_SL_MAP_REG 0x2A4 #define DSAF_PPE_QID_CFG_0_REG 0x300 #define DSAF_SW_PORT_TYPE_0_REG 0x320 #define DSAF_STP_PORT_TYPE_0_REG 0x340 @@ -178,6 +186,7 @@ #define DSAF_SBM_BP_CFG_2_XGE_REG_0_REG 0x200C #define DSAF_SBM_BP_CFG_2_PPE_REG_0_REG 0x230C #define DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x260C +#define DSAF_SBM_ROCEE_CFG_REG_REG 0x2380 #define DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x238C #define DSAF_SBM_FREE_CNT_0_0_REG 0x2010 #define DSAF_SBM_FREE_CNT_1_0_REG 0x2014 @@ -796,6 +805,9 @@ #define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S 9 #define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 9) +#define DSAF_CHNS_MASK 0x3f000 +#define DSAF_SBM_ROCEE_CFG_CRD_EN_B 2 +#define SRST_TIME_INTERVAL 20 #define DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_S 0 #define DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_M (((1ULL << 8) - 1) << 0) #define DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_S 8 diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 88f3c85fb04a..62454d7a062a 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -203,7 +203,8 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter, struct device *dev = &adapter->vdev->dev; dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); - send_request_unmap(adapter, ltb->map_id); + if (!adapter->failover) + send_request_unmap(adapter, ltb->map_id); } static int alloc_rx_pool(struct ibmvnic_adapter *adapter, @@ -522,7 +523,8 @@ static int ibmvnic_close(struct net_device *netdev) for (i = 0; i < adapter->req_rx_queues; i++) napi_disable(&adapter->napi[i]); - netif_tx_stop_all_queues(netdev); + if (!adapter->failover) + netif_tx_stop_all_queues(netdev); if (adapter->bounce_buffer) { if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) { @@ -2777,12 +2779,6 @@ static void handle_control_ras_rsp(union ibmvnic_crq *crq, } } -static int ibmvnic_fw_comp_open(struct inode *inode, struct file *file) -{ - file->private_data = inode->i_private; - return 0; -} - static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len, loff_t *ppos) { @@ -2834,7 +2830,7 @@ static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len, static const struct file_operations trace_ops = { .owner = THIS_MODULE, - .open = ibmvnic_fw_comp_open, + .open = simple_open, .read = trace_read, }; @@ -2884,7 +2880,7 @@ static ssize_t paused_write(struct file *file, const char __user *user_buf, static const struct file_operations paused_ops = { .owner = THIS_MODULE, - .open = ibmvnic_fw_comp_open, + .open = simple_open, .read = paused_read, .write = paused_write, }; @@ -2932,7 +2928,7 @@ static ssize_t tracing_write(struct file *file, const char __user *user_buf, static const struct file_operations tracing_ops = { .owner = THIS_MODULE, - .open = ibmvnic_fw_comp_open, + .open = simple_open, .read = tracing_read, .write = tracing_write, }; @@ -2985,7 +2981,7 @@ static ssize_t error_level_write(struct file *file, const char __user *user_buf, static const struct file_operations error_level_ops = { .owner = THIS_MODULE, - .open = ibmvnic_fw_comp_open, + .open = simple_open, .read = error_level_read, .write = error_level_write, }; @@ -3036,7 +3032,7 @@ static ssize_t 
trace_level_write(struct file *file, const char __user *user_buf, static const struct file_operations trace_level_ops = { .owner = THIS_MODULE, - .open = ibmvnic_fw_comp_open, + .open = simple_open, .read = trace_level_read, .write = trace_level_write, }; @@ -3089,7 +3085,7 @@ static ssize_t trace_buff_size_write(struct file *file, static const struct file_operations trace_size_ops = { .owner = THIS_MODULE, - .open = ibmvnic_fw_comp_open, + .open = simple_open, .read = trace_buff_size_read, .write = trace_buff_size_write, }; @@ -3280,6 +3276,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, rc = ibmvnic_send_crq_init(adapter); if (rc) dev_err(dev, "Error sending init rc=%ld\n", rc); + } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) { + dev_info(dev, "Backing device failover detected\n"); + netif_carrier_off(netdev); + adapter->failover = true; } else { /* The adapter lost the connection */ dev_err(dev, "Virtual Adapter failed (rc=%d)\n", @@ -3615,8 +3615,18 @@ static void handle_crq_init_rsp(struct work_struct *work) struct device *dev = &adapter->vdev->dev; struct net_device *netdev = adapter->netdev; unsigned long timeout = msecs_to_jiffies(30000); + bool restart = false; int rc; + if (adapter->failover) { + release_sub_crqs(adapter); + if (netif_running(netdev)) { + netif_tx_disable(netdev); + ibmvnic_close(netdev); + restart = true; + } + } + send_version_xchg(adapter); reinit_completion(&adapter->init_done); if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { @@ -3645,6 +3655,17 @@ static void handle_crq_init_rsp(struct work_struct *work) netdev->real_num_tx_queues = adapter->req_tx_queues; + if (adapter->failover) { + adapter->failover = false; + if (restart) { + rc = ibmvnic_open(netdev); + if (rc) + goto restart_failed; + } + netif_carrier_on(netdev); + return; + } + rc = register_netdev(netdev); if (rc) { dev_err(dev, @@ -3655,6 +3676,8 @@ static void handle_crq_init_rsp(struct work_struct *work) return; +restart_failed: + dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc); register_failed: release_sub_crqs(adapter); task_failed: @@ -3692,6 +3715,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) dev_set_drvdata(&dev->dev, netdev); adapter->vdev = dev; adapter->netdev = netdev; + adapter->failover = false; ether_addr_copy(adapter->mac_addr, mac_addr_p); ether_addr_copy(netdev->dev_addr, adapter->mac_addr); @@ -3721,6 +3745,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) if (dma_mapping_error(&dev->dev, adapter->stats_token)) { if (!firmware_has_feature(FW_FEATURE_CMO)) dev_err(&dev->dev, "Couldn't map stats buffer\n"); + rc = -ENOMEM; goto free_crq; } diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index e82898fd518e..bfc84c7d0e11 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -830,6 +830,7 @@ enum ibmvfc_crq_format { IBMVNIC_CRQ_INIT = 0x01, IBMVNIC_CRQ_INIT_COMPLETE = 0x02, IBMVNIC_PARTITION_MIGRATED = 0x06, + IBMVNIC_DEVICE_FAILOVER = 0x08, }; struct ibmvnic_crq_queue { @@ -1047,4 +1048,5 @@ struct ibmvnic_adapter { u8 map_id; struct work_struct vnic_crq_init; + bool failover; }; diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 2a882916b4f6..19103a6a7dcc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -65,76 +65,72 @@ #include "i40e_dcb.h" /* Useful i40e defaults */ -#define I40E_MAX_VEB 16 - 
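The ibmvnic hunks above add a failover path that is split between the CRQ handler and the init-response worker. As a hedged illustration only (not part of the patch), the sketch below condenses that sequence into one place; the wrapper function is hypothetical, while the helpers it calls are the driver's own.

/* Illustrative sketch of the ibmvnic failover flow added above; not driver code. */
static void ibmvnic_failover_sketch(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	bool restart = false;

	/* 1. ibmvnic_handle_crq(): a transport event carrying
	 *    IBMVNIC_DEVICE_FAILOVER arrives from the backing device.
	 */
	netif_carrier_off(netdev);
	adapter->failover = true;

	/* 2. handle_crq_init_rsp(): tear down the sub-CRQs and close the
	 *    interface if it was running, then renegotiate with the new
	 *    backing device.
	 */
	release_sub_crqs(adapter);
	if (netif_running(netdev)) {
		netif_tx_disable(netdev);
		ibmvnic_close(netdev);
		restart = true;
	}
	/* ... version exchange and capability negotiation happen here ... */

	/* 3. On success: clear the flag, reopen if needed, restore carrier. */
	adapter->failover = false;
	if (restart)
		ibmvnic_open(netdev);
	netif_carrier_on(netdev);
}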
-#define I40E_MAX_NUM_DESCRIPTORS 4096 -#define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024) -#define I40E_DEFAULT_NUM_DESCRIPTORS 512 -#define I40E_REQ_DESCRIPTOR_MULTIPLE 32 -#define I40E_MIN_NUM_DESCRIPTORS 64 -#define I40E_MIN_MSIX 2 -#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */ -#define I40E_MIN_VSI_ALLOC 51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */ +#define I40E_MAX_VEB 16 + +#define I40E_MAX_NUM_DESCRIPTORS 4096 +#define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024) +#define I40E_DEFAULT_NUM_DESCRIPTORS 512 +#define I40E_REQ_DESCRIPTOR_MULTIPLE 32 +#define I40E_MIN_NUM_DESCRIPTORS 64 +#define I40E_MIN_MSIX 2 +#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */ +#define I40E_MIN_VSI_ALLOC 51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */ /* max 16 qps */ #define i40e_default_queues_per_vmdq(pf) \ (((pf)->flags & I40E_FLAG_RSS_AQ_CAPABLE) ? 4 : 1) -#define I40E_DEFAULT_QUEUES_PER_VF 4 -#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */ +#define I40E_DEFAULT_QUEUES_PER_VF 4 +#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */ #define i40e_pf_get_max_q_per_tc(pf) \ (((pf)->flags & I40E_FLAG_128_QP_RSS_CAPABLE) ? 128 : 64) -#define I40E_FDIR_RING 0 -#define I40E_FDIR_RING_COUNT 32 +#define I40E_FDIR_RING 0 +#define I40E_FDIR_RING_COUNT 32 #ifdef I40E_FCOE -#define I40E_DEFAULT_FCOE 8 /* default number of QPs for FCoE */ -#define I40E_MINIMUM_FCOE 1 /* minimum number of QPs for FCoE */ +#define I40E_DEFAULT_FCOE 8 /* default number of QPs for FCoE */ +#define I40E_MINIMUM_FCOE 1 /* minimum number of QPs for FCoE */ #endif /* I40E_FCOE */ -#define I40E_MAX_AQ_BUF_SIZE 4096 -#define I40E_AQ_LEN 256 -#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */ -#define I40E_MAX_USER_PRIORITY 8 -#define I40E_DEFAULT_MSG_ENABLE 4 -#define I40E_QUEUE_WAIT_RETRY_LIMIT 10 -#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16) +#define I40E_MAX_AQ_BUF_SIZE 4096 +#define I40E_AQ_LEN 256 +#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */ +#define I40E_MAX_USER_PRIORITY 8 +#define I40E_DEFAULT_MSG_ENABLE 4 +#define I40E_QUEUE_WAIT_RETRY_LIMIT 10 +#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16) /* Ethtool Private Flags */ -#define I40E_PRIV_FLAGS_MFP_FLAG BIT(0) -#define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1) +#define I40E_PRIV_FLAGS_MFP_FLAG BIT(0) +#define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1) #define I40E_PRIV_FLAGS_FD_ATR BIT(2) #define I40E_PRIV_FLAGS_VEB_STATS BIT(3) #define I40E_PRIV_FLAGS_HW_ATR_EVICT BIT(4) #define I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT BIT(5) -#define I40E_NVM_VERSION_LO_SHIFT 0 -#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) -#define I40E_NVM_VERSION_HI_SHIFT 12 -#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT) -#define I40E_OEM_VER_BUILD_MASK 0xffff -#define I40E_OEM_VER_PATCH_MASK 0xff -#define I40E_OEM_VER_BUILD_SHIFT 8 -#define I40E_OEM_VER_SHIFT 24 +#define I40E_NVM_VERSION_LO_SHIFT 0 +#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) +#define I40E_NVM_VERSION_HI_SHIFT 12 +#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT) +#define I40E_OEM_VER_BUILD_MASK 0xffff +#define I40E_OEM_VER_PATCH_MASK 0xff +#define I40E_OEM_VER_BUILD_SHIFT 8 +#define I40E_OEM_VER_SHIFT 24 #define I40E_PHY_DEBUG_ALL \ (I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW | \ I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW) /* The values in here are decimal coded as hex as is the case in the NVM map*/ -#define I40E_CURRENT_NVM_VERSION_HI 0x2 -#define I40E_CURRENT_NVM_VERSION_LO 0x40 +#define 
I40E_CURRENT_NVM_VERSION_HI 0x2 +#define I40E_CURRENT_NVM_VERSION_LO 0x40 -/* magic for getting defines into strings */ -#define STRINGIFY(foo) #foo -#define XSTRINGIFY(bar) STRINGIFY(bar) - -#define I40E_RX_DESC(R, i) \ +#define I40E_RX_DESC(R, i) \ (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])) -#define I40E_TX_DESC(R, i) \ +#define I40E_TX_DESC(R, i) \ (&(((struct i40e_tx_desc *)((R)->desc))[i])) -#define I40E_TX_CTXTDESC(R, i) \ +#define I40E_TX_CTXTDESC(R, i) \ (&(((struct i40e_tx_context_desc *)((R)->desc))[i])) -#define I40E_TX_FDIRDESC(R, i) \ +#define I40E_TX_FDIRDESC(R, i) \ (&(((struct i40e_filter_program_desc *)((R)->desc))[i])) /* default to trying for four seconds */ -#define I40E_TRY_LINK_TIMEOUT (4 * HZ) +#define I40E_TRY_LINK_TIMEOUT (4 * HZ) /** * i40e_is_mac_710 - Return true if MAC is X710/XL710 @@ -199,9 +195,9 @@ struct i40e_lump_tracking { #define I40E_FDIR_BUFFER_HEAD_ROOM 32 #define I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR (I40E_FDIR_BUFFER_HEAD_ROOM * 4) -#define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4) -#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4) -#define I40E_VF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4) +#define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4) +#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4) +#define I40E_VF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4) enum i40e_fd_stat_idx { I40E_FD_STAT_ATR, @@ -387,8 +383,8 @@ struct i40e_pf { struct mutex switch_mutex; u16 lan_vsi; /* our default LAN VSI */ u16 lan_veb; /* initial relay, if exists */ -#define I40E_NO_VEB 0xffff -#define I40E_NO_VSI 0xffff +#define I40E_NO_VEB 0xffff +#define I40E_NO_VSI 0xffff u16 next_vsi; /* Next unallocated VSI - 0-based! */ struct i40e_vsi **vsi; struct i40e_veb *veb[I40E_MAX_VEB]; @@ -423,8 +419,8 @@ struct i40e_pf { */ u16 dcbx_cap; - u32 fcoe_hmc_filt_num; - u32 fcoe_hmc_cntx_num; + u32 fcoe_hmc_filt_num; + u32 fcoe_hmc_cntx_num; struct i40e_filter_control_settings filter_settings; struct ptp_clock *ptp_clock; @@ -470,10 +466,10 @@ struct i40e_mac_filter { struct i40e_veb { struct i40e_pf *pf; u16 idx; - u16 veb_idx; /* index of VEB parent */ + u16 veb_idx; /* index of VEB parent */ u16 seid; u16 uplink_seid; - u16 stats_idx; /* index of VEB parent */ + u16 stats_idx; /* index of VEB parent */ u8 enabled_tc; u16 bridge_mode; /* Bridge Mode (VEB/VEPA) */ u16 flags; @@ -534,12 +530,13 @@ struct i40e_vsi { u32 promisc_threshold; u16 work_limit; - u16 int_rate_limit; /* value in usecs */ + u16 int_rate_limit; /* value in usecs */ + + u16 rss_table_size; /* HW RSS table size */ + u16 rss_size; /* Allocated RSS queues */ + u8 *rss_hkey_user; /* User configured hash keys */ + u8 *rss_lut_user; /* User configured lookup table entries */ - u16 rss_table_size; /* HW RSS table size */ - u16 rss_size; /* Allocated RSS queues */ - u8 *rss_hkey_user; /* User configured hash keys */ - u8 *rss_lut_user; /* User configured lookup table entries */ u16 max_frame; u16 rx_buf_len; @@ -550,14 +547,14 @@ struct i40e_vsi { int base_vector; bool irqs_ready; - u16 seid; /* HW index of this VSI (absolute index) */ - u16 id; /* VSI number */ + u16 seid; /* HW index of this VSI (absolute index) */ + u16 id; /* VSI number */ u16 uplink_seid; - u16 base_queue; /* vsi's first queue in hw array */ - u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */ - u16 req_queue_pairs; /* User requested queue pairs */ - u16 num_queue_pairs; /* Used tx and rx pairs */ + u16 base_queue; /* vsi's first queue in hw array */ + u16 
alloc_queue_pairs; /* Allocated Tx/Rx queues */ + u16 req_queue_pairs; /* User requested queue pairs */ + u16 num_queue_pairs; /* Used tx and rx pairs */ u16 num_desc; enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */ s16 vf_id; /* Virtual function ID for SRIOV VSIs */ @@ -576,19 +573,16 @@ struct i40e_vsi { /* TC BW limit max quanta within VSI */ u8 bw_ets_max_quanta[I40E_MAX_TRAFFIC_CLASS]; - struct i40e_pf *back; /* Backreference to associated PF */ - u16 idx; /* index in pf->vsi[] */ - u16 veb_idx; /* index of VEB parent */ - struct kobject *kobj; /* sysfs object */ - bool current_isup; /* Sync 'link up' logging */ + struct i40e_pf *back; /* Backreference to associated PF */ + u16 idx; /* index in pf->vsi[] */ + u16 veb_idx; /* index of VEB parent */ + struct kobject *kobj; /* sysfs object */ + bool current_isup; /* Sync 'link up' logging */ void *priv; /* client driver data reference. */ /* VSI specific handlers */ irqreturn_t (*irq_handler)(int irq, void *data); - - /* current rxnfc data */ - struct ethtool_rxnfc rxnfc; /* current rss hash opts */ } ____cacheline_internodealigned_in_smp; struct i40e_netdev_priv { diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 11cf1a5ebccf..67e396b2b347 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -204,6 +204,9 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_suspend_port_tx = 0x041B, i40e_aqc_opc_resume_port_tx = 0x041C, i40e_aqc_opc_configure_partition_bw = 0x041D, + /* hmc */ + i40e_aqc_opc_query_hmc_resource_profile = 0x0500, + i40e_aqc_opc_set_hmc_resource_profile = 0x0501, /* phy commands*/ i40e_aqc_opc_get_phy_abilities = 0x0600, @@ -450,13 +453,15 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); /* Set ARP Proxy command / response (indirect 0x0104) */ struct i40e_aqc_arp_proxy_data { __le16 command_flags; -#define I40E_AQ_ARP_INIT_IPV4 0x0008 -#define I40E_AQ_ARP_UNSUP_CTL 0x0010 -#define I40E_AQ_ARP_ENA 0x0020 -#define I40E_AQ_ARP_ADD_IPV4 0x0040 -#define I40E_AQ_ARP_DEL_IPV4 0x0080 +#define I40E_AQ_ARP_INIT_IPV4 0x0800 +#define I40E_AQ_ARP_UNSUP_CTL 0x1000 +#define I40E_AQ_ARP_ENA 0x2000 +#define I40E_AQ_ARP_ADD_IPV4 0x4000 +#define I40E_AQ_ARP_DEL_IPV4 0x8000 __le16 table_id; - __le32 pfpm_proxyfc; + __le32 enabled_offloads; +#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020 +#define I40E_AQ_ARP_OFFLOAD_ENABLE 0x00000800 __le32 ip_addr; u8 mac_addr[6]; u8 reserved[2]; @@ -471,17 +476,19 @@ struct i40e_aqc_ns_proxy_data { __le16 table_idx_ipv6_0; __le16 table_idx_ipv6_1; __le16 control; -#define I40E_AQ_NS_PROXY_ADD_0 0x0100 -#define I40E_AQ_NS_PROXY_DEL_0 0x0200 -#define I40E_AQ_NS_PROXY_ADD_1 0x0400 -#define I40E_AQ_NS_PROXY_DEL_1 0x0800 -#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000 -#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000 -#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000 -#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000 -#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001 -#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002 -#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004 +#define I40E_AQ_NS_PROXY_ADD_0 0x0001 +#define I40E_AQ_NS_PROXY_DEL_0 0x0002 +#define I40E_AQ_NS_PROXY_ADD_1 0x0004 +#define I40E_AQ_NS_PROXY_DEL_1 0x0008 +#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x0010 +#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x0020 +#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x0040 +#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x0080 +#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0100 +#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200 +#define 
I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0400 +#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800 +#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000 u8 mac_addr_0[6]; u8 mac_addr_1[6]; u8 local_mac_addr[6]; @@ -1582,6 +1589,24 @@ struct i40e_aqc_configure_partition_bw_data { I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data); +/* Get and set the active HMC resource profile and status. + * (direct 0x0500) and (direct 0x0501) + */ +struct i40e_aq_get_set_hmc_resource_profile { + u8 pm_profile; + u8 pe_vf_enabled; + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); + +enum i40e_aq_hmc_profile { + /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ + I40E_HMC_PROFILE_DEFAULT = 1, + I40E_HMC_PROFILE_FAVOR_VF = 2, + I40E_HMC_PROFILE_EQUAL = 3, +}; + /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ /* set in param0 for get phy abilities to report qualified modules */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index 618f18436618..250db0b244b7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -148,6 +148,11 @@ i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len) "Cannot locate client instance virtual channel receive routine\n"); continue; } + if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, + &cdev->state)) { + dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort virtchnl_receive\n"); + continue; + } cdev->client->ops->virtchnl_receive(&cdev->lan_info, cdev->client, vf_id, msg, len); @@ -181,6 +186,11 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi) "Cannot locate client instance l2_param_change routine\n"); continue; } + if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, + &cdev->state)) { + dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n"); + continue; + } cdev->lan_info.params = params; cdev->client->ops->l2_param_change(&cdev->lan_info, cdev->client, @@ -306,6 +316,11 @@ void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id) "Cannot locate client instance VF reset routine\n"); continue; } + if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, + &cdev->state)) { + dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-reset\n"); + continue; + } cdev->client->ops->vf_reset(&cdev->lan_info, cdev->client, vf_id); } @@ -336,6 +351,11 @@ void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs) "Cannot locate client instance VF enable routine\n"); continue; } + if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, + &cdev->state)) { + dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-enable\n"); + continue; + } cdev->client->ops->vf_enable(&cdev->lan_info, cdev->client, num_vfs); } @@ -370,6 +390,11 @@ int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id, "Cannot locate client instance VF capability routine\n"); continue; } + if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, + &cdev->state)) { + dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-capable\n"); + continue; + } capable = cdev->client->ops->vf_capable(&cdev->lan_info, cdev->client, vf_id); @@ -559,6 +584,7 @@ void i40e_client_subtask(struct i40e_pf *pf) pf->hw.bus.device, pf->hw.bus.func); } + mutex_lock(&i40e_client_instance_mutex); /* Send an Open request to the client */ atomic_inc(&cdev->ref_cnt); if (client->ops && client->ops->open) @@ -568,10 +594,12 @@ void i40e_client_subtask(struct i40e_pf *pf) set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state); } else { /* 
remove client instance */ + mutex_unlock(&i40e_client_instance_mutex); i40e_client_del_instance(pf, client); atomic_dec(&client->ref_cnt); continue; } + mutex_unlock(&i40e_client_instance_mutex); } mutex_unlock(&i40e_client_mutex); } @@ -654,7 +682,7 @@ int i40e_lan_del_device(struct i40e_pf *pf) static int i40e_client_release(struct i40e_client *client) { struct i40e_client_instance *cdev, *tmp; - struct i40e_pf *pf = NULL; + struct i40e_pf *pf; int ret = 0; LIST_HEAD(cdevs_tmp); @@ -664,12 +692,12 @@ static int i40e_client_release(struct i40e_client *client) if (strncmp(cdev->client->name, client->name, I40E_CLIENT_STR_LENGTH)) continue; + pf = (struct i40e_pf *)cdev->lan_info.pf; if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { if (atomic_read(&cdev->ref_cnt) > 0) { ret = I40E_ERR_NOT_READY; goto out; } - pf = (struct i40e_pf *)cdev->lan_info.pf; if (client->ops && client->ops->close) client->ops->close(&cdev->lan_info, client, false); @@ -681,8 +709,7 @@ static int i40e_client_release(struct i40e_client *client) client->name, pf->hw.pf_id); } /* delete the client instance from the list */ - list_del(&cdev->list); - list_add(&cdev->list, &cdevs_tmp); + list_move(&cdev->list, &cdevs_tmp); atomic_dec(&client->ref_cnt); dev_info(&pf->pdev->dev, "Deleted client instance of Client %s\n", client->name); @@ -811,7 +838,8 @@ static int i40e_client_setup_qvlist(struct i40e_info *ldev, wr32(hw, I40E_PFINT_AEQCTL, reg); } } - + /* Mitigate sync problems with iwarp VF driver */ + i40e_flush(hw); return 0; err: kfree(ldev->qvlist_info); @@ -1009,7 +1037,6 @@ int i40e_unregister_client(struct i40e_client *client) if (!i40e_client_is_registered(client)) { pr_info("i40e: Client %s has not been registered\n", client->name); - mutex_unlock(&i40e_client_mutex); ret = -ENODEV; goto out; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h index a4601d97fb24..38a6c36a6a0e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.h +++ b/drivers/net/ethernet/intel/i40e/i40e_client.h @@ -36,9 +36,9 @@ #define I40E_CLIENT_VERSION_MINOR 01 #define I40E_CLIENT_VERSION_BUILD 00 #define I40E_CLIENT_VERSION_STR \ - XSTRINGIFY(I40E_CLIENT_VERSION_MAJOR) "." \ - XSTRINGIFY(I40E_CLIENT_VERSION_MINOR) "." \ - XSTRINGIFY(I40E_CLIENT_VERSION_BUILD) + __stringify(I40E_CLIENT_VERSION_MAJOR) "." \ + __stringify(I40E_CLIENT_VERSION_MINOR) "." 
\ + __stringify(I40E_CLIENT_VERSION_BUILD) struct i40e_client_version { u8 major; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index c912e041d102..1835186b62c9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1560,13 +1560,13 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, } #endif for (i = 0; i < vsi->num_queue_pairs; i++) { - snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i); + snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_packets", i); p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i); + snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_bytes", i); p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i); + snprintf(p, ETH_GSTRING_LEN, "rx-%d.rx_packets", i); p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i); + snprintf(p, ETH_GSTRING_LEN, "rx-%d.rx_bytes", i); p += ETH_GSTRING_LEN; } if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) @@ -1581,16 +1581,16 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, } for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { snprintf(p, ETH_GSTRING_LEN, - "veb.tc_%u_tx_packets", i); + "veb.tc_%d_tx_packets", i); p += ETH_GSTRING_LEN; snprintf(p, ETH_GSTRING_LEN, - "veb.tc_%u_tx_bytes", i); + "veb.tc_%d_tx_bytes", i); p += ETH_GSTRING_LEN; snprintf(p, ETH_GSTRING_LEN, - "veb.tc_%u_rx_packets", i); + "veb.tc_%d_rx_packets", i); p += ETH_GSTRING_LEN; snprintf(p, ETH_GSTRING_LEN, - "veb.tc_%u_rx_bytes", i); + "veb.tc_%d_rx_bytes", i); p += ETH_GSTRING_LEN; } } @@ -1601,23 +1601,23 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, } for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { snprintf(p, ETH_GSTRING_LEN, - "port.tx_priority_%u_xon", i); + "port.tx_priority_%d_xon", i); p += ETH_GSTRING_LEN; snprintf(p, ETH_GSTRING_LEN, - "port.tx_priority_%u_xoff", i); + "port.tx_priority_%d_xoff", i); p += ETH_GSTRING_LEN; } for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { snprintf(p, ETH_GSTRING_LEN, - "port.rx_priority_%u_xon", i); + "port.rx_priority_%d_xon", i); p += ETH_GSTRING_LEN; snprintf(p, ETH_GSTRING_LEN, - "port.rx_priority_%u_xoff", i); + "port.rx_priority_%d_xoff", i); p += ETH_GSTRING_LEN; } for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { snprintf(p, ETH_GSTRING_LEN, - "port.rx_priority_%u_xon_2_xoff", i); + "port.rx_priority_%d_xon_2_xoff", i); p += ETH_GSTRING_LEN; } /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */ @@ -2141,41 +2141,72 @@ static int i40e_set_per_queue_coalesce(struct net_device *netdev, u32 queue, **/ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd) { + struct i40e_hw *hw = &pf->hw; + u8 flow_pctype = 0; + u64 i_set = 0; + cmd->data = 0; - if (pf->vsi[pf->lan_vsi]->rxnfc.data != 0) { - cmd->data = pf->vsi[pf->lan_vsi]->rxnfc.data; - cmd->flow_type = pf->vsi[pf->lan_vsi]->rxnfc.flow_type; - return 0; - } - /* Report default options for RSS on i40e */ switch (cmd->flow_type) { case TCP_V4_FLOW: + flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; + break; case UDP_V4_FLOW: - cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fall through to add IP fields */ + flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; + break; + case TCP_V6_FLOW: + flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP; + break; + case UDP_V6_FLOW: + flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP; + break; case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: case AH_V4_FLOW: case ESP_V4_FLOW: case 
IPV4_FLOW: - cmd->data |= RXH_IP_SRC | RXH_IP_DST; - break; - case TCP_V6_FLOW: - case UDP_V6_FLOW: - cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fall through to add IP fields */ case SCTP_V6_FLOW: case AH_ESP_V6_FLOW: case AH_V6_FLOW: case ESP_V6_FLOW: case IPV6_FLOW: + /* Default is src/dest for IP, no matter the L4 hashing */ cmd->data |= RXH_IP_SRC | RXH_IP_DST; break; default: return -EINVAL; } + /* Read flow based hash input set register */ + if (flow_pctype) { + i_set = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, + flow_pctype)) | + ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, + flow_pctype)) << 32); + } + + /* Process bits of hash input set */ + if (i_set) { + if (i_set & I40E_L4_SRC_MASK) + cmd->data |= RXH_L4_B_0_1; + if (i_set & I40E_L4_DST_MASK) + cmd->data |= RXH_L4_B_2_3; + + if (cmd->flow_type == TCP_V4_FLOW || + cmd->flow_type == UDP_V4_FLOW) { + if (i_set & I40E_L3_SRC_MASK) + cmd->data |= RXH_IP_SRC; + if (i_set & I40E_L3_DST_MASK) + cmd->data |= RXH_IP_DST; + } else if (cmd->flow_type == TCP_V6_FLOW || + cmd->flow_type == UDP_V6_FLOW) { + if (i_set & I40E_L3_V6_SRC_MASK) + cmd->data |= RXH_IP_SRC; + if (i_set & I40E_L3_V6_DST_MASK) + cmd->data |= RXH_IP_DST; + } + } + return 0; } @@ -2318,6 +2349,51 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, } /** + * i40e_get_rss_hash_bits - Read RSS Hash bits from register + * @nfc: pointer to user request + * @i_setc: bits currently set + * + * Returns value of bits to be set per user request + **/ +static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc) +{ + u64 i_set = i_setc; + u64 src_l3 = 0, dst_l3 = 0; + + if (nfc->data & RXH_L4_B_0_1) + i_set |= I40E_L4_SRC_MASK; + else + i_set &= ~I40E_L4_SRC_MASK; + if (nfc->data & RXH_L4_B_2_3) + i_set |= I40E_L4_DST_MASK; + else + i_set &= ~I40E_L4_DST_MASK; + + if (nfc->flow_type == TCP_V6_FLOW || nfc->flow_type == UDP_V6_FLOW) { + src_l3 = I40E_L3_V6_SRC_MASK; + dst_l3 = I40E_L3_V6_DST_MASK; + } else if (nfc->flow_type == TCP_V4_FLOW || + nfc->flow_type == UDP_V4_FLOW) { + src_l3 = I40E_L3_SRC_MASK; + dst_l3 = I40E_L3_DST_MASK; + } else { + /* Any other flow types are not supported here */ + return i_set; + } + + if (nfc->data & RXH_IP_SRC) + i_set |= src_l3; + else + i_set &= ~src_l3; + if (nfc->data & RXH_IP_DST) + i_set |= dst_l3; + else + i_set &= ~dst_l3; + + return i_set; +} + +/** * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash * @pf: pointer to the physical function struct * @cmd: ethtool rxnfc command @@ -2329,6 +2405,8 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) struct i40e_hw *hw = &pf->hw; u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); + u8 flow_pctype = 0; + u64 i_set, i_setc; /* RSS does not support anything other than hashing * to queues on src and dst IPs and ports */ @@ -2337,75 +2415,39 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) RXH_L4_B_0_1 | RXH_L4_B_2_3)) return -EINVAL; - /* We need at least the IP SRC and DEST fields for hashing */ - if (!(nfc->data & RXH_IP_SRC) || - !(nfc->data & RXH_IP_DST)) - return -EINVAL; - switch (nfc->flow_type) { case TCP_V4_FLOW: - switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - case 0: - return -EINVAL; - case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); - - hena |= 
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); - break; - default: - return -EINVAL; - } + flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; + if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); break; case TCP_V6_FLOW: - switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - case 0: - return -EINVAL; - case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK); - - hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); - break; - default: - return -EINVAL; - } + flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP; + if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); + if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK); break; case UDP_V4_FLOW: - switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - case 0: - return -EINVAL; - case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP); - - hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); - break; - default: - return -EINVAL; - } + flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; + if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP); + + hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4); break; case UDP_V6_FLOW: - switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - case 0: - return -EINVAL; - case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP); - - hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); - break; - default: - return -EINVAL; - } + flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP; + if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP); + + hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6); break; case AH_ESP_V4_FLOW: case AH_V4_FLOW: @@ -2437,13 +2479,23 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) return -EINVAL; } + if (flow_pctype) { + i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, + flow_pctype)) | + ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, + flow_pctype)) << 32); + i_set = i40e_get_rss_hash_bits(nfc, i_setc); + i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_pctype), + (u32)i_set); + i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_pctype), + (u32)(i_set >> 32)); + hena |= BIT_ULL(flow_pctype); + } + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); i40e_flush(hw); - /* Save setting for future output/update */ - pf->vsi[pf->lan_vsi]->rxnfc = *nfc; - return 0; } @@ -2744,11 +2796,15 @@ static void i40e_get_channels(struct net_device *dev, static int i40e_set_channels(struct net_device *dev, struct ethtool_channels *ch) { + const u8 drop = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET; struct i40e_netdev_priv *np = netdev_priv(dev); unsigned int count = ch->combined_count; struct i40e_vsi *vsi = 
np->vsi; struct i40e_pf *pf = vsi->back; + struct i40e_fdir_filter *rule; + struct hlist_node *node2; int new_count; + int err = 0; /* We do not support setting channels for any other VSI at present */ if (vsi->type != I40E_VSI_MAIN) @@ -2766,6 +2822,26 @@ static int i40e_set_channels(struct net_device *dev, if (count > i40e_max_channels(vsi)) return -EINVAL; + /* verify that the number of channels does not invalidate any current + * flow director rules + */ + hlist_for_each_entry_safe(rule, node2, + &pf->fdir_filter_list, fdir_node) { + if (rule->dest_ctl != drop && count <= rule->q_index) { + dev_warn(&pf->pdev->dev, + "Existing user defined filter %d assigns flow to queue %d\n", + rule->fd_id, rule->q_index); + err = -EINVAL; + } + } + + if (err) { + dev_err(&pf->pdev->dev, + "Existing filter rules must be deleted to reduce combined channel count to %d\n", + count); + return err; + } + /* update feature limits from largest to smallest supported values */ /* TODO: Flow director limit, DCB etc */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 828ed28c3c14..fcdea29be4ee 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -41,7 +41,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 6 -#define DRV_VERSION_BUILD 11 +#define DRV_VERSION_BUILD 12 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -527,6 +527,7 @@ void i40e_pf_reset_stats(struct i40e_pf *pf) pf->veb[i]->stat_offsets_loaded = false; } } + pf->hw_csum_rx_error = 0; } /** @@ -4616,7 +4617,7 @@ static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg) static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; - u8 i, enabled_tc; + u8 i, enabled_tc = 1; u8 num_tc = 0; struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; @@ -4634,8 +4635,6 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) else return 1; /* Only TC0 */ - /* At least have TC0 */ - enabled_tc = (enabled_tc ? enabled_tc : 0x1); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (enabled_tc & BIT(i)) num_tc++; @@ -7985,72 +7984,34 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, u16 lut_size) { - struct i40e_aqc_get_set_rss_key_data rss_key; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; - bool pf_lut = false; - u8 *rss_lut; - int ret, i; - - memcpy(&rss_key, seed, sizeof(rss_key)); - - rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL); - if (!rss_lut) - return -ENOMEM; - - /* Populate the LUT with max no. of queues in round robin fashion */ - for (i = 0; i < vsi->rss_table_size; i++) - rss_lut[i] = i % vsi->rss_size; + int ret = 0; - ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key); - if (ret) { - dev_info(&pf->pdev->dev, - "Cannot set RSS key, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); - goto config_rss_aq_out; + if (seed) { + struct i40e_aqc_get_set_rss_key_data *seed_dw = + (struct i40e_aqc_get_set_rss_key_data *)seed; + ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw); + if (ret) { + dev_info(&pf->pdev->dev, + "Cannot set RSS key, err %s aq_err %s\n", + i40e_stat_str(hw, ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); + return ret; + } } + if (lut) { + bool pf_lut = vsi->type == I40E_VSI_MAIN ? 
true : false; - if (vsi->type == I40E_VSI_MAIN) - pf_lut = true; - - ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut, - vsi->rss_table_size); - if (ret) - dev_info(&pf->pdev->dev, - "Cannot set RSS lut, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); - -config_rss_aq_out: - kfree(rss_lut); - return ret; -} - -/** - * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used - * @vsi: VSI structure - **/ -static int i40e_vsi_config_rss(struct i40e_vsi *vsi) -{ - u8 seed[I40E_HKEY_ARRAY_SIZE]; - struct i40e_pf *pf = vsi->back; - u8 *lut; - int ret; - - if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)) - return 0; - - lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); - if (!lut) - return -ENOMEM; - - i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); - netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); - vsi->rss_size = min_t(int, pf->alloc_rss_size, vsi->num_queue_pairs); - ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size); - kfree(lut); - + ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size); + if (ret) { + dev_info(&pf->pdev->dev, + "Cannot set RSS lut, err %s aq_err %s\n", + i40e_stat_str(hw, ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); + return ret; + } + } return ret; } @@ -8101,6 +8062,46 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed, } /** + * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used + * @vsi: VSI structure + **/ +static int i40e_vsi_config_rss(struct i40e_vsi *vsi) +{ + u8 seed[I40E_HKEY_ARRAY_SIZE]; + struct i40e_pf *pf = vsi->back; + u8 *lut; + int ret; + + if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)) + return 0; + + if (!vsi->rss_size) + vsi->rss_size = min_t(int, pf->alloc_rss_size, + vsi->num_queue_pairs); + if (!vsi->rss_size) + return -EINVAL; + + lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); + if (!lut) + return -ENOMEM; + /* Use the user configured hash keys and lookup table if there is one, + * otherwise use default + */ + if (vsi->rss_lut_user) + memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); + else + i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); + if (vsi->rss_hkey_user) + memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); + else + netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); + ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size); + kfree(lut); + + return ret; +} + +/** * i40e_config_rss_reg - Configure RSS keys and lut by writing registers * @vsi: Pointer to vsi structure * @seed: RSS hash seed @@ -8691,6 +8692,28 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) } /** + * i40e_clear_rss_lut - clear the rx hash lookup table + * @vsi: the VSI being configured + **/ +static void i40e_clear_rss_lut(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + u16 vf_id = vsi->vf_id; + u8 i; + + if (vsi->type == I40E_VSI_MAIN) { + for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) + wr32(hw, I40E_PFQF_HLUT(i), 0); + } else if (vsi->type == I40E_VSI_SRIOV) { + for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) + i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0); + } else { + dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n"); + } +} + +/** * i40e_set_features - set the netdev feature flags * @netdev: ptr to the netdev being adjusted * @features: the feature set that the stack is suggesting @@ -8703,6 +8726,12 @@ static int i40e_set_features(struct net_device *netdev, struct i40e_pf *pf = vsi->back; bool need_reset; + if 
(features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH)) + i40e_pf_config_rss(pf); + else if (!(features & NETIF_F_RXHASH) && + netdev->features & NETIF_F_RXHASH) + i40e_clear_rss_lut(vsi); + if (features & NETIF_F_HW_VLAN_CTAG_RX) i40e_vlan_stripping_enable(vsi); else @@ -11575,7 +11604,8 @@ static int __init i40e_init_module(void) * it can't be any worse than using the system workqueue which * was already single threaded */ - i40e_wq = create_singlethread_workqueue(i40e_driver_name); + i40e_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, + i40e_driver_name); if (!i40e_wq) { pr_err("%s: Failed to create workqueue\n", i40e_driver_name); return -ENOMEM; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index df7ecc9578c9..f8d66236fcbf 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2840,10 +2840,9 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, I40E_TXD_QW1_CMD_SHIFT); /* notify HW of packet */ - if (!tail_bump) + if (!tail_bump) { prefetchw(tx_desc + 1); - - if (tail_bump) { + } else { /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, @@ -2852,7 +2851,6 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, wmb(); writel(i, tx_ring->tail); } - return; dma_error: diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 6fcbf764f32b..da3423561b3a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -991,7 +991,10 @@ complete_reset: i40e_enable_vf_mappings(vf); set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); - i40e_notify_client_of_vf_reset(pf, abs_vf_id); + /* Do not notify the client during VF init */ + if (vf->pf->num_alloc_vfs) + i40e_notify_client_of_vf_reset(pf, abs_vf_id); + vf->num_vlan = 0; } /* tell the VF the reset is done */ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); @@ -1089,7 +1092,6 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) goto err_iov; } } - i40e_notify_client_of_vf_enable(pf, num_alloc_vfs); /* allocate memory */ vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL); if (!vfs) { @@ -1113,6 +1115,8 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) } pf->num_alloc_vfs = num_alloc_vfs; + i40e_notify_client_of_vf_enable(pf, num_alloc_vfs); + err_alloc: if (ret) i40e_free_vfs(pf); @@ -2314,6 +2318,7 @@ err: /* send the response back to the VF */ aq_ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS, aq_ret, (u8 *)vrh, len); + kfree(vrh); return aq_ret; } @@ -2995,6 +3000,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev, else ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; ivi->spoofchk = vf->spoofchk; + ivi->trusted = vf->trusted; ret = 0; error_param: diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index 3114dcfa1724..40b0eafd0c71 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -204,6 +204,9 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_suspend_port_tx = 0x041B, i40e_aqc_opc_resume_port_tx = 0x041C, i40e_aqc_opc_configure_partition_bw = 0x041D, + /* hmc */ + 
i40e_aqc_opc_query_hmc_resource_profile = 0x0500, + i40e_aqc_opc_set_hmc_resource_profile = 0x0501, /* phy commands*/ i40e_aqc_opc_get_phy_abilities = 0x0600, @@ -447,13 +450,15 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); /* Set ARP Proxy command / response (indirect 0x0104) */ struct i40e_aqc_arp_proxy_data { __le16 command_flags; -#define I40E_AQ_ARP_INIT_IPV4 0x0008 -#define I40E_AQ_ARP_UNSUP_CTL 0x0010 -#define I40E_AQ_ARP_ENA 0x0020 -#define I40E_AQ_ARP_ADD_IPV4 0x0040 -#define I40E_AQ_ARP_DEL_IPV4 0x0080 +#define I40E_AQ_ARP_INIT_IPV4 0x0800 +#define I40E_AQ_ARP_UNSUP_CTL 0x1000 +#define I40E_AQ_ARP_ENA 0x2000 +#define I40E_AQ_ARP_ADD_IPV4 0x4000 +#define I40E_AQ_ARP_DEL_IPV4 0x8000 __le16 table_id; - __le32 pfpm_proxyfc; + __le32 enabled_offloads; +#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020 +#define I40E_AQ_ARP_OFFLOAD_ENABLE 0x00000800 __le32 ip_addr; u8 mac_addr[6]; u8 reserved[2]; @@ -468,17 +473,19 @@ struct i40e_aqc_ns_proxy_data { __le16 table_idx_ipv6_0; __le16 table_idx_ipv6_1; __le16 control; -#define I40E_AQ_NS_PROXY_ADD_0 0x0100 -#define I40E_AQ_NS_PROXY_DEL_0 0x0200 -#define I40E_AQ_NS_PROXY_ADD_1 0x0400 -#define I40E_AQ_NS_PROXY_DEL_1 0x0800 -#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000 -#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000 -#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000 -#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000 -#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001 -#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002 -#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004 +#define I40E_AQ_NS_PROXY_ADD_0 0x0001 +#define I40E_AQ_NS_PROXY_DEL_0 0x0002 +#define I40E_AQ_NS_PROXY_ADD_1 0x0004 +#define I40E_AQ_NS_PROXY_DEL_1 0x0008 +#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x0010 +#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x0020 +#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x0040 +#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x0080 +#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0100 +#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200 +#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0400 +#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800 +#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000 u8 mac_addr_0[6]; u8 mac_addr_1[6]; u8 local_mac_addr[6]; @@ -1579,6 +1586,24 @@ struct i40e_aqc_configure_partition_bw_data { I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data); +/* Get and set the active HMC resource profile and status. + * (direct 0x0500) and (direct 0x0501) + */ +struct i40e_aq_get_set_hmc_resource_profile { + u8 pm_profile; + u8 pe_vf_enabled; + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); + +enum i40e_aq_hmc_profile { + /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ + I40E_HMC_PROFILE_DEFAULT = 1, + I40E_HMC_PROFILE_FAVOR_VF = 2, + I40E_HMC_PROFILE_EQUAL = 3, +}; + /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ /* set in param0 for get phy abilities to report qualified modules */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index a579193b2c21..0130458264e5 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -2068,10 +2068,9 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, I40E_TXD_QW1_CMD_SHIFT); /* notify HW of packet */ - if (!tail_bump) + if (!tail_bump) { prefetchw(tx_desc + 1); - - if (tail_bump) { + } else { /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. 
(Only * applicable for weak-ordered memory model archs, @@ -2080,7 +2079,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, wmb(); writel(i, tx_ring->tail); } - return; dma_error: diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 76ed97db28e2..dc00aaf94687 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -71,20 +71,20 @@ struct i40e_vsi { /* How many Rx Buffers do we bundle into one write to the hardware ? */ #define I40EVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ -#define I40EVF_DEFAULT_TXD 512 -#define I40EVF_DEFAULT_RXD 512 -#define I40EVF_MAX_TXD 4096 -#define I40EVF_MIN_TXD 64 -#define I40EVF_MAX_RXD 4096 -#define I40EVF_MIN_RXD 64 -#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32 +#define I40EVF_DEFAULT_TXD 512 +#define I40EVF_DEFAULT_RXD 512 +#define I40EVF_MAX_TXD 4096 +#define I40EVF_MIN_TXD 64 +#define I40EVF_MAX_RXD 4096 +#define I40EVF_MIN_RXD 64 +#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32 /* Supported Rx Buffer Sizes */ -#define I40EVF_RXBUFFER_2048 2048 -#define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */ -#define I40EVF_MAX_AQ_BUF_SIZE 4096 -#define I40EVF_AQ_LEN 32 -#define I40EVF_AQ_MAX_ERR 20 /* times to try before resetting AQ */ +#define I40EVF_RXBUFFER_2048 2048 +#define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */ +#define I40EVF_MAX_AQ_BUF_SIZE 4096 +#define I40EVF_AQ_LEN 32 +#define I40EVF_AQ_MAX_ERR 20 /* times to try before resetting AQ */ #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) @@ -111,7 +111,7 @@ struct i40e_q_vector { u8 num_ringpairs; /* total number of ring pairs in vector */ #define ITR_COUNTDOWN_START 100 u8 itr_countdown; /* when 0 or 1 update ITR */ - int v_idx; /* vector index in list */ + int v_idx; /* vector index in list */ char name[IFNAMSIZ + 9]; bool arm_wb_state; cpumask_var_t affinity_mask; @@ -129,11 +129,11 @@ struct i40e_q_vector { ((((R)->next_to_clean > (R)->next_to_use) ? 
0 : (R)->count) + \ (R)->next_to_clean - (R)->next_to_use - 1) -#define I40EVF_RX_DESC_ADV(R, i) \ +#define I40EVF_RX_DESC_ADV(R, i) \ (&(((union i40e_adv_rx_desc *)((R).desc))[i])) -#define I40EVF_TX_DESC_ADV(R, i) \ +#define I40EVF_TX_DESC_ADV(R, i) \ (&(((union i40e_adv_tx_desc *)((R).desc))[i])) -#define I40EVF_TX_CTXTDESC_ADV(R, i) \ +#define I40EVF_TX_CTXTDESC_ADV(R, i) \ (&(((struct i40e_adv_tx_context_desc *)((R).desc))[i])) #define OTHER_VECTOR 1 @@ -204,22 +204,25 @@ struct i40evf_adapter { struct msix_entry *msix_entries; u32 flags; -#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0) -#define I40EVF_FLAG_IMIR_ENABLED BIT(5) -#define I40EVF_FLAG_MQ_CAPABLE BIT(6) -#define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7) -#define I40EVF_FLAG_PF_COMMS_FAILED BIT(8) -#define I40EVF_FLAG_RESET_PENDING BIT(9) -#define I40EVF_FLAG_RESET_NEEDED BIT(10) +#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0) +#define I40EVF_FLAG_IN_NETPOLL BIT(4) +#define I40EVF_FLAG_IMIR_ENABLED BIT(5) +#define I40EVF_FLAG_MQ_CAPABLE BIT(6) +#define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7) +#define I40EVF_FLAG_PF_COMMS_FAILED BIT(8) +#define I40EVF_FLAG_RESET_PENDING BIT(9) +#define I40EVF_FLAG_RESET_NEEDED BIT(10) #define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(11) #define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12) #define I40EVF_FLAG_ADDR_SET_BY_PF BIT(13) +#define I40EVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(14) #define I40EVF_FLAG_PROMISC_ON BIT(15) #define I40EVF_FLAG_ALLMULTI_ON BIT(16) /* duplicates for common code */ -#define I40E_FLAG_FDIR_ATR_ENABLED 0 -#define I40E_FLAG_DCB_ENABLED 0 -#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED +#define I40E_FLAG_FDIR_ATR_ENABLED 0 +#define I40E_FLAG_DCB_ENABLED 0 +#define I40E_FLAG_IN_NETPOLL I40EVF_FLAG_IN_NETPOLL +#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED #define I40E_FLAG_WB_ON_ITR_CAPABLE I40EVF_FLAG_WB_ON_ITR_CAPABLE #define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE /* flags for admin queue service task */ @@ -233,7 +236,7 @@ struct i40evf_adapter { #define I40EVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6) #define I40EVF_FLAG_AQ_MAP_VECTORS BIT(7) #define I40EVF_FLAG_AQ_HANDLE_RESET BIT(8) -#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9) /* direct AQ config */ +#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9) /* direct AQ config */ #define I40EVF_FLAG_AQ_GET_CONFIG BIT(10) /* Newer style, RSS done by the PF so we can ignore hardware vagaries. */ #define I40EVF_FLAG_AQ_GET_HENA BIT(11) @@ -258,6 +261,7 @@ struct i40evf_adapter { struct work_struct watchdog_task; bool netdev_registered; bool link_up; + enum i40e_aq_link_speed link_speed; enum i40e_virtchnl_ops current_op; #define CLIENT_ENABLED(_a) ((_a)->vf_res ? 
\ (_a)->vf_res->vf_offload_flags & \ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index c9c202f6c521..e17a15456266 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -74,13 +74,33 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = { static int i40evf_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { - /* In the future the VF will be able to query the PF for - * some information - for now use a dummy value - */ + struct i40evf_adapter *adapter = netdev_priv(netdev); + ecmd->supported = 0; ecmd->autoneg = AUTONEG_DISABLE; ecmd->transceiver = XCVR_DUMMY1; ecmd->port = PORT_NONE; + /* Set speed and duplex */ + switch (adapter->link_speed) { + case I40E_LINK_SPEED_40GB: + ethtool_cmd_speed_set(ecmd, SPEED_40000); + break; + case I40E_LINK_SPEED_20GB: + ethtool_cmd_speed_set(ecmd, SPEED_20000); + break; + case I40E_LINK_SPEED_10GB: + ethtool_cmd_speed_set(ecmd, SPEED_10000); + break; + case I40E_LINK_SPEED_1GB: + ethtool_cmd_speed_set(ecmd, SPEED_1000); + break; + case I40E_LINK_SPEED_100MB: + ethtool_cmd_speed_set(ecmd, SPEED_100); + break; + default: + break; + } + ecmd->duplex = DUPLEX_FULL; return 0; } diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 600fb9c4a7f0..f751f7bc0d81 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -38,7 +38,7 @@ static const char i40evf_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 6 -#define DRV_VERSION_BUILD 11 +#define DRV_VERSION_BUILD 12 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." 
\ __stringify(DRV_VERSION_BUILD) \ @@ -1420,7 +1420,9 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter) { int err; + rtnl_lock(); err = i40evf_set_interrupt_capability(adapter); + rtnl_unlock(); if (err) { dev_err(&adapter->pdev->dev, "Unable to setup interrupt capabilities\n"); @@ -1802,6 +1804,8 @@ continue_reset: } adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + /* Open RDMA Client again */ + adapter->aq_required |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED; clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); i40evf_misc_irq_enable(adapter); @@ -2831,7 +2835,8 @@ static int __init i40evf_init_module(void) pr_info("%s\n", i40evf_copyright); - i40evf_wq = create_singlethread_workqueue(i40evf_driver_name); + i40evf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, + i40evf_driver_name); if (!i40evf_wq) { pr_err("%s: Failed to create workqueue\n", i40evf_driver_name); return -ENOMEM; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index d76c221d4c8a..cc6cb30c1667 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -817,6 +817,45 @@ void i40evf_set_rss_lut(struct i40evf_adapter *adapter) } /** + * i40evf_print_link_message - print link up or down + * @adapter: adapter structure + * + * Log a message telling the world of our wondrous link status + */ +static void i40evf_print_link_message(struct i40evf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + char *speed = "Unknown "; + + if (!adapter->link_up) { + netdev_info(netdev, "NIC Link is Down\n"); + return; + } + + switch (adapter->link_speed) { + case I40E_LINK_SPEED_40GB: + speed = "40 G"; + break; + case I40E_LINK_SPEED_20GB: + speed = "20 G"; + break; + case I40E_LINK_SPEED_10GB: + speed = "10 G"; + break; + case I40E_LINK_SPEED_1GB: + speed = "1000 M"; + break; + case I40E_LINK_SPEED_100MB: + speed = "100 M"; + break; + default: + break; + } + + netdev_info(netdev, "NIC Link is Up %sbps Full Duplex\n", speed); +} + +/** * i40evf_request_reset * @adapter: adapter structure * @@ -853,15 +892,13 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, (struct i40e_virtchnl_pf_event *)msg; switch (vpe->event) { case I40E_VIRTCHNL_EVENT_LINK_CHANGE: - adapter->link_up = - vpe->event_data.link_event.link_status; - if (adapter->link_up && !netif_carrier_ok(netdev)) { - dev_info(&adapter->pdev->dev, "NIC Link is Up\n"); - netif_carrier_on(netdev); - netif_tx_wake_all_queues(netdev); - } else if (!adapter->link_up) { - dev_info(&adapter->pdev->dev, "NIC Link is Down\n"); - netif_carrier_off(netdev); + adapter->link_speed = + vpe->event_data.link_event.link_speed; + if (adapter->link_up != + vpe->event_data.link_event.link_status) { + adapter->link_up = + vpe->event_data.link_event.link_status; + i40evf_print_link_message(adapter); netif_tx_stop_all_queues(netdev); } break; diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h index 199ff98209cf..acf06051e111 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.h +++ b/drivers/net/ethernet/intel/igb/e1000_82575.h @@ -188,6 +188,11 @@ struct e1000_adv_tx_context_desc { /* ETQF register bit definitions */ #define E1000_ETQF_FILTER_ENABLE BIT(26) #define E1000_ETQF_1588 BIT(30) +#define E1000_ETQF_IMM_INT BIT(29) +#define E1000_ETQF_QUEUE_ENABLE BIT(31) +#define 
E1000_ETQF_QUEUE_SHIFT 16 +#define E1000_ETQF_QUEUE_MASK 0x00070000 +#define E1000_ETQF_ETYPE_MASK 0x0000FFFF /* FTQF register bit definitions */ #define E1000_FTQF_VF_BP 0x00008000 diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 2997c443c5dc..2688180a7acd 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -1024,4 +1024,8 @@ #define E1000_RTTBCNRC_RF_INT_MASK \ (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT) +#define E1000_VLAPQF_QUEUE_SEL(_n, q_idx) (q_idx << ((_n) * 4)) +#define E1000_VLAPQF_P_VALID(_n) (0x1 << (3 + (_n) * 4)) +#define E1000_VLAPQF_QUEUE_MASK 0x03 + #endif diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 21d9d02885cb..d84afdd83e53 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -309,6 +309,7 @@ (0x054E0 + ((_i - 16) * 8))) #define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ (0x054E4 + ((_i - 16) * 8))) +#define E1000_VLAPQF 0x055B0 /* VLAN Priority Queue Filter VLAPQF */ #define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) #define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) #define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 5387b3a96489..03fbe4b7663b 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -350,11 +350,49 @@ struct hwmon_buff { }; #endif +/* The number of L2 ether-type filter registers, Index 3 is reserved + * for PTP 1588 timestamp + */ +#define MAX_ETYPE_FILTER (4 - 1) +/* ETQF filter list: one static filter per filter consumer. This is + * to avoid filter collisions later. Add new filters here!! 
+ * + * Current filters: Filter 3 + */ +#define IGB_ETQF_FILTER_1588 3 + #define IGB_N_EXTTS 2 #define IGB_N_PEROUT 2 #define IGB_N_SDP 4 #define IGB_RETA_SIZE 128 +enum igb_filter_match_flags { + IGB_FILTER_FLAG_ETHER_TYPE = 0x1, + IGB_FILTER_FLAG_VLAN_TCI = 0x2, +}; + +#define IGB_MAX_RXNFC_FILTERS 16 + +/* RX network flow classification data structure */ +struct igb_nfc_input { + /* Byte layout in order, all values with MSB first: + * match_flags - 1 byte + * etype - 2 bytes + * vlan_tci - 2 bytes + */ + u8 match_flags; + __be16 etype; + __be16 vlan_tci; +}; + +struct igb_nfc_filter { + struct hlist_node nfc_node; + struct igb_nfc_input filter; + u16 etype_reg_index; + u16 sw_idx; + u16 action; +}; + /* board specific private data structure */ struct igb_adapter { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; @@ -473,6 +511,13 @@ struct igb_adapter { int copper_tries; struct e1000_info ei; u16 eee_advert; + + /* RX network flow classification support */ + struct hlist_head nfc_filter_list; + unsigned int nfc_filter_count; + /* lock for RX network flow classification filter */ + spinlock_t nfc_lock; + bool etype_bitmap[MAX_ETYPE_FILTER]; }; /* flags controlling PTP/1588 function */ @@ -599,4 +644,9 @@ static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring) return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); } +int igb_add_filter(struct igb_adapter *adapter, + struct igb_nfc_filter *input); +int igb_erase_filter(struct igb_adapter *adapter, + struct igb_nfc_filter *input); + #endif /* _IGB_H_ */ diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 64e91c575a39..0c33eca7c832 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2431,6 +2431,63 @@ static int igb_get_ts_info(struct net_device *dev, } } +#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +static int igb_get_ethtool_nfc_entry(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct igb_nfc_filter *rule = NULL; + + /* report total rule count */ + cmd->data = IGB_MAX_RXNFC_FILTERS; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (fsp->location <= rule->sw_idx) + break; + } + + if (!rule || fsp->location != rule->sw_idx) + return -EINVAL; + + if (rule->filter.match_flags) { + fsp->flow_type = ETHER_FLOW; + fsp->ring_cookie = rule->action; + if (rule->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) { + fsp->h_u.ether_spec.h_proto = rule->filter.etype; + fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK; + } + if (rule->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI) { + fsp->flow_type |= FLOW_EXT; + fsp->h_ext.vlan_tci = rule->filter.vlan_tci; + fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK); + } + return 0; + } + return -EINVAL; +} + +static int igb_get_ethtool_nfc_all(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct igb_nfc_filter *rule; + int cnt = 0; + + /* report total rule count */ + cmd->data = IGB_MAX_RXNFC_FILTERS; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + rule_locs[cnt] = rule->sw_idx; + cnt++; + } + + cmd->rule_cnt = cnt; + + return 0; +} + static int igb_get_rss_hash_opts(struct igb_adapter *adapter, struct ethtool_rxnfc *cmd) { @@ -2484,6 +2541,16 @@ static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, cmd->data = adapter->num_rx_queues; 
ret = 0; break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = adapter->nfc_filter_count; + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = igb_get_ethtool_nfc_entry(adapter, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = igb_get_ethtool_nfc_all(adapter, cmd, rule_locs); + break; case ETHTOOL_GRXFH: ret = igb_get_rss_hash_opts(adapter, cmd); break; @@ -2598,6 +2665,279 @@ static int igb_set_rss_hash_opt(struct igb_adapter *adapter, return 0; } +static int igb_rxnfc_write_etype_filter(struct igb_adapter *adapter, + struct igb_nfc_filter *input) +{ + struct e1000_hw *hw = &adapter->hw; + u8 i; + u32 etqf; + u16 etype; + + /* find an empty etype filter register */ + for (i = 0; i < MAX_ETYPE_FILTER; ++i) { + if (!adapter->etype_bitmap[i]) + break; + } + if (i == MAX_ETYPE_FILTER) { + dev_err(&adapter->pdev->dev, "ethtool -N: etype filters are all used.\n"); + return -EINVAL; + } + + adapter->etype_bitmap[i] = true; + + etqf = rd32(E1000_ETQF(i)); + etype = ntohs(input->filter.etype & ETHER_TYPE_FULL_MASK); + + etqf |= E1000_ETQF_FILTER_ENABLE; + etqf &= ~E1000_ETQF_ETYPE_MASK; + etqf |= (etype & E1000_ETQF_ETYPE_MASK); + + etqf &= ~E1000_ETQF_QUEUE_MASK; + etqf |= ((input->action << E1000_ETQF_QUEUE_SHIFT) + & E1000_ETQF_QUEUE_MASK); + etqf |= E1000_ETQF_QUEUE_ENABLE; + + wr32(E1000_ETQF(i), etqf); + + input->etype_reg_index = i; + + return 0; +} + +int igb_rxnfc_write_vlan_prio_filter(struct igb_adapter *adapter, + struct igb_nfc_filter *input) +{ + struct e1000_hw *hw = &adapter->hw; + u8 vlan_priority; + u16 queue_index; + u32 vlapqf; + + vlapqf = rd32(E1000_VLAPQF); + vlan_priority = (ntohs(input->filter.vlan_tci) & VLAN_PRIO_MASK) + >> VLAN_PRIO_SHIFT; + queue_index = (vlapqf >> (vlan_priority * 4)) & E1000_VLAPQF_QUEUE_MASK; + + /* check whether this vlan prio is already set */ + if ((vlapqf & E1000_VLAPQF_P_VALID(vlan_priority)) && + (queue_index != input->action)) { + dev_err(&adapter->pdev->dev, "ethtool rxnfc set vlan prio filter failed.\n"); + return -EEXIST; + } + + vlapqf |= E1000_VLAPQF_P_VALID(vlan_priority); + vlapqf |= E1000_VLAPQF_QUEUE_SEL(vlan_priority, input->action); + + wr32(E1000_VLAPQF, vlapqf); + + return 0; +} + +int igb_add_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input) +{ + int err = -EINVAL; + + if (input->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) { + err = igb_rxnfc_write_etype_filter(adapter, input); + if (err) + return err; + } + + if (input->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI) + err = igb_rxnfc_write_vlan_prio_filter(adapter, input); + + return err; +} + +static void igb_clear_etype_filter_regs(struct igb_adapter *adapter, + u16 reg_index) +{ + struct e1000_hw *hw = &adapter->hw; + u32 etqf = rd32(E1000_ETQF(reg_index)); + + etqf &= ~E1000_ETQF_QUEUE_ENABLE; + etqf &= ~E1000_ETQF_QUEUE_MASK; + etqf &= ~E1000_ETQF_FILTER_ENABLE; + + wr32(E1000_ETQF(reg_index), etqf); + + adapter->etype_bitmap[reg_index] = false; +} + +static void igb_clear_vlan_prio_filter(struct igb_adapter *adapter, + u16 vlan_tci) +{ + struct e1000_hw *hw = &adapter->hw; + u8 vlan_priority; + u32 vlapqf; + + vlan_priority = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + + vlapqf = rd32(E1000_VLAPQF); + vlapqf &= ~E1000_VLAPQF_P_VALID(vlan_priority); + vlapqf &= ~E1000_VLAPQF_QUEUE_SEL(vlan_priority, + E1000_VLAPQF_QUEUE_MASK); + + wr32(E1000_VLAPQF, vlapqf); +} + +int igb_erase_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input) +{ + if (input->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) + 
igb_clear_etype_filter_regs(adapter, + input->etype_reg_index); + + if (input->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI) + igb_clear_vlan_prio_filter(adapter, + ntohs(input->filter.vlan_tci)); + + return 0; +} + +static int igb_update_ethtool_nfc_entry(struct igb_adapter *adapter, + struct igb_nfc_filter *input, + u16 sw_idx) +{ + struct igb_nfc_filter *rule, *parent; + int err = -EINVAL; + + parent = NULL; + rule = NULL; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + /* hash found, or no matching entry */ + if (rule->sw_idx >= sw_idx) + break; + parent = rule; + } + + /* if there is an old rule occupying our place remove it */ + if (rule && (rule->sw_idx == sw_idx)) { + if (!input) + err = igb_erase_filter(adapter, rule); + + hlist_del(&rule->nfc_node); + kfree(rule); + adapter->nfc_filter_count--; + } + + /* If no input this was a delete, err should be 0 if a rule was + * successfully found and removed from the list else -EINVAL + */ + if (!input) + return err; + + /* initialize node */ + INIT_HLIST_NODE(&input->nfc_node); + + /* add filter to the list */ + if (parent) + hlist_add_behind(&parent->nfc_node, &input->nfc_node); + else + hlist_add_head(&input->nfc_node, &adapter->nfc_filter_list); + + /* update counts */ + adapter->nfc_filter_count++; + + return 0; +} + +static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct igb_nfc_filter *input, *rule; + int err = 0; + + if (!(netdev->hw_features & NETIF_F_NTUPLE)) + return -EOPNOTSUPP; + + /* Don't allow programming if the action is a queue greater than + * the number of online Rx queues. + */ + if ((fsp->ring_cookie == RX_CLS_FLOW_DISC) || + (fsp->ring_cookie >= adapter->num_rx_queues)) { + dev_err(&adapter->pdev->dev, "ethtool -N: The specified action is invalid\n"); + return -EINVAL; + } + + /* Don't allow indexes to exist outside of available space */ + if (fsp->location >= IGB_MAX_RXNFC_FILTERS) { + dev_err(&adapter->pdev->dev, "Location out of range\n"); + return -EINVAL; + } + + if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW) + return -EINVAL; + + if (fsp->m_u.ether_spec.h_proto != ETHER_TYPE_FULL_MASK && + fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) + return -EINVAL; + + input = kzalloc(sizeof(*input), GFP_KERNEL); + if (!input) + return -ENOMEM; + + if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) { + input->filter.etype = fsp->h_u.ether_spec.h_proto; + input->filter.match_flags = IGB_FILTER_FLAG_ETHER_TYPE; + } + + if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) { + if (fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) { + err = -EINVAL; + goto err_out; + } + input->filter.vlan_tci = fsp->h_ext.vlan_tci; + input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI; + } + + input->action = fsp->ring_cookie; + input->sw_idx = fsp->location; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (!memcmp(&input->filter, &rule->filter, + sizeof(input->filter))) { + err = -EEXIST; + dev_err(&adapter->pdev->dev, + "ethtool: this filter is already set\n"); + goto err_out_w_lock; + } + } + + err = igb_add_filter(adapter, input); + if (err) + goto err_out_w_lock; + + igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx); + + spin_unlock(&adapter->nfc_lock); + return 0; + +err_out_w_lock: + spin_unlock(&adapter->nfc_lock); +err_out: + kfree(input); + return err; +} 
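
A minimal user-space sketch of how the ETHTOOL_SRXCLSRLINS path added above can be
exercised, assuming the interface has ntuple filtering enabled (ethtool -K eth0
ntuple on) and at least two Rx queues; the interface name "eth0", the LLDP
ethertype 0x88cc and queue 1 are placeholders for illustration, not values taken
from the patch:

/* sketch: insert one ether-type rxnfc rule, steering LLDP frames to Rx queue 1 */
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = ETHER_FLOW;
	nfc.fs.h_u.ether_spec.h_proto = htons(0x88cc);	/* placeholder ethertype */
	nfc.fs.m_u.ether_spec.h_proto = 0xffff;		/* full ethertype match, as the driver requires */
	nfc.fs.ring_cookie = 1;				/* deliver to Rx queue 1 */
	nfc.fs.location = 0;				/* rule index, must be < IGB_MAX_RXNFC_FILTERS */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRXCLSRLINS");

	close(fd);
	return 0;
}

With the ethtool utility the same rule corresponds roughly to
"ethtool -N eth0 flow-type ether proto 0x88cc action 1 loc 0". A VLAN-priority
rule would instead set FLOW_EXT in fs.flow_type, fill fs.h_ext.vlan_tci and set
fs.m_ext.vlan_tci = htons(0xe000) (VLAN_PRIO_MASK), matching the second branch
of igb_add_filter() above.
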
+ +static int igb_del_ethtool_nfc_entry(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + int err; + + spin_lock(&adapter->nfc_lock); + err = igb_update_ethtool_nfc_entry(adapter, NULL, fsp->location); + spin_unlock(&adapter->nfc_lock); + + return err; +} + static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) { struct igb_adapter *adapter = netdev_priv(dev); @@ -2607,6 +2947,11 @@ static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) case ETHTOOL_SRXFH: ret = igb_set_rss_hash_opt(adapter, cmd); break; + case ETHTOOL_SRXCLSRLINS: + ret = igb_add_ethtool_nfc_entry(adapter, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = igb_del_ethtool_nfc_entry(adapter, cmd); default: break; } diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 942a89fb0090..af75eac5fa16 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -176,6 +176,8 @@ static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); static void igb_check_vf_rate_limit(struct igb_adapter *); +static void igb_nfc_filter_exit(struct igb_adapter *adapter); +static void igb_nfc_filter_restore(struct igb_adapter *adapter); #ifdef CONFIG_PCI_IOV static int igb_vf_configure(struct igb_adapter *adapter, int vf); @@ -1611,6 +1613,7 @@ static void igb_configure(struct igb_adapter *adapter) igb_setup_mrqc(adapter); igb_setup_rctl(adapter); + igb_nfc_filter_restore(adapter); igb_configure_tx(adapter); igb_configure_rx(adapter); @@ -2059,6 +2062,21 @@ static int igb_set_features(struct net_device *netdev, if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) return 0; + if (!(features & NETIF_F_NTUPLE)) { + struct hlist_node *node2; + struct igb_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + hlist_for_each_entry_safe(rule, node2, + &adapter->nfc_filter_list, nfc_node) { + igb_erase_filter(adapter, rule); + hlist_del(&rule->nfc_node); + kfree(rule); + } + spin_unlock(&adapter->nfc_lock); + adapter->nfc_filter_count = 0; + } + netdev->features = features; if (netif_running(netdev)) @@ -3053,6 +3071,7 @@ static int igb_sw_init(struct igb_adapter *adapter) VLAN_HLEN; adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + spin_lock_init(&adapter->nfc_lock); spin_lock_init(&adapter->stats64_lock); #ifdef CONFIG_PCI_IOV switch (hw->mac.type) { @@ -3240,6 +3259,8 @@ static int __igb_close(struct net_device *netdev, bool suspending) igb_down(adapter); igb_free_irq(adapter); + igb_nfc_filter_exit(adapter); + igb_free_all_tx_resources(adapter); igb_free_all_rx_resources(adapter); @@ -8306,4 +8327,28 @@ int igb_reinit_queues(struct igb_adapter *adapter) return err; } + +static void igb_nfc_filter_exit(struct igb_adapter *adapter) +{ + struct igb_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) + igb_erase_filter(adapter, rule); + + spin_unlock(&adapter->nfc_lock); +} + +static void igb_nfc_filter_restore(struct igb_adapter *adapter) +{ + struct igb_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) + igb_add_filter(adapter, rule); + + spin_unlock(&adapter->nfc_lock); +} /* igb_main.c */ diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c 
b/drivers/net/ethernet/intel/igb/igb_ptp.c index 336c103ae374..66dfa2085cc7 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -998,12 +998,12 @@ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter, /* define ethertype filter for timestamped packets */ if (is_l2) - wr32(E1000_ETQF(3), + wr32(E1000_ETQF(IGB_ETQF_FILTER_1588), (E1000_ETQF_FILTER_ENABLE | /* enable filter */ E1000_ETQF_1588 | /* enable timestamping */ ETH_P_1588)); /* 1588 eth protocol type */ else - wr32(E1000_ETQF(3), 0); + wr32(E1000_ETQF(IGB_ETQF_FILTER_1588), 0); /* L4 Queue Filter[3]: filter by destination port and protocol */ if (is_l4) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 9475ff9055aa..33c025055011 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -645,6 +645,7 @@ struct ixgbe_adapter { #define IXGBE_FLAG_RX_HWTSTAMP_ENABLED BIT(25) #define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER BIT(26) #define IXGBE_FLAG_DCB_CAPABLE BIT(27) +#define IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE BIT(28) u32 flags2; #define IXGBE_FLAG2_RSC_CAPABLE BIT(0) @@ -653,13 +654,12 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_TEMP_SENSOR_EVENT BIT(3) #define IXGBE_FLAG2_SEARCH_FOR_SFP BIT(4) #define IXGBE_FLAG2_SFP_NEEDS_RESET BIT(5) -#define IXGBE_FLAG2_RESET_REQUESTED BIT(6) #define IXGBE_FLAG2_FDIR_REQUIRES_REINIT BIT(7) #define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP BIT(8) #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(9) #define IXGBE_FLAG2_PTP_PPS_ENABLED BIT(10) #define IXGBE_FLAG2_PHY_INTERRUPT BIT(11) -#define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12) +#define IXGBE_FLAG2_UDP_TUN_REREG_NEEDED BIT(12) #define IXGBE_FLAG2_VLAN_PROMISC BIT(13) /* Tx fast path data */ @@ -673,6 +673,7 @@ struct ixgbe_adapter { /* Port number used to identify VXLAN traffic */ __be16 vxlan_port; + __be16 geneve_port; /* TX */ struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; @@ -840,6 +841,7 @@ enum ixgbe_state_t { __IXGBE_IN_SFP_INIT, __IXGBE_PTP_RUNNING, __IXGBE_PTP_TX_IN_PROGRESS, + __IXGBE_RESET_REQUESTED, }; struct ixgbe_cb { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index c47b605e8651..77d3039283f6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -99,6 +99,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) case IXGBE_DEV_ID_X550T: case IXGBE_DEV_ID_X550T1: case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_10G_T: supported = true; break; default: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 0d7209eb5abf..9547191e26c9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -193,7 +193,9 @@ static int ixgbe_get_settings(struct net_device *netdev, if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) ecmd->supported |= ixgbe_get_supported_10gtypes(hw); if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) - ecmd->supported |= SUPPORTED_1000baseT_Full; + ecmd->supported |= (ixgbe_isbackplane(hw->phy.media_type)) ? + SUPPORTED_1000baseKX_Full : + SUPPORTED_1000baseT_Full; if (supported_link & IXGBE_LINK_SPEED_100_FULL) ecmd->supported |= ixgbe_isbackplane(hw->phy.media_type) ? 
SUPPORTED_1000baseKX_Full : diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index b4f03748adc0..d76bc1a313ea 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -137,6 +137,7 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a }, /* required last entry */ {0, } @@ -1103,7 +1104,7 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter) /* Do the reset outside of interrupt context */ if (!test_bit(__IXGBE_DOWN, &adapter->state)) { - adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; + set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); e_warn(drv, "initiating reset due to tx timeout\n"); ixgbe_service_event_schedule(adapter); } @@ -1495,7 +1496,6 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, struct sk_buff *skb) { __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; - __le16 hdr_info = rx_desc->wb.lower.lo_dword.hs_rss.hdr_info; bool encap_pkt = false; skb_checksum_none_assert(skb); @@ -1504,8 +1504,8 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, if (!(ring->netdev->features & NETIF_F_RXCSUM)) return; - if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) && - (hdr_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_TUNNEL >> 16))) { + /* check for VXLAN and Geneve packets */ + if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) { encap_pkt = true; skb->encapsulation = 1; } @@ -2777,7 +2777,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) } if (eicr & IXGBE_EICR_ECC) { e_info(link, "Received ECC Err, initiating reset\n"); - adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; + set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); ixgbe_service_event_schedule(adapter); IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); } @@ -3007,7 +3007,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data) case ixgbe_mac_x550em_a: if (eicr & IXGBE_EICR_ECC) { e_info(link, "Received ECC Err, initiating reset\n"); - adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; + set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); ixgbe_service_event_schedule(adapter); IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); } @@ -3224,7 +3224,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); if (!wait_loop) - e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); + hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx); } static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) @@ -3922,6 +3922,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) rfctl &= ~IXGBE_RFCTL_RSC_DIS; if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) rfctl |= IXGBE_RFCTL_RSC_DIS; + + /* disable NFS filtering */ + rfctl |= (IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS); IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl); /* Program registers for the distribution of queues */ @@ -4586,18 +4589,23 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) } } -static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter) +static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask) { - 
switch (adapter->hw.mac.type) { - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0); + struct ixgbe_hw *hw = &adapter->hw; + u32 vxlanctrl; + + if (!(adapter->flags & (IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE | + IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))) + return; + + vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) && ~mask; + IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl); + + if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK) adapter->vxlan_port = 0; - break; - default: - break; - } + + if (mask & IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK) + adapter->geneve_port = 0; } #ifdef CONFIG_IXGBE_DCB @@ -5500,8 +5508,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter) ixgbe_napi_disable_all(adapter); - adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT | - IXGBE_FLAG2_RESET_REQUESTED); + clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state); + adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT; adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; del_timer_sync(&adapter->service_timer); @@ -5711,8 +5719,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) if (fwsm & IXGBE_FWSM_TS_ENABLED) adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; break; - case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE; + /* fall through */ + case ixgbe_mac_X550EM_x: #ifdef CONFIG_IXGBE_DCB adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE; #endif @@ -6144,7 +6154,7 @@ int ixgbe_open(struct net_device *netdev) ixgbe_up_complete(adapter); - ixgbe_clear_vxlan_port(adapter); + ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK); udp_tunnel_get_rx_info(netdev); return 0; @@ -6921,7 +6931,7 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) * (Do the reset outside of interrupt context). 
*/ e_warn(drv, "initiating reset to clear Tx work after link loss\n"); - adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; + set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); } } } @@ -7187,11 +7197,9 @@ static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter) static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) { - if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED)) + if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state)) return; - adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED; - /* If we're already down, removing or resetting, just bail */ if (test_bit(__IXGBE_DOWN, &adapter->state) || test_bit(__IXGBE_REMOVING, &adapter->state) || @@ -7225,9 +7233,9 @@ static void ixgbe_service_task(struct work_struct *work) ixgbe_service_event_complete(adapter); return; } - if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) { + if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) { rtnl_lock(); - adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED; + adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; udp_tunnel_get_rx_info(adapter->netdev); rtnl_unlock(); } @@ -7667,6 +7675,10 @@ static void ixgbe_atr(struct ixgbe_ring *ring, if (adapter->vxlan_port && udp_hdr(skb)->dest == adapter->vxlan_port) hdr.network = skb_inner_network_header(skb); + + if (adapter->geneve_port && + udp_hdr(skb)->dest == adapter->geneve_port) + hdr.network = skb_inner_network_header(skb); } /* Currently only IPv4/IPv6 with TCP is supported */ @@ -8802,10 +8814,23 @@ static int ixgbe_set_features(struct net_device *netdev, netdev->features = features; if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) { - if (features & NETIF_F_RXCSUM) - adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED; - else - ixgbe_clear_vxlan_port(adapter); + if (features & NETIF_F_RXCSUM) { + adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; + } else { + u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK; + + ixgbe_clear_udp_tunnel_port(adapter, port_mask); + } + } + + if ((adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) { + if (features & NETIF_F_RXCSUM) { + adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; + } else { + u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK; + + ixgbe_clear_udp_tunnel_port(adapter, port_mask); + } } if (need_reset) @@ -8818,67 +8843,115 @@ static int ixgbe_set_features(struct net_device *netdev, } /** - * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up + * ixgbe_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports * @dev: The port's netdev * @ti: Tunnel endpoint information **/ -static void ixgbe_add_vxlan_port(struct net_device *dev, - struct udp_tunnel_info *ti) +static void ixgbe_add_udp_tunnel_port(struct net_device *dev, + struct udp_tunnel_info *ti) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; __be16 port = ti->port; - - if (ti->type != UDP_TUNNEL_TYPE_VXLAN) - return; + u32 port_shift = 0; + u32 reg; if (ti->sa_family != AF_INET) return; - if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) - return; + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + return; - if (adapter->vxlan_port == port) - return; + if (adapter->vxlan_port == port) + return; + + if (adapter->vxlan_port) { + netdev_info(dev, + "VXLAN port %d set, not adding port %d\n", + ntohs(adapter->vxlan_port), + ntohs(port)); + return; + } + + adapter->vxlan_port = port; + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (!(adapter->flags & 
IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) + return; + + if (adapter->geneve_port == port) + return; + + if (adapter->geneve_port) { + netdev_info(dev, + "GENEVE port %d set, not adding port %d\n", + ntohs(adapter->geneve_port), + ntohs(port)); + return; + } - if (adapter->vxlan_port) { - netdev_info(dev, - "Hit Max num of VXLAN ports, not adding port %d\n", - ntohs(port)); + port_shift = IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT; + adapter->geneve_port = port; + break; + default: return; } - adapter->vxlan_port = port; - IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, ntohs(port)); + reg = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) | ntohs(port) << port_shift; + IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, reg); } /** - * ixgbe_del_vxlan_port - Get notifications about VXLAN ports that go away + * ixgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports * @dev: The port's netdev * @ti: Tunnel endpoint information **/ -static void ixgbe_del_vxlan_port(struct net_device *dev, - struct udp_tunnel_info *ti) +static void ixgbe_del_udp_tunnel_port(struct net_device *dev, + struct udp_tunnel_info *ti) { struct ixgbe_adapter *adapter = netdev_priv(dev); + u32 port_mask; - if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + if (ti->type != UDP_TUNNEL_TYPE_VXLAN && + ti->type != UDP_TUNNEL_TYPE_GENEVE) return; if (ti->sa_family != AF_INET) return; - if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) - return; + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + return; - if (adapter->vxlan_port != ti->port) { - netdev_info(dev, "Port %d was not found, not deleting\n", - ntohs(ti->port)); + if (adapter->vxlan_port != ti->port) { + netdev_info(dev, "VXLAN port %d not found\n", + ntohs(ti->port)); + return; + } + + port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK; + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) + return; + + if (adapter->geneve_port != ti->port) { + netdev_info(dev, "GENEVE port %d not found\n", + ntohs(ti->port)); + return; + } + + port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK; + break; + default: return; } - ixgbe_clear_vxlan_port(adapter); - adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED; + ixgbe_clear_udp_tunnel_port(adapter, port_mask); + adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; } static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], @@ -9192,8 +9265,8 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, .ndo_dfwd_add_station = ixgbe_fwd_add, .ndo_dfwd_del_station = ixgbe_fwd_del, - .ndo_udp_tunnel_add = ixgbe_add_vxlan_port, - .ndo_udp_tunnel_del = ixgbe_del_vxlan_port, + .ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port, + .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port, .ndo_features_check = ixgbe_features_check, }; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 1248a9936f7a..31d82e3abac8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -90,6 +90,7 @@ #define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4 #define IXGBE_DEV_ID_X550EM_A_SGMII 0x15C6 #define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7 +#define IXGBE_DEV_ID_X550EM_A_10G_T 0x15C8 #define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE /* VF Device IDs */ @@ -487,6 +488,13 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100)) /* Ext Flexible Host * Filter Table */ +/* masks for accessing VXLAN and GENEVE UDP 
ports */ +#define IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK 0x0000ffff /* VXLAN port */ +#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK 0xffff0000 /* GENEVE port */ +#define IXGBE_VXLANCTRL_ALL_UDPPORT_MASK 0xffffffff /* GENEVE/VXLAN */ + +#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT 16 + #define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4 #define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2 @@ -1823,6 +1831,9 @@ enum { #define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i) #define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i) #define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i) +#define IXGBE_X557_LED_MANUAL_SET_MASK BIT(8) +#define IXGBE_X557_MAX_LED_INDEX 3 +#define IXGBE_X557_LED_PROVISIONING 0xC430 /* LED modes */ #define IXGBE_LED_LINK_UP 0x0 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 4716ca499e67..fb1b819d8311 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -295,6 +295,12 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) case IXGBE_DEV_ID_X550EM_A_KR_L: hw->phy.type = ixgbe_phy_x550em_kr; break; + case IXGBE_DEV_ID_X550EM_A_10G_T: + if (hw->bus.lan_id) + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; + /* Fallthrough */ case IXGBE_DEV_ID_X550EM_X_1G_T: case IXGBE_DEV_ID_X550EM_X_10G_T: return ixgbe_identify_phy_generic(hw); @@ -2114,6 +2120,50 @@ static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw) return ixgbe_enable_lasi_ext_t_x550em(hw); } +/** + * ixgbe_led_on_t_x550em - Turns on the software controllable LEDs. + * @hw: pointer to hardware structure + * @led_idx: led number to turn on + **/ +s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx) +{ + u16 phy_data; + + if (led_idx >= IXGBE_X557_MAX_LED_INDEX) + return IXGBE_ERR_PARAM; + + /* To turn on the LED, set mode to ON. */ + hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data); + phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK; + hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data); + + return 0; +} + +/** + * ixgbe_led_off_t_x550em - Turns off the software controllable LEDs. + * @hw: pointer to hardware structure + * @led_idx: led number to turn off + **/ +s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx) +{ + u16 phy_data; + + if (led_idx >= IXGBE_X557_MAX_LED_INDEX) + return IXGBE_ERR_PARAM; + + /* To turn on the LED, set mode to ON. 
*/ + hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data); + phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK; + hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data); + + return 0; +} + /** ixgbe_get_lcd_x550em - Determine lowest common denominator * @hw: pointer to hardware structure * @lcd_speed: pointer to lowest common link speed @@ -2456,6 +2506,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) break; case IXGBE_DEV_ID_X550EM_X_1G_T: case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_10G_T: media_type = ixgbe_media_type_copper; break; default: @@ -2514,6 +2565,9 @@ static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw) switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_SGMII: + case IXGBE_DEV_ID_X550EM_A_SGMII_L: + case IXGBE_DEV_ID_X550EM_A_10G_T: case IXGBE_DEV_ID_X550EM_A_SFP: /* Config MDIO clock speed before the first MDIO PHY access */ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); @@ -2853,8 +2907,6 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, .write_analog_reg8 = NULL, \ .set_rxpba = &ixgbe_set_rxpba_generic, \ .check_link = &ixgbe_check_mac_link_generic, \ - .led_on = &ixgbe_led_on_generic, \ - .led_off = &ixgbe_led_off_generic, \ .blink_led_start = &ixgbe_blink_led_start_X540, \ .blink_led_stop = &ixgbe_blink_led_stop_X540, \ .set_rar = &ixgbe_set_rar_generic, \ @@ -2886,6 +2938,8 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, static const struct ixgbe_mac_operations mac_ops_X550 = { X550_COMMON_MAC + .led_on = ixgbe_led_on_generic, + .led_off = ixgbe_led_off_generic, .reset_hw = &ixgbe_reset_hw_X540, .get_media_type = &ixgbe_get_media_type_X540, .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, @@ -2904,6 +2958,8 @@ static const struct ixgbe_mac_operations mac_ops_X550 = { static const struct ixgbe_mac_operations mac_ops_X550EM_x = { X550_COMMON_MAC + .led_on = ixgbe_led_on_t_x550em, + .led_off = ixgbe_led_off_t_x550em, .reset_hw = &ixgbe_reset_hw_X550em, .get_media_type = &ixgbe_get_media_type_X550em, .get_san_mac_addr = NULL, @@ -2922,6 +2978,8 @@ static const struct ixgbe_mac_operations mac_ops_X550EM_x = { static struct ixgbe_mac_operations mac_ops_x550em_a = { X550_COMMON_MAC + .led_on = ixgbe_led_on_t_x550em, + .led_off = ixgbe_led_off_t_x550em, .reset_hw = ixgbe_reset_hw_X550em, .get_media_type = ixgbe_get_media_type_X550em, .get_san_mac_addr = NULL, diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index be52f597688b..5639fbe294d0 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -502,12 +502,9 @@ extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector); void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter); void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter); -#ifdef DEBUG -char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw); -#define hw_dbg(hw, format, arg...) \ - printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg) -#else -#define hw_dbg(hw, format, arg...) do {} while (0) -#endif +#define ixgbevf_hw_to_netdev(hw) \ + (((struct ixgbevf_adapter *)(hw)->back)->netdev) +#define hw_dbg(hw, format, arg...) 
\ + netdev_dbg(ixgbevf_hw_to_netdev(hw), format, ## arg) #endif /* _IXGBEVF_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index d9d6616f02a4..4044608083cd 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1612,7 +1612,7 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter, txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx)); } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); if (!wait_loop) - pr_err("Could not enable Tx Queue %d\n", reg_idx); + hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx); } /** @@ -2993,6 +2993,7 @@ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) **/ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring) { + struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev); int size; size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index a52f70ec42b6..d46ba1dabcb7 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -284,7 +284,8 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) if (addr) ether_addr_copy(msg_addr, addr); - ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3); + ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, + sizeof(msgbuf) / sizeof(u32)); if (!ret_val) { msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; @@ -441,7 +442,8 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, msgbuf[0] = IXGBE_VF_SET_MAC_ADDR; ether_addr_copy(msg_addr, addr); - ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2); + ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, + sizeof(msgbuf) / sizeof(u32)); msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; @@ -551,7 +553,8 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE; msgbuf[1] = xcast_mode; - err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2); + err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, + sizeof(msgbuf) / sizeof(u32)); if (err) return err; @@ -588,7 +591,8 @@ static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */ msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT; - err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2); + err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, + sizeof(msgbuf) / sizeof(u32)); if (err) goto mbx_err; @@ -791,7 +795,8 @@ static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) msgbuf[0] = IXGBE_VF_SET_LPE; msgbuf[1] = max_size; - ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2); + ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, + sizeof(msgbuf) / sizeof(u32)); if (ret_val) return ret_val; if ((msgbuf[0] & IXGBE_VF_SET_LPE) && @@ -837,7 +842,8 @@ static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api) msg[1] = api; msg[2] = 0; - err = ixgbevf_write_msg_read_ack(hw, msg, msg, 3); + err = ixgbevf_write_msg_read_ack(hw, msg, msg, + sizeof(msg) / sizeof(u32)); if (!err) { msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; @@ -887,7 +893,8 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, msg[0] = IXGBE_VF_GET_QUEUE; msg[1] = msg[2] = msg[3] = msg[4] = 0; - err = ixgbevf_write_msg_read_ack(hw, msg, msg, 5); + err = ixgbevf_write_msg_read_ack(hw, msg, msg, + sizeof(msg) / sizeof(u32)); if 
(!err) { msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index d41c28d00b57..8e4252dd9a9d 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -399,7 +399,6 @@ struct mvneta_port { u16 rx_ring_size; struct mii_bus *mii_bus; - struct phy_device *phy_dev; phy_interface_t phy_interface; struct device_node *phy_node; unsigned int link; @@ -2651,6 +2650,7 @@ static int mvneta_poll(struct napi_struct *napi, int budget) u32 cause_rx_tx; int rx_queue; struct mvneta_port *pp = netdev_priv(napi->dev); + struct net_device *ndev = pp->dev; struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); if (!netif_running(pp->dev)) { @@ -2668,7 +2668,7 @@ static int mvneta_poll(struct napi_struct *napi, int budget) (MVNETA_CAUSE_PHY_STATUS_CHANGE | MVNETA_CAUSE_LINK_CHANGE | MVNETA_CAUSE_PSC_SYNC_CHANGE))) { - mvneta_fixed_link_update(pp, pp->phy_dev); + mvneta_fixed_link_update(pp, ndev->phydev); } } @@ -2963,6 +2963,7 @@ static int mvneta_setup_txqs(struct mvneta_port *pp) static void mvneta_start_dev(struct mvneta_port *pp) { int cpu; + struct net_device *ndev = pp->dev; mvneta_max_rx_size_set(pp, pp->pkt_size); mvneta_txq_max_tx_size_set(pp, pp->pkt_size); @@ -2985,15 +2986,16 @@ static void mvneta_start_dev(struct mvneta_port *pp) MVNETA_CAUSE_LINK_CHANGE | MVNETA_CAUSE_PSC_SYNC_CHANGE); - phy_start(pp->phy_dev); + phy_start(ndev->phydev); netif_tx_start_all_queues(pp->dev); } static void mvneta_stop_dev(struct mvneta_port *pp) { unsigned int cpu; + struct net_device *ndev = pp->dev; - phy_stop(pp->phy_dev); + phy_stop(ndev->phydev); for_each_online_cpu(cpu) { struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); @@ -3166,7 +3168,7 @@ static int mvneta_set_mac_addr(struct net_device *dev, void *addr) static void mvneta_adjust_link(struct net_device *ndev) { struct mvneta_port *pp = netdev_priv(ndev); - struct phy_device *phydev = pp->phy_dev; + struct phy_device *phydev = ndev->phydev; int status_change = 0; if (phydev->link) { @@ -3244,7 +3246,6 @@ static int mvneta_mdio_probe(struct mvneta_port *pp) phy_dev->supported &= PHY_GBIT_FEATURES; phy_dev->advertising = phy_dev->supported; - pp->phy_dev = phy_dev; pp->link = 0; pp->duplex = 0; pp->speed = 0; @@ -3254,8 +3255,9 @@ static int mvneta_mdio_probe(struct mvneta_port *pp) static void mvneta_mdio_remove(struct mvneta_port *pp) { - phy_disconnect(pp->phy_dev); - pp->phy_dev = NULL; + struct net_device *ndev = pp->dev; + + phy_disconnect(ndev->phydev); } /* Electing a CPU must be done in an atomic way: it should be done @@ -3495,42 +3497,30 @@ static int mvneta_stop(struct net_device *dev) static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - struct mvneta_port *pp = netdev_priv(dev); - - if (!pp->phy_dev) + if (!dev->phydev) return -ENOTSUPP; - return phy_mii_ioctl(pp->phy_dev, ifr, cmd); + return phy_mii_ioctl(dev->phydev, ifr, cmd); } /* Ethtool methods */ -/* Get settings (phy address, speed) for ethtools */ -int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +/* Set link ksettings (phy address, speed) for ethtools */ +int mvneta_ethtool_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd) { - struct mvneta_port *pp = netdev_priv(dev); - - if (!pp->phy_dev) - return -ENODEV; - - return phy_ethtool_gset(pp->phy_dev, cmd); -} - -/* Set settings (phy address, speed) for ethtools */ -int mvneta_ethtool_set_settings(struct net_device *dev, 
struct ethtool_cmd *cmd) -{ - struct mvneta_port *pp = netdev_priv(dev); - struct phy_device *phydev = pp->phy_dev; + struct mvneta_port *pp = netdev_priv(ndev); + struct phy_device *phydev = ndev->phydev; if (!phydev) return -ENODEV; - if ((cmd->autoneg == AUTONEG_ENABLE) != pp->use_inband_status) { + if ((cmd->base.autoneg == AUTONEG_ENABLE) != pp->use_inband_status) { u32 val; - mvneta_set_autoneg(pp, cmd->autoneg == AUTONEG_ENABLE); + mvneta_set_autoneg(pp, cmd->base.autoneg == AUTONEG_ENABLE); - if (cmd->autoneg == AUTONEG_DISABLE) { + if (cmd->base.autoneg == AUTONEG_DISABLE) { val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED | MVNETA_GMAC_CONFIG_GMII_SPEED | @@ -3547,17 +3537,17 @@ int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); } - pp->use_inband_status = (cmd->autoneg == AUTONEG_ENABLE); + pp->use_inband_status = (cmd->base.autoneg == AUTONEG_ENABLE); netdev_info(pp->dev, "autoneg status set to %i\n", pp->use_inband_status); - if (netif_running(dev)) { + if (netif_running(ndev)) { mvneta_port_down(pp); mvneta_port_up(pp); } } - return phy_ethtool_sset(pp->phy_dev, cmd); + return phy_ethtool_ksettings_set(ndev->phydev, cmd); } /* Set interrupt coalescing for ethtools */ @@ -3821,8 +3811,6 @@ static const struct net_device_ops mvneta_netdev_ops = { const struct ethtool_ops mvneta_eth_tool_ops = { .get_link = ethtool_op_get_link, - .get_settings = mvneta_ethtool_get_settings, - .set_settings = mvneta_ethtool_set_settings, .set_coalesce = mvneta_ethtool_set_coalesce, .get_coalesce = mvneta_ethtool_get_coalesce, .get_drvinfo = mvneta_ethtool_get_drvinfo, @@ -3835,6 +3823,8 @@ const struct ethtool_ops mvneta_eth_tool_ops = { .get_rxnfc = mvneta_ethtool_get_rxnfc, .get_rxfh = mvneta_ethtool_get_rxfh, .set_rxfh = mvneta_ethtool_set_rxfh, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = mvneta_ethtool_set_link_ksettings, }; /* Initialize hw */ diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index f1609542adf1..0fd9fc8d2a79 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -342,25 +342,27 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth) mdiobus_free(eth->mii_bus); } -static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask) +static inline void mtk_irq_disable(struct mtk_eth *eth, + unsigned reg, u32 mask) { unsigned long flags; u32 val; spin_lock_irqsave(ð->irq_lock, flags); - val = mtk_r32(eth, MTK_QDMA_INT_MASK); - mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK); + val = mtk_r32(eth, reg); + mtk_w32(eth, val & ~mask, reg); spin_unlock_irqrestore(ð->irq_lock, flags); } -static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask) +static inline void mtk_irq_enable(struct mtk_eth *eth, + unsigned reg, u32 mask) { unsigned long flags; u32 val; spin_lock_irqsave(ð->irq_lock, flags); - val = mtk_r32(eth, MTK_QDMA_INT_MASK); - mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK); + val = mtk_r32(eth, reg); + mtk_w32(eth, val | mask, reg); spin_unlock_irqrestore(ð->irq_lock, flags); } @@ -369,18 +371,17 @@ static int mtk_set_mac_address(struct net_device *dev, void *p) int ret = eth_mac_addr(dev, p); struct mtk_mac *mac = netdev_priv(dev); const char *macaddr = dev->dev_addr; - unsigned long flags; if (ret) return ret; - spin_lock_irqsave(&mac->hw->page_lock, flags); + spin_lock_bh(&mac->hw->page_lock); mtk_w32(mac->hw, 
(macaddr[0] << 8) | macaddr[1], MTK_GDMA_MAC_ADRH(mac->id)); mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) | (macaddr[4] << 8) | macaddr[5], MTK_GDMA_MAC_ADRL(mac->id)); - spin_unlock_irqrestore(&mac->hw->page_lock, flags); + spin_unlock_bh(&mac->hw->page_lock); return 0; } @@ -764,7 +765,6 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) struct mtk_eth *eth = mac->hw; struct mtk_tx_ring *ring = ð->tx_ring; struct net_device_stats *stats = &dev->stats; - unsigned long flags; bool gso = false; int tx_num; @@ -772,14 +772,14 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) * however we have 2 queues running on the same ring so we need to lock * the ring access */ - spin_lock_irqsave(ð->page_lock, flags); + spin_lock(ð->page_lock); tx_num = mtk_cal_txd_req(skb); if (unlikely(atomic_read(&ring->free_count) <= tx_num)) { mtk_stop_queue(eth); netif_err(eth, tx_queued, dev, "Tx Ring full when queue awake!\n"); - spin_unlock_irqrestore(ð->page_lock, flags); + spin_unlock(ð->page_lock); return NETDEV_TX_BUSY; } @@ -804,12 +804,12 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) mtk_stop_queue(eth); - spin_unlock_irqrestore(ð->page_lock, flags); + spin_unlock(ð->page_lock); return NETDEV_TX_OK; drop: - spin_unlock_irqrestore(ð->page_lock, flags); + spin_unlock(ð->page_lock); stats->tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; @@ -899,12 +899,12 @@ release_desc: * we continue */ wmb(); - mtk_w32(eth, ring->calc_idx, MTK_QRX_CRX_IDX0); + mtk_w32(eth, ring->calc_idx, MTK_PRX_CRX_IDX0); done++; } if (done < budget) - mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS); + mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS); return done; } @@ -1014,7 +1014,7 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget) return budget; napi_complete(napi); - mtk_irq_enable(eth, MTK_TX_DONE_INT); + mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT); return tx_done; } @@ -1026,12 +1026,12 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget) int rx_done = 0; mtk_handle_status_irq(eth); - mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS); + mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS); rx_done = mtk_poll_rx(napi, budget, eth); if (unlikely(netif_msg_intr(eth))) { - status = mtk_r32(eth, MTK_QMTK_INT_STATUS); - mask = mtk_r32(eth, MTK_QDMA_INT_MASK); + status = mtk_r32(eth, MTK_PDMA_INT_STATUS); + mask = mtk_r32(eth, MTK_PDMA_INT_MASK); dev_info(eth->dev, "done rx %d, intr 0x%08x/0x%x\n", rx_done, status, mask); @@ -1040,12 +1040,12 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget) if (rx_done == budget) return budget; - status = mtk_r32(eth, MTK_QMTK_INT_STATUS); + status = mtk_r32(eth, MTK_PDMA_INT_STATUS); if (status & MTK_RX_DONE_INT) return budget; napi_complete(napi); - mtk_irq_enable(eth, MTK_RX_DONE_INT); + mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT); return rx_done; } @@ -1094,6 +1094,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth) mtk_w32(eth, ring->phys + ((MTK_DMA_SIZE - 1) * sz), MTK_QTX_DRX_PTR); + mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0)); return 0; @@ -1164,11 +1165,10 @@ static int mtk_rx_alloc(struct mtk_eth *eth) */ wmb(); - mtk_w32(eth, eth->rx_ring.phys, MTK_QRX_BASE_PTR0); - mtk_w32(eth, MTK_DMA_SIZE, MTK_QRX_MAX_CNT0); - mtk_w32(eth, eth->rx_ring.calc_idx, MTK_QRX_CRX_IDX0); - mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_QDMA_RST_IDX); - mtk_w32(eth, (QDMA_RES_THRES 
<< 8) | QDMA_RES_THRES, MTK_QTX_CFG(0)); + mtk_w32(eth, eth->rx_ring.phys, MTK_PRX_BASE_PTR0); + mtk_w32(eth, MTK_DMA_SIZE, MTK_PRX_MAX_CNT0); + mtk_w32(eth, eth->rx_ring.calc_idx, MTK_PRX_CRX_IDX0); + mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_PDMA_RST_IDX); return 0; } @@ -1287,7 +1287,7 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth) if (likely(napi_schedule_prep(ð->rx_napi))) { __napi_schedule(ð->rx_napi); - mtk_irq_disable(eth, MTK_RX_DONE_INT); + mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT); } return IRQ_HANDLED; @@ -1299,7 +1299,7 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth) if (likely(napi_schedule_prep(ð->tx_napi))) { __napi_schedule(ð->tx_napi); - mtk_irq_disable(eth, MTK_TX_DONE_INT); + mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT); } return IRQ_HANDLED; @@ -1310,11 +1310,12 @@ static void mtk_poll_controller(struct net_device *dev) { struct mtk_mac *mac = netdev_priv(dev); struct mtk_eth *eth = mac->hw; - u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT; - mtk_irq_disable(eth, int_mask); + mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT); + mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT); mtk_handle_irq_rx(eth->irq[2], dev); - mtk_irq_enable(eth, int_mask); + mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT); + mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT); } #endif @@ -1329,11 +1330,15 @@ static int mtk_start_dma(struct mtk_eth *eth) } mtk_w32(eth, - MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN | - MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS | - MTK_RX_BT_32DWORDS | MTK_NDP_CO_PRO, + MTK_TX_WB_DDONE | MTK_TX_DMA_EN | + MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO, MTK_QDMA_GLO_CFG); + mtk_w32(eth, + MTK_RX_DMA_EN | MTK_RX_2B_OFFSET | + MTK_RX_BT_32DWORDS | MTK_MULTI_EN, + MTK_PDMA_GLO_CFG); + return 0; } @@ -1351,7 +1356,8 @@ static int mtk_open(struct net_device *dev) napi_enable(ð->tx_napi); napi_enable(ð->rx_napi); - mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT); + mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT); + mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT); } atomic_inc(ð->dma_refcnt); @@ -1363,16 +1369,15 @@ static int mtk_open(struct net_device *dev) static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg) { - unsigned long flags; u32 val; int i; /* stop the dma engine */ - spin_lock_irqsave(ð->page_lock, flags); + spin_lock_bh(ð->page_lock); val = mtk_r32(eth, glo_cfg); mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN), glo_cfg); - spin_unlock_irqrestore(ð->page_lock, flags); + spin_unlock_bh(ð->page_lock); /* wait for dma stop */ for (i = 0; i < 10; i++) { @@ -1397,7 +1402,8 @@ static int mtk_stop(struct net_device *dev) if (!atomic_dec_and_test(ð->dma_refcnt)) return 0; - mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT); + mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT); + mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT); napi_disable(ð->tx_napi); napi_disable(ð->rx_napi); @@ -1451,7 +1457,9 @@ static int __init mtk_hw_init(struct mtk_eth *eth) /* disable delay and normal interrupt */ mtk_w32(eth, 0, MTK_QDMA_DELAY_INT); - mtk_irq_disable(eth, ~0); + mtk_w32(eth, 0, MTK_PDMA_DELAY_INT); + mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0); + mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0); mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); mtk_w32(eth, 0, MTK_RST_GL); @@ -1465,9 +1473,8 @@ static int __init mtk_hw_init(struct mtk_eth *eth) for (i = 0; i < 2; i++) { u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i)); - /* setup the forward port to send 
frame to QDMA */ + /* setup the forward port to send frame to PDMA */ val &= ~0xffff; - val |= 0x5555; /* Enable RX checksum */ val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN; @@ -1507,7 +1514,8 @@ static void mtk_uninit(struct net_device *dev) phy_disconnect(mac->phy_dev); mtk_mdio_cleanup(eth); - mtk_irq_disable(eth, ~0); + mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0); + mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0); free_irq(eth->irq[1], dev); free_irq(eth->irq[2], dev); } @@ -1686,7 +1694,7 @@ static void mtk_get_ethtool_stats(struct net_device *dev, } do { - data_src = (u64*)hwstats; + data_src = (u64 *)hwstats; data_dst = data; start = u64_stats_fetch_begin_irq(&hwstats->syncp); @@ -1912,7 +1920,6 @@ static int mtk_remove(struct platform_device *pdev) netif_napi_del(ð->tx_napi); netif_napi_del(ð->rx_napi); mtk_cleanup(eth); - platform_set_drvdata(pdev, NULL); return 0; } diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index f82e3acb947b..7c1f3f2e11d4 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -68,6 +68,32 @@ /* Unicast Filter MAC Address Register - High */ #define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000)) +/* PDMA RX Base Pointer Register */ +#define MTK_PRX_BASE_PTR0 0x900 + +/* PDMA RX Maximum Count Register */ +#define MTK_PRX_MAX_CNT0 0x904 + +/* PDMA RX CPU Pointer Register */ +#define MTK_PRX_CRX_IDX0 0x908 + +/* PDMA Global Configuration Register */ +#define MTK_PDMA_GLO_CFG 0xa04 +#define MTK_MULTI_EN BIT(10) + +/* PDMA Reset Index Register */ +#define MTK_PDMA_RST_IDX 0xa08 +#define MTK_PST_DRX_IDX0 BIT(16) + +/* PDMA Delay Interrupt Register */ +#define MTK_PDMA_DELAY_INT 0xa0c + +/* PDMA Interrupt Status Register */ +#define MTK_PDMA_INT_STATUS 0xa20 + +/* PDMA Interrupt Mask Register */ +#define MTK_PDMA_INT_MASK 0xa28 + /* PDMA Interrupt grouping registers */ #define MTK_PDMA_INT_GRP1 0xa50 #define MTK_PDMA_INT_GRP2 0xa54 @@ -119,13 +145,16 @@ /* QDMA Interrupt Status Register */ #define MTK_QMTK_INT_STATUS 0x1A18 +#define MTK_RX_DONE_INT3 BIT(19) +#define MTK_RX_DONE_INT2 BIT(18) #define MTK_RX_DONE_INT1 BIT(17) #define MTK_RX_DONE_INT0 BIT(16) #define MTK_TX_DONE_INT3 BIT(3) #define MTK_TX_DONE_INT2 BIT(2) #define MTK_TX_DONE_INT1 BIT(1) #define MTK_TX_DONE_INT0 BIT(0) -#define MTK_RX_DONE_INT (MTK_RX_DONE_INT0 | MTK_RX_DONE_INT1) +#define MTK_RX_DONE_INT (MTK_RX_DONE_INT0 | MTK_RX_DONE_INT1 | \ + MTK_RX_DONE_INT2 | MTK_RX_DONE_INT3) #define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \ MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 05cc1effc13c..dad326ccd4dd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -3,7 +3,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \ - fs_counters.o rl.o + fs_counters.o rl.o lag.o mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \ en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index c2ec01a22d55..1e639f886021 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c 
@@ -294,11 +294,13 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_DEALLOC_Q_COUNTER: case MLX5_CMD_OP_DEALLOC_PD: case MLX5_CMD_OP_DEALLOC_UAR: - case MLX5_CMD_OP_DETTACH_FROM_MCG: + case MLX5_CMD_OP_DETACH_FROM_MCG: case MLX5_CMD_OP_DEALLOC_XRCD: case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN: case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT: case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY: + case MLX5_CMD_OP_DESTROY_LAG: + case MLX5_CMD_OP_DESTROY_VPORT_LAG: case MLX5_CMD_OP_DESTROY_TIR: case MLX5_CMD_OP_DESTROY_SQ: case MLX5_CMD_OP_DESTROY_RQ: @@ -315,6 +317,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_MODIFY_FLOW_TABLE: case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT: + case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER: return MLX5_CMD_STAT_OK; case MLX5_CMD_OP_QUERY_HCA_CAP: @@ -389,6 +392,10 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT: case MLX5_CMD_OP_SET_L2_TABLE_ENTRY: case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY: + case MLX5_CMD_OP_CREATE_LAG: + case MLX5_CMD_OP_MODIFY_LAG: + case MLX5_CMD_OP_QUERY_LAG: + case MLX5_CMD_OP_CREATE_VPORT_LAG: case MLX5_CMD_OP_CREATE_TIR: case MLX5_CMD_OP_MODIFY_TIR: case MLX5_CMD_OP_QUERY_TIR: @@ -416,6 +423,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY: case MLX5_CMD_OP_ALLOC_FLOW_COUNTER: case MLX5_CMD_OP_QUERY_FLOW_COUNTER: + case MLX5_CMD_OP_ALLOC_ENCAP_HEADER: *status = MLX5_DRIVER_STATUS_ABORTED; *synd = MLX5_DRIVER_SYND; return -EIO; @@ -504,7 +512,7 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION); MLX5_COMMAND_STR_CASE(ACCESS_REG); MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG); - MLX5_COMMAND_STR_CASE(DETTACH_FROM_MCG); + MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG); MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG); MLX5_COMMAND_STR_CASE(MAD_IFC); MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX); @@ -526,6 +534,12 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY); MLX5_COMMAND_STR_CASE(SET_WOL_ROL); MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL); + MLX5_COMMAND_STR_CASE(CREATE_LAG); + MLX5_COMMAND_STR_CASE(MODIFY_LAG); + MLX5_COMMAND_STR_CASE(QUERY_LAG); + MLX5_COMMAND_STR_CASE(DESTROY_LAG); + MLX5_COMMAND_STR_CASE(CREATE_VPORT_LAG); + MLX5_COMMAND_STR_CASE(DESTROY_VPORT_LAG); MLX5_COMMAND_STR_CASE(CREATE_TIR); MLX5_COMMAND_STR_CASE(MODIFY_TIR); MLX5_COMMAND_STR_CASE(DESTROY_TIR); @@ -564,15 +578,130 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER); MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER); MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE); + MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER); + MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER); default: return "unknown command opcode"; } } +static const char *cmd_status_str(u8 status) +{ + switch (status) { + case MLX5_CMD_STAT_OK: + return "OK"; + case MLX5_CMD_STAT_INT_ERR: + return "internal error"; + case MLX5_CMD_STAT_BAD_OP_ERR: + return "bad operation"; + case MLX5_CMD_STAT_BAD_PARAM_ERR: + return "bad parameter"; + case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: + return "bad system state"; + case MLX5_CMD_STAT_BAD_RES_ERR: + return "bad resource"; + case MLX5_CMD_STAT_RES_BUSY: + return "resource busy"; + case MLX5_CMD_STAT_LIM_ERR: + return "limits exceeded"; + case MLX5_CMD_STAT_BAD_RES_STATE_ERR: + return "bad resource state"; + case MLX5_CMD_STAT_IX_ERR: + return "bad index"; + case 
MLX5_CMD_STAT_NO_RES_ERR: + return "no resources"; + case MLX5_CMD_STAT_BAD_INP_LEN_ERR: + return "bad input length"; + case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: + return "bad output length"; + case MLX5_CMD_STAT_BAD_QP_STATE_ERR: + return "bad QP state"; + case MLX5_CMD_STAT_BAD_PKT_ERR: + return "bad packet (discarded)"; + case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: + return "bad size too many outstanding CQEs"; + default: + return "unknown status"; + } +} + +static int cmd_status_to_err(u8 status) +{ + switch (status) { + case MLX5_CMD_STAT_OK: return 0; + case MLX5_CMD_STAT_INT_ERR: return -EIO; + case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL; + case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL; + case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; + case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL; + case MLX5_CMD_STAT_RES_BUSY: return -EBUSY; + case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM; + case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; + case MLX5_CMD_STAT_IX_ERR: return -EINVAL; + case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN; + case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO; + case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO; + case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL; + case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL; + case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL; + default: return -EIO; + } +} + +struct mlx5_ifc_mbox_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_mbox_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + +void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome) +{ + *status = MLX5_GET(mbox_out, out, status); + *syndrome = MLX5_GET(mbox_out, out, syndrome); +} + +static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out) +{ + u32 syndrome; + u8 status; + u16 opcode; + u16 op_mod; + + mlx5_cmd_mbox_status(out, &status, &syndrome); + if (!status) + return 0; + + opcode = MLX5_GET(mbox_in, in, opcode); + op_mod = MLX5_GET(mbox_in, in, op_mod); + + mlx5_core_err(dev, + "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n", + mlx5_command_str(opcode), + opcode, op_mod, + cmd_status_str(status), + status, + syndrome); + + return cmd_status_to_err(status); +} + static void dump_command(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent, int input) { - u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode); struct mlx5_cmd_msg *msg = input ? ent->in : ent->out; + u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode); struct mlx5_cmd_mailbox *next = msg->next; int data_only; u32 offset = 0; @@ -622,9 +751,7 @@ static void dump_command(struct mlx5_core_dev *dev, static u16 msg_to_opcode(struct mlx5_cmd_msg *in) { - struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data); - - return be16_to_cpu(hdr->opcode); + return MLX5_GET(mbox_in, in->first.data, opcode); } static void cb_timeout_handler(struct work_struct *work) @@ -762,16 +889,6 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) return err; } -static __be32 *get_synd_ptr(struct mlx5_outbox_hdr *out) -{ - return &out->syndrome; -} - -static u8 *get_status_ptr(struct mlx5_outbox_hdr *out) -{ - return &out->status; -} - /* Notes: * 1. Callback functions may not sleep * 2. 
page queue commands do not support asynchrous completion @@ -820,7 +937,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, goto out_free; ds = ent->ts2 - ent->ts1; - op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode); + op = MLX5_GET(mbox_in, in->first.data, opcode); if (op < ARRAY_SIZE(cmd->stats)) { stats = &cmd->stats[op]; spin_lock_irq(&stats->lock); @@ -1035,7 +1152,6 @@ static ssize_t data_write(struct file *filp, const char __user *buf, struct mlx5_core_dev *dev = filp->private_data; struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; void *ptr; - int err; if (*pos != 0) return -EINVAL; @@ -1043,25 +1159,15 @@ static ssize_t data_write(struct file *filp, const char __user *buf, kfree(dbg->in_msg); dbg->in_msg = NULL; dbg->inlen = 0; - - ptr = kzalloc(count, GFP_KERNEL); - if (!ptr) - return -ENOMEM; - - if (copy_from_user(ptr, buf, count)) { - err = -EFAULT; - goto out; - } + ptr = memdup_user(buf, count); + if (IS_ERR(ptr)) + return PTR_ERR(ptr); dbg->in_msg = ptr; dbg->inlen = count; *pos = count; return count; - -out: - kfree(ptr); - return err; } static ssize_t data_read(struct file *filp, char __user *buf, size_t count, @@ -1321,11 +1427,16 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec) callback = ent->callback; context = ent->context; err = ent->ret; - if (!err) + if (!err) { err = mlx5_copy_from_msg(ent->uout, ent->out, ent->uout_size); + err = err ? err : mlx5_cmd_check(dev, + ent->in->first.data, + ent->uout); + } + mlx5_free_cmd_msg(dev, ent->out); free_msg(dev, ent->in); @@ -1377,14 +1488,9 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size, return msg; } -static u16 opcode_from_in(struct mlx5_inbox_hdr *in) -{ - return be16_to_cpu(in->opcode); -} - -static int is_manage_pages(struct mlx5_inbox_hdr *in) +static int is_manage_pages(void *in) { - return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES; + return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES; } static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, @@ -1401,9 +1507,11 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, if (pci_channel_offline(dev->pdev) || dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { - err = mlx5_internal_err_ret_value(dev, opcode_from_in(in), &drv_synd, &status); - *get_synd_ptr(out) = cpu_to_be32(drv_synd); - *get_status_ptr(out) = status; + u16 opcode = MLX5_GET(mbox_in, in, opcode); + + err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status); + MLX5_SET(mbox_out, out, status, status); + MLX5_SET(mbox_out, out, syndrome, drv_synd); return err; } @@ -1457,7 +1565,10 @@ out_in: int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size) { - return cmd_exec(dev, in, in_size, out, out_size, NULL, NULL); + int err; + + err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL); + return err ? 
: mlx5_cmd_check(dev, in, out); } EXPORT_SYMBOL(mlx5_cmd_exec); @@ -1694,96 +1805,3 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev) pci_pool_destroy(cmd->pool); } EXPORT_SYMBOL(mlx5_cmd_cleanup); - -static const char *cmd_status_str(u8 status) -{ - switch (status) { - case MLX5_CMD_STAT_OK: - return "OK"; - case MLX5_CMD_STAT_INT_ERR: - return "internal error"; - case MLX5_CMD_STAT_BAD_OP_ERR: - return "bad operation"; - case MLX5_CMD_STAT_BAD_PARAM_ERR: - return "bad parameter"; - case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: - return "bad system state"; - case MLX5_CMD_STAT_BAD_RES_ERR: - return "bad resource"; - case MLX5_CMD_STAT_RES_BUSY: - return "resource busy"; - case MLX5_CMD_STAT_LIM_ERR: - return "limits exceeded"; - case MLX5_CMD_STAT_BAD_RES_STATE_ERR: - return "bad resource state"; - case MLX5_CMD_STAT_IX_ERR: - return "bad index"; - case MLX5_CMD_STAT_NO_RES_ERR: - return "no resources"; - case MLX5_CMD_STAT_BAD_INP_LEN_ERR: - return "bad input length"; - case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: - return "bad output length"; - case MLX5_CMD_STAT_BAD_QP_STATE_ERR: - return "bad QP state"; - case MLX5_CMD_STAT_BAD_PKT_ERR: - return "bad packet (discarded)"; - case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: - return "bad size too many outstanding CQEs"; - default: - return "unknown status"; - } -} - -static int cmd_status_to_err(u8 status) -{ - switch (status) { - case MLX5_CMD_STAT_OK: return 0; - case MLX5_CMD_STAT_INT_ERR: return -EIO; - case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL; - case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL; - case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; - case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL; - case MLX5_CMD_STAT_RES_BUSY: return -EBUSY; - case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM; - case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; - case MLX5_CMD_STAT_IX_ERR: return -EINVAL; - case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN; - case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO; - case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO; - case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL; - case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL; - case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL; - default: return -EIO; - } -} - -/* this will be available till all the commands use set/get macros */ -int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr) -{ - if (!hdr->status) - return 0; - - pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n", - cmd_status_str(hdr->status), hdr->status, - be32_to_cpu(hdr->syndrome)); - - return cmd_status_to_err(hdr->status); -} - -int mlx5_cmd_status_to_err_v2(void *ptr) -{ - u32 syndrome; - u8 status; - - status = be32_to_cpu(*(__be32 *)ptr) >> 24; - if (!status) - return 0; - - syndrome = be32_to_cpu(*(__be32 *)(ptr + 4)); - - pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n", - cmd_status_str(status), status, syndrome); - - return cmd_status_to_err(status); -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index 873a631ad155..32d4af9b594d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -134,33 +134,29 @@ void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type) complete(&cq->free); } - int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, - struct mlx5_create_cq_mbox_in *in, int inlen) + u32 *in, int inlen) { - int err; struct mlx5_cq_table *table = &dev->priv.cq_table; - struct mlx5_create_cq_mbox_out out; - struct mlx5_destroy_cq_mbox_in 
din; - struct mlx5_destroy_cq_mbox_out dout; + u32 out[MLX5_ST_SZ_DW(create_cq_out)]; + u32 din[MLX5_ST_SZ_DW(destroy_cq_in)]; + u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)]; int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), c_eqn); struct mlx5_eq *eq; + int err; eq = mlx5_eqn2eq(dev, eqn); if (IS_ERR(eq)) return PTR_ERR(eq); - in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ); - memset(&out, 0, sizeof(out)); - err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + memset(out, 0, sizeof(out)); + MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); if (err) return err; - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); - - cq->cqn = be32_to_cpu(out.cqn) & 0xffffff; + cq->cqn = MLX5_GET(create_cq_out, out, cqn); cq->cons_index = 0; cq->arm_sn = 0; atomic_set(&cq->refcount, 1); @@ -186,10 +182,11 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, return 0; err_cmd: - memset(&din, 0, sizeof(din)); - memset(&dout, 0, sizeof(dout)); - din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ); - mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout)); + memset(din, 0, sizeof(din)); + memset(dout, 0, sizeof(dout)); + MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ); + MLX5_SET(destroy_cq_in, din, cqn, cq->cqn); + mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout)); return err; } EXPORT_SYMBOL(mlx5_core_create_cq); @@ -197,8 +194,8 @@ EXPORT_SYMBOL(mlx5_core_create_cq); int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) { struct mlx5_cq_table *table = &dev->priv.cq_table; - struct mlx5_destroy_cq_mbox_in in; - struct mlx5_destroy_cq_mbox_out out; + u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0}; struct mlx5_core_cq *tmp; int err; @@ -214,17 +211,12 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) return -EINVAL; } - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ); - in.cqn = cpu_to_be32(cq->cqn); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ); + MLX5_SET(destroy_cq_in, in, cqn, cq->cqn); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (err) return err; - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); - synchronize_irq(cq->irqn); mlx5_debug_cq_remove(dev, cq); @@ -237,44 +229,23 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) EXPORT_SYMBOL(mlx5_core_destroy_cq); int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, - struct mlx5_query_cq_mbox_out *out) + u32 *out, int outlen) { - struct mlx5_query_cq_mbox_in in; - int err; - - memset(&in, 0, sizeof(in)); - memset(out, 0, sizeof(*out)); - - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_CQ); - in.cqn = cpu_to_be32(cq->cqn); - err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); - if (err) - return err; - - if (out->hdr.status) - return mlx5_cmd_status_to_err(&out->hdr); + u32 in[MLX5_ST_SZ_DW(query_cq_in)] = {0}; - return err; + MLX5_SET(query_cq_in, in, opcode, MLX5_CMD_OP_QUERY_CQ); + MLX5_SET(query_cq_in, in, cqn, cq->cqn); + return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); } EXPORT_SYMBOL(mlx5_core_query_cq); - int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, - struct mlx5_modify_cq_mbox_in *in, int in_sz) + u32 *in, int inlen) { - struct 
mlx5_modify_cq_mbox_out out; - int err; - - memset(&out, 0, sizeof(out)); - in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MODIFY_CQ); - err = mlx5_cmd_exec(dev, in, in_sz, &out, sizeof(out)); - if (err) - return err; + u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {0}; - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); - - return 0; + MLX5_SET(modify_cq_in, in, opcode, MLX5_CMD_OP_MODIFY_CQ); + return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_modify_cq); @@ -283,18 +254,20 @@ int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev, u16 cq_period, u16 cq_max_count) { - struct mlx5_modify_cq_mbox_in in; - - memset(&in, 0, sizeof(in)); - - in.cqn = cpu_to_be32(cq->cqn); - in.ctx.cq_period = cpu_to_be16(cq_period); - in.ctx.cq_max_count = cpu_to_be16(cq_max_count); - in.field_select = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD | - MLX5_CQ_MODIFY_COUNT); - - return mlx5_core_modify_cq(dev, cq, &in, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {0}; + void *cqc; + + MLX5_SET(modify_cq_in, in, cqn, cq->cqn); + cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context); + MLX5_SET(cqc, cqc, cq_period, cq_period); + MLX5_SET(cqc, cqc, cq_max_count, cq_max_count); + MLX5_SET(modify_cq_in, in, + modify_field_select_resize_field_select.modify_field_select.modify_field_select, + MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT); + + return mlx5_core_modify_cq(dev, cq, in, sizeof(in)); } +EXPORT_SYMBOL(mlx5_core_modify_cq_moderation); int mlx5_init_cq_table(struct mlx5_core_dev *dev) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index 5210d92e6bc7..e94a9532e218 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -277,24 +277,28 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev) static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, int index, int *is_str) { - struct mlx5_query_qp_mbox_out *out; + int outlen = MLX5_ST_SZ_BYTES(query_qp_out); struct mlx5_qp_context *ctx; u64 param = 0; + u32 *out; int err; int no_sq; - out = kzalloc(sizeof(*out), GFP_KERNEL); + out = kzalloc(outlen, GFP_KERNEL); if (!out) return param; - err = mlx5_core_qp_query(dev, qp, out, sizeof(*out)); + err = mlx5_core_qp_query(dev, qp, out, outlen); if (err) { - mlx5_core_warn(dev, "failed to query qp\n"); + mlx5_core_warn(dev, "failed to query qp err=%d\n", err); goto out; } *is_str = 0; - ctx = &out->ctx; + + /* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */ + ctx = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, out, qpc); + switch (index) { case QP_PID: param = qp->pid; @@ -358,32 +362,32 @@ out: static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq, int index) { - struct mlx5_query_eq_mbox_out *out; - struct mlx5_eq_context *ctx; + int outlen = MLX5_ST_SZ_BYTES(query_eq_out); u64 param = 0; + void *ctx; + u32 *out; int err; - out = kzalloc(sizeof(*out), GFP_KERNEL); + out = kzalloc(outlen, GFP_KERNEL); if (!out) return param; - ctx = &out->ctx; - - err = mlx5_core_eq_query(dev, eq, out, sizeof(*out)); + err = mlx5_core_eq_query(dev, eq, out, outlen); if (err) { mlx5_core_warn(dev, "failed to query eq\n"); goto out; } + ctx = MLX5_ADDR_OF(query_eq_out, out, eq_context_entry); switch (index) { case EQ_NUM_EQES: - param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f); + param = 1 << MLX5_GET(eqc, ctx, log_eq_size); break; case EQ_INTR: - param = ctx->intr; + param = MLX5_GET(eqc, ctx, 
intr); break; case EQ_LOG_PG_SZ: - param = (ctx->log_page_size & 0x1f) + 12; + param = MLX5_GET(eqc, ctx, log_page_size) + 12; break; } @@ -395,37 +399,37 @@ out: static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, int index) { - struct mlx5_query_cq_mbox_out *out; - struct mlx5_cq_context *ctx; + int outlen = MLX5_ST_SZ_BYTES(query_cq_out); u64 param = 0; + void *ctx; + u32 *out; int err; - out = kzalloc(sizeof(*out), GFP_KERNEL); + out = mlx5_vzalloc(outlen); if (!out) return param; - ctx = &out->ctx; - - err = mlx5_core_query_cq(dev, cq, out); + err = mlx5_core_query_cq(dev, cq, out, outlen); if (err) { mlx5_core_warn(dev, "failed to query cq\n"); goto out; } + ctx = MLX5_ADDR_OF(query_cq_out, out, cq_context); switch (index) { case CQ_PID: param = cq->pid; break; case CQ_NUM_CQES: - param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f); + param = 1 << MLX5_GET(cqc, ctx, log_cq_size); break; case CQ_LOG_PG_SZ: - param = (ctx->log_pg_sz & 0x1f) + 12; + param = MLX5_GET(cqc, ctx, log_page_size); break; } out: - kfree(out); + kvfree(out); return param; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index bf722aa88cf0..96995609f205 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -654,40 +654,6 @@ struct mlx5e_priv { void *ppriv; }; -enum mlx5e_link_mode { - MLX5E_1000BASE_CX_SGMII = 0, - MLX5E_1000BASE_KX = 1, - MLX5E_10GBASE_CX4 = 2, - MLX5E_10GBASE_KX4 = 3, - MLX5E_10GBASE_KR = 4, - MLX5E_20GBASE_KR2 = 5, - MLX5E_40GBASE_CR4 = 6, - MLX5E_40GBASE_KR4 = 7, - MLX5E_56GBASE_R4 = 8, - MLX5E_10GBASE_CR = 12, - MLX5E_10GBASE_SR = 13, - MLX5E_10GBASE_ER = 14, - MLX5E_40GBASE_SR4 = 15, - MLX5E_40GBASE_LR4 = 16, - MLX5E_50GBASE_SR2 = 18, - MLX5E_100GBASE_CR4 = 20, - MLX5E_100GBASE_SR4 = 21, - MLX5E_100GBASE_KR4 = 22, - MLX5E_100GBASE_LR4 = 23, - MLX5E_100BASE_TX = 24, - MLX5E_1000BASE_T = 25, - MLX5E_10GBASE_T = 26, - MLX5E_25GBASE_CR = 27, - MLX5E_25GBASE_KR = 28, - MLX5E_25GBASE_SR = 29, - MLX5E_50GBASE_CR2 = 30, - MLX5E_50GBASE_KR2 = 31, - MLX5E_LINK_MODES_NUMBER, -}; - -#define MLX5E_PROT_MASK(link_mode) (1 << link_mode) - - void mlx5e_build_ptys2ethtool_map(void); void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index 9cce153e1035..029e856f72a0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -60,24 +60,27 @@ void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, struct mlx5_core_mkey *mkey) { - struct mlx5_create_mkey_mbox_in *in; + int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); + void *mkc; + u32 *in; int err; - in = mlx5_vzalloc(sizeof(*in)); + in = mlx5_vzalloc(inlen); if (!in) return -ENOMEM; - in->seg.flags = MLX5_PERM_LOCAL_WRITE | - MLX5_PERM_LOCAL_READ | - MLX5_ACCESS_MODE_PA; - in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64); - in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); + MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA); + MLX5_SET(mkc, mkc, lw, 1); + MLX5_SET(mkc, mkc, lr, 1); - err = mlx5_core_create_mkey(mdev, mkey, in, sizeof(*in), NULL, NULL, - NULL); + MLX5_SET(mkc, mkc, pd, pdn); + MLX5_SET(mkc, mkc, length64, 1); + MLX5_SET(mkc, mkc, qpn, 0xffffff); - kvfree(in); + err = 
mlx5_core_create_mkey(mdev, mkey, in, inlen); + kvfree(in); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index d0cf8fa22659..d1cd1564e9b9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -803,7 +803,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev, { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; - u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0}; u32 eth_proto_cap; u32 eth_proto_admin; u32 eth_proto_lp; @@ -813,7 +813,6 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev, int err; err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1); - if (err) { netdev_err(netdev, "%s: query port ptys failed: %d\n", __func__, err); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 2459c7f3db8d..03586ee68fc4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -174,18 +174,15 @@ static void mlx5e_update_vport_counters(struct mlx5e_priv *priv) { int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out); u32 *out = (u32 *)priv->stats.vport.query_vport_out; - u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)]; + u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0}; struct mlx5_core_dev *mdev = priv->mdev; - memset(in, 0, sizeof(in)); - MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER); MLX5_SET(query_vport_counter_in, in, op_mod, 0); MLX5_SET(query_vport_counter_in, in, other_vport, 0); memset(out, 0, outlen); - mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen); } @@ -488,7 +485,8 @@ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd) rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY); - MLX5_SET64(modify_rq_in, in, modify_bitmask, MLX5_RQ_BITMASK_VSD); + MLX5_SET64(modify_rq_in, in, modify_bitmask, + MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD); MLX5_SET(rqc, rqc, vsd, vsd); MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY); @@ -1999,14 +1997,15 @@ static void mlx5e_close_drop_rq(struct mlx5e_priv *priv) static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc) { struct mlx5_core_dev *mdev = priv->mdev; - u32 in[MLX5_ST_SZ_DW(create_tis_in)]; + u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0}; void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); - memset(in, 0, sizeof(in)); - MLX5_SET(tisc, tisc, prio, tc << 1); MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn); + if (mlx5_lag_is_lacp_owner(mdev)) + MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1); + return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]); } @@ -3211,37 +3210,37 @@ static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv) static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5_create_mkey_mbox_in *in; - struct mlx5_mkey_seg *mkc; - int inlen = sizeof(*in); u64 npages = MLX5E_REQUIRED_MTTS(priv->profile->max_nch(mdev), BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW)); + int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); + void *mkc; + u32 *in; int err; in = mlx5_vzalloc(inlen); if (!in) return -ENOMEM; - mkc = &in->seg; - mkc->status = MLX5_MKEY_STATUS_FREE; - mkc->flags = MLX5_PERM_UMR_EN | - MLX5_PERM_LOCAL_READ | - MLX5_PERM_LOCAL_WRITE | - MLX5_ACCESS_MODE_MTT; + mkc = 
MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); npages = min_t(u32, ALIGN(U16_MAX, 4) * 2, npages); - mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); - mkc->flags_pd = cpu_to_be32(mdev->mlx5e_res.pdn); - mkc->len = cpu_to_be64(npages << PAGE_SHIFT); - mkc->xlt_oct_size = cpu_to_be32(MLX5_MTT_OCTW(npages)); - mkc->log2_page_size = PAGE_SHIFT; + MLX5_SET(mkc, mkc, free, 1); + MLX5_SET(mkc, mkc, umr_en, 1); + MLX5_SET(mkc, mkc, lw, 1); + MLX5_SET(mkc, mkc, lr, 1); + MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT); - err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen, NULL, - NULL, NULL); + MLX5_SET(mkc, mkc, qpn, 0xffffff); + MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn); + MLX5_SET64(mkc, mkc, len, npages << PAGE_SHIFT); + MLX5_SET(mkc, mkc, translations_octword_size, + MLX5_MTT_OCTW(npages)); + MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT); - kvfree(in); + err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen); + kvfree(in); return err; } @@ -3360,6 +3359,8 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) struct mlx5_eswitch *esw = mdev->priv.eswitch; struct mlx5_eswitch_rep rep; + mlx5_lag_add(mdev, netdev); + if (mlx5e_vxlan_allowed(mdev)) { rtnl_lock(); udp_tunnel_get_rx_info(netdev); @@ -3383,6 +3384,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) { queue_work(priv->wq, &priv->set_rx_mode_work); mlx5e_disable_async_events(priv); + mlx5_lag_remove(priv->mdev); } static const struct mlx5e_profile mlx5e_nic_profile = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 134de4a11f1d..29db4735182a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -415,8 +415,8 @@ int mlx5e_vport_rep_load(struct mlx5_eswitch *esw, { rep->priv_data = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rep); if (!rep->priv_data) { - pr_warn("Failed to create representor for vport %d\n", - rep->vport); + mlx5_core_warn(esw->dev, "Failed to create representor for vport %d\n", + rep->vport); return -EINVAL; } return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 0e30602ef76d..aaca09002ca6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -86,23 +86,12 @@ struct cre_des_eq { static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn) { - struct mlx5_destroy_eq_mbox_in in; - struct mlx5_destroy_eq_mbox_out out; - int err; - - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_EQ); - in.eqn = eqn; - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (!err) - goto ex; - - if (out.hdr.status) - err = mlx5_cmd_status_to_err(&out.hdr); + u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {0}; -ex: - return err; + MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ); + MLX5_SET(destroy_eq_in, in, eq_number, eqn); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry) @@ -351,11 +340,13 @@ static void init_eq_buf(struct mlx5_eq *eq) int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, int nent, u64 mask, const char *name, struct mlx5_uar *uar) { + u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0}; struct mlx5_priv *priv = &dev->priv; - struct mlx5_create_eq_mbox_in *in; - struct 
mlx5_create_eq_mbox_out out; - int err; + __be64 *pas; + void *eqc; int inlen; + u32 *in; + int err; eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE); eq->cons_index = 0; @@ -365,35 +356,36 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, init_eq_buf(eq); - inlen = sizeof(*in) + sizeof(in->pas[0]) * eq->buf.npages; + inlen = MLX5_ST_SZ_BYTES(create_eq_in) + + MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages; + in = mlx5_vzalloc(inlen); if (!in) { err = -ENOMEM; goto err_buf; } - memset(&out, 0, sizeof(out)); - mlx5_fill_page_array(&eq->buf, in->pas); + pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas); + mlx5_fill_page_array(&eq->buf, pas); - in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ); - in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index); - in->ctx.intr = vecidx; - in->ctx.log_page_size = eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT; - in->events_mask = cpu_to_be64(mask); + MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ); + MLX5_SET64(create_eq_in, in, event_bitmask, mask); - err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); - if (err) - goto err_in; + eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry); + MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent)); + MLX5_SET(eqc, eqc, uar_page, uar->index); + MLX5_SET(eqc, eqc, intr, vecidx); + MLX5_SET(eqc, eqc, log_page_size, + eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); - if (out.hdr.status) { - err = mlx5_cmd_status_to_err(&out.hdr); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); + if (err) goto err_in; - } snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s", name, pci_name(dev->pdev)); - eq->eqn = out.eq_number; + eq->eqn = MLX5_GET(create_eq_out, out, eq_number); eq->irqn = priv->msix_arr[vecidx].vector; eq->dev = dev; eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET; @@ -547,22 +539,12 @@ int mlx5_stop_eqs(struct mlx5_core_dev *dev) } int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, - struct mlx5_query_eq_mbox_out *out, int outlen) + u32 *out, int outlen) { - struct mlx5_query_eq_mbox_in in; - int err; - - memset(&in, 0, sizeof(in)); - memset(out, 0, outlen); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_EQ); - in.eqn = eq->eqn; - err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); - if (err) - return err; + u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {0}; - if (out->hdr.status) - err = mlx5_cmd_status_to_err(&out->hdr); - - return err; + MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ); + MLX5_SET(query_eq_in, in, eq_number, eq->eqn); + return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); } EXPORT_SYMBOL_GPL(mlx5_core_eq_query); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 8b78f156214e..101430571d6d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -87,13 +87,9 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports); static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, u32 events_mask) { - int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)]; - int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)]; + int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0}; + int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0}; void *nic_vport_ctx; - int err; - - memset(out, 0, sizeof(out)); - memset(in, 0, sizeof(in)); MLX5_SET(modify_nic_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); @@ 
-116,45 +112,31 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, MLX5_SET(nic_vport_context, nic_vport_ctx, event_on_promisc_change, 1); - err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); - if (err) - goto ex; - err = mlx5_cmd_status_to_err_v2(out); - if (err) - goto ex; - return 0; -ex: - return err; + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } /* E-Switch vport context HW commands */ static int query_esw_vport_context_cmd(struct mlx5_core_dev *mdev, u32 vport, u32 *out, int outlen) { - u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {0}; MLX5_SET(query_nic_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT); - MLX5_SET(query_esw_vport_context_in, in, vport_number, vport); if (vport) MLX5_SET(query_esw_vport_context_in, in, other_vport, 1); - - return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen); + return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen); } static int query_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport, u16 *vlan, u8 *qos) { - u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)]; + u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {0}; int err; bool cvlan_strip; bool cvlan_insert; - memset(out, 0, sizeof(out)); - *vlan = 0; *qos = 0; @@ -188,27 +170,20 @@ out: static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport, void *in, int inlen) { - u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)]; - - memset(out, 0, sizeof(out)); + u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0}; + MLX5_SET(modify_esw_vport_context_in, in, opcode, + MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT); MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport); if (vport) MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1); - - MLX5_SET(modify_esw_vport_context_in, in, opcode, - MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT); - - return mlx5_cmd_exec_check_status(dev, in, inlen, - out, sizeof(out)); + return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); } static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport, u16 vlan, u8 qos, bool set) { - u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0}; if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) || !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist)) @@ -216,7 +191,6 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport, esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%d\n", vport, vlan, qos, set); - if (set) { MLX5_SET(modify_esw_vport_context_in, in, esw_vport_context.vport_cvlan_strip, 1); @@ -241,13 +215,10 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport, static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index, u8 *mac, u8 vlan_valid, u16 vlan) { - u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)]; - u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)]; + u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)] = {0}; u8 *in_mac_addr; - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); - MLX5_SET(set_l2_table_entry_in, in, opcode, MLX5_CMD_OP_SET_L2_TABLE_ENTRY); MLX5_SET(set_l2_table_entry_in, in, table_index, index); @@ -257,23 +228,18 @@ static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index, in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address); ether_addr_copy(&in_mac_addr[2], mac); - return 
mlx5_cmd_exec_check_status(dev, in, sizeof(in), - out, sizeof(out)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } static int del_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index) { - u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)]; - u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)] = {0}; MLX5_SET(delete_l2_table_entry_in, in, opcode, MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY); MLX5_SET(delete_l2_table_entry_in, in, table_index, index); - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), - out, sizeof(out)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } static int alloc_l2_table_index(struct mlx5_l2_table *l2_table, u32 *ix) @@ -340,7 +306,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, spec = mlx5_vzalloc(sizeof(*spec)); if (!spec) { - pr_warn("FDB: Failed to alloc match parameters\n"); + esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n"); return NULL; } dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, @@ -374,8 +340,8 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 0, &dest); if (IS_ERR(flow_rule)) { - pr_warn( - "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", + esw_warn(esw->dev, + "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", dmac_v, dmac_c, vport, PTR_ERR(flow_rule)); flow_rule = NULL; } @@ -1352,8 +1318,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, 0, NULL); if (IS_ERR(vport->ingress.allow_rule)) { err = PTR_ERR(vport->ingress.allow_rule); - pr_warn("vport[%d] configure ingress allow rule, err(%d)\n", - vport->vport, err); + esw_warn(esw->dev, + "vport[%d] configure ingress allow rule, err(%d)\n", + vport->vport, err); vport->ingress.allow_rule = NULL; goto out; } @@ -1365,8 +1332,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, 0, NULL); if (IS_ERR(vport->ingress.drop_rule)) { err = PTR_ERR(vport->ingress.drop_rule); - pr_warn("vport[%d] configure ingress drop rule, err(%d)\n", - vport->vport, err); + esw_warn(esw->dev, + "vport[%d] configure ingress drop rule, err(%d)\n", + vport->vport, err); vport->ingress.drop_rule = NULL; goto out; } @@ -1418,8 +1386,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, 0, NULL); if (IS_ERR(vport->egress.allowed_vlan)) { err = PTR_ERR(vport->egress.allowed_vlan); - pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n", - vport->vport, err); + esw_warn(esw->dev, + "vport[%d] configure egress allowed vlan rule failed, err(%d)\n", + vport->vport, err); vport->egress.allowed_vlan = NULL; goto out; } @@ -1432,8 +1401,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, 0, NULL); if (IS_ERR(vport->egress.drop_rule)) { err = PTR_ERR(vport->egress.drop_rule); - pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n", - vport->vport, err); + esw_warn(esw->dev, + "vport[%d] configure egress drop rule failed, err(%d)\n", + vport->vport, err); vport->egress.drop_rule = NULL; } out: @@ -1905,7 +1875,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, struct ifla_vf_stats *vf_stats) { int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out); - u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)]; + u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0}; int err = 0; u32 
*out; @@ -1918,8 +1888,6 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, if (!out) return -ENOMEM; - memset(in, 0, sizeof(in)); - MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER); MLX5_SET(query_vport_counter_in, in, op_mod, 0); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 9134010e2921..7a0415e6d339 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -41,10 +41,8 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft) { - u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)]; - u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0}; MLX5_SET(set_flow_table_root_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); @@ -55,30 +53,23 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, MLX5_SET(set_flow_table_root_in, in, other_vport, 1); } - memset(out, 0, sizeof(out)); - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, - sizeof(out)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, u16 vport, + enum fs_flow_table_op_mod op_mod, enum fs_flow_table_type type, unsigned int level, unsigned int log_size, struct mlx5_flow_table *next_ft, unsigned int *table_id) { - u32 out[MLX5_ST_SZ_DW(create_flow_table_out)]; - u32 in[MLX5_ST_SZ_DW(create_flow_table_in)]; + u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0}; int err; - memset(in, 0, sizeof(in)); - MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE); - if (next_ft) { - MLX5_SET(create_flow_table_in, in, table_miss_mode, 1); - MLX5_SET(create_flow_table_in, in, table_miss_id, next_ft->id); - } MLX5_SET(create_flow_table_in, in, table_type, type); MLX5_SET(create_flow_table_in, in, level, level); MLX5_SET(create_flow_table_in, in, log_size, log_size); @@ -87,10 +78,23 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, MLX5_SET(create_flow_table_in, in, other_vport, 1); } - memset(out, 0, sizeof(out)); - err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, - sizeof(out)); + switch (op_mod) { + case FS_FT_OP_MOD_NORMAL: + if (next_ft) { + MLX5_SET(create_flow_table_in, in, table_miss_mode, 1); + MLX5_SET(create_flow_table_in, in, table_miss_id, next_ft->id); + } + break; + + case FS_FT_OP_MOD_LAG_DEMUX: + MLX5_SET(create_flow_table_in, in, op_mod, 0x1); + if (next_ft) + MLX5_SET(create_flow_table_in, in, lag_master_next_table_id, + next_ft->id); + break; + } + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (!err) *table_id = MLX5_GET(create_flow_table_out, out, table_id); @@ -100,11 +104,8 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft) { - u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)]; - u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0}; MLX5_SET(destroy_flow_table_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE); @@ -115,39 +116,49 @@ int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev, MLX5_SET(destroy_flow_table_in, in, other_vport, 1); } - return 
mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, - sizeof(out)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, struct mlx5_flow_table *next_ft) { - u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)]; - u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0}; MLX5_SET(modify_flow_table_in, in, opcode, MLX5_CMD_OP_MODIFY_FLOW_TABLE); MLX5_SET(modify_flow_table_in, in, table_type, ft->type); MLX5_SET(modify_flow_table_in, in, table_id, ft->id); - if (ft->vport) { - MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport); - MLX5_SET(modify_flow_table_in, in, other_vport, 1); - } - MLX5_SET(modify_flow_table_in, in, modify_field_select, - MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID); - if (next_ft) { - MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1); - MLX5_SET(modify_flow_table_in, in, table_miss_id, next_ft->id); + + if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) { + MLX5_SET(modify_flow_table_in, in, modify_field_select, + MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID); + if (next_ft) { + MLX5_SET(modify_flow_table_in, in, + lag_master_next_table_id, next_ft->id); + } else { + MLX5_SET(modify_flow_table_in, in, + lag_master_next_table_id, 0); + } } else { - MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0); + if (ft->vport) { + MLX5_SET(modify_flow_table_in, in, vport_number, + ft->vport); + MLX5_SET(modify_flow_table_in, in, other_vport, 1); + } + MLX5_SET(modify_flow_table_in, in, modify_field_select, + MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID); + if (next_ft) { + MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1); + MLX5_SET(modify_flow_table_in, in, table_miss_id, + next_ft->id); + } else { + MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0); + } } - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, - sizeof(out)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev, @@ -155,12 +166,10 @@ int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev, u32 *in, unsigned int *group_id) { + u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0}; int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - u32 out[MLX5_ST_SZ_DW(create_flow_group_out)]; int err; - memset(out, 0, sizeof(out)); - MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP); MLX5_SET(create_flow_group_in, in, table_type, ft->type); @@ -170,13 +179,10 @@ int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev, MLX5_SET(create_flow_group_in, in, other_vport, 1); } - err = mlx5_cmd_exec_check_status(dev, in, - inlen, out, - sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); if (!err) *group_id = MLX5_GET(create_flow_group_out, out, group_id); - return err; } @@ -184,11 +190,8 @@ int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, unsigned int group_id) { - u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)]; - u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0}; MLX5_SET(destroy_flow_group_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP); @@ -200,8 +203,7 @@ int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev, MLX5_SET(destroy_flow_group_in, in, other_vport, 1); 
} - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, - sizeof(out)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, @@ -212,7 +214,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, { unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct); - u32 out[MLX5_ST_SZ_DW(set_fte_out)]; + u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0}; struct mlx5_flow_rule *dst; void *in_flow_context; void *in_match_value; @@ -290,11 +292,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, list_size); } - memset(out, 0, sizeof(out)); - err = mlx5_cmd_exec_check_status(dev, in, inlen, out, - sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); kvfree(in); - return err; } @@ -303,7 +302,7 @@ int mlx5_cmd_create_fte(struct mlx5_core_dev *dev, unsigned group_id, struct fs_fte *fte) { - return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte); + return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte); } int mlx5_cmd_update_fte(struct mlx5_core_dev *dev, @@ -327,12 +326,8 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, unsigned int index) { - u32 out[MLX5_ST_SZ_DW(delete_fte_out)]; - u32 in[MLX5_ST_SZ_DW(delete_fte_in)]; - int err; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0}; MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY); MLX5_SET(delete_fte_in, in, table_type, ft->type); @@ -343,74 +338,55 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, MLX5_SET(delete_fte_in, in, other_vport, 1); } - err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); - - return err; + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id) { - u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)]; - u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)]; + u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0}; int err; - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); - MLX5_SET(alloc_flow_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_FLOW_COUNTER); - err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, - sizeof(out)); - if (err) - return err; - - *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id); - - return 0; + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (!err) + *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id); + return err; } int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id) { - u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)]; - u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0}; MLX5_SET(dealloc_flow_counter_in, in, opcode, MLX5_CMD_OP_DEALLOC_FLOW_COUNTER); MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id); - - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, - sizeof(out)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id, u64 *packets, u64 *bytes) { u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) + - MLX5_ST_SZ_BYTES(traffic_counter)]; - u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)]; + MLX5_ST_SZ_BYTES(traffic_counter)] = {0}; + u32 
in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0}; void *stats; int err = 0; - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); - MLX5_SET(query_flow_counter_in, in, opcode, MLX5_CMD_OP_QUERY_FLOW_COUNTER); MLX5_SET(query_flow_counter_in, in, op_mod, 0); MLX5_SET(query_flow_counter_in, in, flow_counter_id, id); - - err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (err) return err; stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics); *packets = MLX5_GET64(traffic_counter, stats, packets); *bytes = MLX5_GET64(traffic_counter, stats, octets); - return 0; } @@ -448,18 +424,14 @@ void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b) int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b) { - u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0}; MLX5_SET(query_flow_counter_in, in, opcode, MLX5_CMD_OP_QUERY_FLOW_COUNTER); MLX5_SET(query_flow_counter_in, in, op_mod, 0); MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id); MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num); - - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), - b->out, b->outlen); + return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen); } void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev, @@ -480,3 +452,51 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev, *packets = MLX5_GET64(traffic_counter, stats, packets); *bytes = MLX5_GET64(traffic_counter, stats, octets); } + +#define MAX_ENCAP_SIZE (128) + +int mlx5_cmd_alloc_encap(struct mlx5_core_dev *dev, + int header_type, + size_t size, + void *encap_header, + u32 *encap_id) +{ + u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)]; + u32 in[MLX5_ST_SZ_DW(alloc_encap_header_in) + + (MAX_ENCAP_SIZE / sizeof(u32))]; + void *encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in, + encap_header); + void *header = MLX5_ADDR_OF(encap_header_in, encap_header_in, + encap_header); + int inlen = header - (void *)in + size; + int err; + + if (size > MAX_ENCAP_SIZE) + return -EINVAL; + + memset(in, 0, inlen); + MLX5_SET(alloc_encap_header_in, in, opcode, + MLX5_CMD_OP_ALLOC_ENCAP_HEADER); + MLX5_SET(encap_header_in, encap_header_in, encap_header_size, size); + MLX5_SET(encap_header_in, encap_header_in, header_type, header_type); + memcpy(header, encap_header, size); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); + + *encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id); + return err; +} + +void mlx5_cmd_dealloc_encap(struct mlx5_core_dev *dev, u32 encap_id) +{ + u32 in[MLX5_ST_SZ_DW(dealloc_encap_header_in)]; + u32 out[MLX5_ST_SZ_DW(dealloc_encap_header_out)]; + + memset(in, 0, sizeof(in)); + MLX5_SET(dealloc_encap_header_in, in, opcode, + MLX5_CMD_OP_DEALLOC_ENCAP_HEADER); + MLX5_SET(dealloc_encap_header_in, in, encap_id, encap_id); + + mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h index 158844cef82b..c5bc4686c832 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h @@ -35,6 +35,7 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, u16 vport, + enum fs_flow_table_op_mod op_mod, enum fs_flow_table_type type, unsigned int level, unsigned int log_size, struct mlx5_flow_table *next_ft, unsigned int *table_id); @@ 
-88,4 +89,11 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b, u16 id, u64 *packets, u64 *bytes); +int mlx5_cmd_alloc_encap(struct mlx5_core_dev *dev, + int header_type, + size_t size, + void *encap_header, + u32 *encap_id); +void mlx5_cmd_dealloc_encap(struct mlx5_core_dev *dev, u32 encap_id); + #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 3d6c1f65e586..5da2cc878582 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -96,6 +96,10 @@ #define OFFLOADS_NUM_PRIOS 1 #define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1) +#define LAG_PRIO_NUM_LEVELS 1 +#define LAG_NUM_PRIOS 1 +#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1) + struct node_caps { size_t arr_sz; long *caps; @@ -111,12 +115,16 @@ static struct init_tree_node { int num_levels; } root_fs = { .type = FS_TYPE_NAMESPACE, - .ar_size = 6, + .ar_size = 7, .children = (struct init_tree_node[]) { ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS, ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, BY_PASS_PRIO_NUM_LEVELS))), + ADD_PRIO(0, LAG_MIN_LEVEL, 0, + FS_CHAINING_CAPS, + ADD_NS(ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS, + LAG_PRIO_NUM_LEVELS))), ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {}, ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))), ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, @@ -345,7 +353,7 @@ static void del_flow_table(struct fs_node *node) err = mlx5_cmd_destroy_flow_table(dev, ft); if (err) - pr_warn("flow steering can't destroy ft\n"); + mlx5_core_warn(dev, "flow steering can't destroy ft\n"); fs_get_obj(prio, ft->node.parent); prio->num_ft--; } @@ -364,7 +372,7 @@ static void del_rule(struct fs_node *node) match_value = mlx5_vzalloc(match_len); if (!match_value) { - pr_warn("failed to allocate inbox\n"); + mlx5_core_warn(dev, "failed to allocate inbox\n"); return; } @@ -387,8 +395,9 @@ static void del_rule(struct fs_node *node) modify_mask, fte); if (err) - pr_warn("%s can't del rule fg id=%d fte_index=%d\n", - __func__, fg->id, fte->index); + mlx5_core_warn(dev, + "%s can't del rule fg id=%d fte_index=%d\n", + __func__, fg->id, fte->index); } kvfree(match_value); } @@ -409,8 +418,9 @@ static void del_fte(struct fs_node *node) err = mlx5_cmd_delete_fte(dev, ft, fte->index); if (err) - pr_warn("flow steering can't delete fte in index %d of flow group id %d\n", - fte->index, fg->id); + mlx5_core_warn(dev, + "flow steering can't delete fte in index %d of flow group id %d\n", + fte->index, fg->id); fte->status = 0; fg->num_ftes--; @@ -427,8 +437,8 @@ static void del_flow_group(struct fs_node *node) dev = get_dev(&ft->node); if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id)) - pr_warn("flow steering can't destroy fg %d of ft %d\n", - fg->id, ft->id); + mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n", + fg->id, ft->id); } static struct fs_fte *alloc_fte(u8 action, @@ -475,7 +485,8 @@ static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in) } static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte, - enum fs_flow_table_type table_type) + enum fs_flow_table_type table_type, + enum fs_flow_table_op_mod op_mod) { struct mlx5_flow_table *ft; @@ -485,6 +496,7 @@ static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_ft ft->level = level; ft->node.type = FS_TYPE_FLOW_TABLE; + ft->op_mod = op_mod; ft->type = table_type; ft->vport = vport; ft->max_fte = max_fte; @@ -722,6 +734,7 @@ static void 
list_add_flow_table(struct mlx5_flow_table *ft, } static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns, + enum fs_flow_table_op_mod op_mod, u16 vport, int prio, int max_fte, u32 level) { @@ -754,18 +767,19 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa level += fs_prio->start_level; ft = alloc_flow_table(level, vport, - roundup_pow_of_two(max_fte), - root->table_type); + max_fte ? roundup_pow_of_two(max_fte) : 0, + root->table_type, + op_mod); if (!ft) { err = -ENOMEM; goto unlock_root; } tree_init_node(&ft->node, 1, del_flow_table); - log_table_sz = ilog2(ft->max_fte); + log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0; next_ft = find_next_chained_ft(fs_prio); - err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->type, ft->level, - log_table_sz, next_ft, &ft->id); + err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->op_mod, ft->type, + ft->level, log_table_sz, next_ft, &ft->id); if (err) goto free_ft; @@ -792,15 +806,26 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns, int prio, int max_fte, u32 level) { - return __mlx5_create_flow_table(ns, 0, prio, max_fte, level); + return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, 0, prio, + max_fte, level); } struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, int prio, int max_fte, u32 level, u16 vport) { - return __mlx5_create_flow_table(ns, vport, prio, max_fte, level); + return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, vport, prio, + max_fte, level); +} + +struct mlx5_flow_table *mlx5_create_lag_demux_flow_table( + struct mlx5_flow_namespace *ns, + int prio, u32 level) +{ + return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_LAG_DEMUX, 0, prio, 0, + level); } +EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table); struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, int prio, @@ -1379,6 +1404,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, switch (type) { case MLX5_FLOW_NAMESPACE_BYPASS: + case MLX5_FLOW_NAMESPACE_LAG: case MLX5_FLOW_NAMESPACE_OFFLOADS: case MLX5_FLOW_NAMESPACE_ETHTOOL: case MLX5_FLOW_NAMESPACE_KERNEL: @@ -1401,6 +1427,16 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, return &steering->esw_ingress_root_ns->ns; else return NULL; + case MLX5_FLOW_NAMESPACE_SNIFFER_RX: + if (steering->sniffer_rx_root_ns) + return &steering->sniffer_rx_root_ns->ns; + else + return NULL; + case MLX5_FLOW_NAMESPACE_SNIFFER_TX: + if (steering->sniffer_tx_root_ns) + return &steering->sniffer_tx_root_ns->ns; + else + return NULL; default: return NULL; } @@ -1700,10 +1736,46 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev) cleanup_root_ns(steering->esw_egress_root_ns); cleanup_root_ns(steering->esw_ingress_root_ns); cleanup_root_ns(steering->fdb_root_ns); + cleanup_root_ns(steering->sniffer_rx_root_ns); + cleanup_root_ns(steering->sniffer_tx_root_ns); mlx5_cleanup_fc_stats(dev); kfree(steering); } +static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering) +{ + struct fs_prio *prio; + + steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX); + if (!steering->sniffer_tx_root_ns) + return -ENOMEM; + + /* Create single prio */ + prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1); + if (IS_ERR(prio)) { + cleanup_root_ns(steering->sniffer_tx_root_ns); + return PTR_ERR(prio); + } + return 0; +} + +static int init_sniffer_rx_root_ns(struct 
mlx5_flow_steering *steering) +{ + struct fs_prio *prio; + + steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX); + if (!steering->sniffer_rx_root_ns) + return -ENOMEM; + + /* Create single prio */ + prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1); + if (IS_ERR(prio)) { + cleanup_root_ns(steering->sniffer_rx_root_ns); + return PTR_ERR(prio); + } + return 0; +} + static int init_fdb_root_ns(struct mlx5_flow_steering *steering) { struct fs_prio *prio; @@ -1800,6 +1872,18 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) } } + if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) { + err = init_sniffer_rx_root_ns(steering); + if (err) + goto err; + } + + if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) { + err = init_sniffer_tx_root_ns(steering); + if (err) + goto err; + } + return 0; err: mlx5_cleanup_fs(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 9cffb6aeb4e9..71ff03bceabb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -49,6 +49,13 @@ enum fs_flow_table_type { FS_FT_ESW_EGRESS_ACL = 0x2, FS_FT_ESW_INGRESS_ACL = 0x3, FS_FT_FDB = 0X4, + FS_FT_SNIFFER_RX = 0X5, + FS_FT_SNIFFER_TX = 0X6, +}; + +enum fs_flow_table_op_mod { + FS_FT_OP_MOD_NORMAL, + FS_FT_OP_MOD_LAG_DEMUX, }; enum fs_fte_status { @@ -61,6 +68,8 @@ struct mlx5_flow_steering { struct mlx5_flow_root_namespace *fdb_root_ns; struct mlx5_flow_root_namespace *esw_egress_root_ns; struct mlx5_flow_root_namespace *esw_ingress_root_ns; + struct mlx5_flow_root_namespace *sniffer_tx_root_ns; + struct mlx5_flow_root_namespace *sniffer_rx_root_ns; }; struct fs_node { @@ -93,6 +102,7 @@ struct mlx5_flow_table { unsigned int max_fte; unsigned int level; enum fs_flow_table_type type; + enum fs_flow_table_op_mod op_mod; struct { bool active; unsigned int required_groups; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 77fc1aa26114..5718aada6605 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -38,13 +38,10 @@ static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out, int outlen) { - u32 in[MLX5_ST_SZ_DW(query_adapter_in)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {0}; MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER); - - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen); + return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); } int mlx5_query_board_id(struct mlx5_core_dev *dev) @@ -162,38 +159,18 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) int mlx5_cmd_init_hca(struct mlx5_core_dev *dev) { - struct mlx5_cmd_init_hca_mbox_in in; - struct mlx5_cmd_init_hca_mbox_out out; - int err; - - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_INIT_HCA); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - err = mlx5_cmd_status_to_err(&out.hdr); + u32 out[MLX5_ST_SZ_DW(init_hca_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {0}; - return err; + MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev) { - struct mlx5_cmd_teardown_hca_mbox_in in; - struct mlx5_cmd_teardown_hca_mbox_out out; - int err; + u32 
out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0}; - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_TEARDOWN_HCA); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - err = mlx5_cmd_status_to_err(&out.hdr); - - return err; + MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c new file mode 100644 index 000000000000..92c3e0dbcbdc --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c @@ -0,0 +1,602 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/netdevice.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/vport.h> +#include "mlx5_core.h" + +enum { + MLX5_LAG_FLAG_BONDED = 1 << 0, +}; + +struct lag_func { + struct mlx5_core_dev *dev; + struct net_device *netdev; +}; + +/* Used for collection of netdev event info. */ +struct lag_tracker { + enum netdev_lag_tx_type tx_type; + struct netdev_lag_lower_state_info netdev_state[MLX5_MAX_PORTS]; + bool is_bonded; +}; + +/* LAG data of a ConnectX card. + * It serves both its phys functions. + */ +struct mlx5_lag { + u8 flags; + u8 v2p_map[MLX5_MAX_PORTS]; + struct lag_func pf[MLX5_MAX_PORTS]; + struct lag_tracker tracker; + struct delayed_work bond_work; + struct notifier_block nb; +}; + +/* General purpose, use for short periods of time. + * Beware of lock dependencies (preferably, no locks should be acquired + * under it). 
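+ * In this file it guards the lag tracker state and the per-PF dev/netdev
+ * slots of struct mlx5_lag.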
+ */ +static DEFINE_MUTEX(lag_mutex); + +static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1, + u8 remap_port2) +{ + u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(create_lag_out)] = {0}; + void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx); + + MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG); + + MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1); + MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + +static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1, + u8 remap_port2) +{ + u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(modify_lag_out)] = {0}; + void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx); + + MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG); + MLX5_SET(modify_lag_in, in, field_select, 0x1); + + MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1); + MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + +static int mlx5_cmd_destroy_lag(struct mlx5_core_dev *dev) +{ + u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(destroy_lag_out)] = {0}; + + MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + +int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev) +{ + u32 in[MLX5_ST_SZ_DW(create_vport_lag_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(create_vport_lag_out)] = {0}; + + MLX5_SET(create_vport_lag_in, in, opcode, MLX5_CMD_OP_CREATE_VPORT_LAG); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} +EXPORT_SYMBOL(mlx5_cmd_create_vport_lag); + +int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev) +{ + u32 in[MLX5_ST_SZ_DW(destroy_vport_lag_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(destroy_vport_lag_out)] = {0}; + + MLX5_SET(destroy_vport_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_VPORT_LAG); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} +EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag); + +static struct mlx5_lag *mlx5_lag_dev_get(struct mlx5_core_dev *dev) +{ + return dev->priv.lag; +} + +static int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev, + struct net_device *ndev) +{ + int i; + + for (i = 0; i < MLX5_MAX_PORTS; i++) + if (ldev->pf[i].netdev == ndev) + return i; + + return -1; +} + +static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev) +{ + return !!(ldev->flags & MLX5_LAG_FLAG_BONDED); +} + +static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker, + u8 *port1, u8 *port2) +{ + if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) { + if (tracker->netdev_state[0].tx_enabled) { + *port1 = 1; + *port2 = 1; + } else { + *port1 = 2; + *port2 = 2; + } + } else { + *port1 = 1; + *port2 = 2; + if (!tracker->netdev_state[0].link_up) + *port1 = 2; + else if (!tracker->netdev_state[1].link_up) + *port2 = 1; + } +} + +static void mlx5_activate_lag(struct mlx5_lag *ldev, + struct lag_tracker *tracker) +{ + struct mlx5_core_dev *dev0 = ldev->pf[0].dev; + int err; + + ldev->flags |= MLX5_LAG_FLAG_BONDED; + + mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[0], + &ldev->v2p_map[1]); + + err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[0], ldev->v2p_map[1]); + if (err) + mlx5_core_err(dev0, + "Failed to create LAG (%d)\n", + err); +} + +static void mlx5_deactivate_lag(struct mlx5_lag *ldev) +{ + struct mlx5_core_dev *dev0 = ldev->pf[0].dev; + int err; + + ldev->flags &= 
~MLX5_LAG_FLAG_BONDED; + + err = mlx5_cmd_destroy_lag(dev0); + if (err) + mlx5_core_err(dev0, + "Failed to destroy LAG (%d)\n", + err); +} + +static void mlx5_do_bond(struct mlx5_lag *ldev) +{ + struct mlx5_core_dev *dev0 = ldev->pf[0].dev; + struct mlx5_core_dev *dev1 = ldev->pf[1].dev; + struct lag_tracker tracker; + u8 v2p_port1, v2p_port2; + int i, err; + + if (!dev0 || !dev1) + return; + + mutex_lock(&lag_mutex); + tracker = ldev->tracker; + mutex_unlock(&lag_mutex); + + if (tracker.is_bonded && !mlx5_lag_is_bonded(ldev)) { + if (mlx5_sriov_is_enabled(dev0) || + mlx5_sriov_is_enabled(dev1)) { + mlx5_core_warn(dev0, "LAG is not supported with SRIOV"); + return; + } + + for (i = 0; i < MLX5_MAX_PORTS; i++) + mlx5_remove_dev_by_protocol(ldev->pf[i].dev, + MLX5_INTERFACE_PROTOCOL_IB); + + mlx5_activate_lag(ldev, &tracker); + + mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB); + mlx5_nic_vport_enable_roce(dev1); + } else if (tracker.is_bonded && mlx5_lag_is_bonded(ldev)) { + mlx5_infer_tx_affinity_mapping(&tracker, &v2p_port1, + &v2p_port2); + + if ((v2p_port1 != ldev->v2p_map[0]) || + (v2p_port2 != ldev->v2p_map[1])) { + ldev->v2p_map[0] = v2p_port1; + ldev->v2p_map[1] = v2p_port2; + + err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2); + if (err) + mlx5_core_err(dev0, + "Failed to modify LAG (%d)\n", + err); + } + } else if (!tracker.is_bonded && mlx5_lag_is_bonded(ldev)) { + mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB); + mlx5_nic_vport_disable_roce(dev1); + + mlx5_deactivate_lag(ldev); + + for (i = 0; i < MLX5_MAX_PORTS; i++) + if (ldev->pf[i].dev) + mlx5_add_dev_by_protocol(ldev->pf[i].dev, + MLX5_INTERFACE_PROTOCOL_IB); + } +} + +static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay) +{ + schedule_delayed_work(&ldev->bond_work, delay); +} + +static void mlx5_do_bond_work(struct work_struct *work) +{ + struct delayed_work *delayed_work = to_delayed_work(work); + struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag, + bond_work); + int status; + + status = mutex_trylock(&mlx5_intf_mutex); + if (!status) { + /* 1 sec delay. */ + mlx5_queue_bond_work(ldev, HZ); + return; + } + + mlx5_do_bond(ldev); + mutex_unlock(&mlx5_intf_mutex); +} + +static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev, + struct lag_tracker *tracker, + struct net_device *ndev, + struct netdev_notifier_changeupper_info *info) +{ + struct net_device *upper = info->upper_dev, *ndev_tmp; + struct netdev_lag_upper_info *lag_upper_info; + bool is_bonded; + int bond_status = 0; + int num_slaves = 0; + int idx; + + if (!netif_is_lag_master(upper)) + return 0; + + lag_upper_info = info->upper_info; + + /* The event may still be of interest if the slave does not belong to + * us, but is enslaved to a master which has one or more of our netdevs + * as slaves (e.g., if a new slave is added to a master that bonds two + * of our netdevs, we should unbond). + */ + rcu_read_lock(); + for_each_netdev_in_bond_rcu(upper, ndev_tmp) { + idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp); + if (idx > -1) + bond_status |= (1 << idx); + + num_slaves++; + } + rcu_read_unlock(); + + /* None of this lagdev's netdevs are slaves of this master. */ + if (!(bond_status & 0x3)) + return 0; + + if (lag_upper_info) + tracker->tx_type = lag_upper_info->tx_type; + + /* Determine bonding status: + * A device is considered bonded if both its physical ports are slaves + * of the same lag master, and only them. + * Lag mode must be activebackup or hash. 
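+ * For example, when both PF netdevs (and no others) are enslaved to the
+ * same active-backup bond, num_slaves == MLX5_MAX_PORTS and
+ * bond_status == 0x3, so the device is reported as bonded.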
+ */ + is_bonded = (num_slaves == MLX5_MAX_PORTS) && + (bond_status == 0x3) && + ((tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) || + (tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH)); + + if (tracker->is_bonded != is_bonded) { + tracker->is_bonded = is_bonded; + return 1; + } + + return 0; +} + +static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev, + struct lag_tracker *tracker, + struct net_device *ndev, + struct netdev_notifier_changelowerstate_info *info) +{ + struct netdev_lag_lower_state_info *lag_lower_info; + int idx; + + if (!netif_is_lag_port(ndev)) + return 0; + + idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev); + if (idx == -1) + return 0; + + /* This information is used to determine virtual to physical + * port mapping. + */ + lag_lower_info = info->lower_state_info; + if (!lag_lower_info) + return 0; + + tracker->netdev_state[idx] = *lag_lower_info; + + return 1; +} + +static int mlx5_lag_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct net_device *ndev = netdev_notifier_info_to_dev(ptr); + struct lag_tracker tracker; + struct mlx5_lag *ldev; + int changed = 0; + + if (!net_eq(dev_net(ndev), &init_net)) + return NOTIFY_DONE; + + if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE)) + return NOTIFY_DONE; + + ldev = container_of(this, struct mlx5_lag, nb); + tracker = ldev->tracker; + + switch (event) { + case NETDEV_CHANGEUPPER: + changed = mlx5_handle_changeupper_event(ldev, &tracker, ndev, + ptr); + break; + case NETDEV_CHANGELOWERSTATE: + changed = mlx5_handle_changelowerstate_event(ldev, &tracker, + ndev, ptr); + break; + } + + mutex_lock(&lag_mutex); + ldev->tracker = tracker; + mutex_unlock(&lag_mutex); + + if (changed) + mlx5_queue_bond_work(ldev, 0); + + return NOTIFY_DONE; +} + +static struct mlx5_lag *mlx5_lag_dev_alloc(void) +{ + struct mlx5_lag *ldev; + + ldev = kzalloc(sizeof(*ldev), GFP_KERNEL); + if (!ldev) + return NULL; + + INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work); + + return ldev; +} + +static void mlx5_lag_dev_free(struct mlx5_lag *ldev) +{ + kfree(ldev); +} + +static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev, + struct mlx5_core_dev *dev, + struct net_device *netdev) +{ + unsigned int fn = PCI_FUNC(dev->pdev->devfn); + + if (fn >= MLX5_MAX_PORTS) + return; + + mutex_lock(&lag_mutex); + ldev->pf[fn].dev = dev; + ldev->pf[fn].netdev = netdev; + ldev->tracker.netdev_state[fn].link_up = 0; + ldev->tracker.netdev_state[fn].tx_enabled = 0; + + dev->priv.lag = ldev; + mutex_unlock(&lag_mutex); +} + +static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev, + struct mlx5_core_dev *dev) +{ + int i; + + for (i = 0; i < MLX5_MAX_PORTS; i++) + if (ldev->pf[i].dev == dev) + break; + + if (i == MLX5_MAX_PORTS) + return; + + mutex_lock(&lag_mutex); + memset(&ldev->pf[i], 0, sizeof(*ldev->pf)); + + dev->priv.lag = NULL; + mutex_unlock(&lag_mutex); +} + +static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev) +{ + return (u16)((dev->pdev->bus->number << 8) | + PCI_SLOT(dev->pdev->devfn)); +} + +/* Must be called with intf_mutex held */ +void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev) +{ + struct mlx5_lag *ldev = NULL; + struct mlx5_core_dev *tmp_dev; + struct mlx5_priv *priv; + u16 pci_id; + + if (!MLX5_CAP_GEN(dev, vport_group_manager) || + !MLX5_CAP_GEN(dev, lag_master) || + (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)) + return; + + pci_id = mlx5_gen_pci_id(dev); + + mlx5_core_for_each_priv(priv) { + tmp_dev = container_of(priv, struct mlx5_core_dev, 
priv); + if ((dev != tmp_dev) && + (mlx5_gen_pci_id(tmp_dev) == pci_id)) { + ldev = tmp_dev->priv.lag; + break; + } + } + + if (!ldev) { + ldev = mlx5_lag_dev_alloc(); + if (!ldev) { + mlx5_core_err(dev, "Failed to alloc lag dev\n"); + return; + } + } + + mlx5_lag_dev_add_pf(ldev, dev, netdev); + + if (!ldev->nb.notifier_call) { + ldev->nb.notifier_call = mlx5_lag_netdev_event; + if (register_netdevice_notifier(&ldev->nb)) { + ldev->nb.notifier_call = NULL; + mlx5_core_err(dev, "Failed to register LAG netdev notifier\n"); + } + } +} + +/* Must be called with intf_mutex held */ +void mlx5_lag_remove(struct mlx5_core_dev *dev) +{ + struct mlx5_lag *ldev; + int i; + + ldev = mlx5_lag_dev_get(dev); + if (!ldev) + return; + + if (mlx5_lag_is_bonded(ldev)) + mlx5_deactivate_lag(ldev); + + mlx5_lag_dev_remove_pf(ldev, dev); + + for (i = 0; i < MLX5_MAX_PORTS; i++) + if (ldev->pf[i].dev) + break; + + if (i == MLX5_MAX_PORTS) { + if (ldev->nb.notifier_call) + unregister_netdevice_notifier(&ldev->nb); + cancel_delayed_work_sync(&ldev->bond_work); + mlx5_lag_dev_free(ldev); + } +} + +bool mlx5_lag_is_active(struct mlx5_core_dev *dev) +{ + struct mlx5_lag *ldev; + bool res; + + mutex_lock(&lag_mutex); + ldev = mlx5_lag_dev_get(dev); + res = ldev && mlx5_lag_is_bonded(ldev); + mutex_unlock(&lag_mutex); + + return res; +} +EXPORT_SYMBOL(mlx5_lag_is_active); + +struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev) +{ + struct net_device *ndev = NULL; + struct mlx5_lag *ldev; + + mutex_lock(&lag_mutex); + ldev = mlx5_lag_dev_get(dev); + + if (!(ldev && mlx5_lag_is_bonded(ldev))) + goto unlock; + + if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) { + ndev = ldev->tracker.netdev_state[0].tx_enabled ? + ldev->pf[0].netdev : ldev->pf[1].netdev; + } else { + ndev = ldev->pf[0].netdev; + } + if (ndev) + dev_hold(ndev); + +unlock: + mutex_unlock(&lag_mutex); + + return ndev; +} +EXPORT_SYMBOL(mlx5_lag_get_roce_netdev); + +bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv) +{ + struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, + priv); + struct mlx5_lag *ldev; + + if (intf->protocol != MLX5_INTERFACE_PROTOCOL_IB) + return true; + + ldev = mlx5_lag_dev_get(dev); + if (!ldev || !mlx5_lag_is_bonded(ldev) || ldev->pf[0].dev == dev) + return true; + + /* If bonded, we do not add an IB device for PF1. 
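+ * The IB device registered on PF0 then represents both ports;
+ * mlx5_lag_get_roce_netdev() above selects which port's netdev to report
+ * for it.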
*/ + return false; +} + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mad.c b/drivers/net/ethernet/mellanox/mlx5/core/mad.c index 1368dac00da0..3a3b0005fd2b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mad.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mad.c @@ -39,36 +39,33 @@ int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb, u16 opmod, u8 port) { - struct mlx5_mad_ifc_mbox_in *in = NULL; - struct mlx5_mad_ifc_mbox_out *out = NULL; - int err; + int outlen = MLX5_ST_SZ_BYTES(mad_ifc_out); + int inlen = MLX5_ST_SZ_BYTES(mad_ifc_in); + int err = -ENOMEM; + void *data; + void *resp; + u32 *out; + u32 *in; - in = kzalloc(sizeof(*in), GFP_KERNEL); - if (!in) - return -ENOMEM; - - out = kzalloc(sizeof(*out), GFP_KERNEL); - if (!out) { - err = -ENOMEM; + in = kzalloc(inlen, GFP_KERNEL); + out = kzalloc(outlen, GFP_KERNEL); + if (!in || !out) goto out; - } - in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MAD_IFC); - in->hdr.opmod = cpu_to_be16(opmod); - in->port = port; + MLX5_SET(mad_ifc_in, in, opcode, MLX5_CMD_OP_MAD_IFC); + MLX5_SET(mad_ifc_in, in, op_mod, opmod); + MLX5_SET(mad_ifc_in, in, port, port); - memcpy(in->data, inb, sizeof(in->data)); + data = MLX5_ADDR_OF(mad_ifc_in, in, mad); + memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad)); - err = mlx5_cmd_exec(dev, in, sizeof(*in), out, sizeof(*out)); + err = mlx5_cmd_exec(dev, in, inlen, out, outlen); if (err) goto out; - if (out->hdr.status) { - err = mlx5_cmd_status_to_err(&out->hdr); - goto out; - } - - memcpy(outb, out->data, sizeof(out->data)); + resp = MLX5_ADDR_OF(mad_ifc_out, out, response_mad_packet); + memcpy(outb, resp, + MLX5_FLD_SZ_BYTES(mad_ifc_out, response_mad_packet)); out: kfree(out); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 2385bae92672..c132ef1faefe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -73,8 +73,9 @@ module_param_named(prof_sel, prof_sel, int, 0444); MODULE_PARM_DESC(prof_sel, "profile selector. 
Valid range 0 - 2"); static LIST_HEAD(intf_list); -static LIST_HEAD(dev_list); -static DEFINE_MUTEX(intf_mutex); + +LIST_HEAD(mlx5_dev_list); +DEFINE_MUTEX(mlx5_intf_mutex); struct mlx5_device_context { struct list_head list; @@ -324,7 +325,7 @@ enum { MLX5_DEV_CAP_FLAG_DCT, }; -static u16 to_fw_pkey_sz(u32 size) +static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size) { switch (size) { case 128: @@ -340,7 +341,7 @@ static u16 to_fw_pkey_sz(u32 size) case 4096: return 5; default: - pr_warn("invalid pkey table size %d\n", size); + mlx5_core_warn(dev, "invalid pkey table size %d\n", size); return 0; } } @@ -363,10 +364,6 @@ static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); MLX5_SET(query_hca_cap_in, in, op_mod, opmod); err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); - if (err) - goto query_ex; - - err = mlx5_cmd_status_to_err_v2(out); if (err) { mlx5_core_warn(dev, "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n", @@ -409,20 +406,11 @@ int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type) static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz, int opmod) { - u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)]; - int err; - - memset(out, 0, sizeof(out)); + u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0}; MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP); MLX5_SET(set_hca_cap_in, in, op_mod, opmod << 1); - err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out)); - if (err) - return err; - - err = mlx5_cmd_status_to_err_v2(out); - - return err; + return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out)); } static int handle_hca_cap_atomic(struct mlx5_core_dev *dev) @@ -490,7 +478,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) 128); /* we limit the size of the pkey table to 128 entries for now */ MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size, - to_fw_pkey_sz(128)); + to_fw_pkey_sz(dev, 128)); if (prof->mask & MLX5_PROF_MASK_QP_SIZE) MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp, @@ -528,37 +516,22 @@ static int set_hca_ctrl(struct mlx5_core_dev *dev) int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id) { - u32 out[MLX5_ST_SZ_DW(enable_hca_out)]; - u32 in[MLX5_ST_SZ_DW(enable_hca_in)]; - int err; + u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0}; - memset(in, 0, sizeof(in)); MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA); MLX5_SET(enable_hca_in, in, function_id, func_id); - memset(out, 0, sizeof(out)); - - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - return mlx5_cmd_status_to_err_v2(out); + return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); } int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id) { - u32 out[MLX5_ST_SZ_DW(disable_hca_out)]; - u32 in[MLX5_ST_SZ_DW(disable_hca_in)]; - int err; + u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {0}; - memset(in, 0, sizeof(in)); MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA); MLX5_SET(disable_hca_in, in, function_id, func_id); - memset(out, 0, sizeof(out)); - err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); - if (err) - return err; - - return mlx5_cmd_status_to_err_v2(out); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev) @@ -758,44 +731,40 @@ clean: static int mlx5_core_set_issi(struct mlx5_core_dev *dev) { - u32 
query_in[MLX5_ST_SZ_DW(query_issi_in)]; - u32 query_out[MLX5_ST_SZ_DW(query_issi_out)]; - u32 set_in[MLX5_ST_SZ_DW(set_issi_in)]; - u32 set_out[MLX5_ST_SZ_DW(set_issi_out)]; - int err; + u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0}; + u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0}; u32 sup_issi; - - memset(query_in, 0, sizeof(query_in)); - memset(query_out, 0, sizeof(query_out)); + int err; MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI); - - err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in), - query_out, sizeof(query_out)); + err = mlx5_cmd_exec(dev, query_in, sizeof(query_in), + query_out, sizeof(query_out)); if (err) { - if (((struct mlx5_outbox_hdr *)query_out)->status == - MLX5_CMD_STAT_BAD_OP_ERR) { + u32 syndrome; + u8 status; + + mlx5_cmd_mbox_status(query_out, &status, &syndrome); + if (status == MLX5_CMD_STAT_BAD_OP_ERR) { pr_debug("Only ISSI 0 is supported\n"); return 0; } - pr_err("failed to query ISSI\n"); + pr_err("failed to query ISSI err(%d)\n", err); return err; } sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0); if (sup_issi & (1 << 1)) { - memset(set_in, 0, sizeof(set_in)); - memset(set_out, 0, sizeof(set_out)); + u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {0}; + u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0}; MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI); MLX5_SET(set_issi_in, set_in, current_issi, 1); - - err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in), - set_out, sizeof(set_out)); + err = mlx5_cmd_exec(dev, set_in, sizeof(set_in), + set_out, sizeof(set_out)); if (err) { - pr_err("failed to set ISSI=1\n"); + pr_err("failed to set ISSI=1 err(%d)\n", err); return err; } @@ -814,6 +783,9 @@ static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) struct mlx5_device_context *dev_ctx; struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv); + if (!mlx5_lag_intf_add(intf, priv)) + return; + dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL); if (!dev_ctx) return; @@ -852,11 +824,11 @@ static int mlx5_register_device(struct mlx5_core_dev *dev) struct mlx5_priv *priv = &dev->priv; struct mlx5_interface *intf; - mutex_lock(&intf_mutex); - list_add_tail(&priv->dev_list, &dev_list); + mutex_lock(&mlx5_intf_mutex); + list_add_tail(&priv->dev_list, &mlx5_dev_list); list_for_each_entry(intf, &intf_list, list) mlx5_add_device(intf, priv); - mutex_unlock(&intf_mutex); + mutex_unlock(&mlx5_intf_mutex); return 0; } @@ -866,11 +838,11 @@ static void mlx5_unregister_device(struct mlx5_core_dev *dev) struct mlx5_priv *priv = &dev->priv; struct mlx5_interface *intf; - mutex_lock(&intf_mutex); + mutex_lock(&mlx5_intf_mutex); list_for_each_entry(intf, &intf_list, list) mlx5_remove_device(intf, priv); list_del(&priv->dev_list); - mutex_unlock(&intf_mutex); + mutex_unlock(&mlx5_intf_mutex); } int mlx5_register_interface(struct mlx5_interface *intf) @@ -880,11 +852,11 @@ int mlx5_register_interface(struct mlx5_interface *intf) if (!intf->add || !intf->remove) return -EINVAL; - mutex_lock(&intf_mutex); + mutex_lock(&mlx5_intf_mutex); list_add_tail(&intf->list, &intf_list); - list_for_each_entry(priv, &dev_list, dev_list) + list_for_each_entry(priv, &mlx5_dev_list, dev_list) mlx5_add_device(intf, priv); - mutex_unlock(&intf_mutex); + mutex_unlock(&mlx5_intf_mutex); return 0; } @@ -894,11 +866,11 @@ void mlx5_unregister_interface(struct mlx5_interface *intf) { struct mlx5_priv *priv; - mutex_lock(&intf_mutex); - list_for_each_entry(priv, &dev_list, dev_list) + 
mutex_lock(&mlx5_intf_mutex); + list_for_each_entry(priv, &mlx5_dev_list, dev_list) mlx5_remove_device(intf, priv); list_del(&intf->list); - mutex_unlock(&intf_mutex); + mutex_unlock(&mlx5_intf_mutex); } EXPORT_SYMBOL(mlx5_unregister_interface); @@ -924,6 +896,30 @@ void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol) } EXPORT_SYMBOL(mlx5_get_protocol_dev); +/* Must be called with intf_mutex held */ +void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol) +{ + struct mlx5_interface *intf; + + list_for_each_entry(intf, &intf_list, list) + if (intf->protocol == protocol) { + mlx5_add_device(intf, &dev->priv); + break; + } +} + +/* Must be called with intf_mutex held */ +void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol) +{ + struct mlx5_interface *intf; + + list_for_each_entry(intf, &intf_list, list) + if (intf->protocol == protocol) { + mlx5_remove_device(intf, &dev->priv); + break; + } +} + static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv) { struct pci_dev *pdev = dev->pdev; @@ -1344,8 +1340,9 @@ static int init_one(struct pci_dev *pdev, pci_set_drvdata(pdev, dev); if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) { - pr_warn("selected profile out of range, selecting default (%d)\n", - MLX5_DEFAULT_PROF); + mlx5_core_warn(dev, + "selected profile out of range, selecting default (%d)\n", + MLX5_DEFAULT_PROF); prof_sel = MLX5_DEFAULT_PROF; } dev->profile = &profile[prof_sel]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c index d5a0c2d61a18..ba2b09cc192f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c @@ -37,70 +37,30 @@ #include <rdma/ib_verbs.h> #include "mlx5_core.h" -struct mlx5_attach_mcg_mbox_in { - struct mlx5_inbox_hdr hdr; - __be32 qpn; - __be32 rsvd; - u8 gid[16]; -}; - -struct mlx5_attach_mcg_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvf[8]; -}; - -struct mlx5_detach_mcg_mbox_in { - struct mlx5_inbox_hdr hdr; - __be32 qpn; - __be32 rsvd; - u8 gid[16]; -}; - -struct mlx5_detach_mcg_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvf[8]; -}; - int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) { - struct mlx5_attach_mcg_mbox_in in; - struct mlx5_attach_mcg_mbox_out out; - int err; + u32 out[MLX5_ST_SZ_DW(attach_to_mcg_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {0}; + void *gid; - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ATTACH_TO_MCG); - memcpy(in.gid, mgid, sizeof(*mgid)); - in.qpn = cpu_to_be32(qpn); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - err = mlx5_cmd_status_to_err(&out.hdr); - - return err; + MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG); + MLX5_SET(attach_to_mcg_in, in, qpn, qpn); + gid = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid); + memcpy(gid, mgid, sizeof(*mgid)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_attach_mcg); int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) { - struct mlx5_detach_mcg_mbox_in in; - struct mlx5_detach_mcg_mbox_out out; - int err; - - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETTACH_FROM_MCG); - memcpy(in.gid, mgid, sizeof(*mgid)); - in.qpn = cpu_to_be32(qpn); - err = mlx5_cmd_exec(dev, &in, sizeof(in), 
&out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - err = mlx5_cmd_status_to_err(&out.hdr); + u32 out[MLX5_ST_SZ_DW(detach_from_mcg_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {0}; + void *gid; - return err; + MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG); + MLX5_SET(detach_from_mcg_in, in, qpn, qpn); + gid = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid); + memcpy(gid, mgid, sizeof(*mgid)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_detach_mcg); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 2f86ec6fcf25..714b71bed2be 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -46,6 +46,9 @@ extern int mlx5_core_debug_mask; +extern struct list_head mlx5_dev_list; +extern struct mutex mlx5_intf_mutex; + #define mlx5_core_dbg(__dev, format, ...) \ dev_dbg(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format, \ (__dev)->priv.name, __func__, __LINE__, current->pid, \ @@ -58,8 +61,8 @@ do { \ } while (0) #define mlx5_core_err(__dev, format, ...) \ - dev_err(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format, \ - (__dev)->priv.name, __func__, __LINE__, current->pid, \ + dev_err(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ ##__VA_ARGS__) #define mlx5_core_warn(__dev, format, ...) \ @@ -70,24 +73,14 @@ do { \ #define mlx5_core_info(__dev, format, ...) \ dev_info(&(__dev)->pdev->dev, format, ##__VA_ARGS__) +#define mlx5_core_for_each_priv(__priv) \ + list_for_each_entry(__priv, &mlx5_dev_list, dev_list) + enum { MLX5_CMD_DATA, /* print command payload only */ MLX5_CMD_TIME, /* print command execution time */ }; -static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in, - int in_size, u32 *out, - int out_size) -{ - int err; - - err = mlx5_cmd_exec(dev, in, in_size, out, out_size); - if (err) - return err; - - return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out); -} - int mlx5_query_hca_caps(struct mlx5_core_dev *dev); int mlx5_query_board_id(struct mlx5_core_dev *dev); int mlx5_cmd_init_hca(struct mlx5_core_dev *dev); @@ -97,6 +90,7 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, void mlx5_enter_error_state(struct mlx5_core_dev *dev); void mlx5_disable_device(struct mlx5_core_dev *dev); int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs); +bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev); int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id); int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id); int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev); @@ -105,7 +99,27 @@ u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx); struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn); void mlx5_cq_tasklet_cb(unsigned long data); +void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev); +void mlx5_lag_remove(struct mlx5_core_dev *dev); + +void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol); +void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol); + +bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv); + void mlx5e_init(void); void mlx5e_cleanup(void); +static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev) +{ + /* LACP owner conditions: + * 1) Function is physical. + * 2) LAG is supported by FW. 
+ * 3) LAG is managed by driver (currently the only option). + */ + return MLX5_CAP_GEN(dev, vport_group_manager) && + (MLX5_CAP_GEN(dev, num_lag_ports) > 1) && + MLX5_CAP_GEN(dev, lag_master); +} + #endif /* __MLX5_CORE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c index 77a7293921d5..b9736f505bdf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c @@ -49,48 +49,43 @@ void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev) { } -int mlx5_core_create_mkey(struct mlx5_core_dev *dev, - struct mlx5_core_mkey *mkey, - struct mlx5_create_mkey_mbox_in *in, int inlen, - mlx5_cmd_cbk_t callback, void *context, - struct mlx5_create_mkey_mbox_out *out) +int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev, + struct mlx5_core_mkey *mkey, + u32 *in, int inlen, + u32 *out, int outlen, + mlx5_cmd_cbk_t callback, void *context) { struct mlx5_mkey_table *table = &dev->priv.mkey_table; - struct mlx5_create_mkey_mbox_out lout; + u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {0}; + u32 mkey_index; + void *mkc; int err; u8 key; - memset(&lout, 0, sizeof(lout)); spin_lock_irq(&dev->priv.mkey_lock); key = dev->priv.mkey_key++; spin_unlock_irq(&dev->priv.mkey_lock); - in->seg.qpn_mkey7_0 |= cpu_to_be32(key); - in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_MKEY); - if (callback) { - err = mlx5_cmd_exec_cb(dev, in, inlen, out, sizeof(*out), - callback, context); - return err; - } else { - err = mlx5_cmd_exec(dev, in, inlen, &lout, sizeof(lout)); - } + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); - if (err) { - mlx5_core_dbg(dev, "cmd exec failed %d\n", err); - return err; - } + MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY); + MLX5_SET(mkc, mkc, mkey_7_0, key); - if (lout.hdr.status) { - mlx5_core_dbg(dev, "status %d\n", lout.hdr.status); - return mlx5_cmd_status_to_err(&lout.hdr); - } + if (callback) + return mlx5_cmd_exec_cb(dev, in, inlen, out, outlen, + callback, context); + + err = mlx5_cmd_exec(dev, in, inlen, lout, sizeof(lout)); + if (err) + return err; - mkey->iova = be64_to_cpu(in->seg.start_addr); - mkey->size = be64_to_cpu(in->seg.len); - mkey->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key; - mkey->pd = be32_to_cpu(in->seg.flags_pd) & 0xffffff; + mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index); + mkey->iova = MLX5_GET64(mkc, mkc, start_addr); + mkey->size = MLX5_GET64(mkc, mkc, len); + mkey->key = mlx5_idx_to_mkey(mkey_index) | key; + mkey->pd = MLX5_GET(mkc, mkc, pd); mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", - be32_to_cpu(lout.mkey), key, mkey->key); + mkey_index, key, mkey->key); /* connect to mkey tree */ write_lock_irq(&table->lock); @@ -104,20 +99,25 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, return err; } +EXPORT_SYMBOL(mlx5_core_create_mkey_cb); + +int mlx5_core_create_mkey(struct mlx5_core_dev *dev, + struct mlx5_core_mkey *mkey, + u32 *in, int inlen) +{ + return mlx5_core_create_mkey_cb(dev, mkey, in, inlen, + NULL, 0, NULL, NULL); +} EXPORT_SYMBOL(mlx5_core_create_mkey); int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey) { struct mlx5_mkey_table *table = &dev->priv.mkey_table; - struct mlx5_destroy_mkey_mbox_in in; - struct mlx5_destroy_mkey_mbox_out out; + u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {0}; struct mlx5_core_mkey *deleted_mkey; unsigned long flags; - int err; - - memset(&in, 0, sizeof(in)); - 
memset(&out, 0, sizeof(out)); write_lock_irqsave(&table->lock, flags); deleted_mkey = radix_tree_delete(&table->tree, mlx5_base_mkey(mkey->key)); @@ -128,94 +128,71 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, return -ENOENT; } - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY); - in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mkey->key)); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); - - return err; + MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY); + MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_destroy_mkey); int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, - struct mlx5_query_mkey_mbox_out *out, int outlen) + u32 *out, int outlen) { - struct mlx5_query_mkey_mbox_in in; - int err; + u32 in[MLX5_ST_SZ_DW(query_mkey_in)] = {0}; - memset(&in, 0, sizeof(in)); memset(out, 0, outlen); - - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_MKEY); - in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mkey->key)); - err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); - if (err) - return err; - - if (out->hdr.status) - return mlx5_cmd_status_to_err(&out->hdr); - - return err; + MLX5_SET(query_mkey_in, in, opcode, MLX5_CMD_OP_QUERY_MKEY); + MLX5_SET(query_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); } EXPORT_SYMBOL(mlx5_core_query_mkey); int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey, u32 *mkey) { - struct mlx5_query_special_ctxs_mbox_in in; - struct mlx5_query_special_ctxs_mbox_out out; + u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {0}; int err; - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); - - *mkey = be32_to_cpu(out.dump_fill_mkey); - + MLX5_SET(query_special_contexts_in, in, opcode, + MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (!err) + *mkey = MLX5_GET(query_special_contexts_out, out, + dump_fill_mkey); return err; } EXPORT_SYMBOL(mlx5_core_dump_fill_mkey); +static inline u32 mlx5_get_psv(u32 *out, int psv_index) +{ + switch (psv_index) { + case 1: return MLX5_GET(create_psv_out, out, psv1_index); + case 2: return MLX5_GET(create_psv_out, out, psv2_index); + case 3: return MLX5_GET(create_psv_out, out, psv3_index); + default: return MLX5_GET(create_psv_out, out, psv0_index); + } +} + int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, int npsvs, u32 *sig_index) { - struct mlx5_allocate_psv_in in; - struct mlx5_allocate_psv_out out; + u32 out[MLX5_ST_SZ_DW(create_psv_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(create_psv_in)] = {0}; int i, err; if (npsvs > MLX5_MAX_PSVS) return -EINVAL; - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); + MLX5_SET(create_psv_in, in, opcode, MLX5_CMD_OP_CREATE_PSV); + MLX5_SET(create_psv_in, in, pd, pdn); + MLX5_SET(create_psv_in, in, num_psv, npsvs); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_PSV); - in.npsv_pd = cpu_to_be32((npsvs << 28) | pdn); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if 
(err) { - mlx5_core_err(dev, "cmd exec failed %d\n", err); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (err) return err; - } - - if (out.hdr.status) { - mlx5_core_err(dev, "create_psv bad status %d\n", - out.hdr.status); - return mlx5_cmd_status_to_err(&out.hdr); - } for (i = 0; i < npsvs; i++) - sig_index[i] = be32_to_cpu(out.psv_idx[i]) & 0xffffff; + sig_index[i] = mlx5_get_psv(out, i); return err; } @@ -223,29 +200,11 @@ EXPORT_SYMBOL(mlx5_core_create_psv); int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num) { - struct mlx5_destroy_psv_in in; - struct mlx5_destroy_psv_out out; - int err; - - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); + u32 out[MLX5_ST_SZ_DW(destroy_psv_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_psv_in)] = {0}; - in.psv_number = cpu_to_be32(psv_num); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_PSV); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) { - mlx5_core_err(dev, "destroy_psv cmd exec failed %d\n", err); - goto out; - } - - if (out.hdr.status) { - mlx5_core_err(dev, "destroy_psv bad status %d\n", - out.hdr.status); - err = mlx5_cmd_status_to_err(&out.hdr); - goto out; - } - -out: - return err; + MLX5_SET(destroy_psv_in, in, opcode, MLX5_CMD_OP_DESTROY_PSV); + MLX5_SET(destroy_psv_in, in, psvn, psv_num); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_destroy_psv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 32dea3524cee..673a7c96479a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -44,12 +44,6 @@ enum { MLX5_PAGES_TAKE = 2 }; -enum { - MLX5_BOOT_PAGES = 1, - MLX5_INIT_PAGES = 2, - MLX5_POST_INIT_PAGES = 3 -}; - struct mlx5_pages_req { struct mlx5_core_dev *dev; u16 func_id; @@ -67,33 +61,6 @@ struct fw_page { unsigned free_count; }; -struct mlx5_query_pages_inbox { - struct mlx5_inbox_hdr hdr; - u8 rsvd[8]; -}; - -struct mlx5_query_pages_outbox { - struct mlx5_outbox_hdr hdr; - __be16 rsvd; - __be16 func_id; - __be32 num_pages; -}; - -struct mlx5_manage_pages_inbox { - struct mlx5_inbox_hdr hdr; - __be16 rsvd; - __be16 func_id; - __be32 num_entries; - __be64 pas[0]; -}; - -struct mlx5_manage_pages_outbox { - struct mlx5_outbox_hdr hdr; - __be32 num_entries; - u8 rsvd[4]; - __be64 pas[0]; -}; - enum { MAX_RECLAIM_TIME_MSECS = 5000, MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60, @@ -167,24 +134,21 @@ static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr) static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, s32 *npages, int boot) { - struct mlx5_query_pages_inbox in; - struct mlx5_query_pages_outbox out; + u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {0}; int err; - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES); - in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES); + MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES); + MLX5_SET(query_pages_in, in, op_mod, boot ? 
+ MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES : + MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (err) return err; - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); - - *npages = be32_to_cpu(out.num_pages); - *func_id = be16_to_cpu(out.func_id); + *npages = MLX5_GET(query_pages_out, out, num_pages); + *func_id = MLX5_GET(query_pages_out, out, function_id); return err; } @@ -280,46 +244,37 @@ out_alloc: static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id) { - struct mlx5_manage_pages_inbox *in; - struct mlx5_manage_pages_outbox out; + u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0}; int err; - in = kzalloc(sizeof(*in), GFP_KERNEL); - if (!in) - return; - - memset(&out, 0, sizeof(out)); - in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); - in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE); - in->func_id = cpu_to_be16(func_id); - err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out)); - if (!err) - err = mlx5_cmd_status_to_err(&out.hdr); + MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES); + MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE); + MLX5_SET(manage_pages_in, in, function_id, func_id); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (err) - mlx5_core_warn(dev, "page notify failed\n"); - - kfree(in); + mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n", + func_id, err); } static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, int notify_fail) { - struct mlx5_manage_pages_inbox *in; - struct mlx5_manage_pages_outbox out; - int inlen; + u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0}; + int inlen = MLX5_ST_SZ_BYTES(manage_pages_in); u64 addr; int err; + u32 *in; int i; - inlen = sizeof(*in) + npages * sizeof(in->pas[0]); + inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]); in = mlx5_vzalloc(inlen); if (!in) { err = -ENOMEM; mlx5_core_warn(dev, "vzalloc failed %d\n", inlen); goto out_free; } - memset(&out, 0, sizeof(out)); for (i = 0; i < npages; i++) { retry: @@ -332,27 +287,21 @@ retry: goto retry; } - in->pas[i] = cpu_to_be64(addr); + MLX5_SET64(manage_pages_in, in, pas[i], addr); } - in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); - in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE); - in->func_id = cpu_to_be16(func_id); - in->num_entries = cpu_to_be32(npages); - err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES); + MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE); + MLX5_SET(manage_pages_in, in, function_id, func_id); + MLX5_SET(manage_pages_in, in, input_num_entries, npages); + + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); if (err) { mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err); goto out_4k; } - err = mlx5_cmd_status_to_err(&out.hdr); - if (err) { - mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n", - func_id, npages, out.hdr.status); - goto out_4k; - } - dev->priv.fw_pages += npages; if (func_id) dev->priv.vfs_pages += npages; @@ -364,7 +313,7 @@ retry: out_4k: for (i--; i >= 0; i--) - free_4k(dev, be64_to_cpu(in->pas[i])); + free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i])); out_free: kvfree(in); if (notify_fail) @@ -373,8 +322,7 @@ out_free: } static int reclaim_pages_cmd(struct mlx5_core_dev *dev, - struct mlx5_manage_pages_inbox *in, int in_size, - struct 
mlx5_manage_pages_outbox *out, int out_size) + u32 *in, int in_size, u32 *out, int out_size) { struct fw_page *fwp; struct rb_node *p; @@ -382,55 +330,54 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev, u32 i = 0; if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) - return mlx5_cmd_exec_check_status(dev, (u32 *)in, in_size, - (u32 *)out, out_size); + return mlx5_cmd_exec(dev, in, in_size, out, out_size); - npages = be32_to_cpu(in->num_entries); + /* No hard feelings, we want our pages back! */ + npages = MLX5_GET(manage_pages_in, in, input_num_entries); p = rb_first(&dev->priv.page_root); while (p && i < npages) { fwp = rb_entry(p, struct fw_page, rb_node); - out->pas[i] = cpu_to_be64(fwp->addr); + MLX5_SET64(manage_pages_out, out, pas[i], fwp->addr); p = rb_next(p); i++; } - out->num_entries = cpu_to_be32(i); + MLX5_SET(manage_pages_out, out, output_num_entries, i); return 0; } static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, int *nclaimed) { - struct mlx5_manage_pages_inbox in; - struct mlx5_manage_pages_outbox *out; + int outlen = MLX5_ST_SZ_BYTES(manage_pages_out); + u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0}; int num_claimed; - int outlen; - u64 addr; + u32 *out; int err; int i; if (nclaimed) *nclaimed = 0; - memset(&in, 0, sizeof(in)); - outlen = sizeof(*out) + npages * sizeof(out->pas[0]); + outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]); out = mlx5_vzalloc(outlen); if (!out) return -ENOMEM; - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); - in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE); - in.func_id = cpu_to_be16(func_id); - in.num_entries = cpu_to_be32(npages); + MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES); + MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE); + MLX5_SET(manage_pages_in, in, function_id, func_id); + MLX5_SET(manage_pages_in, in, input_num_entries, npages); + mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); - err = reclaim_pages_cmd(dev, &in, sizeof(in), out, outlen); + err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen); if (err) { mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err); goto out_free; } - num_claimed = be32_to_cpu(out->num_entries); + num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries); if (num_claimed > npages) { mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n", num_claimed, npages); @@ -438,10 +385,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, goto out_free; } - for (i = 0; i < num_claimed; i++) { - addr = be64_to_cpu(out->pas[i]); - free_4k(dev, addr); - } + for (i = 0; i < num_claimed; i++) + free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i])); + if (nclaimed) *nclaimed = num_claimed; @@ -518,8 +464,8 @@ static int optimal_reclaimed_pages(void) int ret; ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) - - sizeof(struct mlx5_manage_pages_outbox)) / - FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]); + MLX5_ST_SZ_BYTES(manage_pages_out)) / + MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]); return ret; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pd.c b/drivers/net/ethernet/mellanox/mlx5/core/pd.c index f2d3aee909e8..bd830d8d6c5f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pd.c @@ -36,66 +36,27 @@ #include <linux/mlx5/cmd.h> #include "mlx5_core.h" -struct mlx5_alloc_pd_mbox_in { - struct mlx5_inbox_hdr hdr; - u8 rsvd[8]; -}; - -struct mlx5_alloc_pd_mbox_out { - 
struct mlx5_outbox_hdr hdr; - __be32 pdn; - u8 rsvd[4]; -}; - -struct mlx5_dealloc_pd_mbox_in { - struct mlx5_inbox_hdr hdr; - __be32 pdn; - u8 rsvd[4]; -}; - -struct mlx5_dealloc_pd_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd[8]; -}; - int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn) { - struct mlx5_alloc_pd_mbox_in in; - struct mlx5_alloc_pd_mbox_out out; + u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {0}; int err; - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_PD); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); - - *pdn = be32_to_cpu(out.pdn) & 0xffffff; + MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (!err) + *pdn = MLX5_GET(alloc_pd_out, out, pd); return err; } EXPORT_SYMBOL(mlx5_core_alloc_pd); int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn) { - struct mlx5_dealloc_pd_mbox_in in; - struct mlx5_dealloc_pd_mbox_out out; - int err; - - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_PD); - in.pdn = cpu_to_be32(pdn); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; + u32 out[MLX5_ST_SZ_DW(dealloc_pd_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {0}; - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); - - return err; + MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD); + MLX5_SET(dealloc_pd_in, in, pd, pdn); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_dealloc_pd); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 752c08127138..34e7184e23c9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -38,45 +38,42 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, void *data_out, int size_out, - u16 reg_num, int arg, int write) + u16 reg_id, int arg, int write) { - struct mlx5_access_reg_mbox_in *in = NULL; - struct mlx5_access_reg_mbox_out *out = NULL; + int outlen = MLX5_ST_SZ_BYTES(access_register_out) + size_out; + int inlen = MLX5_ST_SZ_BYTES(access_register_in) + size_in; int err = -ENOMEM; + u32 *out = NULL; + u32 *in = NULL; + void *data; - in = mlx5_vzalloc(sizeof(*in) + size_in); - if (!in) - return -ENOMEM; - - out = mlx5_vzalloc(sizeof(*out) + size_out); - if (!out) - goto ex1; - - memcpy(in->data, data_in, size_in); - in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ACCESS_REG); - in->hdr.opmod = cpu_to_be16(!write); - in->arg = cpu_to_be32(arg); - in->register_id = cpu_to_be16(reg_num); - err = mlx5_cmd_exec(dev, in, sizeof(*in) + size_in, out, - sizeof(*out) + size_out); - if (err) - goto ex2; + in = mlx5_vzalloc(inlen); + out = mlx5_vzalloc(outlen); + if (!in || !out) + goto out; - if (out->hdr.status) - err = mlx5_cmd_status_to_err(&out->hdr); + data = MLX5_ADDR_OF(access_register_in, in, register_data); + memcpy(data, data_in, size_in); - if (!err) - memcpy(data_out, out->data, size_out); + MLX5_SET(access_register_in, in, opcode, MLX5_CMD_OP_ACCESS_REG); + MLX5_SET(access_register_in, in, op_mod, !write); + MLX5_SET(access_register_in, in, argument, arg); + MLX5_SET(access_register_in, in, register_id, reg_id); + + err = mlx5_cmd_exec(dev, 
in, inlen, out, outlen); + if (err) + goto out; + + data = MLX5_ADDR_OF(access_register_out, out, register_data); + memcpy(data_out, data, size_out); -ex2: +out: kvfree(out); -ex1: kvfree(in); return err; } EXPORT_SYMBOL_GPL(mlx5_core_access_reg); - struct mlx5_reg_pcap { u8 rsvd0; u8 port_num; @@ -104,12 +101,10 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_caps); int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, int ptys_size, int proto_mask, u8 local_port) { - u32 in[MLX5_ST_SZ_DW(ptys_reg)]; + u32 in[MLX5_ST_SZ_DW(ptys_reg)] = {0}; - memset(in, 0, sizeof(in)); MLX5_SET(ptys_reg, in, local_port, local_port); MLX5_SET(ptys_reg, in, proto_mask, proto_mask); - return mlx5_core_access_reg(dev, in, sizeof(in), ptys, ptys_size, MLX5_REG_PTYS, 0, 0); } @@ -117,13 +112,11 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_ptys); int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration) { + u32 in[MLX5_ST_SZ_DW(mlcr_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(mlcr_reg)]; - u32 in[MLX5_ST_SZ_DW(mlcr_reg)]; - memset(in, 0, sizeof(in)); MLX5_SET(mlcr_reg, in, local_port, 1); MLX5_SET(mlcr_reg, in, beacon_duration, beacon_duration); - return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MLCR, 0, 1); } @@ -182,25 +175,39 @@ int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, } EXPORT_SYMBOL_GPL(mlx5_query_port_link_width_oper); -int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev, - u8 *proto_oper, int proto_mask, - u8 local_port) +int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev, + u32 *proto_oper, u8 local_port) { u32 out[MLX5_ST_SZ_DW(ptys_reg)]; int err; - err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, local_port); + err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, + local_port); if (err) return err; - if (proto_mask == MLX5_PTYS_EN) - *proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); - else - *proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper); + *proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); + + return 0; +} +EXPORT_SYMBOL(mlx5_query_port_eth_proto_oper); + +int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev, + u8 *proto_oper, u8 local_port) +{ + u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + int err; + + err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_IB, + local_port); + if (err) + return err; + + *proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper); return 0; } -EXPORT_SYMBOL_GPL(mlx5_query_port_proto_oper); +EXPORT_SYMBOL(mlx5_query_port_ib_proto_oper); int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable, u32 proto_admin, int proto_mask) @@ -246,15 +253,12 @@ EXPORT_SYMBOL_GPL(mlx5_toggle_port_link); int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status status) { - u32 in[MLX5_ST_SZ_DW(paos_reg)]; + u32 in[MLX5_ST_SZ_DW(paos_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(paos_reg)]; - memset(in, 0, sizeof(in)); - MLX5_SET(paos_reg, in, local_port, 1); MLX5_SET(paos_reg, in, admin_status, status); MLX5_SET(paos_reg, in, ase, 1); - return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PAOS, 0, 1); } @@ -263,19 +267,15 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_admin_status); int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status *status) { - u32 in[MLX5_ST_SZ_DW(paos_reg)]; + u32 in[MLX5_ST_SZ_DW(paos_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(paos_reg)]; int err; - memset(in, 0, sizeof(in)); - MLX5_SET(paos_reg, in, local_port, 1); - err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PAOS, 0, 0); if (err) 
return err; - *status = MLX5_GET(paos_reg, out, admin_status); return 0; } @@ -284,13 +284,10 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status); static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu, u16 *max_mtu, u16 *oper_mtu, u8 port) { - u32 in[MLX5_ST_SZ_DW(pmtu_reg)]; + u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(pmtu_reg)]; - memset(in, 0, sizeof(in)); - MLX5_SET(pmtu_reg, in, local_port, port); - mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PMTU, 0, 0); @@ -304,14 +301,11 @@ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu, int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port) { - u32 in[MLX5_ST_SZ_DW(pmtu_reg)]; + u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(pmtu_reg)]; - memset(in, 0, sizeof(in)); - MLX5_SET(pmtu_reg, in, admin_mtu, mtu); MLX5_SET(pmtu_reg, in, local_port, port); - return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PMTU, 0, 1); } @@ -333,15 +327,12 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu); static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num) { + u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(pmlp_reg)]; - u32 in[MLX5_ST_SZ_DW(pmlp_reg)]; int module_mapping; int err; - memset(in, 0, sizeof(in)); - MLX5_SET(pmlp_reg, in, local_port, 1); - err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PMLP, 0, 0); if (err) @@ -410,11 +401,9 @@ EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom); static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc, int pvlc_size, u8 local_port) { - u32 in[MLX5_ST_SZ_DW(pvlc_reg)]; + u32 in[MLX5_ST_SZ_DW(pvlc_reg)] = {0}; - memset(in, 0, sizeof(in)); MLX5_SET(pvlc_reg, in, local_port, local_port); - return mlx5_core_access_reg(dev, in, sizeof(in), pvlc, pvlc_size, MLX5_REG_PVLC, 0, 0); } @@ -460,10 +449,9 @@ EXPORT_SYMBOL_GPL(mlx5_core_query_ib_ppcnt); int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause) { - u32 in[MLX5_ST_SZ_DW(pfcc_reg)]; + u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(pfcc_reg)]; - memset(in, 0, sizeof(in)); MLX5_SET(pfcc_reg, in, local_port, 1); MLX5_SET(pfcc_reg, in, pptx, tx_pause); MLX5_SET(pfcc_reg, in, pprx, rx_pause); @@ -476,13 +464,11 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_pause); int mlx5_query_port_pause(struct mlx5_core_dev *dev, u32 *rx_pause, u32 *tx_pause) { - u32 in[MLX5_ST_SZ_DW(pfcc_reg)]; + u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(pfcc_reg)]; int err; - memset(in, 0, sizeof(in)); MLX5_SET(pfcc_reg, in, local_port, 1); - err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PFCC, 0, 0); if (err) @@ -500,10 +486,9 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_pause); int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx) { - u32 in[MLX5_ST_SZ_DW(pfcc_reg)]; + u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(pfcc_reg)]; - memset(in, 0, sizeof(in)); MLX5_SET(pfcc_reg, in, local_port, 1); MLX5_SET(pfcc_reg, in, pfctx, pfc_en_tx); MLX5_SET(pfcc_reg, in, pfcrx, pfc_en_rx); @@ -517,13 +502,11 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_pfc); int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx) { - u32 in[MLX5_ST_SZ_DW(pfcc_reg)]; + u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(pfcc_reg)]; int err; - memset(in, 0, sizeof(in)); MLX5_SET(pfcc_reg, in, local_port, 1); - err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PFCC, 0, 0); if (err) @@ 
-567,12 +550,11 @@ int mlx5_max_tc(struct mlx5_core_dev *mdev) int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc) { - u32 in[MLX5_ST_SZ_DW(qtct_reg)]; + u32 in[MLX5_ST_SZ_DW(qtct_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(qtct_reg)]; int err; int i; - memset(in, 0, sizeof(in)); for (i = 0; i < 8; i++) { if (prio_tc[i] > mlx5_max_tc(mdev)) return -EINVAL; @@ -617,11 +599,9 @@ static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out, int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group) { - u32 in[MLX5_ST_SZ_DW(qetc_reg)]; + u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0}; int i; - memset(in, 0, sizeof(in)); - for (i = 0; i <= mlx5_max_tc(mdev); i++) { MLX5_SET(qetc_reg, in, tc_configuration[i].g, 1); MLX5_SET(qetc_reg, in, tc_configuration[i].group, tc_group[i]); @@ -633,11 +613,9 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group); int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw) { - u32 in[MLX5_ST_SZ_DW(qetc_reg)]; + u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0}; int i; - memset(in, 0, sizeof(in)); - for (i = 0; i <= mlx5_max_tc(mdev); i++) { MLX5_SET(qetc_reg, in, tc_configuration[i].b, 1); MLX5_SET(qetc_reg, in, tc_configuration[i].bw_allocation, tc_bw[i]); @@ -651,12 +629,10 @@ int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev, u8 *max_bw_value, u8 *max_bw_units) { - u32 in[MLX5_ST_SZ_DW(qetc_reg)]; + u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0}; void *ets_tcn_conf; int i; - memset(in, 0, sizeof(in)); - MLX5_SET(qetc_reg, in, port_number, 1); for (i = 0; i <= mlx5_max_tc(mdev); i++) { @@ -701,35 +677,24 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_ets_rate_limit); int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode) { - u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)]; - u32 out[MLX5_ST_SZ_DW(set_wol_rol_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(set_wol_rol_out)] = {0}; MLX5_SET(set_wol_rol_in, in, opcode, MLX5_CMD_OP_SET_WOL_ROL); MLX5_SET(set_wol_rol_in, in, wol_mode_valid, 1); MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode); - - return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), - out, sizeof(out)); + return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL_GPL(mlx5_set_port_wol); int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode) { - u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)]; - u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)]; + u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)] = {0}; int err; - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); - MLX5_SET(query_wol_rol_in, in, opcode, MLX5_CMD_OP_QUERY_WOL_ROL); - - err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), - out, sizeof(out)); - + err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); if (!err) *wol_mode = MLX5_GET(query_wol_rol_out, out, wol_mode); @@ -740,11 +705,9 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_wol); static int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen) { - u32 in[MLX5_ST_SZ_DW(pcmr_reg)]; + u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {0}; - memset(in, 0, sizeof(in)); MLX5_SET(pcmr_reg, in, local_port, 1); - return mlx5_core_access_reg(mdev, in, sizeof(in), out, outlen, MLX5_REG_PCMR, 0, 0); } @@ -759,12 +722,10 @@ static int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen) int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable) { - u32 in[MLX5_ST_SZ_DW(pcmr_reg)]; + u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {0}; - memset(in, 0, sizeof(in)); 
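/* Note on the "= {0}" conversions in the port.c hunks above: a
 * brace-initialized automatic array is zero-filled in full, so the
 * dropped memset(in, 0, sizeof(in)) calls are redundant rather than
 * relocated and the register payloads are built exactly as before.
 */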
MLX5_SET(pcmr_reg, in, local_port, 1); MLX5_SET(pcmr_reg, in, fcs_chk, enable); - return mlx5_set_ports_check(mdev, in, sizeof(in)); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index b82d65802d96..d0a4005fe63a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -271,30 +271,20 @@ static void destroy_qprqsq_common(struct mlx5_core_dev *dev, int mlx5_core_create_qp(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, - struct mlx5_create_qp_mbox_in *in, - int inlen) + u32 *in, int inlen) { - struct mlx5_create_qp_mbox_out out; - struct mlx5_destroy_qp_mbox_in din; - struct mlx5_destroy_qp_mbox_out dout; + u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0}; + u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)]; + u32 din[MLX5_ST_SZ_DW(destroy_qp_in)]; int err; - memset(&out, 0, sizeof(out)); - in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP); + MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP); - err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); - if (err) { - mlx5_core_warn(dev, "ret %d\n", err); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); + if (err) return err; - } - - if (out.hdr.status) { - mlx5_core_warn(dev, "current num of QPs 0x%x\n", - atomic_read(&dev->num_qps)); - return mlx5_cmd_status_to_err(&out.hdr); - } - qp->qpn = be32_to_cpu(out.qpn) & 0xffffff; + qp->qpn = MLX5_GET(create_qp_out, out, qpn); mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn); err = create_qprqsq_common(dev, qp, MLX5_RES_QP); @@ -311,12 +301,11 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev, return 0; err_cmd: - memset(&din, 0, sizeof(din)); - memset(&dout, 0, sizeof(dout)); - din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP); - din.qpn = cpu_to_be32(qp->qpn); - mlx5_cmd_exec(dev, &din, sizeof(din), &out, sizeof(dout)); - + memset(din, 0, sizeof(din)); + memset(dout, 0, sizeof(dout)); + MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP); + MLX5_SET(destroy_qp_in, in, qpn, qp->qpn); + mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout)); return err; } EXPORT_SYMBOL_GPL(mlx5_core_create_qp); @@ -324,45 +313,145 @@ EXPORT_SYMBOL_GPL(mlx5_core_create_qp); int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp) { - struct mlx5_destroy_qp_mbox_in in; - struct mlx5_destroy_qp_mbox_out out; + u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {0}; int err; mlx5_debug_qp_remove(dev, qp); destroy_qprqsq_common(dev, qp); - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP); - in.qpn = cpu_to_be32(qp->qpn); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP); + MLX5_SET(destroy_qp_in, in, qpn, qp->qpn); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (err) return err; - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); - atomic_dec(&dev->num_qps); return 0; } EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp); -int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation, - struct mlx5_modify_qp_mbox_in *in, int sqd_event, +struct mbox_info { + u32 *in; + u32 *out; + int inlen; + int outlen; +}; + +static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen) +{ + mbox->inlen = inlen; + mbox->outlen = outlen; + mbox->in = kzalloc(mbox->inlen, GFP_KERNEL); + mbox->out = kzalloc(mbox->outlen, GFP_KERNEL); + if (!mbox->in || !mbox->out) { + kfree(mbox->in); + 
kfree(mbox->out); + return -ENOMEM; + } + + return 0; +} + +static void mbox_free(struct mbox_info *mbox) +{ + kfree(mbox->in); + kfree(mbox->out); +} + +static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn, + u32 opt_param_mask, void *qpc, + struct mbox_info *mbox) +{ + mbox->out = NULL; + mbox->in = NULL; + +#define MBOX_ALLOC(mbox, typ) \ + mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out)) + +#define MOD_QP_IN_SET(typ, in, _opcode, _qpn) \ + MLX5_SET(typ##_in, in, opcode, _opcode); \ + MLX5_SET(typ##_in, in, qpn, _qpn) + +#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc) \ + MOD_QP_IN_SET(typ, in, _opcode, _qpn); \ + MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \ + memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, MLX5_ST_SZ_BYTES(qpc)) + + switch (opcode) { + /* 2RST & 2ERR */ + case MLX5_CMD_OP_2RST_QP: + if (MBOX_ALLOC(mbox, qp_2rst)) + return -ENOMEM; + MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn); + break; + case MLX5_CMD_OP_2ERR_QP: + if (MBOX_ALLOC(mbox, qp_2err)) + return -ENOMEM; + MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn); + break; + + /* MODIFY with QPC */ + case MLX5_CMD_OP_RST2INIT_QP: + if (MBOX_ALLOC(mbox, rst2init_qp)) + return -ENOMEM; + MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn, + opt_param_mask, qpc); + break; + case MLX5_CMD_OP_INIT2RTR_QP: + if (MBOX_ALLOC(mbox, init2rtr_qp)) + return -ENOMEM; + MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn, + opt_param_mask, qpc); + break; + case MLX5_CMD_OP_RTR2RTS_QP: + if (MBOX_ALLOC(mbox, rtr2rts_qp)) + return -ENOMEM; + MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn, + opt_param_mask, qpc); + break; + case MLX5_CMD_OP_RTS2RTS_QP: + if (MBOX_ALLOC(mbox, rts2rts_qp)) + return -ENOMEM; + MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn, + opt_param_mask, qpc); + break; + case MLX5_CMD_OP_SQERR2RTS_QP: + if (MBOX_ALLOC(mbox, sqerr2rts_qp)) + return -ENOMEM; + MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn, + opt_param_mask, qpc); + break; + case MLX5_CMD_OP_INIT2INIT_QP: + if (MBOX_ALLOC(mbox, init2init_qp)) + return -ENOMEM; + MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn, + opt_param_mask, qpc); + break; + default: + mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n", + opcode, qpn); + return -EINVAL; + } + return 0; +} + +int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode, + u32 opt_param_mask, void *qpc, struct mlx5_core_qp *qp) { - struct mlx5_modify_qp_mbox_out out; - int err = 0; + struct mbox_info mbox; + int err; - memset(&out, 0, sizeof(out)); - in->hdr.opcode = cpu_to_be16(operation); - in->qpn = cpu_to_be32(qp->qpn); - err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out)); + err = modify_qp_mbox_alloc(dev, opcode, qp->qpn, + opt_param_mask, qpc, &mbox); if (err) return err; - return mlx5_cmd_status_to_err(&out.hdr); + err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen); + mbox_free(&mbox); + return err; } EXPORT_SYMBOL_GPL(mlx5_core_qp_modify); @@ -382,66 +471,38 @@ void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev) } int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, - struct mlx5_query_qp_mbox_out *out, int outlen) + u32 *out, int outlen) { - struct mlx5_query_qp_mbox_in in; - int err; - - memset(&in, 0, sizeof(in)); - memset(out, 0, outlen); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP); - in.qpn = cpu_to_be32(qp->qpn); - err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); - if (err) - return err; + u32 
in[MLX5_ST_SZ_DW(query_qp_in)] = {0}; - if (out->hdr.status) - return mlx5_cmd_status_to_err(&out->hdr); - - return err; + MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP); + MLX5_SET(query_qp_in, in, qpn, qp->qpn); + return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); } EXPORT_SYMBOL_GPL(mlx5_core_qp_query); int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn) { - struct mlx5_alloc_xrcd_mbox_in in; - struct mlx5_alloc_xrcd_mbox_out out; + u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {0}; int err; - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_XRCD); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - err = mlx5_cmd_status_to_err(&out.hdr); - else - *xrcdn = be32_to_cpu(out.xrcdn) & 0xffffff; - + MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (!err) + *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd); return err; } EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc); int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn) { - struct mlx5_dealloc_xrcd_mbox_in in; - struct mlx5_dealloc_xrcd_mbox_out out; - int err; + u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {0}; - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_XRCD); - in.xrcdn = cpu_to_be32(xrcdn); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - err = mlx5_cmd_status_to_err(&out.hdr); - - return err; + MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD); + MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc); @@ -449,28 +510,23 @@ EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc); int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn, u8 flags, int error) { - struct mlx5_page_fault_resume_mbox_in in; - struct mlx5_page_fault_resume_mbox_out out; - int err; - - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_PAGE_FAULT_RESUME); - in.hdr.opmod = 0; - flags &= (MLX5_PAGE_FAULT_RESUME_REQUESTOR | - MLX5_PAGE_FAULT_RESUME_WRITE | - MLX5_PAGE_FAULT_RESUME_RDMA); - flags |= (error ? 
MLX5_PAGE_FAULT_RESUME_ERROR : 0); - in.flags_qpn = cpu_to_be32((qpn & MLX5_QPN_MASK) | - (flags << MLX5_QPN_BITS)); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - err = mlx5_cmd_status_to_err(&out.hdr); - - return err; + u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {0}; + + MLX5_SET(page_fault_resume_in, in, opcode, + MLX5_CMD_OP_PAGE_FAULT_RESUME); + MLX5_SET(page_fault_resume_in, in, qpn, qpn); + + if (flags & MLX5_PAGE_FAULT_RESUME_REQUESTOR) + MLX5_SET(page_fault_resume_in, in, req_res, 1); + if (flags & MLX5_PAGE_FAULT_RESUME_WRITE) + MLX5_SET(page_fault_resume_in, in, read_write, 1); + if (flags & MLX5_PAGE_FAULT_RESUME_RDMA) + MLX5_SET(page_fault_resume_in, in, rdma, 1); + if (error) + MLX5_SET(page_fault_resume_in, in, error, 1); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume); #endif @@ -541,15 +597,12 @@ EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked); int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id) { - u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)]; - u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)]; + u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0}; int err; - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); - MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER); - err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (!err) *counter_id = MLX5_GET(alloc_q_counter_out, out, counter_set_id); @@ -559,31 +612,25 @@ EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter); int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id) { - u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)]; - u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0}; MLX5_SET(dealloc_q_counter_in, in, opcode, MLX5_CMD_OP_DEALLOC_Q_COUNTER); MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id); - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, - sizeof(out)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter); int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id, int reset, void *out, int out_size) { - u32 in[MLX5_ST_SZ_DW(query_q_counter_in)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0}; MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER); MLX5_SET(query_q_counter_in, in, clear, reset); MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id); - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_size); + return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size); } EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c index c07c28bd3d55..104902a93a0b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c @@ -63,19 +63,14 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table, static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev, u32 rate, u16 index) { - u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)]; - u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)]; - - 
memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {0}; MLX5_SET(set_rate_limit_in, in, opcode, MLX5_CMD_OP_SET_RATE_LIMIT); MLX5_SET(set_rate_limit_in, in, rate_limit_index, index); MLX5_SET(set_rate_limit_in, in, rate_limit, rate); - - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), - out, sizeof(out)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index b380a6bc1f85..78e789245183 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -37,6 +37,13 @@ #include "eswitch.h" #endif +bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev) +{ + struct mlx5_core_sriov *sriov = &dev->priv.sriov; + + return !!sriov->num_vfs; +} + static void enable_vfs(struct mlx5_core_dev *dev, int num_vfs) { struct mlx5_core_sriov *sriov = &dev->priv.sriov; @@ -144,6 +151,11 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs) if (!mlx5_core_is_pf(dev)) return -EPERM; + if (num_vfs && mlx5_lag_is_active(dev)) { + mlx5_core_warn(dev, "can't turn sriov on while LAG is active"); + return -EINVAL; + } + mlx5_core_cleanup_vfs(dev); if (!num_vfs) { @@ -155,13 +167,13 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs) if (!pci_vfs_assigned(pdev)) pci_disable_sriov(pdev); else - pr_info("unloading PF driver while leaving orphan VFs\n"); + mlx5_core_info(dev, "unloading PF driver while leaving orphan VFs\n"); return 0; } err = mlx5_core_sriov_enable(pdev, num_vfs); if (err) { - dev_warn(&pdev->dev, "mlx5_core_sriov_enable failed %d\n", err); + mlx5_core_warn(dev, "mlx5_core_sriov_enable failed %d\n", err); return err; } @@ -180,7 +192,8 @@ static int sync_required(struct pci_dev *pdev) int cur_vfs = pci_num_vf(pdev); if (cur_vfs != sriov->num_vfs) { - pr_info("current VFs %d, registered %d - sync needed\n", cur_vfs, sriov->num_vfs); + mlx5_core_warn(dev, "current VFs %d, registered %d - sync needed\n", + cur_vfs, sriov->num_vfs); return 1; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c index c07f4d01b70e..3099630015d7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c @@ -175,8 +175,8 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, MLX5_SET(create_srq_in, create_in, opcode, MLX5_CMD_OP_CREATE_SRQ); - err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out, - sizeof(create_out)); + err = mlx5_cmd_exec(dev, create_in, inlen, create_out, + sizeof(create_out)); kvfree(create_in); if (!err) srq->srqn = MLX5_GET(create_srq_out, create_out, srqn); @@ -194,8 +194,8 @@ static int destroy_srq_cmd(struct mlx5_core_dev *dev, MLX5_CMD_OP_DESTROY_SRQ); MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn); - return mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in), - srq_out, sizeof(srq_out)); + return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), + srq_out, sizeof(srq_out)); } static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, @@ -209,8 +209,8 @@ static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn); MLX5_SET(arm_xrc_srq_in, srq_in, lwm, lwm); - return 
mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in), - srq_out, sizeof(srq_out)); + return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), + srq_out, sizeof(srq_out)); } static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, @@ -228,9 +228,8 @@ static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, MLX5_SET(query_srq_in, srq_in, opcode, MLX5_CMD_OP_QUERY_SRQ); MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn); - err = mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in), - srq_out, - MLX5_ST_SZ_BYTES(query_srq_out)); + err = mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), + srq_out, MLX5_ST_SZ_BYTES(query_srq_out)); if (err) goto out; @@ -272,8 +271,8 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev, MLX5_CMD_OP_CREATE_XRC_SRQ); memset(create_out, 0, sizeof(create_out)); - err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out, - sizeof(create_out)); + err = mlx5_cmd_exec(dev, create_in, inlen, create_out, + sizeof(create_out)); if (err) goto out; @@ -286,36 +285,30 @@ out: static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq) { - u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)]; - u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)]; - - memset(xrcsrq_in, 0, sizeof(xrcsrq_in)); - memset(xrcsrq_out, 0, sizeof(xrcsrq_out)); + u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0}; + u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0}; MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ); MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn); - return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in), - xrcsrq_out, sizeof(xrcsrq_out)); + return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in), + xrcsrq_out, sizeof(xrcsrq_out)); } static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, u16 lwm) { - u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)]; - u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)]; - - memset(xrcsrq_in, 0, sizeof(xrcsrq_in)); - memset(xrcsrq_out, 0, sizeof(xrcsrq_out)); + u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0}; + u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0}; MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ); MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ); MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn); MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm); - return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in), - xrcsrq_out, sizeof(xrcsrq_out)); + return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in), + xrcsrq_out, sizeof(xrcsrq_out)); } static int query_xrc_srq_cmd(struct mlx5_core_dev *dev, @@ -335,9 +328,9 @@ static int query_xrc_srq_cmd(struct mlx5_core_dev *dev, MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ); MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn); - err = mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in), - xrcsrq_out, - MLX5_ST_SZ_BYTES(query_xrc_srq_out)); + + err = mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in), xrcsrq_out, + MLX5_ST_SZ_BYTES(query_xrc_srq_out)); if (err) goto out; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c index 28274a6fbafe..a00ff49eec18 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c @@ -36,17 +36,14 @@ int mlx5_core_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn) { - u32 
in[MLX5_ST_SZ_DW(alloc_transport_domain_in)]; - u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)]; + u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0}; int err; - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); - MLX5_SET(alloc_transport_domain_in, in, opcode, MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN); - err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (!err) *tdn = MLX5_GET(alloc_transport_domain_out, out, transport_domain); @@ -57,29 +54,23 @@ EXPORT_SYMBOL(mlx5_core_alloc_transport_domain); void mlx5_core_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn) { - u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)]; - u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)] = {0}; MLX5_SET(dealloc_transport_domain_in, in, opcode, MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN); MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn); - - mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); + mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_dealloc_transport_domain); int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn) { - u32 out[MLX5_ST_SZ_DW(create_rq_out)]; + u32 out[MLX5_ST_SZ_DW(create_rq_out)] = {0}; int err; MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ); - - memset(out, 0, sizeof(out)); - err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); if (!err) *rqn = MLX5_GET(create_rq_out, out, rqn); @@ -95,21 +86,18 @@ int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen) MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ); memset(out, 0, sizeof(out)); - return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_modify_rq); void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn) { - u32 in[MLX5_ST_SZ_DW(destroy_rq_in)]; - u32 out[MLX5_ST_SZ_DW(destroy_rq_out)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(destroy_rq_out)] = {0}; MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ); MLX5_SET(destroy_rq_in, in, rqn, rqn); - - mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); + mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_destroy_rq); @@ -121,19 +109,17 @@ int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out) MLX5_SET(query_rq_in, in, opcode, MLX5_CMD_OP_QUERY_RQ); MLX5_SET(query_rq_in, in, rqn, rqn); - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen); + return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); } EXPORT_SYMBOL(mlx5_core_query_rq); int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn) { - u32 out[MLX5_ST_SZ_DW(create_sq_out)]; + u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {0}; int err; MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ); - - memset(out, 0, sizeof(out)); - err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); if (!err) *sqn = MLX5_GET(create_sq_out, out, sqn); @@ -142,27 +128,22 @@ int 
mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn) int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen) { - u32 out[MLX5_ST_SZ_DW(modify_sq_out)]; + u32 out[MLX5_ST_SZ_DW(modify_sq_out)] = {0}; MLX5_SET(modify_sq_in, in, sqn, sqn); MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ); - - memset(out, 0, sizeof(out)); - return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_modify_sq); void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn) { - u32 in[MLX5_ST_SZ_DW(destroy_sq_in)]; - u32 out[MLX5_ST_SZ_DW(destroy_sq_out)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(destroy_sq_out)] = {0}; MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ); MLX5_SET(destroy_sq_in, in, sqn, sqn); - - mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); + mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out) @@ -172,21 +153,20 @@ int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out) MLX5_SET(query_sq_in, in, opcode, MLX5_CMD_OP_QUERY_SQ); MLX5_SET(query_sq_in, in, sqn, sqn); - - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen); + return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); } EXPORT_SYMBOL(mlx5_core_query_sq); int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tirn) { - u32 out[MLX5_ST_SZ_DW(create_tir_out)]; + u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {0}; int err; MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR); memset(out, 0, sizeof(out)); - err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); if (!err) *tirn = MLX5_GET(create_tir_out, out, tirn); @@ -197,39 +177,32 @@ EXPORT_SYMBOL(mlx5_core_create_tir); int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in, int inlen) { - u32 out[MLX5_ST_SZ_DW(modify_tir_out)]; + u32 out[MLX5_ST_SZ_DW(modify_tir_out)] = {0}; MLX5_SET(modify_tir_in, in, tirn, tirn); MLX5_SET(modify_tir_in, in, opcode, MLX5_CMD_OP_MODIFY_TIR); - - memset(out, 0, sizeof(out)); - return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); } void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn) { - u32 in[MLX5_ST_SZ_DW(destroy_tir_in)]; - u32 out[MLX5_ST_SZ_DW(destroy_tir_out)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(destroy_tir_out)] = {0}; MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR); MLX5_SET(destroy_tir_in, in, tirn, tirn); - - mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); + mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_destroy_tir); int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tisn) { - u32 out[MLX5_ST_SZ_DW(create_tis_out)]; + u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {0}; int err; MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS); - - memset(out, 0, sizeof(out)); - err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); if (!err) *tisn = MLX5_GET(create_tis_out, out, tisn); @@ -245,34 +218,29 @@ int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in, MLX5_SET(modify_tis_in, in, 
tisn, tisn); MLX5_SET(modify_tis_in, in, opcode, MLX5_CMD_OP_MODIFY_TIS); - return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_modify_tis); void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn) { - u32 in[MLX5_ST_SZ_DW(destroy_tis_in)]; - u32 out[MLX5_ST_SZ_DW(destroy_tis_out)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(destroy_tis_out)] = {0}; MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS); MLX5_SET(destroy_tis_in, in, tisn, tisn); - - mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); + mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_destroy_tis); int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rmpn) { - u32 out[MLX5_ST_SZ_DW(create_rmp_out)]; + u32 out[MLX5_ST_SZ_DW(create_rmp_out)] = {0}; int err; MLX5_SET(create_rmp_in, in, opcode, MLX5_CMD_OP_CREATE_RMP); - - memset(out, 0, sizeof(out)); - err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); if (!err) *rmpn = MLX5_GET(create_rmp_out, out, rmpn); @@ -281,38 +249,31 @@ int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen, int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen) { - u32 out[MLX5_ST_SZ_DW(modify_rmp_out)]; + u32 out[MLX5_ST_SZ_DW(modify_rmp_out)] = {0}; MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP); - - memset(out, 0, sizeof(out)); - return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); } int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn) { - u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)]; - u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)] = {0}; MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP); MLX5_SET(destroy_rmp_in, in, rmpn, rmpn); - - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out) { - u32 in[MLX5_ST_SZ_DW(query_rmp_in)]; + u32 in[MLX5_ST_SZ_DW(query_rmp_in)] = {0}; int outlen = MLX5_ST_SZ_BYTES(query_rmp_out); - memset(in, 0, sizeof(in)); MLX5_SET(query_rmp_in, in, opcode, MLX5_CMD_OP_QUERY_RMP); MLX5_SET(query_rmp_in, in, rmpn, rmpn); - - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen); + return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); } int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm) @@ -347,13 +308,11 @@ int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm) int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *xsrqn) { - u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)]; + u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)] = {0}; int err; MLX5_SET(create_xrc_srq_in, in, opcode, MLX5_CMD_OP_CREATE_XRC_SRQ); - - memset(out, 0, sizeof(out)); - err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); if (!err) *xsrqn = MLX5_GET(create_xrc_srq_out, out, xrc_srqn); @@ -362,33 +321,25 @@ int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen, int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn) { - u32 
in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)]; - u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0}; MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ); MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, xsrqn); - - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, - sizeof(out)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out) { - u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)]; + u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {0}; void *srqc; void *xrc_srqc; int err; - memset(in, 0, sizeof(in)); MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ); MLX5_SET(query_xrc_srq_in, in, xrc_srqn, xsrqn); - - err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), - out, - MLX5_ST_SZ_BYTES(query_xrc_srq_out)); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, + MLX5_ST_SZ_BYTES(query_xrc_srq_out)); if (!err) { xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, out, xrc_srq_context_entry); @@ -401,32 +352,25 @@ int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out) int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm) { - u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)]; - u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0}; MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ); MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, xsrqn); MLX5_SET(arm_xrc_srq_in, in, lwm, lwm); MLX5_SET(arm_xrc_srq_in, in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ); - - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, - sizeof(out)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqtn) { - u32 out[MLX5_ST_SZ_DW(create_rqt_out)]; + u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0}; int err; MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT); - - memset(out, 0, sizeof(out)); - err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); if (!err) *rqtn = MLX5_GET(create_rqt_out, out, rqtn); @@ -437,25 +381,20 @@ EXPORT_SYMBOL(mlx5_core_create_rqt); int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in, int inlen) { - u32 out[MLX5_ST_SZ_DW(modify_rqt_out)]; + u32 out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0}; MLX5_SET(modify_rqt_in, in, rqtn, rqtn); MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT); - - memset(out, 0, sizeof(out)); - return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); } void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn) { - u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)]; - u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0}; MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT); MLX5_SET(destroy_rqt_in, in, rqtn, rqtn); - - mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); + mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_destroy_rqt); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index 
5ff8af472bf5..ab0b896621a0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -42,73 +42,28 @@ enum { NUM_LOW_LAT_UUARS = 4, }; - -struct mlx5_alloc_uar_mbox_in { - struct mlx5_inbox_hdr hdr; - u8 rsvd[8]; -}; - -struct mlx5_alloc_uar_mbox_out { - struct mlx5_outbox_hdr hdr; - __be32 uarn; - u8 rsvd[4]; -}; - -struct mlx5_free_uar_mbox_in { - struct mlx5_inbox_hdr hdr; - __be32 uarn; - u8 rsvd[4]; -}; - -struct mlx5_free_uar_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd[8]; -}; - int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn) { - struct mlx5_alloc_uar_mbox_in in; - struct mlx5_alloc_uar_mbox_out out; + u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {0}; int err; - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_UAR); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - goto ex; - - if (out.hdr.status) { - err = mlx5_cmd_status_to_err(&out.hdr); - goto ex; - } - - *uarn = be32_to_cpu(out.uarn) & 0xffffff; - -ex: + MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (!err) + *uarn = MLX5_GET(alloc_uar_out, out, uar); return err; } EXPORT_SYMBOL(mlx5_cmd_alloc_uar); int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn) { - struct mlx5_free_uar_mbox_in in; - struct mlx5_free_uar_mbox_out out; - int err; - - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_UAR); - in.uarn = cpu_to_be32(uarn); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - goto ex; + u32 out[MLX5_ST_SZ_DW(dealloc_uar_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {0}; - if (out.hdr.status) - err = mlx5_cmd_status_to_err(&out.hdr); - -ex: - return err; + MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR); + MLX5_SET(dealloc_uar_in, in, uar, uarn); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL(mlx5_cmd_free_uar); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 21365d06982b..525f17af108e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -39,10 +39,7 @@ static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport, u32 *out, int outlen) { - int err; - u32 in[MLX5_ST_SZ_DW(query_vport_state_in)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0}; MLX5_SET(query_vport_state_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_STATE); @@ -51,11 +48,7 @@ static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, if (vport) MLX5_SET(query_vport_state_in, in, other_vport, 1); - err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen); - if (err) - mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n"); - - return err; + return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen); } u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport) @@ -81,58 +74,43 @@ EXPORT_SYMBOL_GPL(mlx5_query_vport_admin_state); int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport, u8 state) { - u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)]; - u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)]; - int err; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {0}; + u32 
out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0}; MLX5_SET(modify_vport_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VPORT_STATE); MLX5_SET(modify_vport_state_in, in, op_mod, opmod); MLX5_SET(modify_vport_state_in, in, vport_number, vport); - if (vport) MLX5_SET(modify_vport_state_in, in, other_vport, 1); - MLX5_SET(modify_vport_state_in, in, admin_state, state); - err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, - sizeof(out)); - if (err) - mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n"); - - return err; + return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL_GPL(mlx5_modify_vport_admin_state); static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport, u32 *out, int outlen) { - u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0}; MLX5_SET(query_nic_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT); - MLX5_SET(query_nic_vport_context_in, in, vport_number, vport); if (vport) MLX5_SET(query_nic_vport_context_in, in, other_vport, 1); - return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen); + return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen); } static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in, int inlen) { - u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)]; + u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0}; MLX5_SET(modify_nic_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); - - memset(out, 0, sizeof(out)); - return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out)); + return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); } void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, @@ -147,6 +125,26 @@ void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, } EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline); +int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev, + u16 vport, u8 min_inline) +{ + u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0}; + int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); + void *nic_vport_ctx; + + MLX5_SET(modify_nic_vport_context_in, in, + field_select.min_inline, 1); + MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); + MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); + + nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, + in, nic_vport_context); + MLX5_SET(nic_vport_context, nic_vport_ctx, + min_wqe_inline_mode, min_inline); + + return mlx5_modify_nic_vport_context(mdev, in, inlen); +} + int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u16 vport, u8 *addr) { @@ -254,7 +252,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, u8 addr_list[][ETH_ALEN], int *list_size) { - u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)]; + u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0}; void *nic_vport_ctx; int max_list_size; int req_list_size; @@ -278,7 +276,6 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) + req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout); - memset(in, 0, sizeof(in)); out = kzalloc(out_sz, GFP_KERNEL); if (!out) return -ENOMEM; @@ -291,7 +288,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, if (vport) MLX5_SET(query_nic_vport_context_in, in, other_vport, 1); - err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); if (err) goto 
out; @@ -361,7 +358,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev, ether_addr_copy(curr_mac, addr_list[i]); } - err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out)); + err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out)); kfree(in); return err; } @@ -406,7 +403,7 @@ int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev, if (vport) MLX5_SET(query_nic_vport_context_in, in, other_vport, 1); - err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); if (err) goto out; @@ -473,7 +470,7 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev, MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]); } - err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out)); + err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out)); kfree(in); return err; } @@ -631,10 +628,6 @@ int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, if (err) goto out; - err = mlx5_cmd_status_to_err_v2(out); - if (err) - goto out; - tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out); gid->global.subnet_prefix = tmp->global.subnet_prefix; gid->global.interface_id = tmp->global.interface_id; @@ -700,10 +693,6 @@ int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport, if (err) goto out; - err = mlx5_cmd_status_to_err_v2(out); - if (err) - goto out; - pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey); for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey)) *pkey = MLX5_GET_PR(pkey, pkarr, pkey); @@ -721,7 +710,7 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev, struct mlx5_hca_vport_context *rep) { int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out); - int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)]; + int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0}; int is_group_manager; void *out; void *ctx; @@ -729,7 +718,6 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev, is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager); - memset(in, 0, sizeof(in)); out = kzalloc(out_sz, GFP_KERNEL); if (!out) return -ENOMEM; @@ -752,9 +740,6 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev, err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); if (err) goto ex; - err = mlx5_cmd_status_to_err_v2(out); - if (err) - goto ex; ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context); rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select); @@ -969,10 +954,6 @@ int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport, MLX5_SET(query_vport_counter_in, in, port_num, port_num); err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz); - if (err) - goto free; - err = mlx5_cmd_status_to_err_v2(out); - free: kvfree(in); return err; @@ -1035,11 +1016,6 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev, MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter); MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter); err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out)); - if (err) - goto ex; - - err = mlx5_cmd_status_to_err_v2(out); - ex: kfree(in); return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c index e25a73ed2981..07a9ba6cfc70 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c @@ -46,41 +46,24 @@ void mlx5e_vxlan_init(struct mlx5e_priv *priv) static int mlx5e_vxlan_core_add_port_cmd(struct 
mlx5_core_dev *mdev, u16 port) { - struct mlx5_outbox_hdr *hdr; - int err; - - u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)]; - u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0}; MLX5_SET(add_vxlan_udp_dport_in, in, opcode, MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT); MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port); - - err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); - if (err) - return err; - - hdr = (struct mlx5_outbox_hdr *)out; - return hdr->status ? -ENOMEM : 0; + return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); } static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port) { - u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)]; - u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0}; MLX5_SET(delete_vxlan_udp_dport_in, in, opcode, MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT); MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port); - - return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, - sizeof(out)); + return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); } struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index d3476ead9982..d2e32979319c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -87,6 +87,7 @@ struct mlxsw_rx_listener { void (*func)(struct sk_buff *skb, u8 local_port, void *priv); u8 local_port; u16 trap_id; + enum mlxsw_reg_hpkt_action action; }; struct mlxsw_event_listener { diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 1721098eef13..b83d0a7a0b49 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -591,6 +591,12 @@ static const struct mlxsw_reg_info mlxsw_reg_sfn = { */ MLXSW_ITEM32(reg, sfn, swid, 0x00, 24, 8); +/* reg_sfn_end + * Forces the current session to end. + * Access: OP + */ +MLXSW_ITEM32(reg, sfn, end, 0x04, 20, 1); + /* reg_sfn_num_rec * Request: Number of learned notifications and aged-out notification * records requested. 
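For reference, a minimal sketch of the calling shape the mlx5_core conversions above converge on. It is illustrative only and not part of the patch; it reuses the alloc_pd command layout shown earlier, and the function name example_alloc_pd is made up (the real entry point is mlx5_core_alloc_pd in pd.c):

static int example_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn)
{
	/* buffers sized from the firmware layout in mlx5_ifc.h, zeroed by
	 * the initializer instead of an explicit memset()
	 */
	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {0};
	int err;

	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*pdn = MLX5_GET(alloc_pd_out, out, pd);
	return err;
}

The explicit out.hdr.status / mlx5_cmd_status_to_err() handling disappears from every converted caller because firmware status is now reported through the mlx5_cmd_exec() return value.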
@@ -605,6 +611,7 @@ static inline void mlxsw_reg_sfn_pack(char *payload) { MLXSW_REG_ZERO(sfn, payload); mlxsw_reg_sfn_swid_set(payload, 0); + mlxsw_reg_sfn_end_set(payload, 1); mlxsw_reg_sfn_num_rec_set(payload, MLXSW_REG_SFN_REC_MAX_COUNT); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 7291f2c4b0c7..6c6b726c4897 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -555,8 +555,9 @@ int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl); } -static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, - u16 vid, bool learn_enable) +int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid_begin, u16 vid_end, + bool learn_enable) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char *spvmlr_pl; @@ -565,13 +566,20 @@ static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL); if (!spvmlr_pl) return -ENOMEM; - mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid, - learn_enable); + mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid_begin, + vid_end, learn_enable); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl); kfree(spvmlr_pl); return err; } +static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid, bool learn_enable) +{ + return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid, + learn_enable); +} + static int mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) { @@ -973,10 +981,6 @@ static int mlxsw_sp_port_add_vid(struct net_device *dev, goto err_port_vp_mode_trans; } - err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); - if (err) - goto err_port_vid_learning_set; - err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged); if (err) goto err_port_add_vid; @@ -984,8 +988,6 @@ static int mlxsw_sp_port_add_vid(struct net_device *dev, return 0; err_port_add_vid: - mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); -err_port_vid_learning_set: if (list_is_singular(&mlxsw_sp_port->vports_list)) mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); err_port_vp_mode_trans: @@ -1012,8 +1014,6 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev, mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false); - mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); - /* Drop FID reference. If this was the last reference the * resources will be freed. 
*/ @@ -2570,123 +2570,47 @@ static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port, netif_receive_skb(skb); } +static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, + void *priv) +{ + skb->offload_fwd_mark = 1; + return mlxsw_sp_rx_listener_func(skb, local_port, priv); +} + +#define MLXSW_SP_RXL(_func, _trap_id, _action) \ + { \ + .func = _func, \ + .local_port = MLXSW_PORT_DONT_CARE, \ + .trap_id = MLXSW_TRAP_ID_##_trap_id, \ + .action = MLXSW_REG_HPKT_ACTION_##_action, \ + } + static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = { - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_FDB_MC, - }, + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, FDB_MC, TRAP_TO_CPU), /* Traps for specific L2 packet types, not trapped as FDB MC */ - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_STP, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_LACP, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_EAPOL, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_LLDP, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_MMRP, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_MVRP, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_RPVST, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_DHCP, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_IGMP_QUERY, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_ARPBC, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_ARPUC, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_MTUERROR, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_TTLERROR, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_LBERROR, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_OSPF, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_IP2ME, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_RTR_INGRESS0, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_HOST_MISS_IPV4, - }, + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, STP, TRAP_TO_CPU), + 
MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, LACP, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, EAPOL, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, LLDP, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, MMRP, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, MVRP, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, RPVST, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, DHCP, MIRROR_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, IGMP_QUERY, MIRROR_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V1_REPORT, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V2_REPORT, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V2_LEAVE, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V3_REPORT, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, ARPBC, MIRROR_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, ARPUC, MIRROR_TO_CPU), + /* L3 traps */ + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, MTUERROR, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, TTLERROR, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, LBERROR, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, OSPF, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IP2ME, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, RTR_INGRESS0, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, HOST_MISS_IPV4, TRAP_TO_CPU), }; static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) @@ -2713,7 +2637,7 @@ static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) if (err) goto err_rx_listener_register; - mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, + mlxsw_reg_hpkt_pack(hpkt_pl, mlxsw_sp_rx_listener[i].action, mlxsw_sp_rx_listener[i].trap_id); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); if (err) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index ab3feb81bd43..01537d3a1c48 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -558,6 +558,9 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, u32 maxrate); +int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid_begin, u16 vid_end, + bool learn_enable); #ifdef CONFIG_MLXSW_SPECTRUM_DCB diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index d1b59cdfacc1..0c3fbbc6b537 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -261,12 +261,40 @@ int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid, false); } +static int mlxsw_sp_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, + bool set) +{ + u16 vid; + int err; + + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { + vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port); + + return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid, + set); + } + + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { + err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid, + set); + if (err) + goto err_port_vid_learning_set; + } + + return 0; + +err_port_vid_learning_set: + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) + __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid, 
!set); + return err; +} + static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, struct switchdev_trans *trans, unsigned long brport_flags) { + unsigned long learning = mlxsw_sp_port->learning ? BR_LEARNING : 0; unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0; - bool set; int err; if (!mlxsw_sp_port->bridged) @@ -276,17 +304,30 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, return 0; if ((uc_flood ^ brport_flags) & BR_FLOOD) { - set = mlxsw_sp_port->uc_flood ? false : true; - err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set); + err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, + !mlxsw_sp_port->uc_flood); if (err) return err; } + if ((learning ^ brport_flags) & BR_LEARNING) { + err = mlxsw_sp_port_learning_set(mlxsw_sp_port, + !mlxsw_sp_port->learning); + if (err) + goto err_port_learning_set; + } + mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0; mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0; mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0; return 0; + +err_port_learning_set: + if ((uc_flood ^ brport_flags) & BR_FLOOD) + mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, + mlxsw_sp_port->uc_flood); + return err; } static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time) @@ -635,6 +676,27 @@ static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port, return 0; } +static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid_begin, u16 vid_end, + bool learn_enable) +{ + u16 vid, vid_e; + int err; + + for (vid = vid_begin; vid <= vid_end; + vid += MLXSW_REG_SPVMLR_REC_MAX_COUNT) { + vid_e = min((u16) (vid + MLXSW_REG_SPVMLR_REC_MAX_COUNT - 1), + vid_end); + + err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, + vid_e, learn_enable); + if (err) + return err; + } + + return 0; +} + static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, u16 vid_end, bool flag_untagged, bool flag_pvid) @@ -675,6 +737,14 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, } } + err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end, + mlxsw_sp_port->learning); + if (err) { + netdev_err(dev, "Failed to set learning for VIDs %d-%d\n", + vid_begin, vid_end); + goto err_port_vid_learning_set; + } + /* Changing activity bits only if HW operation succeded */ for (vid = vid_begin; vid <= vid_end; vid++) { set_bit(vid, mlxsw_sp_port->active_vlans); @@ -697,6 +767,9 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, err_port_stp_state_set: for (vid = vid_begin; vid <= vid_end; vid++) clear_bit(vid, mlxsw_sp_port->active_vlans); + mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end, + false); +err_port_vid_learning_set: if (old_pvid != mlxsw_sp_port->pvid) mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid); err_port_pvid_set: @@ -1001,29 +1074,20 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev, static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, u16 vid_end) { - struct net_device *dev = mlxsw_sp_port->dev; u16 vid, pvid; - int err; if (!mlxsw_sp_port->bridged) return -EINVAL; - err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, - false, false); - if (err) { - netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin, - vid_end); - return err; - } + mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end, + false); pvid = mlxsw_sp_port->pvid; - if (pvid >= 
vid_begin && pvid <= vid_end) { - err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0); - if (err) { - netdev_err(dev, "Unable to del PVID %d\n", pvid); - return err; - } - } + if (pvid >= vid_begin && pvid <= vid_end) + mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0); + + __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false, + false); mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end); @@ -1366,8 +1430,6 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp, vid = fid; } - adding = adding && mlxsw_sp_port->learning; - do_fdb_op: err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding, true); @@ -1429,8 +1491,6 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp, vid = fid; } - adding = adding && mlxsw_sp_port->learning; - do_fdb_op: err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid, adding, true); @@ -1496,20 +1556,18 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work) mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work); rtnl_lock(); - do { - mlxsw_reg_sfn_pack(sfn_pl); - err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl); - if (err) { - dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n"); - break; - } - num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl); - for (i = 0; i < num_rec; i++) - mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i); + mlxsw_reg_sfn_pack(sfn_pl); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl); + if (err) { + dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n"); + goto out; + } + num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl); + for (i = 0; i < num_rec; i++) + mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i); - } while (num_rec); +out: rtnl_unlock(); - kfree(sfn_pl); mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); } diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c index 87b7b814778b..712d8bcb7d8c 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -751,7 +751,7 @@ static void netdev_rx(struct net_device *dev) dev_err(&pdev->dev, "rx crc err\n"); ether->stats.rx_crc_errors++; } else if (status & RXDS_ALIE) { - dev_err(&pdev->dev, "rx aligment err\n"); + dev_err(&pdev->dev, "rx alignment err\n"); ether->stats.rx_frame_errors++; } else if (status & RXDS_PTLE) { dev_err(&pdev->dev, "rx longer err\n"); diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 45ab74676573..2d67469eb8f6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -26,7 +26,7 @@ #include "qed_hsi.h" extern const struct qed_common_ops qed_common_ops_pass; -#define DRV_MODULE_VERSION "8.7.1.20" +#define DRV_MODULE_VERSION "8.10.9.20" #define MAX_HWFNS_PER_DEVICE (4) #define NAME_SIZE 16 @@ -42,6 +42,8 @@ enum qed_coalescing_mode { struct qed_eth_cb_ops; struct qed_dev_info; +union qed_mcp_protocol_stats; +enum qed_mcp_protocol_type; /* helpers */ static inline u32 qed_db_addr(u32 cid, u32 DEMS) @@ -606,7 +608,9 @@ void qed_link_update(struct qed_hwfn *hwfn); u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len, u8 *input_buf, u32 max_size, u8 *unzip_buf); - +void qed_get_protocol_stats(struct qed_dev *cdev, + enum qed_mcp_protocol_type type, + union qed_mcp_protocol_stats *stats); int qed_slowpath_irq_req(struct qed_hwfn *hwfn); #endif /* _QED_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c 
b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 1c35f376143e..547692759d06 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -377,9 +377,8 @@ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn, } } -u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn, - enum protocol_type type, - u32 *vf_cid) +u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn, + enum protocol_type type, u32 *vf_cid) { if (vf_cid) *vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf; @@ -405,10 +404,10 @@ u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn, return cnt; } -static void -qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn, - enum protocol_type proto, - u8 seg, u8 seg_type, u32 count, bool has_fl) +static void qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn, + enum protocol_type proto, + u8 seg, + u8 seg_type, u32 count, bool has_fl) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg]; @@ -420,8 +419,7 @@ qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn, static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli, struct qed_ilt_cli_blk *p_blk, - u32 start_line, u32 total_size, - u32 elem_size) + u32 start_line, u32 total_size, u32 elem_size) { u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val); @@ -448,8 +446,7 @@ static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn, p_cli->first.val = *p_line; p_cli->active = true; - *p_line += DIV_ROUND_UP(p_blk->total_size, - p_blk->real_size_in_page); + *p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page); p_cli->last.val = *p_line - 1; DP_VERBOSE(p_hwfn, QED_MSG_ILT, @@ -926,12 +923,9 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn, void *p_virt; u32 size; - size = min_t(u32, sz_left, - p_blk->real_size_in_page); + size = min_t(u32, sz_left, p_blk->real_size_in_page); p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, - size, - &p_phys, - GFP_KERNEL); + size, &p_phys, GFP_KERNEL); if (!p_virt) return -ENOMEM; memset(p_virt, 0, size); @@ -976,7 +970,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn) for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) { p_blk = &clients[i].pf_blks[j]; rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0); - if (rc != 0) + if (rc) goto ilt_shadow_fail; } for (k = 0; k < p_mngr->vf_count; k++) { @@ -985,7 +979,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn) p_blk = &clients[i].vf_blks[j]; rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines); - if (rc != 0) + if (rc) goto ilt_shadow_fail; } } @@ -1672,7 +1666,7 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn) p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i); STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word); - active_seg_mask |= (tm_iids.pf_tids[i] ? (1 << i) : 0); + active_seg_mask |= (tm_iids.pf_tids[i] ? 
BIT(i) : 0); tm_offset += tm_iids.pf_tids[i]; } @@ -1702,8 +1696,7 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn) } int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, - enum protocol_type type, - u32 *p_cid) + enum protocol_type type, u32 *p_cid) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; u32 rel_cid; @@ -1717,8 +1710,7 @@ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, p_mngr->acquired[type].max_count); if (rel_cid >= p_mngr->acquired[type].max_count) { - DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", - type); + DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type); return -EINVAL; } @@ -1730,8 +1722,7 @@ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, } static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn, - u32 cid, - enum protocol_type *p_type) + u32 cid, enum protocol_type *p_type) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; struct qed_cid_acquired_map *p_map; @@ -1763,8 +1754,7 @@ static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn, return true; } -void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, - u32 cid) +void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; enum protocol_type type; @@ -1781,8 +1771,7 @@ void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, __clear_bit(rel_cid, p_mngr->acquired[type].cid_map); } -int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, - struct qed_cxt_info *p_info) +int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; u32 conn_cxt_size, hw_p_size, cxts_per_p, line; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index 226cb08cc055..b900dfbb57ff 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -1968,6 +1968,7 @@ static int qed_dcbnl_get_ieee_pfc(struct qed_dev *cdev, if (!dcbx_info->operational.ieee) { DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n"); + kfree(dcbx_info); return -EINVAL; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 0e4f4a9306b5..5ae27f2d2fa5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -35,8 +35,7 @@ #include "qed_sriov.h" #include "qed_vf.h" -static spinlock_t qm_lock; -static bool qm_lock_init = false; +static DEFINE_SPINLOCK(qm_lock); /* API common to all protocols */ enum BAR_ID { @@ -44,8 +43,7 @@ enum BAR_ID { BAR_ID_1 /* Used for doorbells */ }; -static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, - enum BAR_ID bar_id) +static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id) { u32 bar_reg = (bar_id == BAR_ID_0 ? 
PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE); @@ -70,8 +68,7 @@ static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, } } -void qed_init_dp(struct qed_dev *cdev, - u32 dp_module, u8 dp_level) +void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level) { u32 i; @@ -543,8 +540,7 @@ int qed_resc_alloc(struct qed_dev *cdev) cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL); if (!cdev->reset_stats) { DP_NOTICE(cdev, "Failed to allocate reset statistics\n"); - rc = -ENOMEM; - goto alloc_err; + goto alloc_no_mem; } return 0; @@ -605,9 +601,8 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn, /* Make sure notification is not set before initiating final cleanup */ if (REG_RD(p_hwfn, addr)) { - DP_NOTICE( - p_hwfn, - "Unexpected; Found final cleanup notification before initiating final cleanup\n"); + DP_NOTICE(p_hwfn, + "Unexpected; Found final cleanup notification before initiating final cleanup\n"); REG_WR(p_hwfn, addr, 0); } @@ -701,17 +696,14 @@ static void qed_init_cau_rt_data(struct qed_dev *cdev) continue; qed_init_cau_sb_entry(p_hwfn, &sb_entry, - p_block->function_id, - 0, 0); - STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, - sb_entry); + p_block->function_id, 0, 0); + STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, sb_entry); } } } static int qed_hw_init_common(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - int hw_mode) + struct qed_ptt *p_ptt, int hw_mode) { struct qed_qm_info *qm_info = &p_hwfn->qm_info; struct qed_qm_common_rt_init_params params; @@ -759,7 +751,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, qed_port_unpretend(p_hwfn, p_ptt); rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode); - if (rc != 0) + if (rc) return rc; qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0); @@ -780,6 +772,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id); qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid); qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1); + qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0); + qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1); + qed_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0); } /* pretend to original PF */ qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); @@ -788,37 +783,10 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, } static int qed_hw_init_port(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - int hw_mode) + struct qed_ptt *p_ptt, int hw_mode) { - int rc = 0; - - rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode); - if (rc != 0) - return rc; - - if (hw_mode & (1 << MODE_MF_SI)) { - u8 pf_id = 0; - - if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) { - DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, - "PF[%08x] is first eth on engine\n", pf_id); - - /* We should have configured BIT for ppfid, i.e., the - * relative function number in the port. But there's a - * bug in LLH in BB where the ppfid is actually engine - * based, so we need to take this into account. - */ - qed_wr(p_hwfn, p_ptt, - NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 1 << pf_id); - } - - /* Take the protocol-based hit vector if there is a hit, - * otherwise take the other vector. 
- */ - qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_CLS_TYPE_DUALMODE, 0x2); - } - return rc; + return qed_init_run(p_hwfn, p_ptt, PHASE_PORT, + p_hwfn->port_id, hw_mode); } static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, @@ -848,7 +816,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, qed_int_igu_init_rt(p_hwfn); /* Set VLAN in NIG if needed */ - if (hw_mode & (1 << MODE_MF_SD)) { + if (hw_mode & BIT(MODE_MF_SD)) { DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n"); STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1); STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET, @@ -856,7 +824,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, } /* Enable classification by MAC if needed */ - if (hw_mode & (1 << MODE_MF_SI)) { + if (hw_mode & BIT(MODE_MF_SI)) { DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring TAGMAC_CLS_TYPE\n"); STORE_RT_REG(p_hwfn, @@ -871,7 +839,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, /* Cleanup chip from previous driver if such remains exist */ rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false); - if (rc != 0) + if (rc) return rc; /* PF Init sequence */ @@ -887,21 +855,6 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, /* Pure runtime initializations - directly to the HW */ qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true); - if (hw_mode & (1 << MODE_MF_SI)) { - u8 pf_id = 0; - u32 val = 0; - - if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) { - if (p_hwfn->rel_pf_id == pf_id) { - DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, - "PF[%d] is first ETH on engine\n", - pf_id); - val = 1; - } - qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, val); - } - } - if (b_hw_start) { /* enable interrupts */ qed_int_igu_enable(p_hwfn, p_ptt, int_mode); @@ -950,8 +903,7 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn, /* Read shadow of current MFW mailbox */ qed_mcp_read_mb(p_hwfn, p_main_ptt); memcpy(p_hwfn->mcp_info->mfw_mb_shadow, - p_hwfn->mcp_info->mfw_mb_cur, - p_hwfn->mcp_info->mfw_mb_length); + p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length); } int qed_hw_init(struct qed_dev *cdev, @@ -971,7 +923,7 @@ int qed_hw_init(struct qed_dev *cdev, if (IS_PF(cdev)) { rc = qed_init_fw_data(cdev, bin_fw_data); - if (rc != 0) + if (rc) return rc; } @@ -988,8 +940,7 @@ int qed_hw_init(struct qed_dev *cdev, qed_calc_hw_mode(p_hwfn); - rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, - &load_code); + rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code); if (rc) { DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n"); return rc; @@ -1004,11 +955,6 @@ int qed_hw_init(struct qed_dev *cdev, p_hwfn->first_on_engine = (load_code == FW_MSG_CODE_DRV_LOAD_ENGINE); - if (!qm_lock_init) { - spin_lock_init(&qm_lock); - qm_lock_init = true; - } - switch (load_code) { case FW_MSG_CODE_DRV_LOAD_ENGINE: rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt, @@ -1071,9 +1017,8 @@ int qed_hw_init(struct qed_dev *cdev, } #define QED_HW_STOP_RETRY_LIMIT (10) -static inline void qed_hw_timers_stop(struct qed_dev *cdev, - struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +static void qed_hw_timers_stop(struct qed_dev *cdev, + struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { int i; @@ -1084,8 +1029,7 @@ static inline void qed_hw_timers_stop(struct qed_dev *cdev, for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) { if ((!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN)) && - (!qed_rd(p_hwfn, p_ptt, - TM_REG_PF_SCAN_ACTIVE_TASK))) + (!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK))) break; /* Dependent on number of connection/tasks, possibly @@ -1190,8 
+1134,7 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev) } DP_VERBOSE(p_hwfn, - NETIF_MSG_IFDOWN, - "Shutting down the fastpath\n"); + NETIF_MSG_IFDOWN, "Shutting down the fastpath\n"); qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); @@ -1219,14 +1162,13 @@ void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn) NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0); } -static int qed_reg_assert(struct qed_hwfn *hwfn, - struct qed_ptt *ptt, u32 reg, - bool expected) +static int qed_reg_assert(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 reg, bool expected) { - u32 assert_val = qed_rd(hwfn, ptt, reg); + u32 assert_val = qed_rd(p_hwfn, p_ptt, reg); if (assert_val != expected) { - DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n", + DP_NOTICE(p_hwfn, "Value at address 0x%08x != 0x%08x\n", reg, expected); return -EINVAL; } @@ -1306,8 +1248,7 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn) /* Clean Previous errors if such exist */ qed_wr(p_hwfn, p_hwfn->p_main_ptt, - PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, - 1 << p_hwfn->abs_pf_id); + PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id); /* enable internal target-read */ qed_wr(p_hwfn, p_hwfn->p_main_ptt, @@ -1317,7 +1258,8 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn) static void get_function_id(struct qed_hwfn *p_hwfn) { /* ME Register */ - p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR); + p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn, + PXP_PF_ME_OPAQUE_ADDR); p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR); @@ -1326,6 +1268,10 @@ static void get_function_id(struct qed_hwfn *p_hwfn) PXP_CONCRETE_FID_PFID); p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, PXP_CONCRETE_FID_PORT); + + DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, + "Read ME register: Concrete 0x%08x Opaque 0x%04x\n", + p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid); } static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) @@ -1417,8 +1363,7 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn) return 0; } -static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg; u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities; @@ -1472,8 +1417,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G; break; default: - DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", - core_cfg); + DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg); break; } @@ -1484,11 +1428,11 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, link_temp = qed_rd(p_hwfn, p_ptt, port_cfg_addr + offsetof(struct nvm_cfg1_port, speed_cap_mask)); - link->speed.advertised_speeds = - link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK; + link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK; + link->speed.advertised_speeds = link_temp; - p_hwfn->mcp_info->link_capabilities.speed_capabilities = - link->speed.advertised_speeds; + link_temp = link->speed.advertised_speeds; + p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp; link_temp = qed_rd(p_hwfn, p_ptt, port_cfg_addr + @@ -1517,8 +1461,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, link->speed.forced_speed = 100000; break; default: - DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", - link_temp); + DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp); } link_temp &= 
NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK; @@ -1628,10 +1571,10 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, - "PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n", + "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n", p_hwfn->rel_pf_id, p_hwfn->abs_pf_id, - p_hwfn->num_funcs_on_engine); + p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine); } static int @@ -1703,10 +1646,9 @@ static int qed_get_dev_info(struct qed_dev *cdev) u32 tmp; /* Read Vendor Id / Device Id */ - pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, - &cdev->vendor_id); - pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, - &cdev->device_id); + pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id); + pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id); + cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt, MISCS_REG_CHIP_NUM); cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt, @@ -1782,7 +1724,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, /* First hwfn learns basic information, e.g., number of hwfns */ if (!p_hwfn->my_id) { rc = qed_get_dev_info(p_hwfn->cdev); - if (rc != 0) + if (rc) goto err1; } @@ -2183,8 +2125,7 @@ int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id) return 0; } -int qed_fw_vport(struct qed_hwfn *p_hwfn, - u8 src_id, u8 *dst_id) +int qed_fw_vport(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id) { if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) { u8 min, max; @@ -2203,8 +2144,7 @@ int qed_fw_vport(struct qed_hwfn *p_hwfn, return 0; } -int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, - u8 src_id, u8 *dst_id) +int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id) { if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) { u8 min, max; @@ -2386,8 +2326,7 @@ static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn, * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate. 
*/ static int qed_init_wfq_param(struct qed_hwfn *p_hwfn, - u16 vport_id, u32 req_rate, - u32 min_pf_rate) + u16 vport_id, u32 req_rate, u32 min_pf_rate) { u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0; int non_requested_count = 0, req_count = 0, i, num_vports; @@ -2471,7 +2410,7 @@ static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn, rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate); - if (rc == 0) + if (!rc) qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, p_link->min_pf_rate); else diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 6f9d3b831a2a..a67b3554aabd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -536,6 +536,244 @@ struct core_conn_context { struct regpair ustorm_st_padding[2]; }; +enum core_error_handle { + LL2_DROP_PACKET, + LL2_DO_NOTHING, + LL2_ASSERT, + MAX_CORE_ERROR_HANDLE +}; + +enum core_event_opcode { + CORE_EVENT_TX_QUEUE_START, + CORE_EVENT_TX_QUEUE_STOP, + CORE_EVENT_RX_QUEUE_START, + CORE_EVENT_RX_QUEUE_STOP, + MAX_CORE_EVENT_OPCODE +}; + +enum core_l4_pseudo_checksum_mode { + CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH, + CORE_L4_PSEUDO_CSUM_ZERO_LENGTH, + MAX_CORE_L4_PSEUDO_CHECKSUM_MODE +}; + +struct core_ll2_port_stats { + struct regpair gsi_invalid_hdr; + struct regpair gsi_invalid_pkt_length; + struct regpair gsi_unsupported_pkt_typ; + struct regpair gsi_crcchksm_error; +}; + +struct core_ll2_pstorm_per_queue_stat { + struct regpair sent_ucast_bytes; + struct regpair sent_mcast_bytes; + struct regpair sent_bcast_bytes; + struct regpair sent_ucast_pkts; + struct regpair sent_mcast_pkts; + struct regpair sent_bcast_pkts; +}; + +struct core_ll2_rx_prod { + __le16 bd_prod; + __le16 cqe_prod; + __le32 reserved; +}; + +struct core_ll2_tstorm_per_queue_stat { + struct regpair packet_too_big_discard; + struct regpair no_buff_discard; +}; + +struct core_ll2_ustorm_per_queue_stat { + struct regpair rcv_ucast_bytes; + struct regpair rcv_mcast_bytes; + struct regpair rcv_bcast_bytes; + struct regpair rcv_ucast_pkts; + struct regpair rcv_mcast_pkts; + struct regpair rcv_bcast_pkts; +}; + +enum core_ramrod_cmd_id { + CORE_RAMROD_UNUSED, + CORE_RAMROD_RX_QUEUE_START, + CORE_RAMROD_TX_QUEUE_START, + CORE_RAMROD_RX_QUEUE_STOP, + CORE_RAMROD_TX_QUEUE_STOP, + MAX_CORE_RAMROD_CMD_ID +}; + +enum core_roce_flavor_type { + CORE_ROCE, + CORE_RROCE, + MAX_CORE_ROCE_FLAVOR_TYPE +}; + +struct core_rx_action_on_error { + u8 error_type; +#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK 0x3 +#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0 +#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3 +#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2 +#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF +#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4 +}; + +struct core_rx_bd { + struct regpair addr; + __le16 reserved[4]; +}; + +struct core_rx_bd_with_buff_len { + struct regpair addr; + __le16 buff_length; + __le16 reserved[3]; +}; + +union core_rx_bd_union { + struct core_rx_bd rx_bd; + struct core_rx_bd_with_buff_len rx_bd_with_len; +}; + +struct core_rx_cqe_opaque_data { + __le32 data[2]; +}; + +enum core_rx_cqe_type { + CORE_RX_CQE_ILLIGAL_TYPE, + CORE_RX_CQE_TYPE_REGULAR, + CORE_RX_CQE_TYPE_GSI_OFFLOAD, + CORE_RX_CQE_TYPE_SLOW_PATH, + MAX_CORE_RX_CQE_TYPE +}; + +struct core_rx_fast_path_cqe { + u8 type; + u8 placement_offset; + struct parsing_and_err_flags parse_flags; + __le16 packet_length; + __le16 vlan; + struct core_rx_cqe_opaque_data 
opaque_data; + __le32 reserved[4]; +}; + +struct core_rx_gsi_offload_cqe { + u8 type; + u8 data_length_error; + struct parsing_and_err_flags parse_flags; + __le16 data_length; + __le16 vlan; + __le32 src_mac_addrhi; + __le16 src_mac_addrlo; + u8 reserved1[2]; + __le32 gid_dst[4]; +}; + +struct core_rx_slow_path_cqe { + u8 type; + u8 ramrod_cmd_id; + __le16 echo; + __le32 reserved1[7]; +}; + +union core_rx_cqe_union { + struct core_rx_fast_path_cqe rx_cqe_fp; + struct core_rx_gsi_offload_cqe rx_cqe_gsi; + struct core_rx_slow_path_cqe rx_cqe_sp; +}; + +struct core_rx_start_ramrod_data { + struct regpair bd_base; + struct regpair cqe_pbl_addr; + __le16 mtu; + __le16 sb_id; + u8 sb_index; + u8 complete_cqe_flg; + u8 complete_event_flg; + u8 drop_ttl0_flg; + __le16 num_of_pbl_pages; + u8 inner_vlan_removal_en; + u8 queue_id; + u8 main_func_queue; + u8 mf_si_bcast_accept_all; + u8 mf_si_mcast_accept_all; + struct core_rx_action_on_error action_on_error; + u8 gsi_offload_flag; + u8 reserved[7]; +}; + +struct core_rx_stop_ramrod_data { + u8 complete_cqe_flg; + u8 complete_event_flg; + u8 queue_id; + u8 reserved1; + __le16 reserved2[2]; +}; + +struct core_tx_bd_flags { + u8 as_bitfield; +#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1 +#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0 +#define CORE_TX_BD_FLAGS_VLAN_INSERTION_MASK 0x1 +#define CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT 1 +#define CORE_TX_BD_FLAGS_START_BD_MASK 0x1 +#define CORE_TX_BD_FLAGS_START_BD_SHIFT 2 +#define CORE_TX_BD_FLAGS_IP_CSUM_MASK 0x1 +#define CORE_TX_BD_FLAGS_IP_CSUM_SHIFT 3 +#define CORE_TX_BD_FLAGS_L4_CSUM_MASK 0x1 +#define CORE_TX_BD_FLAGS_L4_CSUM_SHIFT 4 +#define CORE_TX_BD_FLAGS_IPV6_EXT_MASK 0x1 +#define CORE_TX_BD_FLAGS_IPV6_EXT_SHIFT 5 +#define CORE_TX_BD_FLAGS_L4_PROTOCOL_MASK 0x1 +#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6 +#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1 +#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7 +}; + +struct core_tx_bd { + struct regpair addr; + __le16 nbytes; + __le16 nw_vlan_or_lb_echo; + u8 bitfield0; +#define CORE_TX_BD_NBDS_MASK 0xF +#define CORE_TX_BD_NBDS_SHIFT 0 +#define CORE_TX_BD_ROCE_FLAV_MASK 0x1 +#define CORE_TX_BD_ROCE_FLAV_SHIFT 4 +#define CORE_TX_BD_RESERVED0_MASK 0x7 +#define CORE_TX_BD_RESERVED0_SHIFT 5 + struct core_tx_bd_flags bd_flags; + __le16 bitfield1; +#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF +#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0 +#define CORE_TX_BD_TX_DST_MASK 0x1 +#define CORE_TX_BD_TX_DST_SHIFT 14 +#define CORE_TX_BD_RESERVED1_MASK 0x1 +#define CORE_TX_BD_RESERVED1_SHIFT 15 +}; + +enum core_tx_dest { + CORE_TX_DEST_NW, + CORE_TX_DEST_LB, + MAX_CORE_TX_DEST +}; + +struct core_tx_start_ramrod_data { + struct regpair pbl_base_addr; + __le16 mtu; + __le16 sb_id; + u8 sb_index; + u8 stats_en; + u8 stats_id; + u8 conn_type; + __le16 pbl_size; + __le16 qm_pq_id; + u8 gsi_offload_flag; + u8 resrved[3]; +}; + +struct core_tx_stop_ramrod_data { + __le32 reserved0[2]; +}; + struct eth_mstorm_per_pf_stat { struct regpair gre_discard_pkts; struct regpair vxlan_discard_pkts; @@ -636,9 +874,33 @@ struct hsi_fp_ver_struct { }; /* Mstorm non-triggering VF zone */ +enum malicious_vf_error_id { + MALICIOUS_VF_NO_ERROR, + VF_PF_CHANNEL_NOT_READY, + VF_ZONE_MSG_NOT_VALID, + VF_ZONE_FUNC_NOT_ENABLED, + ETH_PACKET_TOO_SMALL, + ETH_ILLEGAL_VLAN_MODE, + ETH_MTU_VIOLATION, + ETH_ILLEGAL_INBAND_TAGS, + ETH_VLAN_INSERT_AND_INBAND_VLAN, + ETH_ILLEGAL_NBDS, + ETH_FIRST_BD_WO_SOP, + ETH_INSUFFICIENT_BDS, + ETH_ILLEGAL_LSO_HDR_NBDS, + ETH_ILLEGAL_LSO_MSS, + 
ETH_ZERO_SIZE_BD, + ETH_ILLEGAL_LSO_HDR_LEN, + ETH_INSUFFICIENT_PAYLOAD, + ETH_EDPM_OUT_OF_SYNC, + ETH_TUNN_IPV6_EXT_NBD_ERR, + ETH_CONTROL_PACKET_VIOLATION, + MAX_MALICIOUS_VF_ERROR_ID +}; + struct mstorm_non_trigger_vf_zone { struct eth_mstorm_per_queue_stat eth_queue_stat; - struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF]; + struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD]; }; /* Mstorm VF zone */ @@ -705,13 +967,17 @@ struct pf_start_ramrod_data { struct protocol_dcb_data { u8 dcb_enable_flag; + u8 reserved_a; u8 dcb_priority; u8 dcb_tc; - u8 reserved; + u8 reserved_b; + u8 reserved0; }; struct pf_update_tunnel_config { u8 update_rx_pf_clss; + u8 update_rx_def_ucast_clss; + u8 update_rx_def_non_ucast_clss; u8 update_tx_pf_clss; u8 set_vxlan_udp_port_flg; u8 set_geneve_udp_port_flg; @@ -727,7 +993,7 @@ struct pf_update_tunnel_config { u8 tunnel_clss_ipgre; __le16 vxlan_udp_port; __le16 geneve_udp_port; - __le16 reserved[3]; + __le16 reserved[2]; }; struct pf_update_ramrod_data { @@ -736,16 +1002,17 @@ struct pf_update_ramrod_data { u8 update_fcoe_dcb_data_flag; u8 update_iscsi_dcb_data_flag; u8 update_roce_dcb_data_flag; + u8 update_rroce_dcb_data_flag; u8 update_iwarp_dcb_data_flag; u8 update_mf_vlan_flag; - u8 reserved; struct protocol_dcb_data eth_dcb_data; struct protocol_dcb_data fcoe_dcb_data; struct protocol_dcb_data iscsi_dcb_data; struct protocol_dcb_data roce_dcb_data; + struct protocol_dcb_data rroce_dcb_data; struct protocol_dcb_data iwarp_dcb_data; __le16 mf_vlan; - __le16 reserved2; + __le16 reserved; struct pf_update_tunnel_config tunnel_config; }; @@ -766,10 +1033,14 @@ enum protocol_version_array_key { MAX_PROTOCOL_VERSION_ARRAY_KEY }; -/* Pstorm non-triggering VF zone */ +struct rdma_sent_stats { + struct regpair sent_bytes; + struct regpair sent_pkts; +}; + struct pstorm_non_trigger_vf_zone { struct eth_pstorm_per_queue_stat eth_queue_stat; - struct regpair reserved[2]; + struct rdma_sent_stats rdma_stats; }; /* Pstorm VF zone */ @@ -786,7 +1057,11 @@ struct ramrod_header { __le16 echo; }; -/* Slowpath Element (SPQE) */ +struct rdma_rcv_stats { + struct regpair rcv_bytes; + struct regpair rcv_pkts; +}; + struct slow_path_element { struct ramrod_header hdr; struct regpair data_ptr; @@ -794,7 +1069,7 @@ struct slow_path_element { /* Tstorm non-triggering VF zone */ struct tstorm_non_trigger_vf_zone { - struct regpair reserved[2]; + struct rdma_rcv_stats rdma_stats; }; struct tstorm_per_port_stat { @@ -802,9 +1077,14 @@ struct tstorm_per_port_stat { struct regpair mac_error_discard; struct regpair mftag_filter_discard; struct regpair eth_mac_filter_discard; - struct regpair reserved[5]; + struct regpair ll2_mac_filter_discard; + struct regpair ll2_conn_disabled_discard; + struct regpair iscsi_irregular_pkt; + struct regpair reserved; + struct regpair roce_irregular_pkt; struct regpair eth_irregular_pkt; - struct regpair reserved1[2]; + struct regpair reserved1; + struct regpair preroce_irregular_pkt; struct regpair eth_gre_tunn_filter_discard; struct regpair eth_vxlan_tunn_filter_discard; struct regpair eth_geneve_tunn_filter_discard; @@ -870,7 +1150,13 @@ struct vf_stop_ramrod_data { __le32 reserved2; }; -/* Attentions status block */ +enum vf_zone_size_mode { + VF_ZONE_SIZE_MODE_DEFAULT, + VF_ZONE_SIZE_MODE_DOUBLE, + VF_ZONE_SIZE_MODE_QUAD, + MAX_VF_ZONE_SIZE_MODE +}; + struct atten_status_block { __le32 atten_bits; __le32 atten_ack; @@ -1579,6 +1865,7 @@ enum dbg_status { DBG_STATUS_REG_FIFO_BAD_DATA, 
DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA, DBG_STATUS_DBG_ARRAY_NOT_SET, + DBG_STATUS_MULTI_BLOCKS_WITH_FILTER, MAX_DBG_STATUS }; @@ -1589,7 +1876,41 @@ enum dbg_status { /* Number of VLAN priorities */ #define NUM_OF_VLAN_PRIORITIES 8 -/* QM per-port init parameters */ +struct init_brb_ram_req { + __le32 guranteed_per_tc; + __le32 headroom_per_tc; + __le32 min_pkt_size; + __le32 max_ports_per_engine; + u8 num_active_tcs[MAX_NUM_PORTS]; +}; + +struct init_ets_tc_req { + u8 use_sp; + u8 use_wfq; + __le16 weight; +}; + +struct init_ets_req { + __le32 mtu; + struct init_ets_tc_req tc_req[NUM_OF_TCS]; +}; + +struct init_nig_lb_rl_req { + __le16 lb_mac_rate; + __le16 lb_rate; + __le32 mtu; + __le16 tc_rate[NUM_OF_PHYS_TCS]; +}; + +struct init_nig_pri_tc_map_entry { + u8 tc_id; + u8 valid; +}; + +struct init_nig_pri_tc_map_req { + struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES]; +}; + struct init_qm_port_params { u8 active; u8 active_phys_tcs; @@ -1619,7 +1940,7 @@ struct init_qm_vport_params { /* Width of GRC address in bits (addresses are specified in dwords) */ #define GRC_ADDR_BITS 23 -#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1) +#define MAX_GRC_ADDR (BIT(GRC_ADDR_BITS) - 1) /* indicates an init that should be applied to any phase ID */ #define ANY_PHASE_ID 0xffff @@ -1674,11 +1995,11 @@ struct bin_buffer_hdr { /* binary init buffer types */ enum bin_init_buffer_type { - BIN_BUF_FW_VER_INFO, + BIN_BUF_INIT_FW_VER_INFO, BIN_BUF_INIT_CMD, BIN_BUF_INIT_VAL, BIN_BUF_INIT_MODE_TREE, - BIN_BUF_IRO, + BIN_BUF_INIT_IRO, MAX_BIN_INIT_BUFFER_TYPE }; @@ -1918,44 +2239,34 @@ enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn, #define MAX_NAME_LEN 16 /* Win 2 */ -#define GTT_BAR0_MAP_REG_IGU_CMD \ - 0x00f000UL +#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL /* Win 3 */ -#define GTT_BAR0_MAP_REG_TSDM_RAM \ - 0x010000UL +#define GTT_BAR0_MAP_REG_TSDM_RAM 0x010000UL /* Win 4 */ -#define GTT_BAR0_MAP_REG_MSDM_RAM \ - 0x011000UL +#define GTT_BAR0_MAP_REG_MSDM_RAM 0x011000UL /* Win 5 */ -#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 \ - 0x012000UL +#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL /* Win 6 */ -#define GTT_BAR0_MAP_REG_USDM_RAM \ - 0x013000UL +#define GTT_BAR0_MAP_REG_USDM_RAM 0x013000UL /* Win 7 */ -#define GTT_BAR0_MAP_REG_USDM_RAM_1024 \ - 0x014000UL +#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x014000UL /* Win 8 */ -#define GTT_BAR0_MAP_REG_USDM_RAM_2048 \ - 0x015000UL +#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x015000UL /* Win 9 */ -#define GTT_BAR0_MAP_REG_XSDM_RAM \ - 0x016000UL +#define GTT_BAR0_MAP_REG_XSDM_RAM 0x016000UL /* Win 10 */ -#define GTT_BAR0_MAP_REG_YSDM_RAM \ - 0x017000UL +#define GTT_BAR0_MAP_REG_YSDM_RAM 0x017000UL /* Win 11 */ -#define GTT_BAR0_MAP_REG_PSDM_RAM \ - 0x018000UL +#define GTT_BAR0_MAP_REG_PSDM_RAM 0x018000UL /** * @brief qed_qm_pf_mem_size - prepare QM ILT sizes @@ -2003,7 +2314,7 @@ struct qed_qm_pf_rt_init_params { u16 num_vf_pqs; u8 start_vport; u8 num_vports; - u8 pf_wfq; + u16 pf_wfq; u32 pf_rl; struct init_qm_pq_params *pq_params; struct init_qm_vport_params *vport_params; @@ -2138,6 +2449,9 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, #define TSTORM_PORT_STAT_OFFSET(port_id) \ (IRO[1].base + ((port_id) * IRO[1].m1)) #define TSTORM_PORT_STAT_SIZE (IRO[1].size) +#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \ + (IRO[2].base + ((port_id) * IRO[2].m1)) +#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size) #define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \ (IRO[3].base + ((vf_id) * IRO[3].m1)) #define USTORM_VF_PF_CHANNEL_READY_SIZE 
(IRO[3].size) @@ -2153,42 +2467,90 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, #define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \ (IRO[7].base + ((queue_zone_id) * IRO[7].m1)) #define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size) +#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \ + (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1)) +#define TSTORM_LL2_RX_PRODS_SIZE (IRO[14].size) +#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ + (IRO[15].base + ((core_rx_queue_id) * IRO[15].m1)) +#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size) +#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ + (IRO[16].base + ((core_rx_queue_id) * IRO[16].m1)) +#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[16].size) +#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \ + (IRO[17].base + ((core_tx_stats_id) * IRO[17].m1)) +#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[17]. size) #define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ (IRO[18].base + ((stat_counter_id) * IRO[18].m1)) #define MSTORM_QUEUE_STAT_SIZE (IRO[18].size) #define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \ (IRO[19].base + ((queue_id) * IRO[19].m1)) #define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size) -#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[20].base) -#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[20].size) +#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \ + (IRO[20].base + ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2)) +#define MSTORM_ETH_VF_PRODS_SIZE (IRO[20].size) +#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[21].base) +#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size) #define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \ - (IRO[21].base + ((pf_id) * IRO[21].m1)) + (IRO[22].base + ((pf_id) * IRO[22].m1)) #define MSTORM_ETH_PF_STAT_SIZE (IRO[21].size) #define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ - (IRO[22].base + ((stat_counter_id) * IRO[22].m1)) -#define USTORM_QUEUE_STAT_SIZE (IRO[22].size) + (IRO[23].base + ((stat_counter_id) * IRO[23].m1)) +#define USTORM_QUEUE_STAT_SIZE (IRO[23].size) #define USTORM_ETH_PF_STAT_OFFSET(pf_id) \ - (IRO[23].base + ((pf_id) * IRO[23].m1)) -#define USTORM_ETH_PF_STAT_SIZE (IRO[23].size) + (IRO[24].base + ((pf_id) * IRO[24].m1)) +#define USTORM_ETH_PF_STAT_SIZE (IRO[24].size) #define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ - (IRO[24].base + ((stat_counter_id) * IRO[24].m1)) -#define PSTORM_QUEUE_STAT_SIZE (IRO[24].size) + (IRO[25].base + ((stat_counter_id) * IRO[25].m1)) +#define PSTORM_QUEUE_STAT_SIZE (IRO[25].size) #define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \ - (IRO[25].base + ((pf_id) * IRO[25].m1)) -#define PSTORM_ETH_PF_STAT_SIZE (IRO[25].size) + (IRO[26].base + ((pf_id) * IRO[26].m1)) +#define PSTORM_ETH_PF_STAT_SIZE (IRO[26].size) #define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethtype) \ - (IRO[26].base + ((ethtype) * IRO[26].m1)) -#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[26].size) -#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[27].base) -#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[27].size) + (IRO[27].base + ((ethtype) * IRO[27].m1)) +#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[27].size) +#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[28].base) +#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[28].size) #define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \ - (IRO[28].base + ((pf_id) * IRO[28].m1)) -#define ETH_RX_RATE_LIMIT_SIZE (IRO[28].size) + (IRO[29].base + ((pf_id) * IRO[29].m1)) +#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size) #define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ - (IRO[29].base + ((queue_id) * IRO[29].m1)) -#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[29].size) 
- -static const struct iro iro_arr[46] = { + (IRO[30].base + ((queue_id) * IRO[30].m1)) +#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size) +#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \ + (IRO[34].base + ((cmdq_queue_id) * IRO[34].m1)) +#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size) +#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ + (IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2)) +#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size) +#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ + (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2)) +#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size) +#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ + (IRO[37].base + ((pf_id) * IRO[37].m1)) +#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size) +#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ + (IRO[38].base + ((pf_id) * IRO[38].m1)) +#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size) +#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ + (IRO[39].base + ((pf_id) * IRO[39].m1)) +#define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size) +#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ + (IRO[40].base + ((pf_id) * IRO[40].m1)) +#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size) +#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ + (IRO[41].base + ((pf_id) * IRO[41].m1)) +#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size) +#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ + (IRO[42].base + ((pf_id) * IRO[42].m1)) +#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size) +#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ + (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1)) +#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size) +#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ + (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1)) +#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size) + +static const struct iro iro_arr[47] = { {0x0, 0x0, 0x0, 0x0, 0x8}, {0x4cb0, 0x78, 0x0, 0x0, 0x78}, {0x6318, 0x20, 0x0, 0x0, 0x20}, @@ -2201,20 +2563,21 @@ static const struct iro iro_arr[46] = { {0x3df0, 0x0, 0x0, 0x0, 0x78}, {0x29b0, 0x0, 0x0, 0x0, 0x78}, {0x4c38, 0x0, 0x0, 0x0, 0x78}, - {0x4a48, 0x0, 0x0, 0x0, 0x78}, + {0x4990, 0x0, 0x0, 0x0, 0x78}, {0x7e48, 0x0, 0x0, 0x0, 0x78}, {0xa28, 0x8, 0x0, 0x0, 0x8}, {0x60f8, 0x10, 0x0, 0x0, 0x10}, {0xb820, 0x30, 0x0, 0x0, 0x30}, {0x95b8, 0x30, 0x0, 0x0, 0x30}, - {0x4c18, 0x80, 0x0, 0x0, 0x40}, + {0x4b60, 0x80, 0x0, 0x0, 0x40}, {0x1f8, 0x4, 0x0, 0x0, 0x4}, - {0xc9a8, 0x0, 0x0, 0x0, 0x4}, - {0x4c58, 0x80, 0x0, 0x0, 0x20}, + {0x53a0, 0x80, 0x4, 0x0, 0x4}, + {0xc8f0, 0x0, 0x0, 0x0, 0x4}, + {0x4ba0, 0x80, 0x0, 0x0, 0x20}, {0x8050, 0x40, 0x0, 0x0, 0x30}, {0xe770, 0x60, 0x0, 0x0, 0x60}, {0x2b48, 0x80, 0x0, 0x0, 0x38}, - {0xdf88, 0x78, 0x0, 0x0, 0x78}, + {0xf188, 0x78, 0x0, 0x0, 0x78}, {0x1f8, 0x4, 0x0, 0x0, 0x4}, {0xacf0, 0x0, 0x0, 0x0, 0xf0}, {0xade0, 0x8, 0x0, 0x0, 0x8}, @@ -2226,455 +2589,457 @@ static const struct iro iro_arr[46] = { {0x200, 0x10, 0x8, 0x0, 0x8}, {0xb78, 0x10, 0x8, 0x0, 0x2}, {0xd888, 0x38, 0x0, 0x0, 0x24}, - {0x12120, 0x10, 0x0, 0x0, 0x8}, - {0x11b20, 0x38, 0x0, 0x0, 0x18}, + {0x12c38, 0x10, 0x0, 0x0, 0x8}, + {0x11aa0, 0x38, 0x0, 0x0, 0x18}, {0xa8c0, 0x30, 0x0, 0x0, 0x10}, {0x86f8, 0x28, 0x0, 0x0, 0x18}, - {0xeff8, 0x10, 0x0, 0x0, 0x10}, + {0x101f8, 0x10, 0x0, 0x0, 0x10}, {0xdd08, 0x48, 0x0, 0x0, 0x38}, - {0xf460, 0x20, 0x0, 0x0, 0x20}, + {0x10660, 0x20, 0x0, 0x0, 0x20}, {0x2b80, 0x80, 0x0, 0x0, 0x10}, {0x5000, 0x10, 0x0, 0x0, 0x10}, }; /* Runtime array offsets */ -#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0 -#define 
DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1 -#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2 -#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3 -#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4 -#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5 -#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6 -#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7 -#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8 -#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9 -#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10 -#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11 -#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12 -#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13 -#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14 -#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15 -#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16 -#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17 -#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18 -#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19 -#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20 -#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21 -#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22 -#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23 -#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24 -#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761 -#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 -#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761 -#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 -#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497 -#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736 -#define CAU_REG_PI_MEMORY_RT_OFFSET 2233 -#define CAU_REG_PI_MEMORY_RT_SIZE 4416 -#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649 -#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650 -#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651 -#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652 -#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653 -#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654 -#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655 -#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656 -#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657 -#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658 -#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659 -#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660 -#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661 -#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662 -#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663 -#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664 -#define SRC_REG_FIRSTFREE_RT_OFFSET 6665 -#define SRC_REG_FIRSTFREE_RT_SIZE 2 -#define SRC_REG_LASTFREE_RT_OFFSET 6667 -#define SRC_REG_LASTFREE_RT_SIZE 2 -#define SRC_REG_COUNTFREE_RT_OFFSET 6669 -#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670 -#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671 -#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672 -#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673 -#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674 -#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675 -#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676 -#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677 -#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678 -#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679 -#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680 -#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681 -#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682 -#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683 -#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684 -#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685 -#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686 -#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687 -#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688 -#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689 -#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690 -#define 
PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691 -#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692 -#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693 -#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694 -#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695 -#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696 -#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697 -#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698 -#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699 -#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700 -#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701 -#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702 -#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703 -#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704 -#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000 -#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704 -#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705 -#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706 -#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707 -#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708 -#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709 -#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710 -#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711 -#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712 -#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713 -#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714 -#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416 -#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130 -#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512 -#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29642 -#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29643 -#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29644 -#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29645 -#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29646 -#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29647 -#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29648 -#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29649 -#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29650 -#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29651 -#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29652 -#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29653 -#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29654 -#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29655 -#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29656 -#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29657 -#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29658 -#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29659 -#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29660 -#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29661 -#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29662 -#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29663 -#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29664 -#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29665 -#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29666 -#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29667 -#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29668 -#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29669 -#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29670 -#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29671 -#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29672 -#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29673 -#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29674 -#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29675 -#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29676 -#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29677 -#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29678 -#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29679 -#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29680 -#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29681 -#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29682 -#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29683 -#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29684 -#define 
QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29685 -#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29686 -#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29687 -#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29688 -#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29689 -#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29690 -#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29691 -#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29692 -#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29693 -#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29694 -#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29695 -#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29696 -#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29697 -#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29698 -#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29699 -#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29700 -#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29701 -#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29702 -#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29703 -#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29704 -#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29705 -#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29706 -#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29707 -#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29708 -#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29709 -#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128 -#define QM_REG_VOQCRDLINE_RT_OFFSET 29837 -#define QM_REG_VOQCRDLINE_RT_SIZE 20 -#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29857 -#define QM_REG_VOQINITCRDLINE_RT_SIZE 20 -#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29877 -#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29878 -#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29879 -#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29880 -#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29881 -#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29882 -#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29883 -#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29884 -#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29885 -#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29886 -#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29887 -#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29888 -#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29889 -#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29890 -#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29891 -#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29892 -#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29893 -#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29894 -#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29895 -#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29896 -#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29897 -#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29898 -#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29899 -#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29900 -#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29901 -#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29902 -#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29903 -#define QM_REG_PQTX2PF_0_RT_OFFSET 29904 -#define QM_REG_PQTX2PF_1_RT_OFFSET 29905 -#define QM_REG_PQTX2PF_2_RT_OFFSET 29906 -#define QM_REG_PQTX2PF_3_RT_OFFSET 29907 -#define QM_REG_PQTX2PF_4_RT_OFFSET 29908 -#define QM_REG_PQTX2PF_5_RT_OFFSET 29909 -#define QM_REG_PQTX2PF_6_RT_OFFSET 29910 -#define QM_REG_PQTX2PF_7_RT_OFFSET 29911 -#define QM_REG_PQTX2PF_8_RT_OFFSET 29912 -#define QM_REG_PQTX2PF_9_RT_OFFSET 29913 -#define QM_REG_PQTX2PF_10_RT_OFFSET 29914 -#define QM_REG_PQTX2PF_11_RT_OFFSET 29915 -#define QM_REG_PQTX2PF_12_RT_OFFSET 29916 -#define QM_REG_PQTX2PF_13_RT_OFFSET 29917 -#define QM_REG_PQTX2PF_14_RT_OFFSET 29918 -#define QM_REG_PQTX2PF_15_RT_OFFSET 29919 -#define QM_REG_PQTX2PF_16_RT_OFFSET 29920 -#define QM_REG_PQTX2PF_17_RT_OFFSET 29921 
-#define QM_REG_PQTX2PF_18_RT_OFFSET 29922 -#define QM_REG_PQTX2PF_19_RT_OFFSET 29923 -#define QM_REG_PQTX2PF_20_RT_OFFSET 29924 -#define QM_REG_PQTX2PF_21_RT_OFFSET 29925 -#define QM_REG_PQTX2PF_22_RT_OFFSET 29926 -#define QM_REG_PQTX2PF_23_RT_OFFSET 29927 -#define QM_REG_PQTX2PF_24_RT_OFFSET 29928 -#define QM_REG_PQTX2PF_25_RT_OFFSET 29929 -#define QM_REG_PQTX2PF_26_RT_OFFSET 29930 -#define QM_REG_PQTX2PF_27_RT_OFFSET 29931 -#define QM_REG_PQTX2PF_28_RT_OFFSET 29932 -#define QM_REG_PQTX2PF_29_RT_OFFSET 29933 -#define QM_REG_PQTX2PF_30_RT_OFFSET 29934 -#define QM_REG_PQTX2PF_31_RT_OFFSET 29935 -#define QM_REG_PQTX2PF_32_RT_OFFSET 29936 -#define QM_REG_PQTX2PF_33_RT_OFFSET 29937 -#define QM_REG_PQTX2PF_34_RT_OFFSET 29938 -#define QM_REG_PQTX2PF_35_RT_OFFSET 29939 -#define QM_REG_PQTX2PF_36_RT_OFFSET 29940 -#define QM_REG_PQTX2PF_37_RT_OFFSET 29941 -#define QM_REG_PQTX2PF_38_RT_OFFSET 29942 -#define QM_REG_PQTX2PF_39_RT_OFFSET 29943 -#define QM_REG_PQTX2PF_40_RT_OFFSET 29944 -#define QM_REG_PQTX2PF_41_RT_OFFSET 29945 -#define QM_REG_PQTX2PF_42_RT_OFFSET 29946 -#define QM_REG_PQTX2PF_43_RT_OFFSET 29947 -#define QM_REG_PQTX2PF_44_RT_OFFSET 29948 -#define QM_REG_PQTX2PF_45_RT_OFFSET 29949 -#define QM_REG_PQTX2PF_46_RT_OFFSET 29950 -#define QM_REG_PQTX2PF_47_RT_OFFSET 29951 -#define QM_REG_PQTX2PF_48_RT_OFFSET 29952 -#define QM_REG_PQTX2PF_49_RT_OFFSET 29953 -#define QM_REG_PQTX2PF_50_RT_OFFSET 29954 -#define QM_REG_PQTX2PF_51_RT_OFFSET 29955 -#define QM_REG_PQTX2PF_52_RT_OFFSET 29956 -#define QM_REG_PQTX2PF_53_RT_OFFSET 29957 -#define QM_REG_PQTX2PF_54_RT_OFFSET 29958 -#define QM_REG_PQTX2PF_55_RT_OFFSET 29959 -#define QM_REG_PQTX2PF_56_RT_OFFSET 29960 -#define QM_REG_PQTX2PF_57_RT_OFFSET 29961 -#define QM_REG_PQTX2PF_58_RT_OFFSET 29962 -#define QM_REG_PQTX2PF_59_RT_OFFSET 29963 -#define QM_REG_PQTX2PF_60_RT_OFFSET 29964 -#define QM_REG_PQTX2PF_61_RT_OFFSET 29965 -#define QM_REG_PQTX2PF_62_RT_OFFSET 29966 -#define QM_REG_PQTX2PF_63_RT_OFFSET 29967 -#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29968 -#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29969 -#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29970 -#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29971 -#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29972 -#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29973 -#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29974 -#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29975 -#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29976 -#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29977 -#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29978 -#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29979 -#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29980 -#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29981 -#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29982 -#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29983 -#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29984 -#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29985 -#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29986 -#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29987 -#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29988 -#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29989 -#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29990 -#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29991 -#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29992 -#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29993 -#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29994 -#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29995 -#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29996 -#define QM_REG_RLGLBLINCVAL_RT_SIZE 256 -#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30252 -#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256 -#define QM_REG_RLGLBLCRD_RT_OFFSET 30508 -#define 
QM_REG_RLGLBLCRD_RT_SIZE 256 -#define QM_REG_RLGLBLENABLE_RT_OFFSET 30764 -#define QM_REG_RLPFPERIOD_RT_OFFSET 30765 -#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30766 -#define QM_REG_RLPFINCVAL_RT_OFFSET 30767 -#define QM_REG_RLPFINCVAL_RT_SIZE 16 -#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30783 -#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16 -#define QM_REG_RLPFCRD_RT_OFFSET 30799 -#define QM_REG_RLPFCRD_RT_SIZE 16 -#define QM_REG_RLPFENABLE_RT_OFFSET 30815 -#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30816 -#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30817 -#define QM_REG_WFQPFWEIGHT_RT_SIZE 16 -#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30833 -#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16 -#define QM_REG_WFQPFCRD_RT_OFFSET 30849 -#define QM_REG_WFQPFCRD_RT_SIZE 160 -#define QM_REG_WFQPFENABLE_RT_OFFSET 31009 -#define QM_REG_WFQVPENABLE_RT_OFFSET 31010 -#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31011 -#define QM_REG_BASEADDRTXPQ_RT_SIZE 512 -#define QM_REG_TXPQMAP_RT_OFFSET 31523 -#define QM_REG_TXPQMAP_RT_SIZE 512 -#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32035 -#define QM_REG_WFQVPWEIGHT_RT_SIZE 512 -#define QM_REG_WFQVPCRD_RT_OFFSET 32547 -#define QM_REG_WFQVPCRD_RT_SIZE 512 -#define QM_REG_WFQVPMAP_RT_OFFSET 33059 -#define QM_REG_WFQVPMAP_RT_SIZE 512 -#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33571 -#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160 -#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33731 -#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33732 -#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33733 -#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33734 -#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33735 -#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33736 -#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33737 -#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33738 -#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33742 -#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33746 -#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33750 -#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33751 -#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32 -#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33783 -#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33799 -#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33815 -#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33831 -#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 -#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33847 -#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 33848 -#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33849 -#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33850 -#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33851 -#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33852 -#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33853 -#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33854 -#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33855 -#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33856 -#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33857 -#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33858 -#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33859 -#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33860 -#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33861 -#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33862 -#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33863 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33864 -#define 
PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33865 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33866 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33867 -#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33868 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33869 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33870 -#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33871 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33872 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33873 -#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33874 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33875 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33876 -#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33877 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33878 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33879 -#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33880 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33881 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33882 -#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33883 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33884 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33885 -#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33886 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33887 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33888 -#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33889 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33890 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33891 -#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33892 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33893 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33894 -#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33895 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33896 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33897 -#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33898 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33899 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33900 -#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33901 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33902 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33903 -#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33904 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33905 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33906 -#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33907 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33908 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33909 -#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33910 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33911 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33912 -#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33913 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33914 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33915 -#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33916 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33917 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33918 -#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33919 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33920 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33921 -#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33922 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33923 -#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33924 - -#define RUNTIME_ARRAY_SIZE 33925 +#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0 +#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1 +#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 
2 +#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3 +#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4 +#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5 +#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6 +#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7 +#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8 +#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9 +#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10 +#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11 +#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12 +#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13 +#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14 +#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15 +#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16 +#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17 +#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18 +#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19 +#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20 +#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21 +#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22 +#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23 +#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24 +#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761 +#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 +#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761 +#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 +#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497 +#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736 +#define CAU_REG_PI_MEMORY_RT_OFFSET 2233 +#define CAU_REG_PI_MEMORY_RT_SIZE 4416 +#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649 +#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650 +#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651 +#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652 +#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653 +#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654 +#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655 +#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656 +#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657 +#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658 +#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659 +#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660 +#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661 +#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662 +#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663 +#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664 +#define SRC_REG_FIRSTFREE_RT_OFFSET 6665 +#define SRC_REG_FIRSTFREE_RT_SIZE 2 +#define SRC_REG_LASTFREE_RT_OFFSET 6667 +#define SRC_REG_LASTFREE_RT_SIZE 2 +#define SRC_REG_COUNTFREE_RT_OFFSET 6669 +#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670 +#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671 +#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672 +#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673 +#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674 +#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675 +#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676 +#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677 +#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678 +#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679 +#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680 +#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681 +#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682 +#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683 +#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684 +#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685 +#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686 +#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687 +#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688 +#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689 +#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690 +#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691 +#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692 +#define 
PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693 +#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694 +#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695 +#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696 +#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697 +#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698 +#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699 +#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700 +#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701 +#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702 +#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703 +#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704 +#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000 +#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704 +#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28705 +#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28706 +#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28707 +#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28708 +#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28709 +#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28710 +#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28711 +#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28712 +#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28713 +#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28714 +#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28715 +#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28716 +#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416 +#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29132 +#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512 +#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29644 +#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29645 +#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29646 +#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29647 +#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29648 +#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29649 +#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29650 +#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29651 +#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29652 +#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29653 +#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29654 +#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29655 +#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29656 +#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29657 +#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29658 +#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29659 +#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29660 +#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29661 +#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29662 +#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29663 +#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29664 +#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29665 +#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29666 +#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29667 +#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29668 +#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29669 +#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29670 +#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29671 +#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29672 +#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29673 +#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29674 +#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29675 +#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29676 +#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29677 +#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29678 +#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29679 +#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29680 +#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29681 +#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29682 +#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29683 +#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29684 +#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29685 +#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29686 +#define 
QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29687 +#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29688 +#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29689 +#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29690 +#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29691 +#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29692 +#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29693 +#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29694 +#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29695 +#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29696 +#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29697 +#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29698 +#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29699 +#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29700 +#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29701 +#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29702 +#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29703 +#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29704 +#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29705 +#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29706 +#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29707 +#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29708 +#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29709 +#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29710 +#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29711 +#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128 +#define QM_REG_VOQCRDLINE_RT_OFFSET 29839 +#define QM_REG_VOQCRDLINE_RT_SIZE 20 +#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29859 +#define QM_REG_VOQINITCRDLINE_RT_SIZE 20 +#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29879 +#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29880 +#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29881 +#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29882 +#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29883 +#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29884 +#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29885 +#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29886 +#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29887 +#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29888 +#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29889 +#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29890 +#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29891 +#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29892 +#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29893 +#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29894 +#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29895 +#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29896 +#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29897 +#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29898 +#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29899 +#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29900 +#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29901 +#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29902 +#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29903 +#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29904 +#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29905 +#define QM_REG_PQTX2PF_0_RT_OFFSET 29906 +#define QM_REG_PQTX2PF_1_RT_OFFSET 29907 +#define QM_REG_PQTX2PF_2_RT_OFFSET 29908 +#define QM_REG_PQTX2PF_3_RT_OFFSET 29909 +#define QM_REG_PQTX2PF_4_RT_OFFSET 29910 +#define QM_REG_PQTX2PF_5_RT_OFFSET 29911 +#define QM_REG_PQTX2PF_6_RT_OFFSET 29912 +#define QM_REG_PQTX2PF_7_RT_OFFSET 29913 +#define QM_REG_PQTX2PF_8_RT_OFFSET 29914 +#define QM_REG_PQTX2PF_9_RT_OFFSET 29915 +#define QM_REG_PQTX2PF_10_RT_OFFSET 29916 +#define QM_REG_PQTX2PF_11_RT_OFFSET 29917 +#define QM_REG_PQTX2PF_12_RT_OFFSET 29918 +#define QM_REG_PQTX2PF_13_RT_OFFSET 29919 +#define QM_REG_PQTX2PF_14_RT_OFFSET 29920 +#define QM_REG_PQTX2PF_15_RT_OFFSET 29921 +#define QM_REG_PQTX2PF_16_RT_OFFSET 29922 +#define QM_REG_PQTX2PF_17_RT_OFFSET 29923 
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29924 +#define QM_REG_PQTX2PF_19_RT_OFFSET 29925 +#define QM_REG_PQTX2PF_20_RT_OFFSET 29926 +#define QM_REG_PQTX2PF_21_RT_OFFSET 29927 +#define QM_REG_PQTX2PF_22_RT_OFFSET 29928 +#define QM_REG_PQTX2PF_23_RT_OFFSET 29929 +#define QM_REG_PQTX2PF_24_RT_OFFSET 29930 +#define QM_REG_PQTX2PF_25_RT_OFFSET 29931 +#define QM_REG_PQTX2PF_26_RT_OFFSET 29932 +#define QM_REG_PQTX2PF_27_RT_OFFSET 29933 +#define QM_REG_PQTX2PF_28_RT_OFFSET 29934 +#define QM_REG_PQTX2PF_29_RT_OFFSET 29935 +#define QM_REG_PQTX2PF_30_RT_OFFSET 29936 +#define QM_REG_PQTX2PF_31_RT_OFFSET 29937 +#define QM_REG_PQTX2PF_32_RT_OFFSET 29938 +#define QM_REG_PQTX2PF_33_RT_OFFSET 29939 +#define QM_REG_PQTX2PF_34_RT_OFFSET 29940 +#define QM_REG_PQTX2PF_35_RT_OFFSET 29941 +#define QM_REG_PQTX2PF_36_RT_OFFSET 29942 +#define QM_REG_PQTX2PF_37_RT_OFFSET 29943 +#define QM_REG_PQTX2PF_38_RT_OFFSET 29944 +#define QM_REG_PQTX2PF_39_RT_OFFSET 29945 +#define QM_REG_PQTX2PF_40_RT_OFFSET 29946 +#define QM_REG_PQTX2PF_41_RT_OFFSET 29947 +#define QM_REG_PQTX2PF_42_RT_OFFSET 29948 +#define QM_REG_PQTX2PF_43_RT_OFFSET 29949 +#define QM_REG_PQTX2PF_44_RT_OFFSET 29950 +#define QM_REG_PQTX2PF_45_RT_OFFSET 29951 +#define QM_REG_PQTX2PF_46_RT_OFFSET 29952 +#define QM_REG_PQTX2PF_47_RT_OFFSET 29953 +#define QM_REG_PQTX2PF_48_RT_OFFSET 29954 +#define QM_REG_PQTX2PF_49_RT_OFFSET 29955 +#define QM_REG_PQTX2PF_50_RT_OFFSET 29956 +#define QM_REG_PQTX2PF_51_RT_OFFSET 29957 +#define QM_REG_PQTX2PF_52_RT_OFFSET 29958 +#define QM_REG_PQTX2PF_53_RT_OFFSET 29959 +#define QM_REG_PQTX2PF_54_RT_OFFSET 29960 +#define QM_REG_PQTX2PF_55_RT_OFFSET 29961 +#define QM_REG_PQTX2PF_56_RT_OFFSET 29962 +#define QM_REG_PQTX2PF_57_RT_OFFSET 29963 +#define QM_REG_PQTX2PF_58_RT_OFFSET 29964 +#define QM_REG_PQTX2PF_59_RT_OFFSET 29965 +#define QM_REG_PQTX2PF_60_RT_OFFSET 29966 +#define QM_REG_PQTX2PF_61_RT_OFFSET 29967 +#define QM_REG_PQTX2PF_62_RT_OFFSET 29968 +#define QM_REG_PQTX2PF_63_RT_OFFSET 29969 +#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29970 +#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29971 +#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29972 +#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29973 +#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29974 +#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29975 +#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29976 +#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29977 +#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29978 +#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29979 +#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29980 +#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29981 +#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29982 +#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29983 +#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29984 +#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29985 +#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29986 +#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29987 +#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29988 +#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29989 +#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29990 +#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29991 +#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29992 +#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29993 +#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29994 +#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29995 +#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29996 +#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29997 +#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29998 +#define QM_REG_RLGLBLINCVAL_RT_SIZE 256 +#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30254 +#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256 +#define QM_REG_RLGLBLCRD_RT_OFFSET 30510 +#define 
QM_REG_RLGLBLCRD_RT_SIZE 256 +#define QM_REG_RLGLBLENABLE_RT_OFFSET 30766 +#define QM_REG_RLPFPERIOD_RT_OFFSET 30767 +#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30768 +#define QM_REG_RLPFINCVAL_RT_OFFSET 30769 +#define QM_REG_RLPFINCVAL_RT_SIZE 16 +#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30785 +#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16 +#define QM_REG_RLPFCRD_RT_OFFSET 30801 +#define QM_REG_RLPFCRD_RT_SIZE 16 +#define QM_REG_RLPFENABLE_RT_OFFSET 30817 +#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30818 +#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30819 +#define QM_REG_WFQPFWEIGHT_RT_SIZE 16 +#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30835 +#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16 +#define QM_REG_WFQPFCRD_RT_OFFSET 30851 +#define QM_REG_WFQPFCRD_RT_SIZE 160 +#define QM_REG_WFQPFENABLE_RT_OFFSET 31011 +#define QM_REG_WFQVPENABLE_RT_OFFSET 31012 +#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31013 +#define QM_REG_BASEADDRTXPQ_RT_SIZE 512 +#define QM_REG_TXPQMAP_RT_OFFSET 31525 +#define QM_REG_TXPQMAP_RT_SIZE 512 +#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32037 +#define QM_REG_WFQVPWEIGHT_RT_SIZE 512 +#define QM_REG_WFQVPCRD_RT_OFFSET 32549 +#define QM_REG_WFQVPCRD_RT_SIZE 512 +#define QM_REG_WFQVPMAP_RT_OFFSET 33061 +#define QM_REG_WFQVPMAP_RT_SIZE 512 +#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33573 +#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160 +#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33733 +#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33734 +#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33735 +#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33736 +#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33737 +#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33738 +#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33739 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33740 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4 +#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33744 +#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33748 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4 +#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33752 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33753 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33785 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33801 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33817 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33833 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 +#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33849 +#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 33850 +#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33851 +#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33852 +#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33853 +#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33854 +#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33855 +#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33856 +#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33857 +#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33858 +#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33859 +#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33860 +#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33861 +#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33862 +#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33863 +#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33864 +#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33865 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33866 +#define 
PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33867 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33868 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33869 +#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33870 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33871 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33872 +#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33873 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33874 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33875 +#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33876 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33877 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33878 +#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33879 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33880 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33881 +#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33882 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33883 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33884 +#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33885 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33886 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33887 +#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33888 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33889 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33890 +#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33891 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33892 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33893 +#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33894 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33895 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33896 +#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33897 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33898 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33899 +#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33900 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33901 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33902 +#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33903 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33904 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33905 +#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33906 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33907 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33908 +#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33909 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33910 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33911 +#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33912 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33913 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33914 +#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33915 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33916 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33917 +#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33918 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33919 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33920 +#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33921 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33922 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33923 +#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33924 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33925 +#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33926 + +#define RUNTIME_ARRAY_SIZE 33927 /* The eth storm context for the Tstorm */ struct tstorm_eth_conn_st_ctx { @@ -3201,7 +3566,31 @@ struct eth_conn_context { 
struct mstorm_eth_conn_st_ctx mstorm_st_context; }; -/* opcodes for the event ring */ +enum eth_error_code { + ETH_OK = 0x00, + ETH_FILTERS_MAC_ADD_FAIL_FULL, + ETH_FILTERS_MAC_ADD_FAIL_FULL_MTT2, + ETH_FILTERS_MAC_ADD_FAIL_DUP_MTT2, + ETH_FILTERS_MAC_ADD_FAIL_DUP_STT2, + ETH_FILTERS_MAC_DEL_FAIL_NOF, + ETH_FILTERS_MAC_DEL_FAIL_NOF_MTT2, + ETH_FILTERS_MAC_DEL_FAIL_NOF_STT2, + ETH_FILTERS_MAC_ADD_FAIL_ZERO_MAC, + ETH_FILTERS_VLAN_ADD_FAIL_FULL, + ETH_FILTERS_VLAN_ADD_FAIL_DUP, + ETH_FILTERS_VLAN_DEL_FAIL_NOF, + ETH_FILTERS_VLAN_DEL_FAIL_NOF_TT1, + ETH_FILTERS_PAIR_ADD_FAIL_DUP, + ETH_FILTERS_PAIR_ADD_FAIL_FULL, + ETH_FILTERS_PAIR_ADD_FAIL_FULL_MAC, + ETH_FILTERS_PAIR_DEL_FAIL_NOF, + ETH_FILTERS_PAIR_DEL_FAIL_NOF_TT1, + ETH_FILTERS_PAIR_ADD_FAIL_ZERO_MAC, + ETH_FILTERS_VNI_ADD_FAIL_FULL, + ETH_FILTERS_VNI_ADD_FAIL_DUP, + MAX_ETH_ERROR_CODE +}; + enum eth_event_opcode { ETH_EVENT_UNUSED, ETH_EVENT_VPORT_START, @@ -3269,7 +3658,13 @@ enum eth_filter_type { MAX_ETH_FILTER_TYPE }; -/* Ethernet Ramrod Command IDs */ +enum eth_ipv4_frag_type { + ETH_IPV4_NOT_FRAG, + ETH_IPV4_FIRST_FRAG, + ETH_IPV4_NON_FIRST_FRAG, + MAX_ETH_IPV4_FRAG_TYPE +}; + enum eth_ramrod_cmd_id { ETH_RAMROD_UNUSED, ETH_RAMROD_VPORT_START, @@ -3451,8 +3846,8 @@ struct rx_queue_start_ramrod_data { u8 toggle_val; u8 vf_rx_prod_index; - - u8 reserved[6]; + u8 vf_rx_prod_use_zone_a; + u8 reserved[5]; __le16 reserved1; struct regpair cqe_pbl_addr; struct regpair bd_base; @@ -3526,10 +3921,11 @@ struct tx_queue_start_ramrod_data { __le16 pxp_st_index; __le16 comp_agg_size; __le16 queue_zone_id; - __le16 test_dup_count; + __le16 reserved2; __le16 pbl_size; __le16 tx_queue_id; - + __le16 same_as_last_id; + __le16 reserved[3]; struct regpair pbl_base_addr; struct regpair bd_cons_address; }; @@ -4926,8 +5322,8 @@ struct roce_create_qp_resp_ramrod_data { #define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT 5 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK 0x1 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT 6 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED0_MASK 0x1 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED0_SHIFT 7 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_SHIFT 7 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK 0x7 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT 8 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F @@ -4988,6 +5384,10 @@ enum roce_event_opcode { MAX_ROCE_EVENT_OPCODE }; +struct roce_init_func_ramrod_data { + struct rdma_init_func_ramrod_data rdma; +}; + struct roce_modify_qp_req_ramrod_data { __le16 flags; #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1 @@ -7239,6 +7639,12 @@ struct public_drv_mb { #define DRV_MSG_CODE_MCP_RESET 0x00090000 #define DRV_MSG_CODE_SET_VERSION 0x000f0000 +#define DRV_MSG_CODE_GET_STATS 0x00130000 +#define DRV_MSG_CODE_STATS_TYPE_LAN 1 +#define DRV_MSG_CODE_STATS_TYPE_FCOE 2 +#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3 +#define DRV_MSG_CODE_STATS_TYPE_RDMA 4 + #define DRV_MSG_CODE_BIST_TEST 0x001e0000 #define DRV_MSG_CODE_SET_LED_MODE 0x00200000 @@ -7315,10 +7721,10 @@ enum MFW_DRV_MSG_TYPE { MFW_DRV_MSG_RESERVED4, MFW_DRV_MSG_BW_UPDATE, MFW_DRV_MSG_BW_UPDATE5, - MFW_DRV_MSG_BW_UPDATE6, - MFW_DRV_MSG_BW_UPDATE7, - MFW_DRV_MSG_BW_UPDATE8, - MFW_DRV_MSG_BW_UPDATE9, + MFW_DRV_MSG_GET_LAN_STATS, + MFW_DRV_MSG_GET_FCOE_STATS, + MFW_DRV_MSG_GET_ISCSI_STATS, + MFW_DRV_MSG_GET_RDMA_STATS, MFW_DRV_MSG_BW_UPDATE10, 
MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE, MFW_DRV_MSG_BW_UPDATE11, diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index e17885321faf..8ebdc79b3850 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -44,8 +44,7 @@ struct qed_ptt_pool { int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn) { - struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool), - GFP_KERNEL); + struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool), GFP_KERNEL); int i; if (!p_pool) @@ -113,16 +112,14 @@ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn) return NULL; } -void qed_ptt_release(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +void qed_ptt_release(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { spin_lock_bh(&p_hwfn->p_ptt_pool->lock); list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list); spin_unlock_bh(&p_hwfn->p_ptt_pool->lock); } -u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { /* The HW is using DWORDS and we need to translate it to Bytes */ return le32_to_cpu(p_ptt->pxp.offset) << 2; @@ -141,8 +138,7 @@ u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt) } void qed_ptt_set_win(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 new_hw_addr) + struct qed_ptt *p_ptt, u32 new_hw_addr) { u32 prev_hw_addr; @@ -166,8 +162,7 @@ void qed_ptt_set_win(struct qed_hwfn *p_hwfn, } static u32 qed_set_ptt(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 hw_addr) + struct qed_ptt *p_ptt, u32 hw_addr) { u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt); u32 offset; @@ -224,10 +219,7 @@ u32 qed_rd(struct qed_hwfn *p_hwfn, static void qed_memcpy_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - void *addr, - u32 hw_addr, - size_t n, - bool to_device) + void *addr, u32 hw_addr, size_t n, bool to_device) { u32 dw_count, *host_addr, hw_offset; size_t quota, done = 0; @@ -259,8 +251,7 @@ static void qed_memcpy_hw(struct qed_hwfn *p_hwfn, } void qed_memcpy_from(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - void *dest, u32 hw_addr, size_t n) + struct qed_ptt *p_ptt, void *dest, u32 hw_addr, size_t n) { DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n", @@ -270,8 +261,7 @@ void qed_memcpy_from(struct qed_hwfn *p_hwfn, } void qed_memcpy_to(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 hw_addr, void *src, size_t n) + struct qed_ptt *p_ptt, u32 hw_addr, void *src, size_t n) { DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n", @@ -280,9 +270,7 @@ void qed_memcpy_to(struct qed_hwfn *p_hwfn, qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true); } -void qed_fid_pretend(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u16 fid) +void qed_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 fid) { u16 control = 0; @@ -309,8 +297,7 @@ void qed_fid_pretend(struct qed_hwfn *p_hwfn, } void qed_port_pretend(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u8 port_id) + struct qed_ptt *p_ptt, u8 port_id) { u16 control = 0; @@ -326,8 +313,7 @@ void qed_port_pretend(struct qed_hwfn *p_hwfn, *(u32 *)&p_ptt->pxp.pretend); } -void qed_port_unpretend(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u16 control = 0; @@ -429,28 +415,27 @@ u32 qed_dmae_idx_to_go_cmd(u8 idx) return DMAE_REG_GO_C0 + (idx << 2); } -static int -qed_dmae_post_command(struct qed_hwfn *p_hwfn, 
- struct qed_ptt *p_ptt) +static int qed_dmae_post_command(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) { - struct dmae_cmd *command = p_hwfn->dmae_info.p_dmae_cmd; + struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd; u8 idx_cmd = p_hwfn->dmae_info.channel, i; int qed_status = 0; /* verify address is not NULL */ - if ((((command->dst_addr_lo == 0) && (command->dst_addr_hi == 0)) || - ((command->src_addr_lo == 0) && (command->src_addr_hi == 0)))) { + if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) || + ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) { DP_NOTICE(p_hwfn, "source or destination address 0 idx_cmd=%d\n" "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n", - idx_cmd, - le32_to_cpu(command->opcode), - le16_to_cpu(command->opcode_b), - le16_to_cpu(command->length_dw), - le32_to_cpu(command->src_addr_hi), - le32_to_cpu(command->src_addr_lo), - le32_to_cpu(command->dst_addr_hi), - le32_to_cpu(command->dst_addr_lo)); + idx_cmd, + le32_to_cpu(p_command->opcode), + le16_to_cpu(p_command->opcode_b), + le16_to_cpu(p_command->length_dw), + le32_to_cpu(p_command->src_addr_hi), + le32_to_cpu(p_command->src_addr_lo), + le32_to_cpu(p_command->dst_addr_hi), + le32_to_cpu(p_command->dst_addr_lo)); return -EINVAL; } @@ -459,13 +444,13 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn, NETIF_MSG_HW, "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n", idx_cmd, - le32_to_cpu(command->opcode), - le16_to_cpu(command->opcode_b), - le16_to_cpu(command->length_dw), - le32_to_cpu(command->src_addr_hi), - le32_to_cpu(command->src_addr_lo), - le32_to_cpu(command->dst_addr_hi), - le32_to_cpu(command->dst_addr_lo)); + le32_to_cpu(p_command->opcode), + le16_to_cpu(p_command->opcode_b), + le16_to_cpu(p_command->length_dw), + le32_to_cpu(p_command->src_addr_hi), + le32_to_cpu(p_command->src_addr_lo), + le32_to_cpu(p_command->dst_addr_hi), + le32_to_cpu(p_command->dst_addr_lo)); /* Copy the command to DMAE - need to do it before every call * for source/dest address no reset. @@ -475,7 +460,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn, */ for (i = 0; i < DMAE_CMD_SIZE; i++) { u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ? 
- *(((u32 *)command) + i) : 0; + *(((u32 *)p_command) + i) : 0; qed_wr(p_hwfn, p_ptt, DMAE_REG_CMD_MEM + @@ -483,9 +468,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn, (i * sizeof(u32)), data); } - qed_wr(p_hwfn, p_ptt, - qed_dmae_idx_to_go_cmd(idx_cmd), - DMAE_GO_VALUE); + qed_wr(p_hwfn, p_ptt, qed_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE); return qed_status; } @@ -498,9 +481,7 @@ int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn) u32 **p_comp = &p_hwfn->dmae_info.p_completion_word; *p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, - sizeof(u32), - p_addr, - GFP_KERNEL); + sizeof(u32), p_addr, GFP_KERNEL); if (!*p_comp) { DP_NOTICE(p_hwfn, "Failed to allocate `p_completion_word'\n"); goto err; @@ -543,8 +524,7 @@ void qed_dmae_info_free(struct qed_hwfn *p_hwfn) p_phys = p_hwfn->dmae_info.completion_word_phys_addr; dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(u32), - p_hwfn->dmae_info.p_completion_word, - p_phys); + p_hwfn->dmae_info.p_completion_word, p_phys); p_hwfn->dmae_info.p_completion_word = NULL; } @@ -552,8 +532,7 @@ void qed_dmae_info_free(struct qed_hwfn *p_hwfn) p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr; dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(struct dmae_cmd), - p_hwfn->dmae_info.p_dmae_cmd, - p_phys); + p_hwfn->dmae_info.p_dmae_cmd, p_phys); p_hwfn->dmae_info.p_dmae_cmd = NULL; } @@ -571,9 +550,7 @@ void qed_dmae_info_free(struct qed_hwfn *p_hwfn) static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn) { - u32 wait_cnt = 0; - u32 wait_cnt_limit = 10000; - + u32 wait_cnt_limit = 10000, wait_cnt = 0; int qed_status = 0; barrier(); @@ -606,7 +583,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn, u64 dst_addr, u8 src_type, u8 dst_type, - u32 length) + u32 length_dw) { dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr; struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd; @@ -624,7 +601,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn, cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys)); memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0], (void *)(uintptr_t)src_addr, - length * sizeof(u32)); + length_dw * sizeof(u32)); break; default: return -EINVAL; @@ -645,7 +622,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn, return -EINVAL; } - cmd->length_dw = cpu_to_le16((u16)length); + cmd->length_dw = cpu_to_le16((u16)length_dw); qed_dmae_post_command(p_hwfn, p_ptt); @@ -654,16 +631,14 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn, if (qed_status) { DP_NOTICE(p_hwfn, "qed_dmae_host2grc: Wait Failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x\n", - src_addr, - dst_addr, - length); + src_addr, dst_addr, length_dw); return qed_status; } if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT) memcpy((void *)(uintptr_t)(dst_addr), &p_hwfn->dmae_info.p_intermediate_buffer[0], - length * sizeof(u32)); + length_dw * sizeof(u32)); return 0; } @@ -730,10 +705,7 @@ static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn, if (qed_status) { DP_NOTICE(p_hwfn, "qed_dmae_execute_sub_operation Failed with error 0x%x. 
source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n", - qed_status, - src_addr, - dst_addr, - length_cur); + qed_status, src_addr, dst_addr, length_cur); break; } } @@ -743,10 +715,7 @@ static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn, int qed_dmae_host2grc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u64 source_addr, - u32 grc_addr, - u32 size_in_dwords, - u32 flags) + u64 source_addr, u32 grc_addr, u32 size_in_dwords, u32 flags) { u32 grc_addr_in_dw = grc_addr / sizeof(u32); struct qed_dmae_params params; @@ -768,9 +737,10 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn, return rc; } -int -qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr, - dma_addr_t dest_addr, u32 size_in_dwords, u32 flags) +int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 grc_addr, + dma_addr_t dest_addr, u32 size_in_dwords, u32 flags) { u32 grc_addr_in_dw = grc_addr / sizeof(u32); struct qed_dmae_params params; @@ -791,12 +761,11 @@ qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr, return rc; } -int -qed_dmae_host2host(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - dma_addr_t source_addr, - dma_addr_t dest_addr, - u32 size_in_dwords, struct qed_dmae_params *p_params) +int qed_dmae_host2host(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + dma_addr_t source_addr, + dma_addr_t dest_addr, + u32 size_in_dwords, struct qed_dmae_params *p_params) { int rc; diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c index 9866a20d2128..8ce8564061d5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c @@ -59,17 +59,14 @@ void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn) p_hwfn->rt_data.b_valid[i] = false; } -void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, - u32 rt_offset, - u32 val) +void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val) { p_hwfn->rt_data.init_val[rt_offset] = val; p_hwfn->rt_data.b_valid[rt_offset] = true; } void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn, - u32 rt_offset, u32 *p_val, - size_t size) + u32 rt_offset, u32 *p_val, size_t size) { size_t i; @@ -81,10 +78,7 @@ void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn, static int qed_init_rt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u32 addr, - u16 rt_offset, - u16 size, - bool b_must_dmae) + u32 addr, u16 rt_offset, u16 size, bool b_must_dmae) { u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset]; bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset]; @@ -102,8 +96,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn, * simply write the data instead of using dmae. 
*/ if (!b_must_dmae) { - qed_wr(p_hwfn, p_ptt, addr + (i << 2), - p_init_val[i]); + qed_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]); continue; } @@ -115,7 +108,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn, rc = qed_dmae_host2grc(p_hwfn, p_ptt, (uintptr_t)(p_init_val + i), addr + (i << 2), segment, 0); - if (rc != 0) + if (rc) return rc; /* Jump over the entire segment, including invalid entry */ @@ -182,9 +175,7 @@ static int qed_init_array_dmae(struct qed_hwfn *p_hwfn, static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u32 addr, - u32 fill, - u32 fill_count) + u32 addr, u32 fill, u32 fill_count) { static u32 zero_buffer[DMAE_MAX_RW_SIZE]; @@ -199,15 +190,12 @@ static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn, return qed_dmae_host2grc(p_hwfn, p_ptt, (uintptr_t)(&zero_buffer[0]), - addr, fill_count, - QED_DMAE_FLAG_RW_REPL_SRC); + addr, fill_count, QED_DMAE_FLAG_RW_REPL_SRC); } static void qed_init_fill(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u32 addr, - u32 fill, - u32 fill_count) + u32 addr, u32 fill, u32 fill_count) { u32 i; @@ -218,12 +206,12 @@ static void qed_init_fill(struct qed_hwfn *p_hwfn, static int qed_init_cmd_array(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct init_write_op *cmd, - bool b_must_dmae, - bool b_can_dmae) + bool b_must_dmae, bool b_can_dmae) { + u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset); u32 data = le32_to_cpu(cmd->data); u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2; - u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset); + u32 offset, output_len, input_len, max_size; struct qed_dev *cdev = p_hwfn->cdev; union init_array_hdr *hdr; @@ -233,8 +221,7 @@ static int qed_init_cmd_array(struct qed_hwfn *p_hwfn, array_data = cdev->fw_data->arr_data; - hdr = (union init_array_hdr *)(array_data + - dmae_array_offset); + hdr = (union init_array_hdr *)(array_data + dmae_array_offset); data = le32_to_cpu(hdr->raw.data); switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) { case INIT_ARR_ZIPPED: @@ -290,13 +277,12 @@ static int qed_init_cmd_array(struct qed_hwfn *p_hwfn, /* init_ops write command */ static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - struct init_write_op *cmd, - bool b_can_dmae) + struct init_write_op *p_cmd, bool b_can_dmae) { - u32 data = le32_to_cpu(cmd->data); - u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2; + u32 data = le32_to_cpu(p_cmd->data); bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS); - union init_write_args *arg = &cmd->args; + u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2; + union init_write_args *arg = &p_cmd->args; int rc = 0; /* Sanitize */ @@ -309,20 +295,18 @@ static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn, switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) { case INIT_SRC_INLINE: - qed_wr(p_hwfn, p_ptt, addr, - le32_to_cpu(arg->inline_val)); + data = le32_to_cpu(p_cmd->args.inline_val); + qed_wr(p_hwfn, p_ptt, addr, data); break; case INIT_SRC_ZEROS: - if (b_must_dmae || - (b_can_dmae && (le32_to_cpu(arg->zeros_count) >= 64))) - rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0, - le32_to_cpu(arg->zeros_count)); + data = le32_to_cpu(p_cmd->args.zeros_count); + if (b_must_dmae || (b_can_dmae && (data >= 64))) + rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data); else - qed_init_fill(p_hwfn, p_ptt, addr, 0, - le32_to_cpu(arg->zeros_count)); + qed_init_fill(p_hwfn, p_ptt, addr, 0, data); break; case INIT_SRC_ARRAY: - rc = qed_init_cmd_array(p_hwfn, p_ptt, cmd, + rc = 
qed_init_cmd_array(p_hwfn, p_ptt, p_cmd, b_must_dmae, b_can_dmae); break; case INIT_SRC_RUNTIME: @@ -353,8 +337,7 @@ static inline bool comp_or(u32 val, u32 expected_val) /* init_ops read/poll commands */ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct init_read_op *cmd) + struct qed_ptt *p_ptt, struct init_read_op *cmd) { bool (*comp_check)(u32 val, u32 expected_val); u32 delay = QED_INIT_POLL_PERIOD_US, val; @@ -412,35 +395,33 @@ static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn, } static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn, - u16 *offset, - int modes) + u16 *p_offset, int modes) { struct qed_dev *cdev = p_hwfn->cdev; const u8 *modes_tree_buf; u8 arg1, arg2, tree_val; modes_tree_buf = cdev->fw_data->modes_tree_buf; - tree_val = modes_tree_buf[(*offset)++]; + tree_val = modes_tree_buf[(*p_offset)++]; switch (tree_val) { case INIT_MODE_OP_NOT: - return qed_init_cmd_mode_match(p_hwfn, offset, modes) ^ 1; + return qed_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1; case INIT_MODE_OP_OR: - arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes); - arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes); + arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes); + arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes); return arg1 | arg2; case INIT_MODE_OP_AND: - arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes); - arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes); + arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes); + arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes); return arg1 & arg2; default: tree_val -= MAX_INIT_MODE_OPS; - return (modes & (1 << tree_val)) ? 1 : 0; + return (modes & BIT(tree_val)) ? 1 : 0; } } static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn, - struct init_if_mode_op *p_cmd, - int modes) + struct init_if_mode_op *p_cmd, int modes) { u16 offset = le16_to_cpu(p_cmd->modes_buf_offset); @@ -453,8 +434,7 @@ static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn, static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn, struct init_if_phase_op *p_cmd, - u32 phase, - u32 phase_id) + u32 phase, u32 phase_id) { u32 data = le32_to_cpu(p_cmd->phase_data); u32 op_data = le32_to_cpu(p_cmd->op_data); @@ -468,10 +448,7 @@ static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn, } int qed_init_run(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - int phase, - int phase_id, - int modes) + struct qed_ptt *p_ptt, int phase, int phase_id, int modes) { struct qed_dev *cdev = p_hwfn->cdev; u32 cmd_num, num_init_ops; @@ -557,7 +534,7 @@ int qed_init_fw_data(struct qed_dev *cdev, const u8 *data) /* First Dword contains metadata and should be skipped */ buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32)); - offset = buf_hdr[BIN_BUF_FW_VER_INFO].offset; + offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset; fw->fw_ver_info = (struct fw_ver_info *)(data + offset); offset = buf_hdr[BIN_BUF_INIT_CMD].offset; diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 8fa50fa23c8d..61ec973a06c7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -1775,10 +1775,9 @@ struct qed_sb_attn_info { }; static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn, - struct qed_sb_attn_info *p_sb_desc) + struct qed_sb_attn_info *p_sb_desc) { - u16 rc = 0; - u16 index; + u16 rc = 0, index; /* Make certain HW write took affect */ mmiowb(); @@ -1802,15 +1801,13 @@ static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn, * @param 
asserted_bits newly asserted bits * @return int */ -static int qed_int_assertion(struct qed_hwfn *p_hwfn, - u16 asserted_bits) +static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits) { struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; u32 igu_mask; /* Mask the source of the attention in the IGU */ - igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, - IGU_REG_ATTENTION_ENABLE); + igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE); DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n", igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE)); igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE); @@ -2041,7 +2038,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn, struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j]; if ((p_bit->flags & ATTENTION_PARITY) && - !!(parities & (1 << bit_idx))) + !!(parities & BIT(bit_idx))) qed_int_deassertion_parity(p_hwfn, p_bit, bit_idx); @@ -2114,8 +2111,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn, ~((u32)deasserted_bits)); /* Unmask deasserted attentions in IGU */ - aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, - IGU_REG_ATTENTION_ENABLE); + aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE); aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE); qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask); @@ -2160,8 +2156,7 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn) index, attn_bits, attn_acks, asserted_bits, deasserted_bits, p_sb_attn_sw->known_attn); } else if (asserted_bits == 0x100) { - DP_INFO(p_hwfn, - "MFW indication via attention\n"); + DP_INFO(p_hwfn, "MFW indication via attention\n"); } else { DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "MFW indication [deassertion]\n"); @@ -2173,18 +2168,14 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn) return rc; } - if (deasserted_bits) { + if (deasserted_bits) rc = qed_int_deassertion(p_hwfn, deasserted_bits); - if (rc) - return rc; - } return rc; } static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn, - void __iomem *igu_addr, - u32 ack_cons) + void __iomem *igu_addr, u32 ack_cons) { struct igu_prod_cons_update igu_ack = { 0 }; @@ -2242,9 +2233,8 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie) /* Gather Interrupts/Attentions information */ if (!sb_info->sb_virt) { - DP_ERR( - p_hwfn->cdev, - "Interrupt Status block is NULL - cannot check for new interrupts!\n"); + DP_ERR(p_hwfn->cdev, + "Interrupt Status block is NULL - cannot check for new interrupts!\n"); } else { u32 tmp_index = sb_info->sb_ack; @@ -2255,9 +2245,8 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie) } if (!sb_attn || !sb_attn->sb_attn) { - DP_ERR( - p_hwfn->cdev, - "Attentions Status block is NULL - cannot check for new attentions!\n"); + DP_ERR(p_hwfn->cdev, + "Attentions Status block is NULL - cannot check for new attentions!\n"); } else { u16 tmp_index = sb_attn->index; @@ -2313,8 +2302,7 @@ static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn) if (p_sb->sb_attn) dma_free_coherent(&p_hwfn->cdev->pdev->dev, SB_ATTN_ALIGNED_SIZE(p_hwfn), - p_sb->sb_attn, - p_sb->sb_phys); + p_sb->sb_attn, p_sb->sb_phys); kfree(p_sb); } @@ -2337,8 +2325,7 @@ static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn, static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - void *sb_virt_addr, - dma_addr_t sb_phy_addr) + void *sb_virt_addr, dma_addr_t sb_phy_addr) { struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn; int i, j, k; @@ -2378,8 +2365,8 @@ static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn, { 
struct qed_dev *cdev = p_hwfn->cdev; struct qed_sb_attn_info *p_sb; - void *p_virt; dma_addr_t p_phys = 0; + void *p_virt; /* SB struct */ p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL); @@ -2412,9 +2399,7 @@ static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn, void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, struct cau_sb_entry *p_sb_entry, - u8 pf_id, - u16 vf_number, - u8 vf_valid) + u8 pf_id, u16 vf_number, u8 vf_valid) { struct qed_dev *cdev = p_hwfn->cdev; u32 cau_state; @@ -2428,12 +2413,6 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F); SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F); - /* setting the time resultion to a fixed value ( = 1) */ - SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, - QED_CAU_DEF_RX_TIMER_RES); - SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, - QED_CAU_DEF_TX_TIMER_RES); - cau_state = CAU_HC_DISABLE_STATE; if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) { @@ -2468,9 +2447,7 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, dma_addr_t sb_phys, - u16 igu_sb_id, - u16 vf_number, - u8 vf_valid) + u16 igu_sb_id, u16 vf_number, u8 vf_valid) { struct cau_sb_entry sb_entry; @@ -2514,8 +2491,7 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, timer_res = 2; timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res); qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI, - QED_COAL_RX_STATE_MACHINE, - timeset); + QED_COAL_RX_STATE_MACHINE, timeset); if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F) timer_res = 0; @@ -2541,8 +2517,7 @@ void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, u8 timeset) { struct cau_pi_entry pi_entry; - u32 sb_offset; - u32 pi_offset; + u32 sb_offset, pi_offset; if (IS_VF(p_hwfn->cdev)) return; @@ -2569,8 +2544,7 @@ void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, } void qed_int_sb_setup(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_sb_info *sb_info) + struct qed_ptt *p_ptt, struct qed_sb_info *sb_info) { /* zero status block and ack counter */ sb_info->sb_ack = 0; @@ -2590,8 +2564,7 @@ void qed_int_sb_setup(struct qed_hwfn *p_hwfn, * * @return u16 */ -static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, - u16 sb_id) +static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) { u16 igu_sb_id; @@ -2603,8 +2576,12 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, else igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id); - DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n", - (sb_id == QED_SP_SB_ID) ? 
"DSB" : "non-DSB", igu_sb_id); + if (sb_id == QED_SP_SB_ID) + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id); + else + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id); return igu_sb_id; } @@ -2612,9 +2589,7 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, int qed_int_sb_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_sb_info *sb_info, - void *sb_virt_addr, - dma_addr_t sb_phy_addr, - u16 sb_id) + void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id) { sb_info->sb_virt = sb_virt_addr; sb_info->sb_phys = sb_phy_addr; @@ -2650,8 +2625,7 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn, } int qed_int_sb_release(struct qed_hwfn *p_hwfn, - struct qed_sb_info *sb_info, - u16 sb_id) + struct qed_sb_info *sb_info, u16 sb_id) { if (sb_id == QED_SP_SB_ID) { DP_ERR(p_hwfn, "Do Not free sp sb using this function"); @@ -2685,8 +2659,7 @@ static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn) kfree(p_sb); } -static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_sb_sp_info *p_sb; dma_addr_t p_phys = 0; @@ -2721,9 +2694,7 @@ static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, int qed_int_register_cb(struct qed_hwfn *p_hwfn, qed_int_comp_cb_t comp_cb, - void *cookie, - u8 *sb_idx, - __le16 **p_fw_cons) + void *cookie, u8 *sb_idx, __le16 **p_fw_cons) { struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; int rc = -ENOMEM; @@ -2764,8 +2735,7 @@ u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn) } void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - enum qed_int_mode int_mode) + struct qed_ptt *p_ptt, enum qed_int_mode int_mode) { u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN; @@ -2809,7 +2779,7 @@ int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) { rc = qed_slowpath_irq_req(p_hwfn); - if (rc != 0) { + if (rc) { DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n"); return -EINVAL; } @@ -2822,8 +2792,7 @@ int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, return rc; } -void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { p_hwfn->b_int_enabled = 0; @@ -2950,13 +2919,11 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, p_hwfn->hw_info.opaque_fid, b_set); } -static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u16 sb_id) +static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 sb_id) { u32 val = qed_rd(p_hwfn, p_ptt, - IGU_REG_MAPPING_MEMORY + - sizeof(u32) * sb_id); + IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id); struct qed_igu_block *p_block; p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id]; @@ -2983,8 +2950,7 @@ out: return val; } -int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_igu_info *p_igu_info; u32 val, min_vf = 0, max_vf = 0; @@ -3104,22 +3070,19 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, */ void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn) { - u32 igu_pf_conf = 0; - - igu_pf_conf |= IGU_PF_CONF_FUNC_EN; + u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN; 
STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf); } u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn) { - u64 intr_status = 0; - u32 intr_status_lo = 0; - u32 intr_status_hi = 0; u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - IGU_CMD_INT_ACK_BASE; u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - IGU_CMD_INT_ACK_BASE; + u32 intr_status_hi = 0, intr_status_lo = 0; + u64 intr_status = 0; intr_status_lo = REG_RD(p_hwfn, GTT_BAR0_MAP_REG_IGU_CMD + @@ -3153,8 +3116,7 @@ static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn) kfree(p_hwfn->sp_dpc); } -int qed_int_alloc(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { int rc = 0; @@ -3169,10 +3131,9 @@ int qed_int_alloc(struct qed_hwfn *p_hwfn, return rc; } rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt); - if (rc) { + if (rc) DP_ERR(p_hwfn->cdev, "Failed to allocate sb attn mem\n"); - return rc; - } + return rc; } @@ -3183,8 +3144,7 @@ void qed_int_free(struct qed_hwfn *p_hwfn) qed_int_sp_dpc_free(p_hwfn); } -void qed_int_setup(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info); qed_int_sb_attn_setup(p_hwfn, p_ptt); diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 401e738543b5..4409ea3f7d40 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -52,7 +52,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, u16 rx_mode = 0; rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); - if (rc != 0) + if (rc) return rc; memset(&init_data, 0, sizeof(init_data)); @@ -80,8 +80,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, p_ramrod->rx_mode.state = cpu_to_le16(rx_mode); /* TPA related fields */ - memset(&p_ramrod->tpa_param, 0, - sizeof(struct eth_vport_tpa_param)); + memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param)); p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe; @@ -102,6 +101,9 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, p_ramrod->tx_switching_en = p_params->tx_switching; + p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac; + p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype; + /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */ p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev, p_params->concrete_fid); @@ -306,14 +308,14 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn, memset(&p_ramrod->approx_mcast.bins, 0, sizeof(p_ramrod->approx_mcast.bins)); - if (p_params->update_approx_mcast_flg) { - p_ramrod->common.update_approx_mcast_flg = 1; - for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { - u32 *p_bins = (u32 *)p_params->bins; - __le32 val = cpu_to_le32(p_bins[i]); + if (!p_params->update_approx_mcast_flg) + return; - p_ramrod->approx_mcast.bins[i] = val; - } + p_ramrod->common.update_approx_mcast_flg = 1; + for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { + u32 *p_bins = (u32 *)p_params->bins; + + p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]); } } @@ -336,7 +338,7 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn, } rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); - if (rc != 0) + if (rc) return rc; memset(&init_data, 0, sizeof(init_data)); @@ -361,8 +363,8 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn, p_cmn->tx_active_flg = p_params->vport_active_tx_flg; 
p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg; p_cmn->accept_any_vlan = p_params->accept_any_vlan; - p_cmn->update_accept_any_vlan_flg = - p_params->update_accept_any_vlan_flg; + val = p_params->update_accept_any_vlan_flg; + p_cmn->update_accept_any_vlan_flg = val; p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg; val = p_params->update_inner_vlan_removal_flg; @@ -411,7 +413,7 @@ int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id) return qed_vf_pf_vport_stop(p_hwfn); rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id); - if (rc != 0) + if (rc) return rc; memset(&init_data, 0, sizeof(init_data)); @@ -476,7 +478,7 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev, rc = qed_sp_vport_update(p_hwfn, &vport_update_params, comp_mode, p_comp_data); - if (rc != 0) { + if (rc) { DP_ERR(cdev, "Update rx_mode failed %d\n", rc); return rc; } @@ -511,11 +513,12 @@ static int qed_sp_release_queue_cid( int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, u16 opaque_fid, u32 cid, - struct qed_queue_start_common_params *params, + struct qed_queue_start_common_params *p_params, u8 stats_id, u16 bd_max_bytes, dma_addr_t bd_chain_phys_addr, - dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size) + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, bool b_use_zone_a_prod) { struct rx_queue_start_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; @@ -526,23 +529,23 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, int rc = -EINVAL; /* Store information for the stop */ - p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id]; - p_rx_cid->cid = cid; - p_rx_cid->opaque_fid = opaque_fid; - p_rx_cid->vport_id = params->vport_id; + p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id]; + p_rx_cid->cid = cid; + p_rx_cid->opaque_fid = opaque_fid; + p_rx_cid->vport_id = p_params->vport_id; - rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport_id); - if (rc != 0) + rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); + if (rc) return rc; - rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q_id); - if (rc != 0) + rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_rx_q_id); + if (rc) return rc; DP_VERBOSE(p_hwfn, QED_MSG_SP, "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n", - opaque_fid, cid, params->queue_id, params->vport_id, - params->sb); + opaque_fid, + cid, p_params->queue_id, p_params->vport_id, p_params->sb); /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); @@ -558,24 +561,28 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, p_ramrod = &p_ent->ramrod.rx_queue_start; - p_ramrod->sb_id = cpu_to_le16(params->sb); - p_ramrod->sb_index = params->sb_idx; - p_ramrod->vport_id = abs_vport_id; - p_ramrod->stats_counter_id = stats_id; - p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id); - p_ramrod->complete_cqe_flg = 0; - p_ramrod->complete_event_flg = 1; + p_ramrod->sb_id = cpu_to_le16(p_params->sb); + p_ramrod->sb_index = p_params->sb_idx; + p_ramrod->vport_id = abs_vport_id; + p_ramrod->stats_counter_id = stats_id; + p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id); + p_ramrod->complete_cqe_flg = 0; + p_ramrod->complete_event_flg = 1; - p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes); + p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes); DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr); - p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size); + p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size); DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr); - p_ramrod->vf_rx_prod_index = 
params->vf_qid; - if (params->vf_qid) + if (p_params->vf_qid || b_use_zone_a_prod) { + p_ramrod->vf_rx_prod_index = p_params->vf_qid; DP_VERBOSE(p_hwfn, QED_MSG_SP, - "Queue is meant for VF rxq[%04x]\n", params->vf_qid); + "Queue%s is meant for VF rxq[%02x]\n", + b_use_zone_a_prod ? " [legacy]" : "", + p_params->vf_qid); + p_ramrod->vf_rx_prod_use_zone_a = b_use_zone_a_prod; + } return qed_spq_post(p_hwfn, p_ent, NULL); } @@ -583,7 +590,7 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, static int qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, u16 opaque_fid, - struct qed_queue_start_common_params *params, + struct qed_queue_start_common_params *p_params, u16 bd_max_bytes, dma_addr_t bd_chain_phys_addr, dma_addr_t cqe_pbl_addr, @@ -597,20 +604,20 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, if (IS_VF(p_hwfn->cdev)) { return qed_vf_pf_rxq_start(p_hwfn, - params->queue_id, - params->sb, - params->sb_idx, + p_params->queue_id, + p_params->sb, + (u8)p_params->sb_idx, bd_max_bytes, bd_chain_phys_addr, cqe_pbl_addr, cqe_pbl_size, pp_prod); } - rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue); - if (rc != 0) + rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_l2_queue); + if (rc) return rc; - rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_stats_id); - if (rc != 0) + rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id); + if (rc) return rc; *pp_prod = (u8 __iomem *)p_hwfn->regview + @@ -622,9 +629,8 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, (u32 *)(&init_prod_val)); /* Allocate a CID for the queue */ - p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id]; - rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, - &p_rx_cid->cid); + p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id]; + rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_rx_cid->cid); if (rc) { DP_NOTICE(p_hwfn, "Failed to acquire cid\n"); return rc; @@ -634,14 +640,13 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, opaque_fid, p_rx_cid->cid, - params, + p_params, abs_stats_id, bd_max_bytes, bd_chain_phys_addr, - cqe_pbl_addr, - cqe_pbl_size); + cqe_pbl_addr, cqe_pbl_size, false); - if (rc != 0) + if (rc) qed_sp_release_queue_cid(p_hwfn, p_rx_cid); return rc; @@ -788,21 +793,20 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, if (rc) return rc; - p_ramrod = &p_ent->ramrod.tx_queue_start; - p_ramrod->vport_id = abs_vport_id; + p_ramrod = &p_ent->ramrod.tx_queue_start; + p_ramrod->vport_id = abs_vport_id; - p_ramrod->sb_id = cpu_to_le16(p_params->sb); - p_ramrod->sb_index = p_params->sb_idx; - p_ramrod->stats_counter_id = stats_id; + p_ramrod->sb_id = cpu_to_le16(p_params->sb); + p_ramrod->sb_index = p_params->sb_idx; + p_ramrod->stats_counter_id = stats_id; - p_ramrod->queue_zone_id = cpu_to_le16(abs_tx_q_id); - p_ramrod->pbl_size = cpu_to_le16(pbl_size); + p_ramrod->queue_zone_id = cpu_to_le16(abs_tx_q_id); + + p_ramrod->pbl_size = cpu_to_le16(pbl_size); DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr); - pq_id = qed_get_qm_pq(p_hwfn, - PROTOCOLID_ETH, - p_pq_params); - p_ramrod->qm_pq_id = cpu_to_le16(pq_id); + pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, p_pq_params); + p_ramrod->qm_pq_id = cpu_to_le16(pq_id); return qed_spq_post(p_hwfn, p_ent, NULL); } @@ -836,8 +840,7 @@ qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn, memset(&pq_params, 0, sizeof(pq_params)); /* Allocate a CID for the queue */ - rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, - &p_tx_cid->cid); + rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, 
&p_tx_cid->cid); if (rc) { DP_NOTICE(p_hwfn, "Failed to acquire cid\n"); return rc; @@ -896,8 +899,7 @@ int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id) return qed_sp_release_queue_cid(p_hwfn, p_tx_cid); } -static enum eth_filter_action -qed_filter_action(enum qed_filter_opcode opcode) +static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode) { enum eth_filter_action action = MAX_ETH_FILTER_ACTION; @@ -1033,19 +1035,19 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn, p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni); if (p_filter_cmd->opcode == QED_FILTER_MOVE) { - p_second_filter->type = p_first_filter->type; - p_second_filter->mac_msb = p_first_filter->mac_msb; - p_second_filter->mac_mid = p_first_filter->mac_mid; - p_second_filter->mac_lsb = p_first_filter->mac_lsb; - p_second_filter->vlan_id = p_first_filter->vlan_id; - p_second_filter->vni = p_first_filter->vni; + p_second_filter->type = p_first_filter->type; + p_second_filter->mac_msb = p_first_filter->mac_msb; + p_second_filter->mac_mid = p_first_filter->mac_mid; + p_second_filter->mac_lsb = p_first_filter->mac_lsb; + p_second_filter->vlan_id = p_first_filter->vlan_id; + p_second_filter->vni = p_first_filter->vni; p_first_filter->action = ETH_FILTER_ACTION_REMOVE; p_first_filter->vport_id = vport_to_remove_from; - p_second_filter->action = ETH_FILTER_ACTION_ADD; - p_second_filter->vport_id = vport_to_add_to; + p_second_filter->action = ETH_FILTER_ACTION_ADD; + p_second_filter->vport_id = vport_to_add_to; } else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) { p_first_filter->vport_id = vport_to_add_to; memcpy(p_second_filter, p_first_filter, @@ -1086,7 +1088,7 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd, &p_ramrod, &p_ent, comp_mode, p_comp_data); - if (rc != 0) { + if (rc) { DP_ERR(p_hwfn, "Uni. 
filter command failed %d\n", rc); return rc; } @@ -1094,10 +1096,8 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, p_header->assert_on_error = p_filter_cmd->assert_on_error; rc = qed_spq_post(p_hwfn, p_ent, NULL); - if (rc != 0) { - DP_ERR(p_hwfn, - "Unicast filter ADD command failed %d\n", - rc); + if (rc) { + DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc); return rc; } @@ -1136,15 +1136,10 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, * Return: ******************************************************************************/ static u32 qed_calc_crc32c(u8 *crc32_packet, - u32 crc32_length, - u32 crc32_seed, - u8 complement) + u32 crc32_length, u32 crc32_seed, u8 complement) { - u32 byte = 0; - u32 bit = 0; - u8 msb = 0; - u8 current_byte = 0; - u32 crc32_result = crc32_seed; + u32 byte = 0, bit = 0, crc32_result = crc32_seed; + u8 msb = 0, current_byte = 0; if ((!crc32_packet) || (crc32_length == 0) || @@ -1164,9 +1159,7 @@ static u32 qed_calc_crc32c(u8 *crc32_packet, return crc32_result; } -static inline u32 qed_crc32c_le(u32 seed, - u8 *mac, - u32 len) +static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len) { u32 packet_buf[2] = { 0 }; @@ -1196,17 +1189,14 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, u8 abs_vport_id = 0; int rc, i; - if (p_filter_cmd->opcode == QED_FILTER_ADD) { + if (p_filter_cmd->opcode == QED_FILTER_ADD) rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to, &abs_vport_id); - if (rc) - return rc; - } else { + else rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from, &abs_vport_id); - if (rc) - return rc; - } + if (rc) + return rc; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); @@ -1244,11 +1234,11 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, /* Convert to correct endianity */ for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { + struct vport_update_ramrod_mcast *p_ramrod_bins; u32 *p_bins = (u32 *)bins; - struct vport_update_ramrod_mcast *approx_mcast; - approx_mcast = &p_ramrod->approx_mcast; - approx_mcast->bins[i] = cpu_to_le32(p_bins[i]); + p_ramrod_bins = &p_ramrod->approx_mcast; + p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]); } } @@ -1286,8 +1276,7 @@ static int qed_filter_mcast_cmd(struct qed_dev *cdev, rc = qed_sp_eth_filter_mcast(p_hwfn, opaque_fid, p_filter_cmd, - comp_mode, - p_comp_data); + comp_mode, p_comp_data); } return rc; } @@ -1314,9 +1303,8 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev, rc = qed_sp_eth_filter_ucast(p_hwfn, opaque_fid, p_filter_cmd, - comp_mode, - p_comp_data); - if (rc != 0) + comp_mode, p_comp_data); + if (rc) break; } @@ -1590,8 +1578,7 @@ out: } } -void qed_get_vport_stats(struct qed_dev *cdev, - struct qed_eth_stats *stats) +void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats) { u32 i; @@ -1698,6 +1685,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, qed_vf_get_num_vlan_filters(&cdev->hwfns[0], &info->num_vlan_filters); qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac); + + info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi; } qed_fill_dev_info(cdev, &info->common); @@ -1766,8 +1755,7 @@ static int qed_start_vport(struct qed_dev *cdev, return 0; } -static int qed_stop_vport(struct qed_dev *cdev, - u8 vport_id) +static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id) { int rc, i; @@ -1775,8 +1763,7 @@ static int qed_stop_vport(struct qed_dev *cdev, struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; rc = qed_sp_vport_stop(p_hwfn, - p_hwfn->hw_info.opaque_fid, - vport_id); + p_hwfn->hw_info.opaque_fid, 
vport_id); if (rc) { DP_ERR(cdev, "Failed to stop VPORT\n"); @@ -1801,10 +1788,8 @@ static int qed_update_vport(struct qed_dev *cdev, /* Translate protocol params into sp params */ sp_params.vport_id = params->vport_id; - sp_params.update_vport_active_rx_flg = - params->update_vport_active_flg; - sp_params.update_vport_active_tx_flg = - params->update_vport_active_flg; + sp_params.update_vport_active_rx_flg = params->update_vport_active_flg; + sp_params.update_vport_active_tx_flg = params->update_vport_active_flg; sp_params.vport_active_rx_flg = params->vport_active_flg; sp_params.vport_active_tx_flg = params->vport_active_flg; sp_params.update_tx_switching_flg = params->update_tx_switching_flg; @@ -1817,8 +1802,7 @@ static int qed_update_vport(struct qed_dev *cdev, * We need to re-fix the rss values per engine for CMT. */ if (cdev->num_hwfns > 1 && params->update_rss_flg) { - struct qed_update_vport_rss_params *rss = - &params->rss_params; + struct qed_update_vport_rss_params *rss = &params->rss_params; int k, max = 0; /* Find largest entry, since it's possible RSS needs to @@ -1861,8 +1845,8 @@ static int qed_update_vport(struct qed_dev *cdev, QED_RSS_IND_TABLE_SIZE * sizeof(u16)); memcpy(sp_rss_params.rss_key, params->rss_params.rss_key, QED_RSS_KEY_SIZE * sizeof(u32)); + sp_params.rss_params = &sp_rss_params; } - sp_params.rss_params = &sp_rss_params; for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; @@ -1893,8 +1877,8 @@ static int qed_start_rxq(struct qed_dev *cdev, u16 cqe_pbl_size, void __iomem **pp_prod) { - int rc, hwfn_index; struct qed_hwfn *p_hwfn; + int rc, hwfn_index; hwfn_index = params->rss_id % cdev->num_hwfns; p_hwfn = &cdev->hwfns[hwfn_index]; @@ -1935,8 +1919,7 @@ static int qed_stop_rxq(struct qed_dev *cdev, rc = qed_sp_eth_rx_queue_stop(p_hwfn, params->rx_queue_id / cdev->num_hwfns, - params->eq_completion_only, - false); + params->eq_completion_only, false); if (rc) { DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id); return rc; @@ -2047,11 +2030,11 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev, memset(&accept_flags, 0, sizeof(accept_flags)); - accept_flags.update_rx_mode_config = 1; - accept_flags.update_tx_mode_config = 1; - accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED | - QED_ACCEPT_MCAST_MATCHED | - QED_ACCEPT_BCAST; + accept_flags.update_rx_mode_config = 1; + accept_flags.update_tx_mode_config = 1; + accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED | + QED_ACCEPT_MCAST_MATCHED | + QED_ACCEPT_BCAST; accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED | QED_ACCEPT_MCAST_MATCHED | QED_ACCEPT_BCAST; @@ -2072,9 +2055,8 @@ static int qed_configure_filter_ucast(struct qed_dev *cdev, struct qed_filter_ucast ucast; if (!params->vlan_valid && !params->mac_valid) { - DP_NOTICE( - cdev, - "Tried configuring a unicast filter, but both MAC and VLAN are not set\n"); + DP_NOTICE(cdev, + "Tried configuring a unicast filter, but both MAC and VLAN are not set\n"); return -EINVAL; } @@ -2135,8 +2117,7 @@ static int qed_configure_filter_mcast(struct qed_dev *cdev, for (i = 0; i < mcast.num_mc_addrs; i++) ether_addr_copy(mcast.mac[i], params->mac[i]); - return qed_filter_mcast_cmd(cdev, &mcast, - QED_SPQ_MODE_CB, NULL); + return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL); } static int qed_configure_filter(struct qed_dev *cdev, @@ -2153,15 +2134,13 @@ static int qed_configure_filter(struct qed_dev *cdev, accept_flags = params->filter.accept_flags; return qed_configure_filter_rx_mode(cdev, accept_flags);
default: - DP_NOTICE(cdev, "Unknown filter type %d\n", - (int)params->type); + DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type); return -EINVAL; } } static int qed_fp_cqe_completion(struct qed_dev *dev, - u8 rss_id, - struct eth_slow_path_rx_cqe *cqe) + u8 rss_id, struct eth_slow_path_rx_cqe *cqe) { return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns], cqe); diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index 002114543451..e495d62fcc03 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -102,6 +102,8 @@ struct qed_sp_vport_start_params { u16 opaque_fid; u8 vport_id; u16 mtu; + bool check_mac; + bool check_ethtype; }; int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, @@ -213,6 +215,8 @@ qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_data); +void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats); + int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, struct qed_sp_vport_start_params *p_params); @@ -223,7 +227,8 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, u8 stats_id, u16 bd_max_bytes, dma_addr_t bd_chain_phys_addr, - dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size); + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, bool b_use_zone_a_prod); int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, u16 opaque_fid, diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index c7dc34bfdd0a..32f71ee57191 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -51,8 +51,6 @@ MODULE_FIRMWARE(QED_FW_FILE_NAME); static int __init qed_init(void) { - pr_notice("qed_init called\n"); - pr_info("%s", version); return 0; @@ -106,8 +104,7 @@ static void qed_free_pci(struct qed_dev *cdev) /* Performs PCI initializations as well as initializing PCI-related parameters * in the device structrue. Returns 0 in case of success. */ -static int qed_init_pci(struct qed_dev *cdev, - struct pci_dev *pdev) +static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev) { u8 rev_id; int rc; @@ -263,8 +260,7 @@ static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev) } /* Sets the requested power state */ -static int qed_set_power_state(struct qed_dev *cdev, - pci_power_t state) +static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state) { if (!cdev) return -ENODEV; @@ -366,8 +362,8 @@ static int qed_enable_msix(struct qed_dev *cdev, DP_NOTICE(cdev, "Trying to enable MSI-X with less vectors (%d out of %d)\n", cnt, int_params->in.num_vectors); - rc = pci_enable_msix_exact(cdev->pdev, - int_params->msix_table, cnt); + rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table, + cnt); if (!rc) rc = cnt; } @@ -439,6 +435,11 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode) } out: + if (!rc) + DP_INFO(cdev, "Using %s interrupts\n", + int_params->out.int_mode == QED_INT_MODE_INTA ? + "INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ? 
+ "MSI" : "MSIX"); cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE; return rc; @@ -514,19 +515,18 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance) int qed_slowpath_irq_req(struct qed_hwfn *hwfn) { struct qed_dev *cdev = hwfn->cdev; + u32 int_mode; int rc = 0; u8 id; - if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { + int_mode = cdev->int_params.out.int_mode; + if (int_mode == QED_INT_MODE_MSIX) { id = hwfn->my_id; snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x", id, cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); rc = request_irq(cdev->int_params.msix_table[id].vector, qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc); - if (!rc) - DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP), - "Requested slowpath MSI-X\n"); } else { unsigned long flags = 0; @@ -541,6 +541,13 @@ int qed_slowpath_irq_req(struct qed_hwfn *hwfn) flags, cdev->name, cdev); } + if (rc) + DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc); + else + DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP), + "Requested slowpath %s\n", + (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ"); + return rc; } @@ -974,8 +981,7 @@ static u32 qed_sb_init(struct qed_dev *cdev, } static u32 qed_sb_release(struct qed_dev *cdev, - struct qed_sb_info *sb_info, - u16 sb_id) + struct qed_sb_info *sb_info, u16 sb_id) { struct qed_hwfn *p_hwfn; int hwfn_index; @@ -1025,20 +1031,23 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) link_params->speed.autoneg = params->autoneg; if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) { link_params->speed.advertised_speeds = 0; - if ((params->adv_speeds & SUPPORTED_1000baseT_Half) || - (params->adv_speeds & SUPPORTED_1000baseT_Full)) + if ((params->adv_speeds & QED_LM_1000baseT_Half_BIT) || + (params->adv_speeds & QED_LM_1000baseT_Full_BIT)) + link_params->speed.advertised_speeds |= + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + if (params->adv_speeds & QED_LM_10000baseKR_Full_BIT) link_params->speed.advertised_speeds |= - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; - if (params->adv_speeds & SUPPORTED_10000baseKR_Full) + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + if (params->adv_speeds & QED_LM_25000baseKR_Full_BIT) link_params->speed.advertised_speeds |= - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; - if (params->adv_speeds & SUPPORTED_40000baseLR4_Full) + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; + if (params->adv_speeds & QED_LM_40000baseLR4_Full_BIT) link_params->speed.advertised_speeds |= - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; - if (params->adv_speeds & 0) + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; + if (params->adv_speeds & QED_LM_50000baseKR2_Full_BIT) link_params->speed.advertised_speeds |= - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; - if (params->adv_speeds & 0) + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; + if (params->adv_speeds & QED_LM_100000baseKR4_Full_BIT) link_params->speed.advertised_speeds |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G; } @@ -1168,50 +1177,56 @@ static void qed_fill_link(struct qed_hwfn *hwfn, if_link->link_up = true; /* TODO - at the moment assume supported and advertised speed equal */ - if_link->supported_caps = SUPPORTED_FIBRE; + if_link->supported_caps = QED_LM_FIBRE_BIT; if (params.speed.autoneg) - if_link->supported_caps |= SUPPORTED_Autoneg; + if_link->supported_caps |= QED_LM_Autoneg_BIT; if (params.pause.autoneg || (params.pause.forced_rx && params.pause.forced_tx)) - if_link->supported_caps |= SUPPORTED_Asym_Pause; + 
if_link->supported_caps |= QED_LM_Asym_Pause_BIT; if (params.pause.autoneg || params.pause.forced_rx || params.pause.forced_tx) - if_link->supported_caps |= SUPPORTED_Pause; + if_link->supported_caps |= QED_LM_Pause_BIT; if_link->advertised_caps = if_link->supported_caps; if (params.speed.advertised_speeds & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) - if_link->advertised_caps |= SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full; + if_link->advertised_caps |= QED_LM_1000baseT_Half_BIT | + QED_LM_1000baseT_Full_BIT; if (params.speed.advertised_speeds & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) - if_link->advertised_caps |= SUPPORTED_10000baseKR_Full; + if_link->advertised_caps |= QED_LM_10000baseKR_Full_BIT; if (params.speed.advertised_speeds & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) - if_link->advertised_caps |= SUPPORTED_40000baseLR4_Full; + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) + if_link->advertised_caps |= QED_LM_25000baseKR_Full_BIT; if (params.speed.advertised_speeds & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) - if_link->advertised_caps |= 0; + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) + if_link->advertised_caps |= QED_LM_40000baseLR4_Full_BIT; + if (params.speed.advertised_speeds & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) + if_link->advertised_caps |= QED_LM_50000baseKR2_Full_BIT; if (params.speed.advertised_speeds & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) - if_link->advertised_caps |= 0; + if_link->advertised_caps |= QED_LM_100000baseKR4_Full_BIT; if (link_caps.speed_capabilities & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) - if_link->supported_caps |= SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full; + if_link->supported_caps |= QED_LM_1000baseT_Half_BIT | + QED_LM_1000baseT_Full_BIT; if (link_caps.speed_capabilities & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) - if_link->supported_caps |= SUPPORTED_10000baseKR_Full; + if_link->supported_caps |= QED_LM_10000baseKR_Full_BIT; + if (link_caps.speed_capabilities & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) + if_link->supported_caps |= QED_LM_25000baseKR_Full_BIT; if (link_caps.speed_capabilities & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) - if_link->supported_caps |= SUPPORTED_40000baseLR4_Full; + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) + if_link->supported_caps |= QED_LM_40000baseLR4_Full_BIT; if (link_caps.speed_capabilities & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) - if_link->supported_caps |= 0; + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) + if_link->supported_caps |= QED_LM_50000baseKR2_Full_BIT; if (link_caps.speed_capabilities & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) - if_link->supported_caps |= 0; + if_link->supported_caps |= QED_LM_100000baseKR4_Full_BIT; if (link.link_up) if_link->speed = link.speed; @@ -1231,33 +1246,29 @@ static void qed_fill_link(struct qed_hwfn *hwfn, if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE; /* Link partner capabilities */ - if (link.partner_adv_speed & - QED_LINK_PARTNER_SPEED_1G_HD) - if_link->lp_caps |= SUPPORTED_1000baseT_Half; - if (link.partner_adv_speed & - QED_LINK_PARTNER_SPEED_1G_FD) - if_link->lp_caps |= SUPPORTED_1000baseT_Full; - if (link.partner_adv_speed & - QED_LINK_PARTNER_SPEED_10G) - if_link->lp_caps |= SUPPORTED_10000baseKR_Full; - if (link.partner_adv_speed & - QED_LINK_PARTNER_SPEED_40G) - if_link->lp_caps |= SUPPORTED_40000baseLR4_Full; - if (link.partner_adv_speed & - QED_LINK_PARTNER_SPEED_50G) - if_link->lp_caps |= 0; - if (link.partner_adv_speed & - QED_LINK_PARTNER_SPEED_100G) - 
if_link->lp_caps |= 0; + if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_HD) + if_link->lp_caps |= QED_LM_1000baseT_Half_BIT; + if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD) + if_link->lp_caps |= QED_LM_1000baseT_Full_BIT; + if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G) + if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT; + if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G) + if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT; + if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G) + if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT; + if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G) + if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT; + if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G) + if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT; if (link.an_complete) - if_link->lp_caps |= SUPPORTED_Autoneg; + if_link->lp_caps |= QED_LM_Autoneg_BIT; if (link.partner_adv_pause) - if_link->lp_caps |= SUPPORTED_Pause; + if_link->lp_caps |= QED_LM_Pause_BIT; if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE || link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE) - if_link->lp_caps |= SUPPORTED_Asym_Pause; + if_link->lp_caps |= QED_LM_Asym_Pause_BIT; } static void qed_get_current_link(struct qed_dev *cdev, @@ -1391,3 +1402,24 @@ const struct qed_common_ops qed_common_ops_pass = { .set_coalesce = &qed_set_coalesce, .set_led = &qed_set_led, }; + +void qed_get_protocol_stats(struct qed_dev *cdev, + enum qed_mcp_protocol_type type, + union qed_mcp_protocol_stats *stats) +{ + struct qed_eth_stats eth_stats; + + memset(stats, 0, sizeof(*stats)); + + switch (type) { + case QED_MCP_LAN_STATS: + qed_get_vport_stats(cdev, &eth_stats); + stats->lan_stats.ucast_rx_pkts = eth_stats.rx_ucast_pkts; + stats->lan_stats.ucast_tx_pkts = eth_stats.tx_ucast_pkts; + stats->lan_stats.fcs_err = -1; + break; + default: + DP_ERR(cdev, "Invalid protocol type = %d\n", type); + return; + } +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index a240f26344a4..4c212667b482 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -54,8 +54,7 @@ bool qed_mcp_is_init(struct qed_hwfn *p_hwfn) return true; } -void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, PUBLIC_PORT); @@ -68,8 +67,7 @@ void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn)); } -void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length); u32 tmp, i; @@ -99,8 +97,7 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn) return 0; } -static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_info *p_info = p_hwfn->mcp_info; u32 drv_mb_offsize, mfw_mb_offsize; @@ -143,8 +140,7 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, return 0; } -int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_info *p_info; u32 size; @@ -165,9 +161,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) *
sizeof(u32); p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL); - p_info->mfw_mb_shadow = - kzalloc(sizeof(u32) * MFW_DRV_MSG_MAX_DWORDS( - p_info->mfw_mb_length), GFP_KERNEL); + p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL); if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr) goto err; @@ -189,8 +183,7 @@ err: * access is achieved by setting a blocking flag, which will fail other * competing contexts to send their mailboxes. */ -static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, - u32 cmd) +static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, u32 cmd) { spin_lock_bh(&p_hwfn->mcp_info->lock); @@ -221,15 +214,13 @@ static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, return 0; } -static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, - u32 cmd) +static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, u32 cmd) { if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ) spin_unlock_bh(&p_hwfn->mcp_info->lock); } -int qed_mcp_reset(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 seq = ++p_hwfn->mcp_info->drv_mb_seq; u8 delay = CHIP_MCP_RESP_ITER_US; @@ -326,7 +317,8 @@ static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn, *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param); } else { /* FW BUG! */ - DP_ERR(p_hwfn, "MFW failed to respond!\n"); + DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n", + cmd, param); *o_mcp_resp = 0; rc = -EAGAIN; } @@ -342,7 +334,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, /* MCP not initialized */ if (!qed_mcp_is_init(p_hwfn)) { - DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); + DP_NOTICE(p_hwfn, "MFW is not initialized!\n"); return -EBUSY; } @@ -399,8 +391,7 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn, } int qed_mcp_load_req(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *p_load_code) + struct qed_ptt *p_ptt, u32 *p_load_code) { struct qed_dev *cdev = p_hwfn->cdev; struct qed_mcp_mb_params mb_params; @@ -527,8 +518,7 @@ static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn, "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n", transceiver_state, (u32)(p_hwfn->mcp_info->port_addr + - offsetof(struct public_port, - transceiver_data))); + offsetof(struct public_port, transceiver_data))); transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE); @@ -540,8 +530,7 @@ static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn, } static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - bool b_reset) + struct qed_ptt *p_ptt, bool b_reset) { struct qed_mcp_link_state *p_link; u8 max_bw, min_bw; @@ -557,8 +546,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, "Received link update [0x%08x] from mfw [Addr 0x%x]\n", status, (u32)(p_hwfn->mcp_info->port_addr + - offsetof(struct public_port, - link_status))); + offsetof(struct public_port, link_status))); } else { DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Resetting link indications\n"); @@ -635,6 +623,9 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ? QED_LINK_PARTNER_SPEED_20G : 0; p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ? + QED_LINK_PARTNER_SPEED_25G : 0; + p_link->partner_adv_speed |= (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ? 
QED_LINK_PARTNER_SPEED_40G : 0; p_link->partner_adv_speed |= @@ -722,6 +713,48 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) return 0; } +static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum MFW_DRV_MSG_TYPE type) +{ + enum qed_mcp_protocol_type stats_type; + union qed_mcp_protocol_stats stats; + struct qed_mcp_mb_params mb_params; + union drv_union_data union_data; + u32 hsi_param; + + switch (type) { + case MFW_DRV_MSG_GET_LAN_STATS: + stats_type = QED_MCP_LAN_STATS; + hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN; + break; + case MFW_DRV_MSG_GET_FCOE_STATS: + stats_type = QED_MCP_FCOE_STATS; + hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE; + break; + case MFW_DRV_MSG_GET_ISCSI_STATS: + stats_type = QED_MCP_ISCSI_STATS; + hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI; + break; + case MFW_DRV_MSG_GET_RDMA_STATS: + stats_type = QED_MCP_RDMA_STATS; + hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA; + break; + default: + DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type); + return; + } + + qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats); + + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_GET_STATS; + mb_params.param = hsi_param; + memcpy(&union_data, &stats, sizeof(stats)); + mb_params.p_data_src = &union_data; + qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); +} + static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn, struct public_func *p_shmem_info) { @@ -752,8 +785,7 @@ static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn, static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - struct public_func *p_data, - int pfid) + struct public_func *p_data, int pfid) { u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, PUBLIC_FUNC); @@ -763,51 +795,20 @@ static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn, memset(p_data, 0, sizeof(*p_data)); - size = min_t(u32, sizeof(*p_data), - QED_SECTION_SIZE(mfw_path_offsize)); + size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize)); for (i = 0; i < size / sizeof(u32); i++) ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt, func_addr + (i << 2)); return size; } -int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u8 *p_pf) -{ - struct public_func shmem_info; - int i; - - /* Find first Ethernet interface in port */ - for (i = 0; i < NUM_OF_ENG_PFS(p_hwfn->cdev); - i += p_hwfn->cdev->num_ports_in_engines) { - qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, - MCP_PF_ID_BY_REL(p_hwfn, i)); - - if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE) - continue; - - if ((shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK) == - FUNC_MF_CFG_PROTOCOL_ETHERNET) { - *p_pf = (u8)i; - return 0; - } - } - - DP_NOTICE(p_hwfn, - "Failed to find on port an ethernet interface in MF_SI mode\n"); - - return -EINVAL; -} - -static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_function_info *p_info; struct public_func shmem_info; u32 resp = 0, param = 0; - qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, - MCP_PF_ID(p_hwfn)); + qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); qed_read_pf_bandwidth(p_hwfn, &shmem_info); @@ -867,6 +868,12 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: qed_mcp_handle_transceiver_change(p_hwfn, p_ptt); break; + case MFW_DRV_MSG_GET_LAN_STATS: + case MFW_DRV_MSG_GET_FCOE_STATS: + case 
MFW_DRV_MSG_GET_ISCSI_STATS: + case MFW_DRV_MSG_GET_RDMA_STATS: + qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i); + break; case MFW_DRV_MSG_BW_UPDATE: qed_mcp_update_bw(p_hwfn, p_ptt); break; @@ -940,8 +947,7 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, return 0; } -int qed_mcp_get_media_type(struct qed_dev *cdev, - u32 *p_media_type) +int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type) { struct qed_hwfn *p_hwfn = &cdev->hwfns[0]; struct qed_ptt *p_ptt; @@ -950,7 +956,7 @@ int qed_mcp_get_media_type(struct qed_dev *cdev, return -EINVAL; if (!qed_mcp_is_init(p_hwfn)) { - DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); + DP_NOTICE(p_hwfn, "MFW is not initialized!\n"); return -EBUSY; } @@ -1003,15 +1009,13 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, struct qed_mcp_function_info *info; struct public_func shmem_info; - qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, - MCP_PF_ID(p_hwfn)); + qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); info = &p_hwfn->mcp_info->func_info; info->pause_on_host = (shmem_info.config & FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0; - if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, - &info->protocol)) { + if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) { DP_ERR(p_hwfn, "Unknown personality %08x\n", (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK)); return -EINVAL; @@ -1072,15 +1076,13 @@ struct qed_mcp_link_capabilities return &p_hwfn->mcp_info->link_capabilities; } -int qed_mcp_drain(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 resp = 0, param = 0; int rc; rc = qed_mcp_cmd(p_hwfn, p_ptt, - DRV_MSG_CODE_NIG_DRAIN, 1000, - &resp, &param); + DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param); /* Wait for the drain to complete before returning */ msleep(1020); @@ -1089,8 +1091,7 @@ int qed_mcp_drain(struct qed_hwfn *p_hwfn, } int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *p_flash_size) + struct qed_ptt *p_ptt, u32 *p_flash_size) { u32 flash_size; @@ -1168,8 +1169,8 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, return rc; } -int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - enum qed_led_mode mode) +int qed_mcp_set_led(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, enum qed_led_mode mode) { u32 resp = 0, param = 0, drv_mb_param; int rc; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 7f319aa1b229..c6372fa574b7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -60,9 +60,10 @@ struct qed_mcp_link_state { #define QED_LINK_PARTNER_SPEED_1G_FD BIT(1) #define QED_LINK_PARTNER_SPEED_10G BIT(2) #define QED_LINK_PARTNER_SPEED_20G BIT(3) -#define QED_LINK_PARTNER_SPEED_40G BIT(4) -#define QED_LINK_PARTNER_SPEED_50G BIT(5) -#define QED_LINK_PARTNER_SPEED_100G BIT(6) +#define QED_LINK_PARTNER_SPEED_25G BIT(4) +#define QED_LINK_PARTNER_SPEED_40G BIT(5) +#define QED_LINK_PARTNER_SPEED_50G BIT(6) +#define QED_LINK_PARTNER_SPEED_100G BIT(7) u32 partner_adv_speed; bool partner_tx_flow_ctrl_en; @@ -105,6 +106,47 @@ struct qed_mcp_drv_version { u8 name[MCP_DRV_VER_STR_SIZE - 4]; }; +struct qed_mcp_lan_stats { + u64 ucast_rx_pkts; + u64 ucast_tx_pkts; + u32 fcs_err; +}; + +struct qed_mcp_fcoe_stats { + u64 rx_pkts; + u64 tx_pkts; + u32 fcs_err; + u32 login_failure; +}; + +struct qed_mcp_iscsi_stats { + u64 rx_pdus; + u64 tx_pdus; + u64 rx_bytes; + u64 tx_bytes; +}; +
+struct qed_mcp_rdma_stats { + u64 rx_pkts; + u64 tx_pkts; + u64 rx_bytes; + u64 tx_byts; +}; + +enum qed_mcp_protocol_type { + QED_MCP_LAN_STATS, + QED_MCP_FCOE_STATS, + QED_MCP_ISCSI_STATS, + QED_MCP_RDMA_STATS +}; + +union qed_mcp_protocol_stats { + struct qed_mcp_lan_stats lan_stats; + struct qed_mcp_fcoe_stats fcoe_stats; + struct qed_mcp_iscsi_stats iscsi_stats; + struct qed_mcp_rdma_stats rdma_stats; +}; + /** * @brief - returns the link params of the hw function * @@ -458,6 +500,4 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn, struct qed_mcp_link_state *p_link, u8 min_bw); -int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u8 *p_pf); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index f6b86ca1ff79..b49d47f3de71 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -116,8 +116,14 @@ 0x1009c4UL #define QM_REG_PF_EN \ 0x2f2ea4UL +#define TCFC_REG_WEAK_ENABLE_VF \ + 0x2d0704UL #define TCFC_REG_STRONG_ENABLE_PF \ 0x2d0708UL +#define TCFC_REG_STRONG_ENABLE_VF \ + 0x2d070cUL +#define CCFC_REG_WEAK_ENABLE_VF \ + 0x2e0704UL #define CCFC_REG_STRONG_ENABLE_PF \ 0x2e0708UL #define PGLUE_B_REG_PGL_ADDR_88_F0 \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index a52f3fc051f5..2888eb0628f8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -25,9 +25,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent, - u8 cmd, - u8 protocol, - struct qed_sp_init_data *p_data) + u8 cmd, u8 protocol, struct qed_sp_init_data *p_data) { u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid; struct qed_spq_entry *p_ent = NULL; @@ -38,7 +36,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, rc = qed_spq_get_entry(p_hwfn, pp_ent); - if (rc != 0) + if (rc) return rc; p_ent = *pp_ent; @@ -321,8 +319,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, rc = qed_sp_init_request(p_hwfn, &p_ent, COMMON_RAMROD_PF_START, - PROTOCOLID_COMMON, - &init_data); + PROTOCOLID_COMMON, &init_data); if (rc) return rc; @@ -356,8 +353,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr, p_hwfn->p_consq->chain.pbl.p_phys_table); - qed_tunn_set_pf_start_params(p_hwfn, p_tunn, - &p_ramrod->tunnel_config); + qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config); if (IS_MF_SI(p_hwfn)) p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch; @@ -389,8 +385,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n", - sb, sb_index, - p_ramrod->outer_tag); + sb, sb_index, p_ramrod->outer_tag); rc = qed_spq_post(p_hwfn, p_ent, NULL); diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index d73456eab1d7..0265a32c8681 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -41,8 +41,7 @@ ***************************************************************************/ static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn, void *cookie, - union event_ring_data *data, - u8 fw_return_code) + union event_ring_data *data, u8 fw_return_code) { struct qed_spq_comp_done *comp_done; @@ -109,9 +108,8 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn, 
/*************************************************************************** * SPQ entries inner API ***************************************************************************/ -static int -qed_spq_fill_entry(struct qed_hwfn *p_hwfn, - struct qed_spq_entry *p_ent) +static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent) { p_ent->flags = 0; @@ -189,8 +187,7 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn, } static int qed_spq_hw_post(struct qed_hwfn *p_hwfn, - struct qed_spq *p_spq, - struct qed_spq_entry *p_ent) + struct qed_spq *p_spq, struct qed_spq_entry *p_ent) { struct qed_chain *p_chain = &p_hwfn->p_spq->chain; u16 echo = qed_chain_get_prod_idx(p_chain); @@ -255,8 +252,7 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn, /*************************************************************************** * EQ API ***************************************************************************/ -void qed_eq_prod_update(struct qed_hwfn *p_hwfn, - u16 prod) +void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod) { u32 addr = GTT_BAR0_MAP_REG_USDM_RAM + USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id); @@ -267,9 +263,7 @@ void qed_eq_prod_update(struct qed_hwfn *p_hwfn, mmiowb(); } -int qed_eq_completion(struct qed_hwfn *p_hwfn, - void *cookie) - +int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie) { struct qed_eq *p_eq = cookie; struct qed_chain *p_chain = &p_eq->chain; @@ -323,8 +317,7 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, return rc; } -struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, - u16 num_elem) +struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem) { struct qed_eq *p_eq; @@ -348,11 +341,8 @@ struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, } /* register EQ completion on the SP SB */ - qed_int_register_cb(p_hwfn, - qed_eq_completion, - p_eq, - &p_eq->eq_sb_index, - &p_eq->p_fw_cons); + qed_int_register_cb(p_hwfn, qed_eq_completion, + p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons); return p_eq; @@ -361,14 +351,12 @@ eq_allocate_fail: return NULL; } -void qed_eq_setup(struct qed_hwfn *p_hwfn, - struct qed_eq *p_eq) +void qed_eq_setup(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq) { qed_chain_reset(&p_eq->chain); } -void qed_eq_free(struct qed_hwfn *p_hwfn, - struct qed_eq *p_eq) +void qed_eq_free(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq) { if (!p_eq) return; @@ -379,10 +367,9 @@ void qed_eq_free(struct qed_hwfn *p_hwfn, /*************************************************************************** * CQE API - manipulate EQ functionality ***************************************************************************/ -static int qed_cqe_completion( - struct qed_hwfn *p_hwfn, - struct eth_slow_path_rx_cqe *cqe, - enum protocol_type protocol) +static int qed_cqe_completion(struct qed_hwfn *p_hwfn, + struct eth_slow_path_rx_cqe *cqe, + enum protocol_type protocol) { if (IS_VF(p_hwfn->cdev)) return 0; @@ -463,8 +450,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn) u32 capacity; /* SPQ struct */ - p_spq = - kzalloc(sizeof(struct qed_spq), GFP_KERNEL); + p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL); if (!p_spq) { DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n"); return -ENOMEM; @@ -525,9 +511,7 @@ void qed_spq_free(struct qed_hwfn *p_hwfn) kfree(p_spq); } -int -qed_spq_get_entry(struct qed_hwfn *p_hwfn, - struct qed_spq_entry **pp_ent) +int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent) { struct qed_spq *p_spq = p_hwfn->p_spq; struct qed_spq_entry *p_ent = NULL; @@ -538,14 +522,15 
@@ qed_spq_get_entry(struct qed_hwfn *p_hwfn, if (list_empty(&p_spq->free_pool)) { p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC); if (!p_ent) { + DP_NOTICE(p_hwfn, + "Failed to allocate an SPQ entry for a pending ramrod\n"); rc = -ENOMEM; goto out_unlock; } p_ent->queue = &p_spq->unlimited_pending; } else { p_ent = list_first_entry(&p_spq->free_pool, - struct qed_spq_entry, - list); + struct qed_spq_entry, list); list_del(&p_ent->list); p_ent->queue = &p_spq->pending; } @@ -564,8 +549,7 @@ static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn, list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool); } -void qed_spq_return_entry(struct qed_hwfn *p_hwfn, - struct qed_spq_entry *p_ent) +void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent) { spin_lock_bh(&p_hwfn->p_spq->lock); __qed_spq_return_entry(p_hwfn, p_ent); @@ -586,10 +570,9 @@ void qed_spq_return_entry(struct qed_hwfn *p_hwfn, * * @return int */ -static int -qed_spq_add_entry(struct qed_hwfn *p_hwfn, - struct qed_spq_entry *p_ent, - enum spq_priority priority) +static int qed_spq_add_entry(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent, + enum spq_priority priority) { struct qed_spq *p_spq = p_hwfn->p_spq; @@ -604,8 +587,7 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_en2; p_en2 = list_first_entry(&p_spq->free_pool, - struct qed_spq_entry, - list); + struct qed_spq_entry, list); list_del(&p_en2->list); /* Copy the ring element physical pointer to the new @@ -655,8 +637,7 @@ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn) * Posting new Ramrods ***************************************************************************/ static int qed_spq_post_list(struct qed_hwfn *p_hwfn, - struct list_head *head, - u32 keep_reserve) + struct list_head *head, u32 keep_reserve) { struct qed_spq *p_spq = p_hwfn->p_spq; int rc; @@ -690,8 +671,7 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn) break; p_ent = list_first_entry(&p_spq->unlimited_pending, - struct qed_spq_entry, - list); + struct qed_spq_entry, list); if (!p_ent) return -EINVAL; @@ -705,8 +685,7 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn) } int qed_spq_post(struct qed_hwfn *p_hwfn, - struct qed_spq_entry *p_ent, - u8 *fw_return_code) + struct qed_spq_entry *p_ent, u8 *fw_return_code) { int rc = 0; struct qed_spq *p_spq = p_hwfn ? 
p_hwfn->p_spq : NULL; @@ -803,8 +782,7 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, return -EINVAL; spin_lock_bh(&p_spq->lock); - list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, - list) { + list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) { if (p_ent->elem.hdr.echo == echo) { u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE; @@ -846,15 +824,22 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, if (!found) { DP_NOTICE(p_hwfn, - "Failed to find an entry this EQE completes\n"); + "Failed to find an entry this EQE [echo %04x] completes\n", + le16_to_cpu(echo)); return -EEXIST; } - DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p)\n", + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, + "Complete EQE [echo %04x]: func %p cookie %p)\n", + le16_to_cpu(echo), p_ent->comp_cb.function, p_ent->comp_cb.cookie); if (found->comp_cb.function) found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data, fw_return_code); + else + DP_VERBOSE(p_hwfn, + QED_MSG_SPQ, + "Got a completion without a callback function\n"); if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) || (found->queue == &p_spq->unlimited_pending)) @@ -901,14 +886,12 @@ consq_allocate_fail: return NULL; } -void qed_consq_setup(struct qed_hwfn *p_hwfn, - struct qed_consq *p_consq) +void qed_consq_setup(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq) { qed_chain_reset(&p_consq->chain); } -void qed_consq_free(struct qed_hwfn *p_hwfn, - struct qed_consq *p_consq) +void qed_consq_free(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq) { if (!p_consq) return; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 15399da268d9..cb68674640f9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -60,7 +60,8 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) } fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor; - if (fp_minor > ETH_HSI_VER_MINOR) { + if (fp_minor > ETH_HSI_VER_MINOR && + fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n", @@ -699,7 +700,7 @@ static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn, &qzone_id); reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4; - val = enable ? (vf->abs_vf_id | (1 << 8)) : 0; + val = enable ? (vf->abs_vf_id | BIT(8)) : 0; qed_wr(p_hwfn, p_ptt, reg_addr, val); } } @@ -1090,13 +1091,13 @@ static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn, /* Prepare response for all extended tlvs if they are found by PF */ for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) { - if (!(tlvs_mask & (1 << i))) + if (!(tlvs_mask & BIT(i))) continue; resp = qed_add_tlv(p_hwfn, &p_mbx->offset, qed_iov_vport_to_tlv(p_hwfn, i), size); - if (tlvs_accepted & (1 << i)) + if (tlvs_accepted & BIT(i)) resp->hdr.status = status; else resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED; @@ -1241,6 +1242,16 @@ static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn, p_req->num_vlan_filters, p_resp->num_vlan_filters, p_req->num_mc_filters, p_resp->num_mc_filters); + + /* Some legacy OSes are incapable of correctly handling this + * failure. 
+ */ + if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor == + ETH_HSI_VER_NO_PKT_LEN_TUNN) && + (p_vf->acquire.vfdev_info.os_type == + VFPF_ACQUIRE_OS_WINDOWS)) + return PFVF_STATUS_SUCCESS; + return PFVF_STATUS_NO_RESOURCE; } @@ -1280,22 +1291,42 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, memset(resp, 0, sizeof(*resp)); + /* Write the PF version so that VF would know which version + * is supported - might be later overriden. This guarantees that + * VF could recognize legacy PF based on lack of versions in reply. + */ + pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR; + pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR; + + if (vf->state != VF_FREE && vf->state != VF_STOPPED) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d] sent ACQUIRE but is already in state %d - fail request\n", + vf->abs_vf_id, vf->state); + goto out; + } + /* Validate FW compatibility */ if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) { - DP_INFO(p_hwfn, - "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n", - vf->abs_vf_id, - req->vfdev_info.eth_fp_hsi_major, - req->vfdev_info.eth_fp_hsi_minor, - ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR); - - /* Write the PF version so that VF would know which version - * is supported. - */ - pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR; - pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR; + if (req->vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_PRE_FP_HSI) { + struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info; - goto out; + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d] is pre-fastpath HSI\n", + vf->abs_vf_id); + p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR; + p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN; + } else { + DP_INFO(p_hwfn, + "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n", + vf->abs_vf_id, + req->vfdev_info.eth_fp_hsi_major, + req->vfdev_info.eth_fp_hsi_minor, + ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR); + + goto out; + } } /* On 100g PFs, prevent old VFs from loading */ @@ -1334,8 +1365,11 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, pfdev_info->fw_minor = FW_MINOR_VERSION; pfdev_info->fw_rev = FW_REVISION_VERSION; pfdev_info->fw_eng = FW_ENGINEERING_VERSION; - pfdev_info->minor_fp_hsi = min_t(u8, - ETH_HSI_VER_MINOR, + + /* Incorrect when legacy, but doesn't matter as legacy isn't reading + * this field. 
+ */ + pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR, req->vfdev_info.eth_fp_hsi_minor); pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX; qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL); @@ -1438,14 +1472,11 @@ static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn, filter.type = QED_FILTER_VLAN; filter.vlan = p_vf->shadow_config.vlans[i].vid; - DP_VERBOSE(p_hwfn, - QED_MSG_IOV, + DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Reconfiguring VLAN [0x%04x] for VF [%04x]\n", filter.vlan, p_vf->relative_vf_id); - rc = qed_sp_eth_filter_ucast(p_hwfn, - p_vf->opaque_fid, - &filter, - QED_SPQ_MODE_CB, NULL); + rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, + &filter, QED_SPQ_MODE_CB, NULL); if (rc) { DP_NOTICE(p_hwfn, "Failed to configure VLAN [%04x] to VF [%04x]\n", @@ -1463,7 +1494,7 @@ qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn, { int rc = 0; - if ((events & (1 << VLAN_ADDR_FORCED)) && + if ((events & BIT(VLAN_ADDR_FORCED)) && !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf); @@ -1479,7 +1510,7 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn, if (!p_vf->vport_instance) return -EINVAL; - if (events & (1 << MAC_ADDR_FORCED)) { + if (events & BIT(MAC_ADDR_FORCED)) { /* Since there's no way [currently] of removing the MAC, * we can always assume this means we need to force it. */ @@ -1502,7 +1533,7 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn, p_vf->configured_features |= 1 << MAC_ADDR_FORCED; } - if (events & (1 << VLAN_ADDR_FORCED)) { + if (events & BIT(VLAN_ADDR_FORCED)) { struct qed_sp_vport_update_params vport_update; u8 removal; int i; @@ -1572,7 +1603,7 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn, if (filter.vlan) p_vf->configured_features |= 1 << VLAN_ADDR_FORCED; else - p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED); + p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED); } /* If forced features are terminated, we need to configure the shadow @@ -1619,8 +1650,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, qed_int_cau_conf_sb(p_hwfn, p_ptt, start->sb_addr[sb_id], - vf->igu_sbs[sb_id], - vf->abs_vf_id, 1); + vf->igu_sbs[sb_id], vf->abs_vf_id, 1); } qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf); @@ -1632,7 +1662,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, * vfs that would still be fine, since they passed '0' as padding]. 
*/ p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap; - if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) { + if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) { u8 vf_req = start->only_untagged; vf_info->bulletin.p_virt->default_only_untagged = vf_req; @@ -1650,9 +1680,10 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, params.vport_id = vf->vport_id; params.max_buffers_per_cqe = start->max_buffers_per_cqe; params.mtu = vf->mtu; + params.check_mac = true; rc = qed_sp_eth_vport_start(p_hwfn, ¶ms); - if (rc != 0) { + if (rc) { DP_ERR(p_hwfn, "qed_iov_vf_mbx_start_vport returned error %d\n", rc); status = PFVF_STATUS_FAILURE; @@ -1679,7 +1710,7 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn, vf->spoof_chk = false; rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id); - if (rc != 0) { + if (rc) { DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n", rc); status = PFVF_STATUS_FAILURE; @@ -1695,21 +1726,32 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn, static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - struct qed_vf_info *vf, u8 status) + struct qed_vf_info *vf, + u8 status, bool b_legacy) { struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; struct pfvf_start_queue_resp_tlv *p_tlv; struct vfpf_start_rxq_tlv *req; + u16 length; mbx->offset = (u8 *)mbx->reply_virt; + /* Taking a bigger struct instead of adding a TLV to list was a + * mistake, but one which we're now stuck with, as some older + * clients assume the size of the previous response. + */ + if (!b_legacy) + length = sizeof(*p_tlv); + else + length = sizeof(struct pfvf_def_resp_tlv); + p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ, - sizeof(*p_tlv)); + length); qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); /* Update the TLV with the response */ - if (status == PFVF_STATUS_SUCCESS) { + if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) { req = &mbx->req_virt->start_rxq; p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B + offsetof(struct mstorm_vf_zone, @@ -1717,7 +1759,7 @@ static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn, sizeof(struct eth_rx_prod_data) * req->rx_qid; } - qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status); + qed_iov_send_response(p_hwfn, p_ptt, vf, length, status); } static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, @@ -1728,6 +1770,7 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; u8 status = PFVF_STATUS_NO_RESOURCE; struct vfpf_start_rxq_tlv *req; + bool b_legacy_vf = false; int rc; memset(¶ms, 0, sizeof(params)); @@ -1743,13 +1786,27 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, params.sb = req->hw_sb; params.sb_idx = req->sb_index; + /* Legacy VFs have their Producers in a different location, which they + * calculate on their own and clean the producer prior to this. 
+ */ + if (vf->acquire.vfdev_info.eth_fp_hsi_minor == + ETH_HSI_VER_NO_PKT_LEN_TUNN) { + b_legacy_vf = true; + } else { + REG_WR(p_hwfn, + GTT_BAR0_MAP_REG_MSDM_RAM + + MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid), + 0); + } + rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid, vf->vf_queues[req->rx_qid].fw_cid, ¶ms, vf->abs_vf_id + 0x10, req->bd_max_bytes, req->rxq_addr, - req->cqe_pbl_addr, req->cqe_pbl_size); + req->cqe_pbl_addr, req->cqe_pbl_size, + b_legacy_vf); if (rc) { status = PFVF_STATUS_FAILURE; @@ -1760,7 +1817,7 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, } out: - qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status); + qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, b_legacy_vf); } static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn, @@ -1769,23 +1826,38 @@ static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn, { struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; struct pfvf_start_queue_resp_tlv *p_tlv; + bool b_legacy = false; + u16 length; mbx->offset = (u8 *)mbx->reply_virt; + /* Taking a bigger struct instead of adding a TLV to list was a + * mistake, but one which we're now stuck with, as some older + * clients assume the size of the previous response. + */ + if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor == + ETH_HSI_VER_NO_PKT_LEN_TUNN) + b_legacy = true; + + if (!b_legacy) + length = sizeof(*p_tlv); + else + length = sizeof(struct pfvf_def_resp_tlv); + p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ, - sizeof(*p_tlv)); + length); qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); /* Update the TLV with the response */ - if (status == PFVF_STATUS_SUCCESS) { + if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) { u16 qid = mbx->req_virt->start_txq.tx_qid; p_tlv->offset = qed_db_addr(p_vf->vf_queues[qid].fw_cid, DQ_DEMS_LEGACY); } - qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_tlv), status); + qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status); } static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, @@ -2045,7 +2117,7 @@ qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn, p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan; /* Ignore the VF request if we're forcing a vlan */ - if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) { + if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) { p_data->update_inner_vlan_removal_flg = 1; p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan; } @@ -2340,7 +2412,7 @@ static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn, /* In forced mode, we're willing to remove entries - but we don't add * new ones. 
*/ - if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)) + if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)) return 0; if (p_params->opcode == QED_FILTER_ADD || @@ -2374,7 +2446,7 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn, int i; /* If we're in forced-mode, we don't allow any change */ - if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) + if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) return 0; /* First remove entries and then add new ones */ @@ -2509,7 +2581,7 @@ static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn, } /* Determine if the unicast filtering is acceptible by PF */ - if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) && + if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) && (params.type == QED_FILTER_VLAN || params.type == QED_FILTER_MAC_VLAN)) { /* Once VLAN is forced or PVID is set, do not allow @@ -2521,7 +2593,7 @@ static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn, goto out; } - if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) && + if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) && (params.type == QED_FILTER_MAC || params.type == QED_FILTER_MAC_VLAN)) { if (!ether_addr_equal(p_bulletin->mac, params.mac) || @@ -2749,7 +2821,7 @@ cleanup: /* Mark VF for ack and clean pending state */ if (p_vf->state == VF_RESET) p_vf->state = VF_STOPPED; - ack_vfs[vfid / 32] |= (1 << (vfid % 32)); + ack_vfs[vfid / 32] |= BIT((vfid % 32)); p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &= ~(1ULL << (rel_vf_id % 64)); p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &= @@ -2805,7 +2877,7 @@ int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs) continue; vfid = p_vf->abs_vf_id; - if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) { + if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) { u64 *p_flr = p_hwfn->pf_iov_info->pending_flr; u16 rel_vf_id = p_vf->relative_vf_id; @@ -3064,8 +3136,7 @@ static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn, vf_info->bulletin.p_virt->valid_bitmap |= feature; /* Forced MAC will disable MAC_ADDR */ - vf_info->bulletin.p_virt->valid_bitmap &= - ~(1 << VFPF_BULLETIN_MAC_ADDR); + vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR); qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); } @@ -3163,7 +3234,7 @@ static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn, if (!p_vf || !p_vf->bulletin.p_virt) return NULL; - if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))) + if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) return NULL; return p_vf->bulletin.p_virt->mac; @@ -3177,7 +3248,7 @@ u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id) if (!p_vf || !p_vf->bulletin.p_virt) return 0; - if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))) + if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))) return 0; return p_vf->bulletin.p_virt->pvid; diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 9b780b31b15c..3c9071de5472 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -46,6 +46,17 @@ static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length) return p_tlv; } +static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status) +{ + union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply; + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF request status = 0x%x, PF 
reply status = 0x%x\n", + req_status, resp->default_resp.hdr.status); + + mutex_unlock(&(p_hwfn->vf_iov_info->mutex)); +} + static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size) { union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request; @@ -103,16 +114,12 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size) "VF <-- PF Timeout [Type %d]\n", p_req->first_tlv.tl.type); rc = -EBUSY; - goto exit; } else { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "PF response: %d [Type %d]\n", *done, p_req->first_tlv.tl.type); } -exit: - mutex_unlock(&(p_hwfn->vf_iov_info->mutex)); - return rc; } @@ -191,6 +198,9 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) DP_VERBOSE(p_hwfn, QED_MSG_IOV, "attempting to acquire resources\n"); + /* Clear response buffer, as this might be a re-send */ + memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs)); + /* send acquire request */ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); if (rc) @@ -205,9 +215,12 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) /* PF agrees to allocate our resources */ if (!(resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) { - DP_INFO(p_hwfn, - "PF is using old incompatible driver; Either downgrade driver or request provider to update hypervisor version\n"); - return -EINVAL; + /* It's possible legacy PF mistakenly accepted; + * but we don't care - simply mark it as + * legacy and continue. + */ + req->vfdev_info.capabilities |= + VFPF_ACQUIRE_CAP_PRE_FP_HSI; } DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n"); resources_acquired = true; @@ -215,27 +228,55 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) attempts < VF_ACQUIRE_THRESH) { qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc, &resp->resc); + } else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) { + if (pfdev_info->major_fp_hsi && + (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) { + DP_NOTICE(p_hwfn, + "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n", + pfdev_info->major_fp_hsi, + pfdev_info->minor_fp_hsi, + ETH_HSI_VER_MAJOR, + ETH_HSI_VER_MINOR, + pfdev_info->major_fp_hsi); + rc = -EINVAL; + goto exit; + } - /* Clear response buffer */ - memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs)); - } else if ((resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) && - pfdev_info->major_fp_hsi && - (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) { - DP_NOTICE(p_hwfn, - "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n", - pfdev_info->major_fp_hsi, - pfdev_info->minor_fp_hsi, - ETH_HSI_VER_MAJOR, - ETH_HSI_VER_MINOR, pfdev_info->major_fp_hsi); - return -EINVAL; + if (!pfdev_info->major_fp_hsi) { + if (req->vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_PRE_FP_HSI) { + DP_NOTICE(p_hwfn, + "PF uses very old drivers. Please change to a VF driver using no later than 8.8.x.x.\n"); + rc = -EINVAL; + goto exit; + } else { + DP_INFO(p_hwfn, + "PF is old - try re-acquire to see if it supports FW-version override\n"); + req->vfdev_info.capabilities |= + VFPF_ACQUIRE_CAP_PRE_FP_HSI; + continue; + } + } + + /* If PF/VF are using same Major, PF must have had + * it's reasons. Simply fail. 
+ */ + DP_NOTICE(p_hwfn, "PF rejected acquisition by VF\n"); + rc = -EINVAL; + goto exit; } else { DP_ERR(p_hwfn, "PF returned error %d to VF acquisition request\n", resp->hdr.status); - return -EAGAIN; + rc = -EAGAIN; + goto exit; } } + /* Mark the PF as legacy, if needed */ + if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI) + p_iov->b_pre_fp_hsi = true; + /* Update bulletin board size with response from PF */ p_iov->bulletin.size = resp->bulletin_size; @@ -253,14 +294,18 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) } } - if (ETH_HSI_VER_MINOR && + if (!p_iov->b_pre_fp_hsi && + ETH_HSI_VER_MINOR && (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) { DP_INFO(p_hwfn, "PF is using older fastpath HSI; %02x.%02x is configured\n", ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi); } - return 0; +exit: + qed_vf_pf_req_end(p_hwfn, rc); + + return rc; } int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) @@ -347,6 +392,9 @@ free_p_iov: return -ENOMEM; } +#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A +#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \ + (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev))) int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, u8 rx_qid, @@ -374,6 +422,21 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, req->bd_max_bytes = bd_max_bytes; req->stat_id = -1; + /* If PF is legacy, we'll need to calculate producers ourselves + * as well as clean them. + */ + if (pp_prod && p_iov->b_pre_fp_hsi) { + u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid]; + u32 init_prod_val = 0; + + *pp_prod = (u8 __iomem *)p_hwfn->regview + + MSTORM_QZONE_START(p_hwfn->cdev) + + hw_qid * MSTORM_QZONE_SIZE; + + /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ + __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), + (u32 *)(&init_prod_val)); + } /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); @@ -381,13 +444,15 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, resp = &p_iov->pf2vf_reply->queue_start; rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); if (rc) - return rc; + goto exit; - if (resp->hdr.status != PFVF_STATUS_SUCCESS) - return -EINVAL; + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EINVAL; + goto exit; + } /* Learn the address of the producer from the response */ - if (pp_prod) { + if (pp_prod && !p_iov->b_pre_fp_hsi) { u32 init_prod_val = 0; *pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset; @@ -399,6 +464,8 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), (u32 *)&init_prod_val); } +exit: + qed_vf_pf_req_end(p_hwfn, rc); return rc; } @@ -424,10 +491,15 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion) resp = &p_iov->pf2vf_reply->default_resp; rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); if (rc) - return rc; + goto exit; - if (resp->hdr.status != PFVF_STATUS_SUCCESS) - return -EINVAL; + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EINVAL; + goto exit; + } + +exit: + qed_vf_pf_req_end(p_hwfn, rc); return rc; } @@ -470,13 +542,27 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, } if (pp_doorbell) { - *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset; + /* Modern PFs provide the actual offsets, while legacy + * provided only the queue id. 
+ */ + if (!p_iov->b_pre_fp_hsi) { + *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + + resp->offset; + } else { + u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id]; + u32 db_addr; + + db_addr = qed_db_addr(cid, DQ_DEMS_LEGACY); + *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + + db_addr; + } DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n", tx_queue_id, *pp_doorbell, resp->offset); } exit: + qed_vf_pf_req_end(p_hwfn, rc); return rc; } @@ -501,10 +587,15 @@ int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid) resp = &p_iov->pf2vf_reply->default_resp; rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); if (rc) - return rc; + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EINVAL; + goto exit; + } - if (resp->hdr.status != PFVF_STATUS_SUCCESS) - return -EINVAL; +exit: + qed_vf_pf_req_end(p_hwfn, rc); return rc; } @@ -543,10 +634,15 @@ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, resp = &p_iov->pf2vf_reply->default_resp; rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); if (rc) - return rc; + goto exit; - if (resp->hdr.status != PFVF_STATUS_SUCCESS) - return -EINVAL; + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EINVAL; + goto exit; + } + +exit: + qed_vf_pf_req_end(p_hwfn, rc); return rc; } @@ -567,10 +663,15 @@ int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn) rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); if (rc) - return rc; + goto exit; - if (resp->hdr.status != PFVF_STATUS_SUCCESS) - return -EINVAL; + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EINVAL; + goto exit; + } + +exit: + qed_vf_pf_req_end(p_hwfn, rc); return rc; } @@ -770,13 +871,18 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size); if (rc) - return rc; + goto exit; - if (resp->hdr.status != PFVF_STATUS_SUCCESS) - return -EINVAL; + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EINVAL; + goto exit; + } qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params); +exit: + qed_vf_pf_req_end(p_hwfn, rc); + return rc; } @@ -797,14 +903,19 @@ int qed_vf_pf_reset(struct qed_hwfn *p_hwfn) resp = &p_iov->pf2vf_reply->default_resp; rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); if (rc) - return rc; + goto exit; - if (resp->hdr.status != PFVF_STATUS_SUCCESS) - return -EAGAIN; + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EAGAIN; + goto exit; + } p_hwfn->b_int_enabled = 0; - return 0; +exit: + qed_vf_pf_req_end(p_hwfn, rc); + + return rc; } int qed_vf_pf_release(struct qed_hwfn *p_hwfn) @@ -828,6 +939,8 @@ int qed_vf_pf_release(struct qed_hwfn *p_hwfn) if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS) rc = -EAGAIN; + qed_vf_pf_req_end(p_hwfn, rc); + p_hwfn->b_int_enabled = 0; if (p_iov->vf2pf_request) @@ -896,12 +1009,17 @@ int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn, resp = &p_iov->pf2vf_reply->default_resp; rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); if (rc) - return rc; + goto exit; - if (resp->hdr.status != PFVF_STATUS_SUCCESS) - return -EAGAIN; + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EAGAIN; + goto exit; + } - return 0; +exit: + qed_vf_pf_req_end(p_hwfn, rc); + + return rc; } int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn) @@ -920,12 +1038,17 @@ int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn) rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); if (rc) - return rc; + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EINVAL; + goto 
exit; + } - if (resp->hdr.status != PFVF_STATUS_SUCCESS) - return -EINVAL; +exit: + qed_vf_pf_req_end(p_hwfn, rc); - return 0; + return rc; } u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index b23ce58e932f..35db7a28aa13 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -86,7 +86,7 @@ struct vfpf_acquire_tlv { struct vfpf_first_tlv first_tlv; struct vf_pf_vfdev_info { -#define VFPF_ACQUIRE_CAP_OBSOLETE (1 << 0) +#define VFPF_ACQUIRE_CAP_PRE_FP_HSI (1 << 0) /* VF pre-FP hsi version */ #define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */ u64 capabilities; u8 fw_major; @@ -551,6 +551,11 @@ struct qed_vf_iov { /* we set aside a copy of the acquire response */ struct pfvf_acquire_resp_tlv acquire_resp; + + /* In case PF originates prior to the fp-hsi version comparison, + * this has to be propagated as it affects the fastpath. + */ + bool b_pre_fp_hsi; }; #ifdef CONFIG_QED_SRIOV diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 02b06d4e40ae..e01adce4a966 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -25,7 +25,7 @@ #define QEDE_MAJOR_VERSION 8 #define QEDE_MINOR_VERSION 10 -#define QEDE_REVISION_VERSION 1 +#define QEDE_REVISION_VERSION 9 #define QEDE_ENGINEERING_VERSION 20 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ __stringify(QEDE_MINOR_VERSION) "." \ @@ -36,6 +36,8 @@ struct qede_stats { u64 no_buff_discards; + u64 packet_too_big_discard; + u64 ttl0_discard; u64 rx_ucast_bytes; u64 rx_mcast_bytes; u64 rx_bcast_bytes; @@ -124,16 +126,22 @@ struct qede_dev { (edev)->dev_info.num_tc) struct qede_fastpath *fp_array; - u16 req_rss; - u16 num_rss; + u8 req_num_tx; + u8 fp_num_tx; + u8 req_num_rx; + u8 fp_num_rx; + u16 req_queues; + u16 num_queues; u8 num_tc; -#define QEDE_RSS_CNT(edev) ((edev)->num_rss) -#define QEDE_TSS_CNT(edev) ((edev)->num_rss * \ - (edev)->num_tc) -#define QEDE_TSS_IDX(edev, txqidx) ((txqidx) % (edev)->num_rss) -#define QEDE_TC_IDX(edev, txqidx) ((txqidx) / (edev)->num_rss) +#define QEDE_QUEUE_CNT(edev) ((edev)->num_queues) +#define QEDE_RSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_tx) +#define QEDE_TSS_COUNT(edev) (((edev)->num_queues - (edev)->fp_num_rx) * \ + (edev)->num_tc) +#define QEDE_TX_IDX(edev, txqidx) ((edev)->fp_num_rx + (txqidx) % \ + QEDE_TSS_COUNT(edev)) +#define QEDE_TC_IDX(edev, txqidx) ((txqidx) / QEDE_TSS_COUNT(edev)) #define QEDE_TX_QUEUE(edev, txqidx) \ - (&(edev)->fp_array[QEDE_TSS_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX( \ + (&(edev)->fp_array[QEDE_TX_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX(\ (edev), (txqidx))]) struct qed_int_info int_info; @@ -235,6 +243,7 @@ struct qede_rx_queue { u16 num_rx_buffers; u16 rxq_id; + u64 rcv_pkts; u64 rx_hw_errors; u64 rx_alloc_errors; u64 rx_ip_frags; @@ -263,6 +272,10 @@ struct qede_tx_queue { union db_prod tx_db; u16 num_tx_buffers; + u64 xmit_pkts; + u64 stopped_cnt; + + bool is_legacy; }; #define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr.hi), \ @@ -277,7 +290,11 @@ struct qede_tx_queue { struct qede_fastpath { struct qede_dev *edev; - u8 rss_id; +#define QEDE_FASTPATH_TX BIT(0) +#define QEDE_FASTPATH_RX BIT(1) +#define QEDE_FASTPATH_COMBINED (QEDE_FASTPATH_TX | QEDE_FASTPATH_RX) + u8 type; + u8 id; struct napi_struct napi; struct qed_sb_info *sb_info; struct qede_rx_queue *rxq; @@ -337,6 +354,6 @@ 
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev, #define QEDE_MIN_PKT_LEN 64 #define QEDE_RX_HDR_SIZE 256 -#define for_each_rss(i) for (i = 0; i < edev->num_rss; i++) +#define for_each_queue(i) for (i = 0; i < edev->num_queues; i++) #endif /* _QEDE_H_ */ diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index f8492cac9290..4d45945bc34c 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -35,6 +35,7 @@ static const struct { u64 offset; char string[ETH_GSTRING_LEN]; } qede_rqstats_arr[] = { + QEDE_RQSTAT(rcv_pkts), QEDE_RQSTAT(rx_hw_errors), QEDE_RQSTAT(rx_alloc_errors), QEDE_RQSTAT(rx_ip_frags), @@ -44,6 +45,24 @@ static const struct { #define QEDE_RQSTATS_DATA(dev, sindex, rqindex) \ (*((u64 *)(((char *)(dev->fp_array[(rqindex)].rxq)) +\ qede_rqstats_arr[(sindex)].offset))) +#define QEDE_TQSTAT_OFFSET(stat_name) \ + (offsetof(struct qede_tx_queue, stat_name)) +#define QEDE_TQSTAT_STRING(stat_name) (#stat_name) +#define QEDE_TQSTAT(stat_name) \ + {QEDE_TQSTAT_OFFSET(stat_name), QEDE_TQSTAT_STRING(stat_name)} +#define QEDE_NUM_TQSTATS ARRAY_SIZE(qede_tqstats_arr) +static const struct { + u64 offset; + char string[ETH_GSTRING_LEN]; +} qede_tqstats_arr[] = { + QEDE_TQSTAT(xmit_pkts), + QEDE_TQSTAT(stopped_cnt), +}; + +#define QEDE_TQSTATS_DATA(dev, sindex, tssid, tcid) \ + (*((u64 *)(((u64)(&dev->fp_array[tssid].txqs[tcid])) +\ + qede_tqstats_arr[(sindex)].offset))) + static const struct { u64 offset; char string[ETH_GSTRING_LEN]; @@ -107,6 +126,8 @@ static const struct { QEDE_PF_STAT(mftag_filter_discards), QEDE_PF_STAT(mac_filter_discards), QEDE_STAT(tx_err_drop_pkts), + QEDE_STAT(ttl0_discard), + QEDE_STAT(packet_too_big_discard), QEDE_STAT(coalesced_pkts), QEDE_STAT(coalesced_events), @@ -151,17 +172,29 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf) { int i, j, k; + for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) { + int tc; + + for (j = 0; j < QEDE_NUM_RQSTATS; j++) + sprintf(buf + (k + j) * ETH_GSTRING_LEN, + "%d: %s", i, qede_rqstats_arr[j].string); + k += QEDE_NUM_RQSTATS; + for (tc = 0; tc < edev->num_tc; tc++) { + for (j = 0; j < QEDE_NUM_TQSTATS; j++) + sprintf(buf + (k + j) * ETH_GSTRING_LEN, + "%d.%d: %s", i, tc, + qede_tqstats_arr[j].string); + k += QEDE_NUM_TQSTATS; + } + } + for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) { if (IS_VF(edev) && qede_stats_arr[i].pf_only) continue; - strcpy(buf + j * ETH_GSTRING_LEN, + strcpy(buf + (k + j) * ETH_GSTRING_LEN, qede_stats_arr[i].string); j++; } - - for (k = 0; k < QEDE_NUM_RQSTATS; k++, j++) - strcpy(buf + j * ETH_GSTRING_LEN, - qede_rqstats_arr[k].string); } static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf) @@ -197,19 +230,30 @@ static void qede_get_ethtool_stats(struct net_device *dev, mutex_lock(&edev->qede_lock); + for (qid = 0; qid < QEDE_QUEUE_CNT(edev); qid++) { + int tc; + + if (edev->fp_array[qid].type & QEDE_FASTPATH_RX) { + for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) + buf[cnt++] = QEDE_RQSTATS_DATA(edev, sidx, qid); + } + + if (edev->fp_array[qid].type & QEDE_FASTPATH_TX) { + for (tc = 0; tc < edev->num_tc; tc++) { + for (sidx = 0; sidx < QEDE_NUM_TQSTATS; sidx++) + buf[cnt++] = QEDE_TQSTATS_DATA(edev, + sidx, + qid, tc); + } + } + } + for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) { if (IS_VF(edev) && qede_stats_arr[sidx].pf_only) continue; buf[cnt++] = QEDE_STATS_DATA(edev, sidx); } - for (sidx = 0; sidx < 
QEDE_NUM_RQSTATS; sidx++) { - buf[cnt] = 0; - for (qid = 0; qid < edev->num_rss; qid++) - buf[cnt] += QEDE_RQSTATS_DATA(edev, sidx, qid); - cnt++; - } - mutex_unlock(&edev->qede_lock); } @@ -227,7 +271,8 @@ static int qede_get_sset_count(struct net_device *dev, int stringset) if (qede_stats_arr[i].pf_only) num_stats--; } - return num_stats + QEDE_NUM_RQSTATS; + return num_stats + QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS + + QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS * edev->num_tc; case ETH_SS_PRIV_FLAGS: return QEDE_PRI_FLAG_LEN; case ETH_SS_TEST: @@ -249,78 +294,150 @@ static u32 qede_get_priv_flags(struct net_device *dev) return (!!(edev->dev_info.common.num_hwfns > 1)) << QEDE_PRI_FLAG_CMT; } -static int qede_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +struct qede_link_mode_mapping { + u32 qed_link_mode; + u32 ethtool_link_mode; +}; + +static const struct qede_link_mode_mapping qed_lm_map[] = { + {QED_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT}, + {QED_LM_Autoneg_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT}, + {QED_LM_Asym_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT}, + {QED_LM_Pause_BIT, ETHTOOL_LINK_MODE_Pause_BIT}, + {QED_LM_1000baseT_Half_BIT, ETHTOOL_LINK_MODE_1000baseT_Half_BIT}, + {QED_LM_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT}, + {QED_LM_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT}, + {QED_LM_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT}, + {QED_LM_40000baseLR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT}, + {QED_LM_50000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT}, + {QED_LM_100000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT}, +}; + +#define QEDE_DRV_TO_ETHTOOL_CAPS(caps, lk_ksettings, name) \ +{ \ + int i; \ + \ + for (i = 0; i < QED_LM_COUNT; i++) { \ + if ((caps) & (qed_lm_map[i].qed_link_mode)) \ + __set_bit(qed_lm_map[i].ethtool_link_mode,\ + lk_ksettings->link_modes.name); \ + } \ +} + +#define QEDE_ETHTOOL_TO_DRV_CAPS(caps, lk_ksettings, name) \ +{ \ + int i; \ + \ + for (i = 0; i < QED_LM_COUNT; i++) { \ + if (test_bit(qed_lm_map[i].ethtool_link_mode, \ + lk_ksettings->link_modes.name)) \ + caps |= qed_lm_map[i].qed_link_mode; \ + } \ +} + +static int qede_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { + struct ethtool_link_settings *base = &cmd->base; struct qede_dev *edev = netdev_priv(dev); struct qed_link_output current_link; memset(¤t_link, 0, sizeof(current_link)); edev->ops->common->get_link(edev->cdev, ¤t_link); - cmd->supported = current_link.supported_caps; - cmd->advertising = current_link.advertised_caps; + ethtool_link_ksettings_zero_link_mode(cmd, supported); + QEDE_DRV_TO_ETHTOOL_CAPS(current_link.supported_caps, cmd, supported) + + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + QEDE_DRV_TO_ETHTOOL_CAPS(current_link.advertised_caps, cmd, advertising) + + ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising); + QEDE_DRV_TO_ETHTOOL_CAPS(current_link.lp_caps, cmd, lp_advertising) + if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) { - ethtool_cmd_speed_set(cmd, current_link.speed); - cmd->duplex = current_link.duplex; + base->speed = current_link.speed; + base->duplex = current_link.duplex; } else { - cmd->duplex = DUPLEX_UNKNOWN; - ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + base->speed = SPEED_UNKNOWN; + base->duplex = DUPLEX_UNKNOWN; } - cmd->port = current_link.port; - cmd->autoneg = (current_link.autoneg) ? 
AUTONEG_ENABLE : - AUTONEG_DISABLE; - cmd->lp_advertising = current_link.lp_caps; + base->port = current_link.port; + base->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE : + AUTONEG_DISABLE; return 0; } -static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int qede_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { + const struct ethtool_link_settings *base = &cmd->base; struct qede_dev *edev = netdev_priv(dev); struct qed_link_output current_link; struct qed_link_params params; - u32 speed; if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) { - DP_INFO(edev, - "Link settings are not allowed to be changed\n"); + DP_INFO(edev, "Link settings are not allowed to be changed\n"); return -EOPNOTSUPP; } - memset(¤t_link, 0, sizeof(current_link)); memset(¶ms, 0, sizeof(params)); edev->ops->common->get_link(edev->cdev, ¤t_link); - speed = ethtool_cmd_speed(cmd); params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS; params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG; - if (cmd->autoneg == AUTONEG_ENABLE) { + if (base->autoneg == AUTONEG_ENABLE) { params.autoneg = true; params.forced_speed = 0; - params.adv_speeds = cmd->advertising; - } else { /* forced speed */ + QEDE_ETHTOOL_TO_DRV_CAPS(params.adv_speeds, cmd, advertising) + } else { /* forced speed */ params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED; params.autoneg = false; - params.forced_speed = speed; - switch (speed) { + params.forced_speed = base->speed; + switch (base->speed) { case SPEED_10000: if (!(current_link.supported_caps & - SUPPORTED_10000baseKR_Full)) { + QED_LM_10000baseKR_Full_BIT)) { DP_INFO(edev, "10G speed not supported\n"); return -EINVAL; } - params.adv_speeds = SUPPORTED_10000baseKR_Full; + params.adv_speeds = QED_LM_10000baseKR_Full_BIT; + break; + case SPEED_25000: + if (!(current_link.supported_caps & + QED_LM_25000baseKR_Full_BIT)) { + DP_INFO(edev, "25G speed not supported\n"); + return -EINVAL; + } + params.adv_speeds = QED_LM_25000baseKR_Full_BIT; break; case SPEED_40000: if (!(current_link.supported_caps & - SUPPORTED_40000baseLR4_Full)) { + QED_LM_40000baseLR4_Full_BIT)) { DP_INFO(edev, "40G speed not supported\n"); return -EINVAL; } - params.adv_speeds = SUPPORTED_40000baseLR4_Full; + params.adv_speeds = QED_LM_40000baseLR4_Full_BIT; + break; + case SPEED_50000: + if (!(current_link.supported_caps & + QED_LM_50000baseKR2_Full_BIT)) { + DP_INFO(edev, "50G speed not supported\n"); + return -EINVAL; + } + params.adv_speeds = QED_LM_50000baseKR2_Full_BIT; + break; + case SPEED_100000: + if (!(current_link.supported_caps & + QED_LM_100000baseKR4_Full_BIT)) { + DP_INFO(edev, "100G speed not supported\n"); + return -EINVAL; + } + params.adv_speeds = QED_LM_100000baseKR4_Full_BIT; break; default: - DP_INFO(edev, "Unsupported speed %u\n", speed); + DP_INFO(edev, "Unsupported speed %u\n", base->speed); return -EINVAL; } } @@ -368,8 +485,7 @@ static u32 qede_get_msglevel(struct net_device *ndev) { struct qede_dev *edev = netdev_priv(ndev); - return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) | - edev->dp_module; + return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) | edev->dp_module; } static void qede_set_msglevel(struct net_device *ndev, u32 level) @@ -393,8 +509,7 @@ static int qede_nway_reset(struct net_device *dev) struct qed_link_params link_params; if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) { - DP_INFO(edev, - "Link settings are not allowed to be changed\n"); + DP_INFO(edev, 
"Link settings are not allowed to be changed\n"); return -EOPNOTSUPP; } @@ -467,7 +582,7 @@ static int qede_set_coalesce(struct net_device *dev, rxc = (u16)coal->rx_coalesce_usecs; txc = (u16)coal->tx_coalesce_usecs; - for_each_rss(i) { + for_each_queue(i) { sb_id = edev->fp_array[i].sb_info->igu_sb_id; rc = edev->ops->common->set_coalesce(edev->cdev, rxc, txc, (u8)i, sb_id); @@ -563,7 +678,7 @@ static int qede_set_pauseparam(struct net_device *dev, memset(¶ms, 0, sizeof(params)); params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG; if (epause->autoneg) { - if (!(current_link.supported_caps & SUPPORTED_Autoneg)) { + if (!(current_link.supported_caps & QED_LM_Autoneg_BIT)) { DP_INFO(edev, "autoneg not supported\n"); return -EINVAL; } @@ -619,45 +734,70 @@ static void qede_get_channels(struct net_device *dev, struct qede_dev *edev = netdev_priv(dev); channels->max_combined = QEDE_MAX_RSS_CNT(edev); - channels->combined_count = QEDE_RSS_CNT(edev); + channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx - + edev->fp_num_rx; + channels->tx_count = edev->fp_num_tx; + channels->rx_count = edev->fp_num_rx; } static int qede_set_channels(struct net_device *dev, struct ethtool_channels *channels) { struct qede_dev *edev = netdev_priv(dev); + u32 count; DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n", channels->rx_count, channels->tx_count, channels->other_count, channels->combined_count); - /* We don't support separate rx / tx, nor `other' channels. */ - if (channels->rx_count || channels->tx_count || - channels->other_count || (channels->combined_count == 0) || - (channels->combined_count > QEDE_MAX_RSS_CNT(edev))) { + count = channels->rx_count + channels->tx_count + + channels->combined_count; + + /* We don't support `other' channels */ + if (channels->other_count) { DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), "command parameters not supported\n"); return -EINVAL; } + if (!(channels->combined_count || (channels->rx_count && + channels->tx_count))) { + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "need to request at least one transmit and one receive channel\n"); + return -EINVAL; + } + + if (count > QEDE_MAX_RSS_CNT(edev)) { + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "requested channels = %d max supported channels = %d\n", + count, QEDE_MAX_RSS_CNT(edev)); + return -EINVAL; + } + /* Check if there was a change in the active parameters */ - if (channels->combined_count == QEDE_RSS_CNT(edev)) { + if ((count == QEDE_QUEUE_CNT(edev)) && + (channels->tx_count == edev->fp_num_tx) && + (channels->rx_count == edev->fp_num_rx)) { DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), "No change in active parameters\n"); return 0; } /* We need the number of queues to be divisible between the hwfns */ - if (channels->combined_count % edev->dev_info.common.num_hwfns) { + if ((count % edev->dev_info.common.num_hwfns) || + (channels->tx_count % edev->dev_info.common.num_hwfns) || + (channels->rx_count % edev->dev_info.common.num_hwfns)) { DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), - "Number of channels must be divisable by %04x\n", + "Number of channels must be divisible by %04x\n", edev->dev_info.common.num_hwfns); return -EINVAL; } /* Set number of queues and reload if necessary */ - edev->req_rss = channels->combined_count; + edev->req_queues = count; + edev->req_num_tx = channels->tx_count; + edev->req_num_rx = channels->rx_count; if (netif_running(dev)) 
qede_reload(edev, NULL, NULL); @@ -727,7 +867,7 @@ static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, switch (info->cmd) { case ETHTOOL_GRXRINGS: - info->data = edev->num_rss; + info->data = QEDE_RSS_COUNT(edev); return 0; case ETHTOOL_GRXFH: return qede_get_rss_flags(edev, info); @@ -930,7 +1070,7 @@ static void qede_netif_start(struct qede_dev *edev) if (!netif_running(edev->ndev)) return; - for_each_rss(i) { + for_each_queue(i) { /* Update and reenable interrupts */ qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1); napi_enable(&edev->fp_array[i].napi); @@ -942,7 +1082,7 @@ static void qede_netif_stop(struct qede_dev *edev) { int i; - for_each_rss(i) { + for_each_queue(i) { napi_disable(&edev->fp_array[i].napi); /* Disable interrupts */ qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0); @@ -952,11 +1092,23 @@ static void qede_netif_stop(struct qede_dev *edev) static int qede_selftest_transmit_traffic(struct qede_dev *edev, struct sk_buff *skb) { - struct qede_tx_queue *txq = &edev->fp_array[0].txqs[0]; + struct qede_tx_queue *txq = NULL; struct eth_tx_1st_bd *first_bd; dma_addr_t mapping; int i, idx, val; + for_each_queue(i) { + if (edev->fp_array[i].type & QEDE_FASTPATH_TX) { + txq = edev->fp_array[i].txqs; + break; + } + } + + if (!txq) { + DP_NOTICE(edev, "Tx path is not available\n"); + return -1; + } + /* Fill the entry in the SW ring and the BDs in the FW ring */ idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; txq->sw_tx_ring[idx].skb = skb; @@ -1020,14 +1172,26 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev, static int qede_selftest_receive_traffic(struct qede_dev *edev) { - struct qede_rx_queue *rxq = edev->fp_array[0].rxq; u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len; struct eth_fast_path_rx_reg_cqe *fp_cqe; + struct qede_rx_queue *rxq = NULL; struct sw_rx_data *sw_rx_data; union eth_rx_cqe *cqe; u8 *data_ptr; int i; + for_each_queue(i) { + if (edev->fp_array[i].type & QEDE_FASTPATH_RX) { + rxq = edev->fp_array[i].rxq; + break; + } + } + + if (!rxq) { + DP_NOTICE(edev, "Rx path is not available\n"); + return -1; + } + /* The packet is expected to receive on rx-queue 0 even though RSS is * enabled. This is because the queue 0 is configured as the default * queue and that the loopback traffic is not IP. 
@@ -1228,8 +1392,8 @@ static int qede_get_tunable(struct net_device *dev, } static const struct ethtool_ops qede_ethtool_ops = { - .get_settings = qede_get_settings, - .set_settings = qede_set_settings, + .get_link_ksettings = qede_get_link_ksettings, + .set_link_ksettings = qede_set_link_ksettings, .get_drvinfo = qede_get_drvinfo, .get_msglevel = qede_get_msglevel, .set_msglevel = qede_set_msglevel, @@ -1260,7 +1424,7 @@ static const struct ethtool_ops qede_ethtool_ops = { }; static const struct ethtool_ops qede_vf_ethtool_ops = { - .get_settings = qede_get_settings, + .get_link_ksettings = qede_get_link_ksettings, .get_drvinfo = qede_get_drvinfo, .get_msglevel = qede_get_msglevel, .set_msglevel = qede_set_msglevel, diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index a6eb6af8cbe8..b4a56e61631a 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -222,7 +222,7 @@ int __init qede_init(void) { int ret; - pr_notice("qede_init: %s\n", version); + pr_info("qede_init: %s\n", version); qed_ops = qed_get_eth_ops(); if (!qed_ops) { @@ -253,7 +253,8 @@ int __init qede_init(void) static void __exit qede_cleanup(void) { - pr_notice("qede_cleanup called\n"); + if (debug & QED_LOG_INFO_MASK) + pr_info("qede_cleanup called\n"); unregister_netdevice_notifier(&qede_netdev_notifier); pci_unregister_driver(&qede_pci_driver); @@ -270,8 +271,7 @@ module_exit(qede_cleanup); /* Unmap the data and free skb */ static int qede_free_tx_pkt(struct qede_dev *edev, - struct qede_tx_queue *txq, - int *len) + struct qede_tx_queue *txq, int *len) { u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX; struct sk_buff *skb = txq->sw_tx_ring[idx].skb; @@ -329,8 +329,7 @@ static int qede_free_tx_pkt(struct qede_dev *edev, static void qede_free_failed_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, struct eth_tx_1st_bd *first_bd, - int nbd, - bool data_split) + int nbd, bool data_split) { u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; struct sk_buff *skb = txq->sw_tx_ring[idx].skb; @@ -339,8 +338,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev, /* Return prod to its position before this skb was handled */ qed_chain_set_prod(&txq->tx_pbl, - le16_to_cpu(txq->tx_db.data.bd_prod), - first_bd); + le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl); @@ -366,8 +364,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev, /* Return again prod to its position before this skb was handled */ qed_chain_set_prod(&txq->tx_pbl, - le16_to_cpu(txq->tx_db.data.bd_prod), - first_bd); + le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); /* Free skb */ dev_kfree_skb_any(skb); @@ -376,8 +373,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev, } static u32 qede_xmit_type(struct qede_dev *edev, - struct sk_buff *skb, - int *ipv6_ext) + struct sk_buff *skb, int *ipv6_ext) { u32 rc = XMIT_L4_CSUM; __be16 l3_proto; @@ -434,15 +430,13 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb, } static int map_frag_to_bd(struct qede_dev *edev, - skb_frag_t *frag, - struct eth_tx_bd *bd) + skb_frag_t *frag, struct eth_tx_bd *bd) { dma_addr_t mapping; /* Map skb non-linear frag data for DMA */ mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0, - skb_frag_size(frag), - DMA_TO_DEVICE); + skb_frag_size(frag), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { DP_NOTICE(edev, "Unable to map frag - dropping packet\n"); 
return -ENOMEM; @@ -504,9 +498,8 @@ static inline void qede_update_tx_producer(struct qede_tx_queue *txq) } /* Main transmit function */ -static -netdev_tx_t qede_start_xmit(struct sk_buff *skb, - struct net_device *ndev) +static netdev_tx_t qede_start_xmit(struct sk_buff *skb, + struct net_device *ndev) { struct qede_dev *edev = netdev_priv(ndev); struct netdev_queue *netdev_txq; @@ -526,12 +519,11 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, /* Get tx-queue context and netdev index */ txq_index = skb_get_queue_mapping(skb); - WARN_ON(txq_index >= QEDE_TSS_CNT(edev)); + WARN_ON(txq_index >= QEDE_TSS_COUNT(edev)); txq = QEDE_TX_QUEUE(edev, txq_index); netdev_txq = netdev_get_tx_queue(ndev, txq_index); - WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < - (MAX_SKB_FRAGS + 1)); + WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1)); xmit_type = qede_xmit_type(edev, skb, &ipv6_ext); @@ -606,6 +598,14 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT; } + /* Legacy FW had flipped behavior in regard to this bit - + * I.e., needed to set to prevent FW from touching encapsulated + * packets when it didn't need to. + */ + if (unlikely(txq->is_legacy)) + first_bd->data.bitfields ^= + 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT; + /* If the packet is IPv6 with extension header, indicate that * to FW and pass few params, since the device cracker doesn't * support parsing IPv6 with extension header/s. @@ -731,6 +731,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, qede_update_tx_producer(txq); netif_tx_stop_queue(netdev_txq); + txq->stopped_cnt++; DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, "Stop queue was called\n"); /* paired memory barrier is in qede_tx_int(), we have to keep @@ -764,8 +765,7 @@ int qede_txq_has_work(struct qede_tx_queue *txq) return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl); } -static int qede_tx_int(struct qede_dev *edev, - struct qede_tx_queue *txq) +static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) { struct netdev_queue *netdev_txq; u16 hw_bd_cons; @@ -791,6 +791,7 @@ static int qede_tx_int(struct qede_dev *edev, bytes_compl += len; pkts_compl++; txq->sw_tx_cons++; + txq->xmit_pkts++; } netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl); @@ -963,8 +964,7 @@ static inline void qede_update_rx_prod(struct qede_dev *edev, static u32 qede_get_rxhash(struct qede_dev *edev, u8 bitfields, - __le32 rss_hash, - enum pkt_hash_types *rxhash_type) + __le32 rss_hash, enum pkt_hash_types *rxhash_type) { enum rss_hash_type htype; @@ -993,12 +993,10 @@ static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag) static inline void qede_skb_receive(struct qede_dev *edev, struct qede_fastpath *fp, - struct sk_buff *skb, - u16 vlan_tag) + struct sk_buff *skb, u16 vlan_tag) { if (vlan_tag) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), - vlan_tag); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); napi_gro_receive(&fp->napi, skb); } @@ -1021,8 +1019,7 @@ static void qede_set_gro_params(struct qede_dev *edev, static int qede_fill_frag_skb(struct qede_dev *edev, struct qede_rx_queue *rxq, - u8 tpa_agg_index, - u16 len_on_bd) + u8 tpa_agg_index, u16 len_on_bd) { struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; @@ -1209,7 +1206,7 @@ static void qede_gro_receive(struct qede_dev *edev, #endif send_skb: - skb_record_rx_queue(skb, fp->rss_id); + skb_record_rx_queue(skb, fp->rxq->rxq_id); qede_skb_receive(edev, fp, skb, vlan_tag); } @@ -1413,7 +1410,7 @@ 
static int qede_rx_int(struct qede_fastpath *fp, int budget) if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) { edev->ops->eth_cqe_completion( - edev->cdev, fp->rss_id, + edev->cdev, fp->id, (struct eth_slow_path_rx_cqe *)cqe); goto next_cqe; } @@ -1470,7 +1467,7 @@ alloc_skb: skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE); if (unlikely(!skb)) { DP_NOTICE(edev, - "Build_skb failed, dropping incoming packet\n"); + "skb allocation failed, dropping incoming packet\n"); qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num); rxq->rx_alloc_errors++; goto next_cqe; @@ -1578,14 +1575,13 @@ alloc_skb: skb->protocol = eth_type_trans(skb, edev->ndev); rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields, - fp_cqe->rss_hash, - &rxhash_type); + fp_cqe->rss_hash, &rxhash_type); skb_set_hash(skb, rx_hash, rxhash_type); qede_set_skb_csum(skb, csum_flag); - skb_record_rx_queue(skb, fp->rss_id); + skb_record_rx_queue(skb, fp->rxq->rxq_id); qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag)); next_rx_only: @@ -1604,6 +1600,8 @@ next_cqe: /* don't consume bd rx buffer */ /* Update producers */ qede_update_rx_prod(edev, rxq); + rxq->rcv_pkts += rx_pkt; + return rx_pkt; } @@ -1616,10 +1614,12 @@ static int qede_poll(struct napi_struct *napi, int budget) u8 tc; for (tc = 0; tc < edev->num_tc; tc++) - if (qede_txq_has_work(&fp->txqs[tc])) + if (likely(fp->type & QEDE_FASTPATH_TX) && + qede_txq_has_work(&fp->txqs[tc])) qede_tx_int(edev, &fp->txqs[tc]); - rx_work_done = qede_has_rx_work(fp->rxq) ? + rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) && + qede_has_rx_work(fp->rxq)) ? qede_rx_int(fp, budget) : 0; if (rx_work_done < budget) { qed_sb_update_sb_idx(fp->sb_info); @@ -1639,8 +1639,10 @@ static int qede_poll(struct napi_struct *napi, int budget) rmb(); /* Fall out from the NAPI loop if needed */ - if (!(qede_has_rx_work(fp->rxq) || - qede_has_tx_work(fp))) { + if (!((likely(fp->type & QEDE_FASTPATH_RX) && + qede_has_rx_work(fp->rxq)) || + (likely(fp->type & QEDE_FASTPATH_TX) && + qede_has_tx_work(fp)))) { napi_complete(napi); /* Update and reenable interrupts */ @@ -1711,6 +1713,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev) edev->ops->get_vport_stats(edev->cdev, &stats); edev->stats.no_buff_discards = stats.no_buff_discards; + edev->stats.packet_too_big_discard = stats.packet_too_big_discard; + edev->stats.ttl0_discard = stats.ttl0_discard; edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes; edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes; edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes; @@ -1790,9 +1794,9 @@ void qede_fill_by_demand_stats(struct qede_dev *edev) edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames; } -static struct rtnl_link_stats64 *qede_get_stats64( - struct net_device *dev, - struct rtnl_link_stats64 *stats) +static +struct rtnl_link_stats64 *qede_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct qede_dev *edev = netdev_priv(dev); @@ -2106,8 +2110,7 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev) } DP_VERBOSE(edev, NETIF_MSG_IFDOWN, - "marked vlan %d as non-configured\n", - vlan->vid); + "marked vlan %d as non-configured\n", vlan->vid); } edev->accept_any_vlan = false; @@ -2149,7 +2152,7 @@ static void qede_udp_tunnel_add(struct net_device *dev, edev->vxlan_dst_port = t_port; - DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d", + DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n", t_port); set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); @@ -2160,7 +2163,7 @@ static void 
qede_udp_tunnel_add(struct net_device *dev, edev->geneve_dst_port = t_port; - DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d", + DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n", t_port); set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); break; @@ -2184,7 +2187,7 @@ static void qede_udp_tunnel_del(struct net_device *dev, edev->vxlan_dst_port = 0; - DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d", + DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n", t_port); set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); @@ -2195,7 +2198,7 @@ static void qede_udp_tunnel_del(struct net_device *dev, edev->geneve_dst_port = 0; - DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d", + DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n", t_port); set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); break; @@ -2240,15 +2243,13 @@ static const struct net_device_ops qede_netdev_ops = { static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev, struct pci_dev *pdev, struct qed_dev_eth_info *info, - u32 dp_module, - u8 dp_level) + u32 dp_module, u8 dp_level) { struct net_device *ndev; struct qede_dev *edev; ndev = alloc_etherdev_mqs(sizeof(*edev), - info->num_queues, - info->num_queues); + info->num_queues, info->num_queues); if (!ndev) { pr_err("etherdev allocation failed\n"); return NULL; @@ -2264,6 +2265,9 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev, edev->q_num_rx_buffers = NUM_RX_BDS_DEF; edev->q_num_tx_buffers = NUM_TX_BDS_DEF; + DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n", + info->num_queues, info->num_queues); + SET_NETDEV_DEV(ndev, &pdev->dev); memset(&edev->stats, 0, sizeof(edev->stats)); @@ -2352,7 +2356,7 @@ static void qede_free_fp_array(struct qede_dev *edev) struct qede_fastpath *fp; int i; - for_each_rss(i) { + for_each_queue(i) { fp = &edev->fp_array[i]; kfree(fp->sb_info); @@ -2361,22 +2365,33 @@ static void qede_free_fp_array(struct qede_dev *edev) } kfree(edev->fp_array); } - edev->num_rss = 0; + + edev->num_queues = 0; + edev->fp_num_tx = 0; + edev->fp_num_rx = 0; } static int qede_alloc_fp_array(struct qede_dev *edev) { + u8 fp_combined, fp_rx = edev->fp_num_rx; struct qede_fastpath *fp; int i; - edev->fp_array = kcalloc(QEDE_RSS_CNT(edev), + edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev), sizeof(*edev->fp_array), GFP_KERNEL); if (!edev->fp_array) { DP_NOTICE(edev, "fp array allocation failed\n"); goto err; } - for_each_rss(i) { + fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx; + + /* Allocate the FP elements for Rx queues followed by combined and then + * the Tx. This ordering should be maintained so that the respective + * queues (Rx or Tx) will be together in the fastpath array and the + * associated ids will be sequential. 
+ */ + for_each_queue(i) { fp = &edev->fp_array[i]; fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL); @@ -2385,16 +2400,33 @@ static int qede_alloc_fp_array(struct qede_dev *edev) goto err; } - fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL); - if (!fp->rxq) { - DP_NOTICE(edev, "RXQ struct allocation failed\n"); - goto err; + if (fp_rx) { + fp->type = QEDE_FASTPATH_RX; + fp_rx--; + } else if (fp_combined) { + fp->type = QEDE_FASTPATH_COMBINED; + fp_combined--; + } else { + fp->type = QEDE_FASTPATH_TX; } - fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), GFP_KERNEL); - if (!fp->txqs) { - DP_NOTICE(edev, "TXQ array allocation failed\n"); - goto err; + if (fp->type & QEDE_FASTPATH_TX) { + fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), + GFP_KERNEL); + if (!fp->txqs) { + DP_NOTICE(edev, + "TXQ array allocation failed\n"); + goto err; + } + } + + if (fp->type & QEDE_FASTPATH_RX) { + fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL); + if (!fp->rxq) { + DP_NOTICE(edev, + "RXQ struct allocation failed\n"); + goto err; + } } } @@ -2456,7 +2488,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, bool is_vf, enum qede_probe_mode mode) { struct qed_probe_params probe_params; - struct qed_slowpath_params params; + struct qed_slowpath_params sp_params; struct qed_dev_eth_info dev_info; struct qede_dev *edev; struct qed_dev *cdev; @@ -2479,14 +2511,14 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, qede_update_pf_params(cdev); /* Start the Slowpath-process */ - memset(¶ms, 0, sizeof(struct qed_slowpath_params)); - params.int_mode = QED_INT_MODE_MSIX; - params.drv_major = QEDE_MAJOR_VERSION; - params.drv_minor = QEDE_MINOR_VERSION; - params.drv_rev = QEDE_REVISION_VERSION; - params.drv_eng = QEDE_ENGINEERING_VERSION; - strlcpy(params.name, "qede LAN", QED_DRV_VER_STR_SIZE); - rc = qed_ops->common->slowpath_start(cdev, ¶ms); + memset(&sp_params, 0, sizeof(sp_params)); + sp_params.int_mode = QED_INT_MODE_MSIX; + sp_params.drv_major = QEDE_MAJOR_VERSION; + sp_params.drv_minor = QEDE_MINOR_VERSION; + sp_params.drv_rev = QEDE_REVISION_VERSION; + sp_params.drv_eng = QEDE_ENGINEERING_VERSION; + strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE); + rc = qed_ops->common->slowpath_start(cdev, &sp_params); if (rc) { pr_notice("Cannot start slowpath\n"); goto err1; @@ -2589,7 +2621,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) qed_ops->common->slowpath_stop(cdev); qed_ops->common->remove(cdev); - pr_notice("Ending successfully qede_remove\n"); + dev_info(&pdev->dev, "Ending qede_remove successfully\n"); } static void qede_remove(struct pci_dev *pdev) @@ -2608,8 +2640,8 @@ static int qede_set_num_queues(struct qede_dev *edev) u16 rss_num; /* Setup queues according to possible resources*/ - if (edev->req_rss) - rss_num = edev->req_rss; + if (edev->req_queues) + rss_num = edev->req_queues; else rss_num = netif_get_num_default_rss_queues() * edev->dev_info.common.num_hwfns; @@ -2619,11 +2651,15 @@ static int qede_set_num_queues(struct qede_dev *edev) rc = edev->ops->common->set_fp_int(edev->cdev, rss_num); if (rc > 0) { /* Managed to request interrupts for our queues */ - edev->num_rss = rc; + edev->num_queues = rc; DP_INFO(edev, "Managed %d [of %d] RSS queues\n", - QEDE_RSS_CNT(edev), rss_num); + QEDE_QUEUE_CNT(edev), rss_num); rc = 0; } + + edev->fp_num_tx = edev->req_num_tx; + edev->fp_num_rx = edev->req_num_rx; + return rc; } @@ -2637,16 +2673,14 @@ static void qede_free_mem_sb(struct qede_dev 
*edev, /* This function allocates fast-path status block memory */ static int qede_alloc_mem_sb(struct qede_dev *edev, - struct qed_sb_info *sb_info, - u16 sb_id) + struct qed_sb_info *sb_info, u16 sb_id) { struct status_block *sb_virt; dma_addr_t sb_phys; int rc; sb_virt = dma_alloc_coherent(&edev->pdev->dev, - sizeof(*sb_virt), - &sb_phys, GFP_KERNEL); + sizeof(*sb_virt), &sb_phys, GFP_KERNEL); if (!sb_virt) { DP_ERR(edev, "Status block allocation failed\n"); return -ENOMEM; @@ -2678,16 +2712,15 @@ static void qede_free_rx_buffers(struct qede_dev *edev, data = rx_buf->data; dma_unmap_page(&edev->pdev->dev, - rx_buf->mapping, - PAGE_SIZE, DMA_FROM_DEVICE); + rx_buf->mapping, PAGE_SIZE, DMA_FROM_DEVICE); rx_buf->data = NULL; __free_page(data); } } -static void qede_free_sge_mem(struct qede_dev *edev, - struct qede_rx_queue *rxq) { +static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq) +{ int i; if (edev->gro_disable) @@ -2706,8 +2739,7 @@ static void qede_free_sge_mem(struct qede_dev *edev, } } -static void qede_free_mem_rxq(struct qede_dev *edev, - struct qede_rx_queue *rxq) +static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) { qede_free_sge_mem(edev, rxq); @@ -2729,9 +2761,6 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev, struct eth_rx_bd *rx_bd; dma_addr_t mapping; struct page *data; - u16 rx_buf_size; - - rx_buf_size = rxq->rx_buf_size; data = alloc_pages(GFP_ATOMIC, 0); if (unlikely(!data)) { @@ -2766,8 +2795,7 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev, return 0; } -static int qede_alloc_sge_mem(struct qede_dev *edev, - struct qede_rx_queue *rxq) +static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq) { dma_addr_t mapping; int i; @@ -2814,15 +2842,14 @@ err: } /* This function allocates all memory needed per Rx queue */ -static int qede_alloc_mem_rxq(struct qede_dev *edev, - struct qede_rx_queue *rxq) +static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) { int i, rc, size; rxq->num_rx_buffers = edev->q_num_rx_buffers; - rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + - edev->ndev->mtu; + rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu; + if (rxq->rx_buf_size > PAGE_SIZE) rxq->rx_buf_size = PAGE_SIZE; @@ -2876,8 +2903,7 @@ err: return rc; } -static void qede_free_mem_txq(struct qede_dev *edev, - struct qede_tx_queue *txq) +static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) { /* Free the parallel SW ring */ kfree(txq->sw_tx_ring); @@ -2887,8 +2913,7 @@ static void qede_free_mem_txq(struct qede_dev *edev, } /* This function allocates all memory needed per Tx queue */ -static int qede_alloc_mem_txq(struct qede_dev *edev, - struct qede_tx_queue *txq) +static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) { int size, rc; union eth_tx_bd_types *p_virt; @@ -2920,41 +2945,45 @@ err: } /* This function frees all memory of a single fp */ -static void qede_free_mem_fp(struct qede_dev *edev, - struct qede_fastpath *fp) +static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp) { int tc; qede_free_mem_sb(edev, fp->sb_info); - qede_free_mem_rxq(edev, fp->rxq); + if (fp->type & QEDE_FASTPATH_RX) + qede_free_mem_rxq(edev, fp->rxq); - for (tc = 0; tc < edev->num_tc; tc++) - qede_free_mem_txq(edev, &fp->txqs[tc]); + if (fp->type & QEDE_FASTPATH_TX) + for (tc = 0; tc < edev->num_tc; tc++) + qede_free_mem_txq(edev, &fp->txqs[tc]); } /* This function allocates all memory needed for a 
single fp (i.e. an entity - * which contains status block, one rx queue and multiple per-TC tx queues. + * which contains status block, one rx queue and/or multiple per-TC tx queues. */ -static int qede_alloc_mem_fp(struct qede_dev *edev, - struct qede_fastpath *fp) +static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp) { int rc, tc; - rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->rss_id); - if (rc) - goto err; - - rc = qede_alloc_mem_rxq(edev, fp->rxq); + rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id); if (rc) goto err; - for (tc = 0; tc < edev->num_tc; tc++) { - rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]); + if (fp->type & QEDE_FASTPATH_RX) { + rc = qede_alloc_mem_rxq(edev, fp->rxq); if (rc) goto err; } + if (fp->type & QEDE_FASTPATH_TX) { + for (tc = 0; tc < edev->num_tc; tc++) { + rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]); + if (rc) + goto err; + } + } + return 0; err: return rc; @@ -2964,7 +2993,7 @@ static void qede_free_mem_load(struct qede_dev *edev) { int i; - for_each_rss(i) { + for_each_queue(i) { struct qede_fastpath *fp = &edev->fp_array[i]; qede_free_mem_fp(edev, fp); @@ -2974,16 +3003,16 @@ static void qede_free_mem_load(struct qede_dev *edev) /* This function allocates all qede memory at NIC load. */ static int qede_alloc_mem_load(struct qede_dev *edev) { - int rc = 0, rss_id; + int rc = 0, queue_id; - for (rss_id = 0; rss_id < QEDE_RSS_CNT(edev); rss_id++) { - struct qede_fastpath *fp = &edev->fp_array[rss_id]; + for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) { + struct qede_fastpath *fp = &edev->fp_array[queue_id]; rc = qede_alloc_mem_fp(edev, fp); if (rc) { DP_ERR(edev, "Failed to allocate memory for fastpath - rss id = %d\n", - rss_id); + queue_id); qede_free_mem_load(edev); return rc; } @@ -2995,30 +3024,38 @@ static int qede_alloc_mem_load(struct qede_dev *edev) /* This function inits fp content and resets the SB, RXQ and TXQ structures */ static void qede_init_fp(struct qede_dev *edev) { - int rss_id, txq_index, tc; + int queue_id, rxq_index = 0, txq_index = 0, tc; struct qede_fastpath *fp; - for_each_rss(rss_id) { - fp = &edev->fp_array[rss_id]; + for_each_queue(queue_id) { + fp = &edev->fp_array[queue_id]; fp->edev = edev; - fp->rss_id = rss_id; + fp->id = queue_id; memset((void *)&fp->napi, 0, sizeof(fp->napi)); memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info)); - memset((void *)fp->rxq, 0, sizeof(*fp->rxq)); - fp->rxq->rxq_id = rss_id; + if (fp->type & QEDE_FASTPATH_RX) { + memset((void *)fp->rxq, 0, sizeof(*fp->rxq)); + fp->rxq->rxq_id = rxq_index++; + } - memset((void *)fp->txqs, 0, (edev->num_tc * sizeof(*fp->txqs))); - for (tc = 0; tc < edev->num_tc; tc++) { - txq_index = tc * QEDE_RSS_CNT(edev) + rss_id; - fp->txqs[tc].index = txq_index; + if (fp->type & QEDE_FASTPATH_TX) { + memset((void *)fp->txqs, 0, + (edev->num_tc * sizeof(*fp->txqs))); + for (tc = 0; tc < edev->num_tc; tc++) { + fp->txqs[tc].index = txq_index + + tc * QEDE_TSS_COUNT(edev); + if (edev->dev_info.is_legacy) + fp->txqs[tc].is_legacy = true; + } + txq_index++; } snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", - edev->ndev->name, rss_id); + edev->ndev->name, queue_id); } edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO); @@ -3028,12 +3065,13 @@ static int qede_set_real_num_queues(struct qede_dev *edev) { int rc = 0; - rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_CNT(edev)); + rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev)); if (rc) { DP_NOTICE(edev, "Failed to set real number of Tx queues\n"); return 
rc; } - rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_CNT(edev)); + + rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev)); if (rc) { DP_NOTICE(edev, "Failed to set real number of Rx queues\n"); return rc; @@ -3046,7 +3084,7 @@ static void qede_napi_disable_remove(struct qede_dev *edev) { int i; - for_each_rss(i) { + for_each_queue(i) { napi_disable(&edev->fp_array[i].napi); netif_napi_del(&edev->fp_array[i].napi); @@ -3058,7 +3096,7 @@ static void qede_napi_add_enable(struct qede_dev *edev) int i; /* Add NAPI objects */ - for_each_rss(i) { + for_each_queue(i) { netif_napi_add(edev->ndev, &edev->fp_array[i].napi, qede_poll, NAPI_POLL_WEIGHT); napi_enable(&edev->fp_array[i].napi); @@ -3087,14 +3125,14 @@ static int qede_req_msix_irqs(struct qede_dev *edev) int i, rc; /* Sanitize number of interrupts == number of prepared RSS queues */ - if (QEDE_RSS_CNT(edev) > edev->int_info.msix_cnt) { + if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) { DP_ERR(edev, "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n", - QEDE_RSS_CNT(edev), edev->int_info.msix_cnt); + QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt); return -EINVAL; } - for (i = 0; i < QEDE_RSS_CNT(edev); i++) { + for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) { rc = request_irq(edev->int_info.msix[i].vector, qede_msix_fp_int, 0, edev->fp_array[i].name, &edev->fp_array[i]); @@ -3139,18 +3177,17 @@ static int qede_setup_irqs(struct qede_dev *edev) /* qed should learn receive the RSS ids and callbacks */ ops = edev->ops->common; - for (i = 0; i < QEDE_RSS_CNT(edev); i++) + for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) ops->simd_handler_config(edev->cdev, &edev->fp_array[i], i, qede_simd_fp_handler); - edev->int_info.used_cnt = QEDE_RSS_CNT(edev); + edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev); } return 0; } static int qede_drain_txq(struct qede_dev *edev, - struct qede_tx_queue *txq, - bool allow_drain) + struct qede_tx_queue *txq, bool allow_drain) { int rc, cnt = 1000; @@ -3202,45 +3239,53 @@ static int qede_stop_queues(struct qede_dev *edev) } /* Flush Tx queues. 
If needed, request drain from MCP */ - for_each_rss(i) { + for_each_queue(i) { struct qede_fastpath *fp = &edev->fp_array[i]; - for (tc = 0; tc < edev->num_tc; tc++) { - struct qede_tx_queue *txq = &fp->txqs[tc]; + if (fp->type & QEDE_FASTPATH_TX) { + for (tc = 0; tc < edev->num_tc; tc++) { + struct qede_tx_queue *txq = &fp->txqs[tc]; - rc = qede_drain_txq(edev, txq, true); - if (rc) - return rc; + rc = qede_drain_txq(edev, txq, true); + if (rc) + return rc; + } } } - /* Stop all Queues in reverse order*/ - for (i = QEDE_RSS_CNT(edev) - 1; i >= 0; i--) { + /* Stop all Queues in reverse order */ + for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) { struct qed_stop_rxq_params rx_params; - /* Stop the Tx Queue(s)*/ - for (tc = 0; tc < edev->num_tc; tc++) { - struct qed_stop_txq_params tx_params; - - tx_params.rss_id = i; - tx_params.tx_queue_id = tc * QEDE_RSS_CNT(edev) + i; - rc = edev->ops->q_tx_stop(cdev, &tx_params); - if (rc) { - DP_ERR(edev, "Failed to stop TXQ #%d\n", - tx_params.tx_queue_id); - return rc; + /* Stop the Tx Queue(s) */ + if (edev->fp_array[i].type & QEDE_FASTPATH_TX) { + for (tc = 0; tc < edev->num_tc; tc++) { + struct qed_stop_txq_params tx_params; + u8 val; + + tx_params.rss_id = i; + val = edev->fp_array[i].txqs[tc].index; + tx_params.tx_queue_id = val; + rc = edev->ops->q_tx_stop(cdev, &tx_params); + if (rc) { + DP_ERR(edev, "Failed to stop TXQ #%d\n", + tx_params.tx_queue_id); + return rc; + } } } - /* Stop the Rx Queue*/ - memset(&rx_params, 0, sizeof(rx_params)); - rx_params.rss_id = i; - rx_params.rx_queue_id = i; + /* Stop the Rx Queue */ + if (edev->fp_array[i].type & QEDE_FASTPATH_RX) { + memset(&rx_params, 0, sizeof(rx_params)); + rx_params.rss_id = i; + rx_params.rx_queue_id = edev->fp_array[i].rxq->rxq_id; - rc = edev->ops->q_rx_stop(cdev, &rx_params); - if (rc) { - DP_ERR(edev, "Failed to stop RXQ #%d\n", i); - return rc; + rc = edev->ops->q_rx_stop(cdev, &rx_params); + if (rc) { + DP_ERR(edev, "Failed to stop RXQ #%d\n", i); + return rc; + } } } @@ -3263,7 +3308,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) struct qed_start_vport_params start = {0}; bool reset_rss_indir = false; - if (!edev->num_rss) { + if (!edev->num_queues) { DP_ERR(edev, "Cannot update V-VPORT as active as there are no Rx queues\n"); return -EINVAL; @@ -3287,50 +3332,66 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n", start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en); - for_each_rss(i) { + for_each_queue(i) { struct qede_fastpath *fp = &edev->fp_array[i]; - dma_addr_t phys_table = fp->rxq->rx_comp_ring.pbl.p_phys_table; - - memset(&q_params, 0, sizeof(q_params)); - q_params.rss_id = i; - q_params.queue_id = i; - q_params.vport_id = 0; - q_params.sb = fp->sb_info->igu_sb_id; - q_params.sb_idx = RX_PI; - - rc = edev->ops->q_rx_start(cdev, &q_params, - fp->rxq->rx_buf_size, - fp->rxq->rx_bd_ring.p_phys_addr, - phys_table, - fp->rxq->rx_comp_ring.page_cnt, - &fp->rxq->hw_rxq_prod_addr); - if (rc) { - DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc); - return rc; - } + dma_addr_t p_phys_table; + u32 page_cnt; + + if (fp->type & QEDE_FASTPATH_RX) { + struct qede_rx_queue *rxq = fp->rxq; + __le16 *val; + + memset(&q_params, 0, sizeof(q_params)); + q_params.rss_id = i; + q_params.queue_id = rxq->rxq_id; + q_params.vport_id = 0; + q_params.sb = fp->sb_info->igu_sb_id; + q_params.sb_idx = RX_PI; + + p_phys_table = + qed_chain_get_pbl_phys(&rxq->rx_comp_ring); + 
page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring); + + rc = edev->ops->q_rx_start(cdev, &q_params, + rxq->rx_buf_size, + rxq->rx_bd_ring.p_phys_addr, + p_phys_table, + page_cnt, + &rxq->hw_rxq_prod_addr); + if (rc) { + DP_ERR(edev, "Start RXQ #%d failed %d\n", i, + rc); + return rc; + } - fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI]; + val = &fp->sb_info->sb_virt->pi_array[RX_PI]; + rxq->hw_cons_ptr = val; - qede_update_rx_prod(edev, fp->rxq); + qede_update_rx_prod(edev, rxq); + } + + if (!(fp->type & QEDE_FASTPATH_TX)) + continue; for (tc = 0; tc < edev->num_tc; tc++) { struct qede_tx_queue *txq = &fp->txqs[tc]; - int txq_index = tc * QEDE_RSS_CNT(edev) + i; + + p_phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl); + page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl); memset(&q_params, 0, sizeof(q_params)); q_params.rss_id = i; - q_params.queue_id = txq_index; + q_params.queue_id = txq->index; q_params.vport_id = 0; q_params.sb = fp->sb_info->igu_sb_id; q_params.sb_idx = TX_PI(tc); rc = edev->ops->q_tx_start(cdev, &q_params, - txq->tx_pbl.pbl.p_phys_table, - txq->tx_pbl.page_cnt, + p_phys_table, page_cnt, &txq->doorbell_addr); if (rc) { DP_ERR(edev, "Start TXQ #%d failed %d\n", - txq_index, rc); + txq->index, rc); return rc; } @@ -3361,13 +3422,13 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) } /* Fill struct with RSS params */ - if (QEDE_RSS_CNT(edev) > 1) { + if (QEDE_RSS_COUNT(edev) > 1) { vport_update_params.update_rss_flg = 1; /* Need to validate current RSS config uses valid entries */ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { if (edev->rss_params.rss_ind_table[i] >= - edev->num_rss) { + QEDE_RSS_COUNT(edev)) { reset_rss_indir = true; break; } @@ -3380,7 +3441,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { u16 indir_val; - val = QEDE_RSS_CNT(edev); + val = QEDE_RSS_COUNT(edev); indir_val = ethtool_rxfh_indir_default(i, val); edev->rss_params.rss_ind_table[i] = indir_val; } @@ -3509,7 +3570,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode) if (rc) goto err1; DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n", - QEDE_RSS_CNT(edev), edev->num_tc); + QEDE_QUEUE_CNT(edev), edev->num_tc); rc = qede_set_real_num_queues(edev); if (rc) @@ -3562,7 +3623,9 @@ err2: err1: edev->ops->common->set_fp_int(edev->cdev, 0); qede_free_fp_array(edev); - edev->num_rss = 0; + edev->num_queues = 0; + edev->fp_num_tx = 0; + edev->fp_num_rx = 0; err0: return rc; } diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 4e5d5e953e15..f1109661a533 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -1011,7 +1011,6 @@ struct ravb_private { struct work_struct work; /* MII transceiver section. 
*/ struct mii_bus *mii_bus; /* MDIO bus control */ - struct phy_device *phydev; /* PHY device control */ int link; phy_interface_t phy_interface; int msg_enable; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 1e1cc0fad17f..cad23ba06904 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -942,7 +942,7 @@ out: static void ravb_adjust_link(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = ndev->phydev; bool new_state = false; if (phydev->link) { @@ -1032,48 +1032,47 @@ static int ravb_phy_init(struct net_device *ndev) phy_attached_info(phydev); - priv->phydev = phydev; - return 0; } /* PHY control start function */ static int ravb_phy_start(struct net_device *ndev) { - struct ravb_private *priv = netdev_priv(ndev); int error; error = ravb_phy_init(ndev); if (error) return error; - phy_start(priv->phydev); + phy_start(ndev->phydev); return 0; } -static int ravb_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) +static int ravb_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) { struct ravb_private *priv = netdev_priv(ndev); int error = -ENODEV; unsigned long flags; - if (priv->phydev) { + if (ndev->phydev) { spin_lock_irqsave(&priv->lock, flags); - error = phy_ethtool_gset(priv->phydev, ecmd); + error = phy_ethtool_ksettings_get(ndev->phydev, cmd); spin_unlock_irqrestore(&priv->lock, flags); } return error; } -static int ravb_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) +static int ravb_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd) { struct ravb_private *priv = netdev_priv(ndev); unsigned long flags; int error; - if (!priv->phydev) + if (!ndev->phydev) return -ENODEV; spin_lock_irqsave(&priv->lock, flags); @@ -1081,11 +1080,11 @@ static int ravb_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) /* Disable TX and RX */ ravb_rcv_snd_disable(ndev); - error = phy_ethtool_sset(priv->phydev, ecmd); + error = phy_ethtool_ksettings_set(ndev->phydev, cmd); if (error) goto error_exit; - if (ecmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) priv->duplex = 1; else priv->duplex = 0; @@ -1110,9 +1109,9 @@ static int ravb_nway_reset(struct net_device *ndev) int error = -ENODEV; unsigned long flags; - if (priv->phydev) { + if (ndev->phydev) { spin_lock_irqsave(&priv->lock, flags); - error = phy_start_aneg(priv->phydev); + error = phy_start_aneg(ndev->phydev); spin_unlock_irqrestore(&priv->lock, flags); } @@ -1309,8 +1308,6 @@ static int ravb_get_ts_info(struct net_device *ndev, } static const struct ethtool_ops ravb_ethtool_ops = { - .get_settings = ravb_get_settings, - .set_settings = ravb_set_settings, .nway_reset = ravb_nway_reset, .get_msglevel = ravb_get_msglevel, .set_msglevel = ravb_set_msglevel, @@ -1321,6 +1318,8 @@ static const struct ethtool_ops ravb_ethtool_ops = { .get_ringparam = ravb_get_ringparam, .set_ringparam = ravb_set_ringparam, .get_ts_info = ravb_get_ts_info, + .get_link_ksettings = ravb_get_link_ksettings, + .set_link_ksettings = ravb_set_link_ksettings, }; static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler, @@ -1661,10 +1660,9 @@ static int ravb_close(struct net_device *ndev) } /* PHY disconnect */ - if (priv->phydev) { - phy_stop(priv->phydev); - phy_disconnect(priv->phydev); - priv->phydev = NULL; + if (ndev->phydev) { + 
phy_stop(ndev->phydev); + phy_disconnect(ndev->phydev); } if (priv->chip_id != RCAR_GEN2) { @@ -1753,8 +1751,7 @@ static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req) /* ioctl to device function */ static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) { - struct ravb_private *priv = netdev_priv(ndev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = ndev->phydev; if (!netif_running(ndev)) return -EINVAL; @@ -1876,6 +1873,20 @@ static int ravb_set_gti(struct net_device *ndev) return 0; } +static void ravb_set_config_mode(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + + if (priv->chip_id == RCAR_GEN2) { + ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); + /* Set CSEL value */ + ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB); + } else { + ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG | + CCC_GAC | CCC_CSEL_HPB); + } +} + static int ravb_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -1978,14 +1989,7 @@ static int ravb_probe(struct platform_device *pdev) ndev->ethtool_ops = &ravb_ethtool_ops; /* Set AVB config mode */ - if (chip_id == RCAR_GEN2) { - ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); - /* Set CSEL value */ - ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB); - } else { - ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG | - CCC_GAC | CCC_CSEL_HPB); - } + ravb_set_config_mode(ndev); /* Set GTI value */ error = ravb_set_gti(ndev); @@ -2097,6 +2101,54 @@ static int ravb_remove(struct platform_device *pdev) } #ifdef CONFIG_PM +static int ravb_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + int ret = 0; + + if (netif_running(ndev)) { + netif_device_detach(ndev); + ret = ravb_close(ndev); + } + + return ret; +} + +static int ravb_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct ravb_private *priv = netdev_priv(ndev); + int ret = 0; + + /* All register have been reset to default values. + * Restore all registers which where setup at probe time and + * reopen device if it was running before system suspended. 
+ */ + + /* Set AVB config mode */ + ravb_set_config_mode(ndev); + + /* Set GTI value */ + ret = ravb_set_gti(ndev); + if (ret) + return ret; + + /* Request GTI loading */ + ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); + + /* Restore descriptor base address table */ + ravb_write(ndev, priv->desc_bat_dma, DBAT); + + if (netif_running(ndev)) { + ret = ravb_open(ndev); + if (ret < 0) + return ret; + netif_device_attach(ndev); + } + + return ret; +} + static int ravb_runtime_nop(struct device *dev) { /* Runtime PM callback shared between ->runtime_suspend() @@ -2110,6 +2162,7 @@ static int ravb_runtime_nop(struct device *dev) } static const struct dev_pm_ops ravb_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume) SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL) }; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 799d58d86e6d..1f8240aec086 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1723,7 +1723,7 @@ out: static void sh_eth_adjust_link(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); - struct phy_device *phydev = mdp->phydev; + struct phy_device *phydev = ndev->phydev; int new_state = 0; if (phydev->link) { @@ -1800,51 +1800,48 @@ static int sh_eth_phy_init(struct net_device *ndev) phy_attached_info(phydev); - mdp->phydev = phydev; - return 0; } /* PHY control start function */ static int sh_eth_phy_start(struct net_device *ndev) { - struct sh_eth_private *mdp = netdev_priv(ndev); int ret; ret = sh_eth_phy_init(ndev); if (ret) return ret; - phy_start(mdp->phydev); + phy_start(ndev->phydev); return 0; } -static int sh_eth_get_settings(struct net_device *ndev, - struct ethtool_cmd *ecmd) +static int sh_eth_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) { struct sh_eth_private *mdp = netdev_priv(ndev); unsigned long flags; int ret; - if (!mdp->phydev) + if (!ndev->phydev) return -ENODEV; spin_lock_irqsave(&mdp->lock, flags); - ret = phy_ethtool_gset(mdp->phydev, ecmd); + ret = phy_ethtool_ksettings_get(ndev->phydev, cmd); spin_unlock_irqrestore(&mdp->lock, flags); return ret; } -static int sh_eth_set_settings(struct net_device *ndev, - struct ethtool_cmd *ecmd) +static int sh_eth_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd) { struct sh_eth_private *mdp = netdev_priv(ndev); unsigned long flags; int ret; - if (!mdp->phydev) + if (!ndev->phydev) return -ENODEV; spin_lock_irqsave(&mdp->lock, flags); @@ -1852,11 +1849,11 @@ static int sh_eth_set_settings(struct net_device *ndev, /* disable tx and rx */ sh_eth_rcv_snd_disable(ndev); - ret = phy_ethtool_sset(mdp->phydev, ecmd); + ret = phy_ethtool_ksettings_set(ndev->phydev, cmd); if (ret) goto error_exit; - if (ecmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) mdp->duplex = 1; else mdp->duplex = 0; @@ -2067,11 +2064,11 @@ static int sh_eth_nway_reset(struct net_device *ndev) unsigned long flags; int ret; - if (!mdp->phydev) + if (!ndev->phydev) return -ENODEV; spin_lock_irqsave(&mdp->lock, flags); - ret = phy_start_aneg(mdp->phydev); + ret = phy_start_aneg(ndev->phydev); spin_unlock_irqrestore(&mdp->lock, flags); return ret; @@ -2198,8 +2195,6 @@ static int sh_eth_set_ringparam(struct net_device *ndev, } static const struct ethtool_ops sh_eth_ethtool_ops = { - .get_settings = sh_eth_get_settings, - .set_settings = sh_eth_set_settings, .get_regs_len = sh_eth_get_regs_len, .get_regs = sh_eth_get_regs, 
.nway_reset = sh_eth_nway_reset, @@ -2211,6 +2206,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = { .get_sset_count = sh_eth_get_sset_count, .get_ringparam = sh_eth_get_ringparam, .set_ringparam = sh_eth_set_ringparam, + .get_link_ksettings = sh_eth_get_link_ksettings, + .set_link_ksettings = sh_eth_set_link_ksettings, }; /* network device open function */ @@ -2408,10 +2405,9 @@ static int sh_eth_close(struct net_device *ndev) sh_eth_dev_exit(ndev); /* PHY Disconnect */ - if (mdp->phydev) { - phy_stop(mdp->phydev); - phy_disconnect(mdp->phydev); - mdp->phydev = NULL; + if (ndev->phydev) { + phy_stop(ndev->phydev); + phy_disconnect(ndev->phydev); } free_irq(ndev->irq, ndev); @@ -2429,8 +2425,7 @@ static int sh_eth_close(struct net_device *ndev) /* ioctl to device function */ static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) { - struct sh_eth_private *mdp = netdev_priv(ndev); - struct phy_device *phydev = mdp->phydev; + struct phy_device *phydev = ndev->phydev; if (!netif_running(ndev)) return -EINVAL; diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index c62380e34a1d..d050f37f3e0f 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -518,7 +518,6 @@ struct sh_eth_private { /* MII transceiver section. */ u32 phy_id; /* PHY ID */ struct mii_bus *mii_bus; /* MDIO bus control */ - struct phy_device *phydev; /* PHY device control */ int link; phy_interface_t phy_interface; int msg_enable; diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index f0b09b05ed3f..1f0c08602eba 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -2412,7 +2412,7 @@ static int rocker_port_rx_proc(const struct rocker *rocker, skb->protocol = eth_type_trans(skb, rocker_port->dev); if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD) - skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark; + skb->offload_fwd_mark = 1; rocker_port->dev->stats.rx_packets++; rocker_port->dev->stats.rx_bytes += skb->len; diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index 1ca796316173..fcad907baecf 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -2558,7 +2558,6 @@ static int ofdpa_port_init(struct rocker_port *rocker_port) struct ofdpa_port *ofdpa_port = rocker_port->wpriv; int err; - switchdev_port_fwd_mark_set(ofdpa_port->dev, NULL, false); rocker_port_set_learning(rocker_port, !!(ofdpa_port->brport_flags & BR_LEARNING)); @@ -2817,7 +2816,6 @@ static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port, ofdpa_port_internal_vlan_id_get(ofdpa_port, bridge->ifindex); ofdpa_port->bridge_dev = bridge; - switchdev_port_fwd_mark_set(ofdpa_port->dev, bridge, true); return ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0); } @@ -2836,8 +2834,6 @@ static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port) ofdpa_port_internal_vlan_id_get(ofdpa_port, ofdpa_port->dev->ifindex); - switchdev_port_fwd_mark_set(ofdpa_port->dev, ofdpa_port->bridge_dev, - false); ofdpa_port->bridge_dev = NULL; err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0); diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index e00a669e9e09..00279da6a1e8 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -177,7 +177,7 @@ static int 
efx_ef10_get_vf_index(struct efx_nic *efx) static int efx_ef10_init_datapath_caps(struct efx_nic *efx) { - MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V2_OUT_LEN); struct efx_ef10_nic_data *nic_data = efx->nic_data; size_t outlen; int rc; @@ -188,7 +188,7 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx) outbuf, sizeof(outbuf), &outlen); if (rc) return rc; - if (outlen < sizeof(outbuf)) { + if (outlen < MC_CMD_GET_CAPABILITIES_OUT_LEN) { netif_err(efx, drv, efx->net_dev, "unable to read datapath firmware capabilities\n"); return -EIO; @@ -197,6 +197,12 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx) nic_data->datapath_caps = MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1); + if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) + nic_data->datapath_caps2 = MCDI_DWORD(outbuf, + GET_CAPABILITIES_V2_OUT_FLAGS2); + else + nic_data->datapath_caps2 = 0; + /* record the DPCPU firmware IDs to determine VEB vswitching support. */ nic_data->rx_dpcpu_fw_id = @@ -227,6 +233,116 @@ static int efx_ef10_get_sysclk_freq(struct efx_nic *efx) return rc > 0 ? rc : -ERANGE; } +static int efx_ef10_get_timer_workarounds(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + unsigned int implemented; + unsigned int enabled; + int rc; + + nic_data->workaround_35388 = false; + nic_data->workaround_61265 = false; + + rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled); + + if (rc == -ENOSYS) { + /* Firmware without GET_WORKAROUNDS - not a problem. */ + rc = 0; + } else if (rc == 0) { + /* Bug61265 workaround is always enabled if implemented. */ + if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG61265) + nic_data->workaround_61265 = true; + + if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) { + nic_data->workaround_35388 = true; + } else if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) { + /* Workaround is implemented but not enabled. + * Try to enable it. + */ + rc = efx_mcdi_set_workaround(efx, + MC_CMD_WORKAROUND_BUG35388, + true, NULL); + if (rc == 0) + nic_data->workaround_35388 = true; + /* If we failed to set the workaround just carry on. */ + rc = 0; + } + } + + netif_dbg(efx, probe, efx->net_dev, + "workaround for bug 35388 is %sabled\n", + nic_data->workaround_35388 ? "en" : "dis"); + netif_dbg(efx, probe, efx->net_dev, + "workaround for bug 61265 is %sabled\n", + nic_data->workaround_61265 ? 
"en" : "dis"); + + return rc; +} + +static void efx_ef10_process_timer_config(struct efx_nic *efx, + const efx_dword_t *data) +{ + unsigned int max_count; + + if (EFX_EF10_WORKAROUND_61265(efx)) { + efx->timer_quantum_ns = MCDI_DWORD(data, + GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS); + efx->timer_max_ns = MCDI_DWORD(data, + GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS); + } else if (EFX_EF10_WORKAROUND_35388(efx)) { + efx->timer_quantum_ns = MCDI_DWORD(data, + GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT); + max_count = MCDI_DWORD(data, + GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT); + efx->timer_max_ns = max_count * efx->timer_quantum_ns; + } else { + efx->timer_quantum_ns = MCDI_DWORD(data, + GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT); + max_count = MCDI_DWORD(data, + GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT); + efx->timer_max_ns = max_count * efx->timer_quantum_ns; + } + + netif_dbg(efx, probe, efx->net_dev, + "got timer properties from MC: quantum %u ns; max %u ns\n", + efx->timer_quantum_ns, efx->timer_max_ns); +} + +static int efx_ef10_get_timer_config(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN); + int rc; + + rc = efx_ef10_get_timer_workarounds(efx); + if (rc) + return rc; + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, NULL, 0, + outbuf, sizeof(outbuf), NULL); + + if (rc == 0) { + efx_ef10_process_timer_config(efx, outbuf); + } else if (rc == -ENOSYS || rc == -EPERM) { + /* Not available - fall back to Huntington defaults. */ + unsigned int quantum; + + rc = efx_ef10_get_sysclk_freq(efx); + if (rc < 0) + return rc; + + quantum = 1536000 / rc; /* 1536 cycles */ + efx->timer_quantum_ns = quantum; + efx->timer_max_ns = efx->type->timer_period_max * quantum; + rc = 0; + } else { + efx_mcdi_display_error(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, + MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN, + NULL, 0, rc); + } + + return rc; +} + static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address) { MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN); @@ -527,32 +643,9 @@ static int efx_ef10_probe(struct efx_nic *efx) if (rc) goto fail5; - rc = efx_ef10_get_sysclk_freq(efx); + rc = efx_ef10_get_timer_config(efx); if (rc < 0) goto fail5; - efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */ - - /* Check whether firmware supports bug 35388 workaround. - * First try to enable it, then if we get EPERM, just - * ask if it's already enabled - */ - rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true, NULL); - if (rc == 0) { - nic_data->workaround_35388 = true; - } else if (rc == -EPERM) { - unsigned int enabled; - - rc = efx_mcdi_get_workarounds(efx, NULL, &enabled); - if (rc) - goto fail3; - nic_data->workaround_35388 = enabled & - MC_CMD_GET_WORKAROUNDS_OUT_BUG35388; - } else if (rc != -ENOSYS && rc != -ENOENT) { - goto fail5; - } - netif_dbg(efx, probe, efx->net_dev, - "workaround for bug 35388 is %sabled\n", - nic_data->workaround_35388 ? "en" : "dis"); rc = efx_mcdi_mon_probe(efx); if (rc && rc != -EPERM) @@ -1440,9 +1533,10 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { (1ULL << GENERIC_STAT_rx_nodesc_trunc) | \ (1ULL << GENERIC_STAT_rx_noskb_drops)) -/* These statistics are only provided by the 10G MAC. For a 10G/40G - * switchable port we do not expose these because they might not - * include all the packets they should. +/* On 7000 series NICs, these statistics are only provided by the 10G MAC. 
+ * For a 10G/40G switchable port we do not expose these because they might + * not include all the packets they should. + * On 8000 series NICs these statistics are always provided. */ #define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) | \ (1ULL << EF10_STAT_port_tx_lt64) | \ @@ -1488,10 +1582,15 @@ static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)) return 0; - if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) + if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) { raw_mask |= HUNT_40G_EXTRA_STAT_MASK; - else + /* 8000 series have everything even at 40G */ + if (nic_data->datapath_caps2 & + (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN)) + raw_mask |= HUNT_10G_ONLY_STAT_MASK; + } else { raw_mask |= HUNT_10G_ONLY_STAT_MASK; + } if (nic_data->datapath_caps & (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN)) @@ -1617,7 +1716,6 @@ static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx) efx_ef10_get_stat_mask(efx, mask); dma_stats = efx->stats_buffer.addr; - nic_data = efx->nic_data; generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; if (generation_end == EFX_MC_STATS_GENERATION_INVALID) @@ -1744,27 +1842,43 @@ static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats, static void efx_ef10_push_irq_moderation(struct efx_channel *channel) { struct efx_nic *efx = channel->efx; - unsigned int mode, value; + unsigned int mode, usecs; efx_dword_t timer_cmd; - if (channel->irq_moderation) { + if (channel->irq_moderation_us) { mode = 3; - value = channel->irq_moderation - 1; + usecs = channel->irq_moderation_us; } else { mode = 0; - value = 0; + usecs = 0; } - if (EFX_EF10_WORKAROUND_35388(efx)) { + if (EFX_EF10_WORKAROUND_61265(efx)) { + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_EVQ_TMR_IN_LEN); + unsigned int ns = usecs * 1000; + + MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_INSTANCE, + channel->channel); + MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, ns); + MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, ns); + MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_MODE, mode); + + efx_mcdi_rpc_async(efx, MC_CMD_SET_EVQ_TMR, + inbuf, sizeof(inbuf), 0, NULL, 0); + } else if (EFX_EF10_WORKAROUND_35388(efx)) { + unsigned int ticks = efx_usecs_to_ticks(efx, usecs); + EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS, EFE_DD_EVQ_IND_TIMER_FLAGS, ERF_DD_EVQ_IND_TIMER_MODE, mode, - ERF_DD_EVQ_IND_TIMER_VAL, value); + ERF_DD_EVQ_IND_TIMER_VAL, ticks); efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT, channel->channel); } else { + unsigned int ticks = efx_usecs_to_ticks(efx, usecs); + EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode, - ERF_DZ_TC_TIMER_VAL, value); + ERF_DZ_TC_TIMER_VAL, ticks); efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR, channel->channel); } @@ -1935,14 +2049,18 @@ static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static void efx_ef10_irq_test_generate(struct efx_nic *efx) +static int efx_ef10_irq_test_generate(struct efx_nic *efx) { MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN); + if (efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG41750, true, + NULL) == 0) + return -ENOTSUPP; + BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0); MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level); - (void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT, + return efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT, inbuf, sizeof(inbuf), NULL, 0, NULL); } @@ -2536,13 +2654,12 @@ fail: static int 
efx_ef10_ev_init(struct efx_channel *channel) { MCDI_DECLARE_BUF(inbuf, - MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 / - EFX_BUF_SIZE)); - MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN); + MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 / + EFX_BUF_SIZE)); + MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN); size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE; struct efx_nic *efx = channel->efx; struct efx_ef10_nic_data *nic_data; - bool supports_rx_merge; size_t inlen, outlen; unsigned int enabled, implemented; dma_addr_t dma_addr; @@ -2550,9 +2667,6 @@ static int efx_ef10_ev_init(struct efx_channel *channel) int i; nic_data = efx->nic_data; - supports_rx_merge = - !!(nic_data->datapath_caps & - 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN); /* Fill event queue with all ones (i.e. empty events) */ memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); @@ -2561,11 +2675,6 @@ static int efx_ef10_ev_init(struct efx_channel *channel) MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel); /* INIT_EVQ expects index in vector table, not absolute */ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel); - MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS, - INIT_EVQ_IN_FLAG_INTERRUPTING, 1, - INIT_EVQ_IN_FLAG_RX_MERGE, 1, - INIT_EVQ_IN_FLAG_TX_MERGE, 1, - INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge); MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE, MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS); MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0); @@ -2574,6 +2683,27 @@ static int efx_ef10_ev_init(struct efx_channel *channel) MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS); MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0); + if (nic_data->datapath_caps2 & + 1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN) { + /* Use the new generic approach to specifying event queue + * configuration, requesting lower latency or higher throughput. + * The options that actually get used appear in the output. + */ + MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS, + INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1, + INIT_EVQ_V2_IN_FLAG_TYPE, + MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO); + } else { + bool cut_thru = !(nic_data->datapath_caps & + 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN); + + MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS, + INIT_EVQ_IN_FLAG_INTERRUPTING, 1, + INIT_EVQ_IN_FLAG_RX_MERGE, 1, + INIT_EVQ_IN_FLAG_TX_MERGE, 1, + INIT_EVQ_IN_FLAG_CUT_THRU, cut_thru); + } + dma_addr = channel->eventq.buf.dma_addr; for (i = 0; i < entries; ++i) { MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr); @@ -2584,6 +2714,13 @@ static int efx_ef10_ev_init(struct efx_channel *channel) rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen, outbuf, sizeof(outbuf), &outlen); + + if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN) + netif_dbg(efx, drv, efx->net_dev, + "Channel %d using event queue flags %08x\n", + channel->channel, + MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS)); + /* IRQ return is ignored */ if (channel->channel || rc) return rc; @@ -2591,8 +2728,8 @@ static int efx_ef10_ev_init(struct efx_channel *channel) /* Successfully created event queue on channel 0 */ rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled); if (rc == -ENOSYS) { - /* GET_WORKAROUNDS was implemented before the bug26807 - * workaround, thus the latter must be unavailable in this fw + /* GET_WORKAROUNDS was implemented before this workaround, + * thus it must be unavailable in this firmware. 
*/ nic_data->workaround_26807 = false; rc = 0; diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 14b821b1c880..f3826ae28bac 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -281,6 +281,27 @@ static int efx_process_channel(struct efx_channel *channel, int budget) * NAPI guarantees serialisation of polls of the same device, which * provides the guarantee required by efx_process_channel(). */ +static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel) +{ + int step = efx->irq_mod_step_us; + + if (channel->irq_mod_score < irq_adapt_low_thresh) { + if (channel->irq_moderation_us > step) { + channel->irq_moderation_us -= step; + efx->type->push_irq_moderation(channel); + } + } else if (channel->irq_mod_score > irq_adapt_high_thresh) { + if (channel->irq_moderation_us < + efx->irq_rx_moderation_us) { + channel->irq_moderation_us += step; + efx->type->push_irq_moderation(channel); + } + } + + channel->irq_count = 0; + channel->irq_mod_score = 0; +} + static int efx_poll(struct napi_struct *napi, int budget) { struct efx_channel *channel = @@ -301,22 +322,7 @@ static int efx_poll(struct napi_struct *napi, int budget) if (efx_channel_has_rx_queue(channel) && efx->irq_rx_adaptive && unlikely(++channel->irq_count == 1000)) { - if (unlikely(channel->irq_mod_score < - irq_adapt_low_thresh)) { - if (channel->irq_moderation > 1) { - channel->irq_moderation -= 1; - efx->type->push_irq_moderation(channel); - } - } else if (unlikely(channel->irq_mod_score > - irq_adapt_high_thresh)) { - if (channel->irq_moderation < - efx->irq_rx_moderation) { - channel->irq_moderation += 1; - efx->type->push_irq_moderation(channel); - } - } - channel->irq_count = 0; - channel->irq_mod_score = 0; + efx_update_irq_mod(efx, channel); } efx_filter_rfs_expire(channel); @@ -1703,6 +1709,7 @@ static int efx_probe_nic(struct efx_nic *efx) netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels); /* Initialise the interrupt moderation settings */ + efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000); efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true, true); @@ -1949,14 +1956,21 @@ static void efx_remove_all(struct efx_nic *efx) * Interrupt moderation * **************************************************************************/ - -static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int quantum_ns) +unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs) { if (usecs == 0) return 0; - if (usecs * 1000 < quantum_ns) + if (usecs * 1000 < efx->timer_quantum_ns) return 1; /* never round down to 0 */ - return usecs * 1000 / quantum_ns; + return usecs * 1000 / efx->timer_quantum_ns; +} + +unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks) +{ + /* We must round up when converting ticks to microseconds + * because we round down when converting the other way. 
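+ *
+ * For illustration only (not part of this change): with the Falcon timer
+ * quantum of 4968 ns, efx_usecs_to_ticks(efx, 10) = 10000 / 4968 = 2 ticks,
+ * and efx_ticks_to_usecs(efx, 2) = DIV_ROUND_UP(2 * 4968, 1000) = 10, so a
+ * value that lands on a tick boundary round-trips exactly, and the reported
+ * period never understates the time the hardware will actually wait.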
+ */ + return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000); } /* Set interrupt moderation parameters */ @@ -1965,21 +1979,16 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, bool rx_may_override_tx) { struct efx_channel *channel; - unsigned int irq_mod_max = DIV_ROUND_UP(efx->type->timer_period_max * - efx->timer_quantum_ns, - 1000); - unsigned int tx_ticks; - unsigned int rx_ticks; + unsigned int timer_max_us; EFX_ASSERT_RESET_SERIALISED(efx); - if (tx_usecs > irq_mod_max || rx_usecs > irq_mod_max) - return -EINVAL; + timer_max_us = efx->timer_max_ns / 1000; - tx_ticks = irq_mod_ticks(tx_usecs, efx->timer_quantum_ns); - rx_ticks = irq_mod_ticks(rx_usecs, efx->timer_quantum_ns); + if (tx_usecs > timer_max_us || rx_usecs > timer_max_us) + return -EINVAL; - if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 && + if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 && !rx_may_override_tx) { netif_err(efx, drv, efx->net_dev, "Channels are shared. " "RX and TX IRQ moderation must be equal\n"); @@ -1987,12 +1996,12 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, } efx->irq_rx_adaptive = rx_adaptive; - efx->irq_rx_moderation = rx_ticks; + efx->irq_rx_moderation_us = rx_usecs; efx_for_each_channel(channel, efx) { if (efx_channel_has_rx_queue(channel)) - channel->irq_moderation = rx_ticks; + channel->irq_moderation_us = rx_usecs; else if (efx_channel_has_tx_queues(channel)) - channel->irq_moderation = tx_ticks; + channel->irq_moderation_us = tx_usecs; } return 0; @@ -2001,26 +2010,21 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, unsigned int *rx_usecs, bool *rx_adaptive) { - /* We must round up when converting ticks to microseconds - * because we round down when converting the other way. - */ - *rx_adaptive = efx->irq_rx_adaptive; - *rx_usecs = DIV_ROUND_UP(efx->irq_rx_moderation * - efx->timer_quantum_ns, - 1000); + *rx_usecs = efx->irq_rx_moderation_us; /* If channels are shared between RX and TX, so is IRQ * moderation. Otherwise, IRQ moderation is the same for all * TX channels and is not adaptive. 
*/ - if (efx->tx_channel_offset == 0) + if (efx->tx_channel_offset == 0) { *tx_usecs = *rx_usecs; - else - *tx_usecs = DIV_ROUND_UP( - efx->channel[efx->tx_channel_offset]->irq_moderation * - efx->timer_quantum_ns, - 1000); + } else { + struct efx_channel *tx_channel; + + tx_channel = efx->channel[efx->tx_channel_offset]; + *tx_usecs = tx_channel->irq_moderation_us; + } } /************************************************************************** diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index c3ae739e9c7a..342ae16e1f2d 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -204,6 +204,8 @@ int efx_try_recovery(struct efx_nic *efx); /* Global */ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); +unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs); +unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks); int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, unsigned int rx_usecs, bool rx_adaptive, bool rx_may_override_tx); diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index d790cb8d9db3..1a7092602aec 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -378,12 +378,15 @@ static void falcon_push_irq_moderation(struct efx_channel *channel) struct efx_nic *efx = channel->efx; /* Set timer register */ - if (channel->irq_moderation) { + if (channel->irq_moderation_us) { + unsigned int ticks; + + ticks = efx_usecs_to_ticks(efx, channel->irq_moderation_us); EFX_POPULATE_DWORD_2(timer_cmd, FRF_AB_TC_TIMER_MODE, FFE_BB_TIMER_MODE_INT_HLDOFF, FRF_AB_TC_TIMER_VAL, - channel->irq_moderation - 1); + ticks - 1); } else { EFX_POPULATE_DWORD_2(timer_cmd, FRF_AB_TC_TIMER_MODE, @@ -2373,6 +2376,8 @@ static int falcon_probe_nic(struct efx_nic *efx) EFX_MAX_CHANNELS); efx->max_tx_channels = efx->max_channels; efx->timer_quantum_ns = 4968; /* 621 cycles */ + efx->timer_max_ns = efx->type->timer_period_max * + efx->timer_quantum_ns; /* Initialise I2C adapter */ board = falcon_board(efx); diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 4c83739d158f..4762ec444cb8 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -1477,9 +1477,10 @@ void efx_farch_irq_disable_master(struct efx_nic *efx) * Interrupt must already have been enabled, otherwise nasty things * may happen. */ -void efx_farch_irq_test_generate(struct efx_nic *efx) +int efx_farch_irq_test_generate(struct efx_nic *efx) { efx_farch_interrupts(efx, true, true); + return 0; } /* Process a fatal interrupt diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index d28e7dd8fa3c..9fbc12a8f80c 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -548,7 +548,10 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout) efx_mcdi_display_error(efx, async->cmd, async->inlen, errbuf, err_len, rc); } - async->complete(efx, async->cookie, rc, outbuf, data_len); + + if (async->complete) + async->complete(efx, async->cookie, rc, outbuf, + min(async->outlen, data_len)); kfree(async); efx_mcdi_release(mcdi); diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index c9a5b003caaf..ccceafc15896 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -2645,16 +2645,20 @@ #define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0 /* enum: CSR IREG bus. 
*/ #define MC_CMD_POLL_BIST_MEM_BUS_CSR 0x1 -/* enum: RX DPCPU bus. */ +/* enum: RX0 DPCPU bus. */ #define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX 0x2 /* enum: TX0 DPCPU bus. */ #define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX0 0x3 /* enum: TX1 DPCPU bus. */ #define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX1 0x4 -/* enum: RX DICPU bus. */ +/* enum: RX0 DICPU bus. */ #define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX 0x5 /* enum: TX DICPU bus. */ #define MC_CMD_POLL_BIST_MEM_BUS_DICPU_TX 0x6 +/* enum: RX1 DPCPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX1 0x7 +/* enum: RX1 DICPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX1 0x8 /* Pattern written to RAM / register */ #define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16 /* Actual value read from RAM / register */ @@ -3612,6 +3616,8 @@ #define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1 #define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1 #define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6 +#define MC_CMD_NVRAM_INFO_OUT_CMAC_WIDTH 1 #define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7 #define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1 #define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16 @@ -4389,6 +4395,8 @@ * the command will fail with MC_CMD_ERR_FILTERS_PRESENT. */ #define MC_CMD_WORKAROUND_BUG26807 0x6 +/* enum: Bug 61265 work around (broken EVQ TMR writes). */ +#define MC_CMD_WORKAROUND_BUG61265 0x7 /* 0 = disable the workaround indicated by TYPE; any non-zero value = enable * the workaround */ @@ -4413,7 +4421,6 @@ * (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid 'page number' input values, and the * output data, are interpreted on a per-type basis. For SFP+: PAGE=0 or 1 * returns a 128-byte block read from module I2C address 0xA0 offset 0 or 0x80. - * Anything else: currently undefined. Locks required: None. Return code: 0. */ #define MC_CMD_GET_PHY_MEDIA_INFO 0x4b @@ -5479,6 +5486,8 @@ #define LICENSED_V3_FEATURES_TX_SNIFF_WIDTH 1 #define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_LBN 8 #define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_WIDTH 1 +#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_LBN 9 +#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_WIDTH 1 #define LICENSED_V3_FEATURES_MASK_LBN 0 #define LICENSED_V3_FEATURES_MASK_WIDTH 64 @@ -5634,6 +5643,109 @@ /* Only valid if INTRFLAG was true */ #define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0 +/* MC_CMD_INIT_EVQ_V2_IN msgrequest */ +#define MC_CMD_INIT_EVQ_V2_IN_LENMIN 44 +#define MC_CMD_INIT_EVQ_V2_IN_LENMAX 548 +#define MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num)) +/* Size, in entries */ +#define MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4 +/* The initial timer value. The load value is ignored if the timer mode is DIS. 
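+ *
+ * (For illustration: the LENMIN/LENMAX values above follow from
+ * MC_CMD_INIT_EVQ_V2_IN_LEN(num) = 36 + 8 * num, i.e. a request carrying a
+ * single DMA address is 44 bytes and one carrying the maximum of 64
+ * addresses is 36 + 8 * 64 = 548 bytes.)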
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_OFST 8
+/* The reload value is ignored in one-shot modes */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_OFST 12
+/* Flags controlling event queue behaviour; see the FLAG_* fields below. */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_LBN 2
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_LBN 3
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_LBN 4
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_LBN 5
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_LBN 6
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LBN 7
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_WIDTH 4
+/* enum: All initialisation flags specified by host. */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_MANUAL 0x0
+/* enum: MEDFORD only. Certain initialisation flags specified by host may be
+ * over-ridden by firmware based on licenses and firmware variant in order to
+ * provide the lowest latency achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY 0x1
+/* enum: MEDFORD only. Certain initialisation flags specified by host may be
+ * over-ridden by firmware based on licenses and firmware variant in order to
+ * provide the best throughput achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT 0x2
+/* enum: MEDFORD only. Certain initialisation flags may be over-ridden by
+ * firmware based on licenses and firmware variant. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_OFST 24
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test
+ * purposes.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_OFST 24
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_OFST 28
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS 0x0
+/* enum: Count RX packets */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RX 0x1
+/* enum: Count TX packets */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_TX 0x2
+/* enum: Count RX and TX packets */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold.
*/ +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_OFST 32 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_OFST 36 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_OFST 40 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM 64 + +/* MC_CMD_INIT_EVQ_V2_OUT msgresponse */ +#define MC_CMD_INIT_EVQ_V2_OUT_LEN 8 +/* Only valid if INTRFLAG was true */ +#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_OFST 0 +/* Actual configuration applied on the card */ +#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_LBN 2 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1 + /* QUEUE_CRC_MODE structuredef */ #define QUEUE_CRC_MODE_LEN 1 #define QUEUE_CRC_MODE_MODE_LBN 0 @@ -5697,8 +5809,8 @@ #define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1 #define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9 #define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1 -#define MC_CMD_INIT_RXQ_IN_FLAG_FORCE_EV_MERGING_LBN 10 -#define MC_CMD_INIT_RXQ_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_UNUSED_LBN 10 +#define MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20 /* The port ID associated with the v-adaptor which should contain this DMAQ. */ @@ -7854,6 +7966,20 @@ #define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_LBN 4 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_WIDTH 1 /* Number of FATSOv2 contexts per datapath supported by this NIC. Not present * on older firmware (check the length). */ @@ -7910,6 +8036,288 @@ #define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_OFST 70 #define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2 +/* MC_CMD_GET_CAPABILITIES_V3_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 73 +/* First word of flags. 
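+ *
+ * (Illustrative usage, not part of this definition: a driver typically
+ * caches this dword and tests individual capabilities against the *_LBN
+ * values defined below, e.g.
+ *	rx_batching = flags1 & (1 << MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_LBN);
+ * where flags1 is the value read from this offset.)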
*/ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Virtual switching (full feature) RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define 
MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Virtual switching (full feature) TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_OFST 16 +/* Second word of flags. Not present on older firmware (check the length). */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present + * on older firmware (check the length). + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. 
Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k.
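+ * Illustration (assuming VI windows are mapped contiguously from the start
+ * of the function's VI region, which this definition does not by itself
+ * guarantee): in VI_WINDOW_MODE_64K, VI n's window would start at
+ * n * 0x10000, with its PIO region at n * 0x10000 + 0x1000 and its CTPIO
+ * region at n * 0x10000 + 0x3000.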
*/ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2 + /***********************************/ /* MC_CMD_V2_EXTN @@ -9026,7 +9434,7 @@ */ #define MC_CMD_GET_RXDP_CONFIG 0xc2 -#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */ #define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0 @@ -10125,7 +10533,9 @@ * that this operation returns a zero-length response */ #define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0 -/* enum: report counts of installed licenses */ +/* enum: report counts of installed licenses Returns EAGAIN if license + * processing (updating) has been started but not yet completed. + */ #define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1 /* MC_CMD_LICENSING_V3_OUT msgresponse */ @@ -10763,6 +11173,8 @@ #define MC_CMD_GET_WORKAROUNDS_OUT_BUG42008 0x20 /* enum: Bug 26807 features present in firmware (multicast filter chaining) */ #define MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 0x40 +/* enum: Bug 61265 work around (broken EVQ TMR writes). */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG61265 0x80 /***********************************/ @@ -11280,22 +11692,110 @@ #define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN /* MC_CMD_RX_BALANCING_IN msgrequest */ -#define MC_CMD_RX_BALANCING_IN_LEN 4 +#define MC_CMD_RX_BALANCING_IN_LEN 16 /* The RX port whose upconverter table will be modified */ #define MC_CMD_RX_BALANCING_IN_PORT_OFST 0 -#define MC_CMD_RX_BALANCING_IN_PORT_LEN 1 /* The VLAN priority associated to the table index and vFIFO */ -#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 1 -#define MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 1 +#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4 /* The resulting bit of SRC^DST for indexing the table */ -#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 2 -#define MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 1 +#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8 /* The RX engine to which the vFIFO in the table entry will point to */ -#define MC_CMD_RX_BALANCING_IN_ENG_OFST 3 -#define MC_CMD_RX_BALANCING_IN_ENG_LEN 1 +#define MC_CMD_RX_BALANCING_IN_ENG_OFST 12 /* MC_CMD_RX_BALANCING_OUT msgresponse */ #define MC_CMD_RX_BALANCING_OUT_LEN 0 +/***********************************/ +/* MC_CMD_SET_EVQ_TMR + * Update the timer load, timer reload and timer mode values for a given EVQ. + * The requested timer values (in TMR_LOAD_REQ_NS and TMR_RELOAD_REQ_NS) will + * be rounded up to the granularity supported by the hardware, then truncated + * to the range supported by the hardware. The resulting value after the + * rounding and truncation will be returned to the caller (in TMR_LOAD_ACT_NS + * and TMR_RELOAD_ACT_NS). + */ +#define MC_CMD_SET_EVQ_TMR 0x120 + +#define MC_CMD_0x120_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_SET_EVQ_TMR_IN msgrequest */ +#define MC_CMD_SET_EVQ_TMR_IN_LEN 16 +/* Function-relative queue instance */ +#define MC_CMD_SET_EVQ_TMR_IN_INSTANCE_OFST 0 +/* Requested value for timer load (in nanoseconds) */ +#define MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_OFST 4 +/* Requested value for timer reload (in nanoseconds) */ +#define MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_OFST 8 +/* Timer mode. 
Meanings as per EVQ_TMR_REG.TC_TIMER_VAL */ +#define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_OFST 12 +#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS 0x0 /* enum */ +#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START 0x1 /* enum */ +#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START 0x2 /* enum */ +#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF 0x3 /* enum */ + +/* MC_CMD_SET_EVQ_TMR_OUT msgresponse */ +#define MC_CMD_SET_EVQ_TMR_OUT_LEN 8 +/* Actual value for timer load (in nanoseconds) */ +#define MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_OFST 0 +/* Actual value for timer reload (in nanoseconds) */ +#define MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_OFST 4 + + +/***********************************/ +/* MC_CMD_GET_EVQ_TMR_PROPERTIES + * Query properties about the event queue timers. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES 0x122 + +#define MC_CMD_0x122_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_EVQ_TMR_PROPERTIES_IN msgrequest */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_IN_LEN 0 + +/* MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT msgresponse */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN 36 +/* Reserved for future use. */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_OFST 0 +/* For timers updated via writes to EVQ_TMR_REG, this is the time interval (in + * nanoseconds) for each increment of the timer load/reload count. The + * requested duration of a timer is this value multiplied by the timer + * load/reload count. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_OFST 4 +/* For timers updated via writes to EVQ_TMR_REG, this is the maximum value + * allowed for timer load/reload counts. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_OFST 8 +/* For timers updated via writes to EVQ_TMR_REG, timer load/reload counts not a + * multiple of this step size will be rounded in an implementation defined + * manner. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_OFST 12 +/* Maximum timer duration (in nanoseconds) for timers updated via MCDI. Only + * meaningful if MC_CMD_SET_EVQ_TMR is implemented. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_OFST 16 +/* Timer durations requested via MCDI that are not a multiple of this step size + * will be rounded up. Only meaningful if MC_CMD_SET_EVQ_TMR is implemented. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_OFST 20 +/* For timers updated using the bug35388 workaround, this is the time interval + * (in nanoseconds) for each increment of the timer load/reload count. The + * requested duration of a timer is this value multiplied by the timer + * load/reload count. This field is only meaningful if the bug35388 workaround + * is enabled. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_OFST 24 +/* For timers updated using the bug35388 workaround, this is the maximum value + * allowed for timer load/reload counts. This field is only meaningful if the + * bug35388 workaround is enabled. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_OFST 28 +/* For timers updated using the bug35388 workaround, timer load/reload counts + * not a multiple of this step size will be rounded in an implementation + * defined manner. This field is only meaningful if the bug35388 workaround is + * enabled. 
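+ *
+ * For illustration (a sketch, not part of this interface definition): a
+ * driver could derive its maximum supported moderation period from this
+ * response, e.g.
+ *
+ *	if (using_mcdi_timers)
+ *		max_ns = MCDI_DWORD(outbuf, GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS);
+ *	else
+ *		max_ns = MCDI_DWORD(outbuf, GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT) *
+ *			 MCDI_DWORD(outbuf, GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT);
+ *
+ * where the choice of timer mechanism (MCDI vs. EVQ_TMR_REG) is the
+ * caller's responsibility.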
+ */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32 #endif /* MCDI_PCOL_H */ diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 9ff062a36ea8..0a2504b5dad5 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -392,7 +392,7 @@ enum efx_sync_events_state { * @eventq_init: Event queue initialised flag * @enabled: Channel enabled indicator * @irq: IRQ number (MSI and MSI-X only) - * @irq_moderation: IRQ moderation value (in hardware ticks) + * @irq_moderation_us: IRQ moderation value (in microseconds) * @napi_dev: Net device used with NAPI * @napi_str: NAPI control structure * @state: state for NAPI vs busy polling @@ -433,7 +433,7 @@ struct efx_channel { bool eventq_init; bool enabled; int irq; - unsigned int irq_moderation; + unsigned int irq_moderation_us; struct net_device *napi_dev; struct napi_struct napi_str; #ifdef CONFIG_NET_RX_BUSY_POLL @@ -810,8 +810,10 @@ struct vfdi_status; * @membase: Memory BAR value * @interrupt_mode: Interrupt mode * @timer_quantum_ns: Interrupt timer quantum, in nanoseconds + * @timer_max_ns: Interrupt timer maximum value, in nanoseconds * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues - * @irq_rx_moderation: IRQ moderation time for RX event queues + * @irq_rx_mod_step_us: Step size for IRQ moderation for RX event queues + * @irq_rx_moderation_us: IRQ moderation time for RX event queues * @msg_enable: Log message enable flags * @state: Device state number (%STATE_*). Serialised by the rtnl_lock. * @reset_pending: Bitmask for pending resets @@ -940,8 +942,10 @@ struct efx_nic { enum efx_int_mode interrupt_mode; unsigned int timer_quantum_ns; + unsigned int timer_max_ns; bool irq_rx_adaptive; - unsigned int irq_rx_moderation; + unsigned int irq_mod_step_us; + unsigned int irq_rx_moderation_us; u32 msg_enable; enum nic_state state; @@ -1271,7 +1275,7 @@ struct efx_nic_type { int (*mcdi_poll_reboot)(struct efx_nic *efx); void (*mcdi_reboot_detected)(struct efx_nic *efx); void (*irq_enable_master)(struct efx_nic *efx); - void (*irq_test_generate)(struct efx_nic *efx); + int (*irq_test_generate)(struct efx_nic *efx); void (*irq_disable_non_ev)(struct efx_nic *efx); irqreturn_t (*irq_handle_msi)(int irq, void *dev_id); irqreturn_t (*irq_handle_legacy)(int irq, void *dev_id); diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index 89b83e59e1dc..aa1945a858d5 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -66,11 +66,11 @@ void efx_nic_event_test_start(struct efx_channel *channel) channel->efx->type->ev_test_generate(channel); } -void efx_nic_irq_test_start(struct efx_nic *efx) +int efx_nic_irq_test_start(struct efx_nic *efx) { efx->last_irq_cpu = -1; smp_wmb(); - efx->type->irq_test_generate(efx); + return efx->type->irq_test_generate(efx); } /* Hook interrupt handler(s) diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 96944c3c9d14..73bee7ea332a 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -507,10 +507,13 @@ enum { * @stats: Hardware statistics * @workaround_35388: Flag: firmware supports workaround for bug 35388 * @workaround_26807: Flag: firmware supports workaround for bug 26807 + * @workaround_61265: Flag: firmware supports workaround for bug 61265 * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated * after MC reboot * @datapath_caps: Capabilities of datapath firmware (FLAGS1 
field of * %MC_CMD_GET_CAPABILITIES response) + * @datapath_caps2: Further Capabilities of datapath firmware (FLAGS2 field of + * %MC_CMD_GET_CAPABILITIES response) * @rx_dpcpu_fw_id: Firmware ID of the RxDPCPU * @tx_dpcpu_fw_id: Firmware ID of the TxDPCPU * @vport_id: The function's vport ID, only relevant for PFs @@ -540,8 +543,10 @@ struct efx_ef10_nic_data { u64 stats[EF10_STAT_COUNT]; bool workaround_35388; bool workaround_26807; + bool workaround_61265; bool must_check_datapath_caps; u32 datapath_caps; + u32 datapath_caps2; unsigned int rx_dpcpu_fw_id; unsigned int tx_dpcpu_fw_id; unsigned int vport_id; @@ -741,12 +746,12 @@ static inline void efx_update_diff_stat(u64 *stat, u64 diff) /* Interrupts */ int efx_nic_init_interrupt(struct efx_nic *efx); -void efx_nic_irq_test_start(struct efx_nic *efx); +int efx_nic_irq_test_start(struct efx_nic *efx); void efx_nic_fini_interrupt(struct efx_nic *efx); /* Falcon/Siena interrupts */ void efx_farch_irq_enable_master(struct efx_nic *efx); -void efx_farch_irq_test_generate(struct efx_nic *efx); +int efx_farch_irq_test_generate(struct efx_nic *efx); void efx_farch_irq_disable_master(struct efx_nic *efx); irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id); irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id); diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index c771e0af4e06..dd204d9704c6 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -1306,7 +1306,7 @@ static int efx_ptp_probe_channel(struct efx_channel *channel) { struct efx_nic *efx = channel->efx; - channel->irq_moderation = 0; + channel->irq_moderation_us = 0; channel->rx_queue.core_index = 0; return efx_ptp_probe(efx, channel); diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c index 9d78830da609..cd38b44ae23a 100644 --- a/drivers/net/ethernet/sfc/selftest.c +++ b/drivers/net/ethernet/sfc/selftest.c @@ -135,11 +135,19 @@ static int efx_test_interrupts(struct efx_nic *efx, { unsigned long timeout, wait; int cpu; + int rc; netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n"); tests->interrupt = -1; - efx_nic_irq_test_start(efx); + rc = efx_nic_irq_test_start(efx); + if (rc == -ENOTSUPP) { + netif_dbg(efx, drv, efx->net_dev, + "direct interrupt testing not supported\n"); + tests->interrupt = 0; + return 0; + } + timeout = jiffies + IRQ_TIMEOUT; wait = 1; diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h index 009dbe88f3be..32a427253a03 100644 --- a/drivers/net/ethernet/sfc/selftest.h +++ b/drivers/net/ethernet/sfc/selftest.h @@ -28,7 +28,7 @@ struct efx_loopback_self_tests { /* Efx self test results * For fields which are not counters, 1 indicates success and -1 - * indicates failure. + * indicates failure; 0 indicates test could not be run. 
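+ *
+ * Illustrative reading of such a field (for example tests->interrupt after
+ * the interrupt test):
+ *
+ *	-1 - test ran and failed
+ *	 0 - test could not be run (for instance the MC cannot raise a
+ *	     test interrupt, bug 41750)
+ *	 1 - test ran and passed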
*/ struct efx_self_tests { /* online tests */ diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 2219b5424d2b..04ed1b4c7cd9 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -34,19 +34,24 @@ static void siena_init_wol(struct efx_nic *efx); static void siena_push_irq_moderation(struct efx_channel *channel) { + struct efx_nic *efx = channel->efx; efx_dword_t timer_cmd; - if (channel->irq_moderation) + if (channel->irq_moderation_us) { + unsigned int ticks; + + ticks = efx_usecs_to_ticks(efx, channel->irq_moderation_us); EFX_POPULATE_DWORD_2(timer_cmd, FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF, FRF_CZ_TC_TIMER_VAL, - channel->irq_moderation - 1); - else + ticks - 1); + } else { EFX_POPULATE_DWORD_2(timer_cmd, FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS, FRF_CZ_TC_TIMER_VAL, 0); + } efx_writed_page_locked(channel->efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0, channel->channel); } @@ -222,6 +227,9 @@ static int siena_probe_nvconfig(struct efx_nic *efx) efx->timer_quantum_ns = (caps & (1 << MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN)) ? 3072 : 6144; /* 768 cycles */ + efx->timer_max_ns = efx->type->timer_period_max * + efx->timer_quantum_ns; + return rc; } diff --git a/drivers/net/ethernet/sfc/workarounds.h b/drivers/net/ethernet/sfc/workarounds.h index 2310b75d4ec2..351cd14cb9f9 100644 --- a/drivers/net/ethernet/sfc/workarounds.h +++ b/drivers/net/ethernet/sfc/workarounds.h @@ -50,4 +50,8 @@ #define EFX_WORKAROUND_35388(efx) \ (efx_nic_rev(efx) == EFX_REV_HUNT_A0 && EFX_EF10_WORKAROUND_35388(efx)) +/* Moderation timer access must go through MCDI */ +#define EFX_EF10_WORKAROUND_61265(efx) \ + (((struct efx_ef10_nic_data *)efx->nic_data)->workaround_61265) + #endif /* EFX_WORKAROUNDS_H */ diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index f85d605e4560..c6cff3d2ff05 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -124,7 +124,7 @@ do { \ #define RX_PRIORITY_MAPPING 0x76543210 #define TX_PRIORITY_MAPPING 0x33221100 -#define CPDMA_TX_PRIORITY_MAP 0x76543210 +#define CPDMA_TX_PRIORITY_MAP 0x01234567 #define CPSW_VLAN_AWARE BIT(1) #define CPSW_ALE_VLAN_AWARE 1 @@ -140,9 +140,11 @@ do { \ #define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT) #define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1) -#define cpsw_slave_index(priv) \ - ((priv->data.dual_emac) ? priv->emac_port : \ - priv->data.active_slave) +#define cpsw_slave_index(cpsw, priv) \ + ((cpsw->data.dual_emac) ? 
priv->emac_port : \ + cpsw->data.active_slave) +#define IRQ_NUM 2 +#define CPSW_MAX_QUEUES 8 static int debug_level; module_param(debug_level, int, 0); @@ -363,38 +365,41 @@ static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset) __raw_writel(val, slave->regs + offset); } -struct cpsw_priv { - struct platform_device *pdev; - struct net_device *ndev; - struct napi_struct napi_rx; - struct napi_struct napi_tx; +struct cpsw_common { struct device *dev; struct cpsw_platform_data data; + struct napi_struct napi_rx; + struct napi_struct napi_tx; struct cpsw_ss_regs __iomem *regs; struct cpsw_wr_regs __iomem *wr_regs; u8 __iomem *hw_stats; struct cpsw_host_regs __iomem *host_port_regs; - u32 msg_enable; u32 version; u32 coal_intvl; u32 bus_freq_mhz; int rx_packet_max; - struct clk *clk; - u8 mac_addr[ETH_ALEN]; struct cpsw_slave *slaves; struct cpdma_ctlr *dma; - struct cpdma_chan *txch, *rxch; + struct cpdma_chan *txch[CPSW_MAX_QUEUES]; + struct cpdma_chan *rxch[CPSW_MAX_QUEUES]; struct cpsw_ale *ale; - bool rx_pause; - bool tx_pause; bool quirk_irq; bool rx_irq_disabled; bool tx_irq_disabled; - /* snapshot of IRQ numbers */ - u32 irqs_table[4]; - u32 num_irqs; - struct cpts *cpts; + u32 irqs_table[IRQ_NUM]; + struct cpts *cpts; + int rx_ch_num, tx_ch_num; +}; + +struct cpsw_priv { + struct net_device *ndev; + struct device *dev; + u32 msg_enable; + u8 mac_addr[ETH_ALEN]; + bool rx_pause; + bool tx_pause; u32 emac_port; + struct cpsw_common *cpsw; }; struct cpsw_stats { @@ -455,108 +460,92 @@ static const struct cpsw_stats cpsw_gstrings_stats[] = { { "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) }, { "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) }, { "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) }, - { "Rx DMA chan: head_enqueue", CPDMA_RX_STAT(head_enqueue) }, - { "Rx DMA chan: tail_enqueue", CPDMA_RX_STAT(tail_enqueue) }, - { "Rx DMA chan: pad_enqueue", CPDMA_RX_STAT(pad_enqueue) }, - { "Rx DMA chan: misqueued", CPDMA_RX_STAT(misqueued) }, - { "Rx DMA chan: desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) }, - { "Rx DMA chan: pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) }, - { "Rx DMA chan: runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) }, - { "Rx DMA chan: runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) }, - { "Rx DMA chan: empty_dequeue", CPDMA_RX_STAT(empty_dequeue) }, - { "Rx DMA chan: busy_dequeue", CPDMA_RX_STAT(busy_dequeue) }, - { "Rx DMA chan: good_dequeue", CPDMA_RX_STAT(good_dequeue) }, - { "Rx DMA chan: requeue", CPDMA_RX_STAT(requeue) }, - { "Rx DMA chan: teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) }, - { "Tx DMA chan: head_enqueue", CPDMA_TX_STAT(head_enqueue) }, - { "Tx DMA chan: tail_enqueue", CPDMA_TX_STAT(tail_enqueue) }, - { "Tx DMA chan: pad_enqueue", CPDMA_TX_STAT(pad_enqueue) }, - { "Tx DMA chan: misqueued", CPDMA_TX_STAT(misqueued) }, - { "Tx DMA chan: desc_alloc_fail", CPDMA_TX_STAT(desc_alloc_fail) }, - { "Tx DMA chan: pad_alloc_fail", CPDMA_TX_STAT(pad_alloc_fail) }, - { "Tx DMA chan: runt_receive_buf", CPDMA_TX_STAT(runt_receive_buff) }, - { "Tx DMA chan: runt_transmit_buf", CPDMA_TX_STAT(runt_transmit_buff) }, - { "Tx DMA chan: empty_dequeue", CPDMA_TX_STAT(empty_dequeue) }, - { "Tx DMA chan: busy_dequeue", CPDMA_TX_STAT(busy_dequeue) }, - { "Tx DMA chan: good_dequeue", CPDMA_TX_STAT(good_dequeue) }, - { "Tx DMA chan: requeue", CPDMA_TX_STAT(requeue) }, - { "Tx DMA chan: teardown_dequeue", CPDMA_TX_STAT(teardown_dequeue) }, }; -#define CPSW_STATS_LEN ARRAY_SIZE(cpsw_gstrings_stats) +static const struct 
cpsw_stats cpsw_gstrings_ch_stats[] = { + { "head_enqueue", CPDMA_RX_STAT(head_enqueue) }, + { "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) }, + { "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) }, + { "misqueued", CPDMA_RX_STAT(misqueued) }, + { "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) }, + { "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) }, + { "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) }, + { "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) }, + { "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) }, + { "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) }, + { "good_dequeue", CPDMA_RX_STAT(good_dequeue) }, + { "requeue", CPDMA_RX_STAT(requeue) }, + { "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) }, +}; -#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi) +#define CPSW_STATS_COMMON_LEN ARRAY_SIZE(cpsw_gstrings_stats) +#define CPSW_STATS_CH_LEN ARRAY_SIZE(cpsw_gstrings_ch_stats) + +#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw) +#define napi_to_cpsw(napi) container_of(napi, struct cpsw_common, napi) #define for_each_slave(priv, func, arg...) \ do { \ struct cpsw_slave *slave; \ + struct cpsw_common *cpsw = (priv)->cpsw; \ int n; \ - if (priv->data.dual_emac) \ - (func)((priv)->slaves + priv->emac_port, ##arg);\ + if (cpsw->data.dual_emac) \ + (func)((cpsw)->slaves + priv->emac_port, ##arg);\ else \ - for (n = (priv)->data.slaves, \ - slave = (priv)->slaves; \ + for (n = cpsw->data.slaves, \ + slave = cpsw->slaves; \ n; n--) \ (func)(slave++, ##arg); \ } while (0) -#define cpsw_get_slave_ndev(priv, __slave_no__) \ - ((__slave_no__ < priv->data.slaves) ? \ - priv->slaves[__slave_no__].ndev : NULL) -#define cpsw_get_slave_priv(priv, __slave_no__) \ - (((__slave_no__ < priv->data.slaves) && \ - (priv->slaves[__slave_no__].ndev)) ? 
\ - netdev_priv(priv->slaves[__slave_no__].ndev) : NULL) \ - -#define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb) \ + +#define cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb) \ do { \ - if (!priv->data.dual_emac) \ + if (!cpsw->data.dual_emac) \ break; \ if (CPDMA_RX_SOURCE_PORT(status) == 1) { \ - ndev = cpsw_get_slave_ndev(priv, 0); \ - priv = netdev_priv(ndev); \ + ndev = cpsw->slaves[0].ndev; \ skb->dev = ndev; \ } else if (CPDMA_RX_SOURCE_PORT(status) == 2) { \ - ndev = cpsw_get_slave_ndev(priv, 1); \ - priv = netdev_priv(ndev); \ + ndev = cpsw->slaves[1].ndev; \ skb->dev = ndev; \ } \ } while (0) -#define cpsw_add_mcast(priv, addr) \ +#define cpsw_add_mcast(cpsw, priv, addr) \ do { \ - if (priv->data.dual_emac) { \ - struct cpsw_slave *slave = priv->slaves + \ + if (cpsw->data.dual_emac) { \ + struct cpsw_slave *slave = cpsw->slaves + \ priv->emac_port; \ - int slave_port = cpsw_get_slave_port(priv, \ + int slave_port = cpsw_get_slave_port( \ slave->slave_num); \ - cpsw_ale_add_mcast(priv->ale, addr, \ + cpsw_ale_add_mcast(cpsw->ale, addr, \ 1 << slave_port | ALE_PORT_HOST, \ ALE_VLAN, slave->port_vlan, 0); \ } else { \ - cpsw_ale_add_mcast(priv->ale, addr, \ + cpsw_ale_add_mcast(cpsw->ale, addr, \ ALE_ALL_PORTS, \ 0, 0, 0); \ } \ } while (0) -static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num) +static inline int cpsw_get_slave_port(u32 slave_num) { return slave_num + 1; } static void cpsw_set_promiscious(struct net_device *ndev, bool enable) { - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_ale *ale = priv->ale; + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + struct cpsw_ale *ale = cpsw->ale; int i; - if (priv->data.dual_emac) { + if (cpsw->data.dual_emac) { bool flag = false; /* Enabling promiscuous mode for one interface will be * common for both the interface as the interface shares * the same hardware resource. 
*/ - for (i = 0; i < priv->data.slaves; i++) - if (priv->slaves[i].ndev->flags & IFF_PROMISC) + for (i = 0; i < cpsw->data.slaves; i++) + if (cpsw->slaves[i].ndev->flags & IFF_PROMISC) flag = true; if (!enable && flag) { @@ -579,7 +568,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) unsigned long timeout = jiffies + HZ; /* Disable Learn for all ports (host is port 0 and slaves are port 1 and up */ - for (i = 0; i <= priv->data.slaves; i++) { + for (i = 0; i <= cpsw->data.slaves; i++) { cpsw_ale_control_set(ale, i, ALE_PORT_NOLEARN, 1); cpsw_ale_control_set(ale, i, @@ -606,7 +595,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0); /* Enable Learn for all ports (host is port 0 and slaves are port 1 and up */ - for (i = 0; i <= priv->data.slaves; i++) { + for (i = 0; i <= cpsw->data.slaves; i++) { cpsw_ale_control_set(ale, i, ALE_PORT_NOLEARN, 0); cpsw_ale_control_set(ale, i, @@ -620,17 +609,18 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) static void cpsw_ndo_set_rx_mode(struct net_device *ndev) { struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; int vid; - if (priv->data.dual_emac) - vid = priv->slaves[priv->emac_port].port_vlan; + if (cpsw->data.dual_emac) + vid = cpsw->slaves[priv->emac_port].port_vlan; else - vid = priv->data.default_vlan; + vid = cpsw->data.default_vlan; if (ndev->flags & IFF_PROMISC) { /* Enable promiscuous mode */ cpsw_set_promiscious(ndev, true); - cpsw_ale_set_allmulti(priv->ale, IFF_ALLMULTI); + cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI); return; } else { /* Disable promiscuous mode */ @@ -638,51 +628,54 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev) } /* Restore allmulti on vlans if necessary */ - cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI); + cpsw_ale_set_allmulti(cpsw->ale, priv->ndev->flags & IFF_ALLMULTI); /* Clear all mcast from ALE */ - cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS, vid); + cpsw_ale_flush_multicast(cpsw->ale, ALE_ALL_PORTS, vid); if (!netdev_mc_empty(ndev)) { struct netdev_hw_addr *ha; /* program multicast address list into ALE register */ netdev_for_each_mc_addr(ha, ndev) { - cpsw_add_mcast(priv, (u8 *)ha->addr); + cpsw_add_mcast(cpsw, priv, (u8 *)ha->addr); } } } -static void cpsw_intr_enable(struct cpsw_priv *priv) +static void cpsw_intr_enable(struct cpsw_common *cpsw) { - __raw_writel(0xFF, &priv->wr_regs->tx_en); - __raw_writel(0xFF, &priv->wr_regs->rx_en); + __raw_writel(0xFF, &cpsw->wr_regs->tx_en); + __raw_writel(0xFF, &cpsw->wr_regs->rx_en); - cpdma_ctlr_int_ctrl(priv->dma, true); + cpdma_ctlr_int_ctrl(cpsw->dma, true); return; } -static void cpsw_intr_disable(struct cpsw_priv *priv) +static void cpsw_intr_disable(struct cpsw_common *cpsw) { - __raw_writel(0, &priv->wr_regs->tx_en); - __raw_writel(0, &priv->wr_regs->rx_en); + __raw_writel(0, &cpsw->wr_regs->tx_en); + __raw_writel(0, &cpsw->wr_regs->rx_en); - cpdma_ctlr_int_ctrl(priv->dma, false); + cpdma_ctlr_int_ctrl(cpsw->dma, false); return; } static void cpsw_tx_handler(void *token, int len, int status) { + struct netdev_queue *txq; struct sk_buff *skb = token; struct net_device *ndev = skb->dev; - struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); /* Check whether the queue is stopped due to stalled tx dma, if the * queue is stopped then start the queue as we have free desc for tx */ - if (unlikely(netif_queue_stopped(ndev))) - 
netif_wake_queue(ndev); - cpts_tx_timestamp(priv->cpts, skb); + txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb)); + if (unlikely(netif_tx_queue_stopped(txq))) + netif_tx_wake_queue(txq); + + cpts_tx_timestamp(cpsw->cpts, skb); ndev->stats.tx_packets++; ndev->stats.tx_bytes += len; dev_kfree_skb_any(skb); @@ -690,22 +683,23 @@ static void cpsw_tx_handler(void *token, int len, int status) static void cpsw_rx_handler(void *token, int len, int status) { + struct cpdma_chan *ch; struct sk_buff *skb = token; struct sk_buff *new_skb; struct net_device *ndev = skb->dev; - struct cpsw_priv *priv = netdev_priv(ndev); int ret = 0; + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); - cpsw_dual_emac_src_port_detect(status, priv, ndev, skb); + cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb); if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { bool ndev_status = false; - struct cpsw_slave *slave = priv->slaves; + struct cpsw_slave *slave = cpsw->slaves; int n; - if (priv->data.dual_emac) { + if (cpsw->data.dual_emac) { /* In dual emac mode check for all interfaces */ - for (n = priv->data.slaves; n; n--, slave++) + for (n = cpsw->data.slaves; n; n--, slave++) if (netif_running(slave->ndev)) ndev_status = true; } @@ -726,10 +720,11 @@ static void cpsw_rx_handler(void *token, int len, int status) return; } - new_skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max); + new_skb = netdev_alloc_skb_ip_align(ndev, cpsw->rx_packet_max); if (new_skb) { + skb_copy_queue_mapping(new_skb, skb); skb_put(skb, len); - cpts_rx_timestamp(priv->cpts, skb); + cpts_rx_timestamp(cpsw->cpts, skb); skb->protocol = eth_type_trans(skb, ndev); netif_receive_skb(skb); ndev->stats.rx_bytes += len; @@ -741,83 +736,117 @@ static void cpsw_rx_handler(void *token, int len, int status) } requeue: - ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data, - skb_tailroom(new_skb), 0); + if (netif_dormant(ndev)) { + dev_kfree_skb_any(new_skb); + return; + } + + ch = cpsw->rxch[skb_get_queue_mapping(new_skb)]; + ret = cpdma_chan_submit(ch, new_skb, new_skb->data, + skb_tailroom(new_skb), 0); if (WARN_ON(ret < 0)) dev_kfree_skb_any(new_skb); } static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id) { - struct cpsw_priv *priv = dev_id; + struct cpsw_common *cpsw = dev_id; - writel(0, &priv->wr_regs->tx_en); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); + writel(0, &cpsw->wr_regs->tx_en); + cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX); - if (priv->quirk_irq) { - disable_irq_nosync(priv->irqs_table[1]); - priv->tx_irq_disabled = true; + if (cpsw->quirk_irq) { + disable_irq_nosync(cpsw->irqs_table[1]); + cpsw->tx_irq_disabled = true; } - napi_schedule(&priv->napi_tx); + napi_schedule(&cpsw->napi_tx); return IRQ_HANDLED; } static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id) { - struct cpsw_priv *priv = dev_id; + struct cpsw_common *cpsw = dev_id; - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); - writel(0, &priv->wr_regs->rx_en); + cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX); + writel(0, &cpsw->wr_regs->rx_en); - if (priv->quirk_irq) { - disable_irq_nosync(priv->irqs_table[0]); - priv->rx_irq_disabled = true; + if (cpsw->quirk_irq) { + disable_irq_nosync(cpsw->irqs_table[0]); + cpsw->rx_irq_disabled = true; } - napi_schedule(&priv->napi_rx); + napi_schedule(&cpsw->napi_rx); return IRQ_HANDLED; } static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget) { - struct cpsw_priv *priv = napi_to_priv(napi_tx); - int num_tx; + u32 ch_map; + int num_tx, ch; + struct cpsw_common *cpsw = napi_to_cpsw(napi_tx); + + /* 
process every unprocessed channel */ + ch_map = cpdma_ctrl_txchs_state(cpsw->dma); + for (ch = 0, num_tx = 0; num_tx < budget; ch_map >>= 1, ch++) { + if (!ch_map) { + ch_map = cpdma_ctrl_txchs_state(cpsw->dma); + if (!ch_map) + break; + + ch = 0; + } + + if (!(ch_map & 0x01)) + continue; + + num_tx += cpdma_chan_process(cpsw->txch[ch], budget - num_tx); + } - num_tx = cpdma_chan_process(priv->txch, budget); if (num_tx < budget) { napi_complete(napi_tx); - writel(0xff, &priv->wr_regs->tx_en); - if (priv->quirk_irq && priv->tx_irq_disabled) { - priv->tx_irq_disabled = false; - enable_irq(priv->irqs_table[1]); + writel(0xff, &cpsw->wr_regs->tx_en); + if (cpsw->quirk_irq && cpsw->tx_irq_disabled) { + cpsw->tx_irq_disabled = false; + enable_irq(cpsw->irqs_table[1]); } } - if (num_tx) - cpsw_dbg(priv, intr, "poll %d tx pkts\n", num_tx); - return num_tx; } static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget) { - struct cpsw_priv *priv = napi_to_priv(napi_rx); - int num_rx; + u32 ch_map; + int num_rx, ch; + struct cpsw_common *cpsw = napi_to_cpsw(napi_rx); + + /* process every unprocessed channel */ + ch_map = cpdma_ctrl_rxchs_state(cpsw->dma); + for (ch = 0, num_rx = 0; num_rx < budget; ch_map >>= 1, ch++) { + if (!ch_map) { + ch_map = cpdma_ctrl_rxchs_state(cpsw->dma); + if (!ch_map) + break; + + ch = 0; + } + + if (!(ch_map & 0x01)) + continue; + + num_rx += cpdma_chan_process(cpsw->rxch[ch], budget - num_rx); + } - num_rx = cpdma_chan_process(priv->rxch, budget); if (num_rx < budget) { napi_complete(napi_rx); - writel(0xff, &priv->wr_regs->rx_en); - if (priv->quirk_irq && priv->rx_irq_disabled) { - priv->rx_irq_disabled = false; - enable_irq(priv->irqs_table[0]); + writel(0xff, &cpsw->wr_regs->rx_en); + if (cpsw->quirk_irq && cpsw->rx_irq_disabled) { + cpsw->rx_irq_disabled = false; + enable_irq(cpsw->irqs_table[0]); } } - if (num_rx) - cpsw_dbg(priv, intr, "poll %d rx pkts\n", num_rx); - return num_rx; } @@ -850,17 +879,18 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave, struct phy_device *phy = slave->phy; u32 mac_control = 0; u32 slave_port; + struct cpsw_common *cpsw = priv->cpsw; if (!phy) return; - slave_port = cpsw_get_slave_port(priv, slave->slave_num); + slave_port = cpsw_get_slave_port(slave->slave_num); if (phy->link) { - mac_control = priv->data.mac_control; + mac_control = cpsw->data.mac_control; /* enable forwarding */ - cpsw_ale_control_set(priv->ale, slave_port, + cpsw_ale_control_set(cpsw->ale, slave_port, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); if (phy->speed == 1000) @@ -884,7 +914,7 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave, } else { mac_control = 0; /* disable forwarding */ - cpsw_ale_control_set(priv->ale, slave_port, + cpsw_ale_control_set(cpsw->ale, slave_port, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); } @@ -906,19 +936,19 @@ static void cpsw_adjust_link(struct net_device *ndev) if (link) { netif_carrier_on(ndev); if (netif_running(ndev)) - netif_wake_queue(ndev); + netif_tx_wake_all_queues(ndev); } else { netif_carrier_off(ndev); - netif_stop_queue(ndev); + netif_tx_stop_all_queues(ndev); } } static int cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal) { - struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); - coal->rx_coalesce_usecs = priv->coal_intvl; + coal->rx_coalesce_usecs = cpsw->coal_intvl; return 0; } @@ -931,11 +961,12 @@ static int cpsw_set_coalesce(struct net_device *ndev, u32 prescale = 0; u32 addnl_dvdr = 1; u32 coal_intvl = 0; + struct cpsw_common *cpsw 
= priv->cpsw; coal_intvl = coal->rx_coalesce_usecs; - int_ctrl = readl(&priv->wr_regs->int_control); - prescale = priv->bus_freq_mhz * 4; + int_ctrl = readl(&cpsw->wr_regs->int_control); + prescale = cpsw->bus_freq_mhz * 4; if (!coal->rx_coalesce_usecs) { int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN); @@ -963,53 +994,69 @@ static int cpsw_set_coalesce(struct net_device *ndev, } num_interrupts = (1000 * addnl_dvdr) / coal_intvl; - writel(num_interrupts, &priv->wr_regs->rx_imax); - writel(num_interrupts, &priv->wr_regs->tx_imax); + writel(num_interrupts, &cpsw->wr_regs->rx_imax); + writel(num_interrupts, &cpsw->wr_regs->tx_imax); int_ctrl |= CPSW_INTPACEEN; int_ctrl &= (~CPSW_INTPRESCALE_MASK); int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK); update_return: - writel(int_ctrl, &priv->wr_regs->int_control); + writel(int_ctrl, &cpsw->wr_regs->int_control); cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl); - if (priv->data.dual_emac) { - int i; - - for (i = 0; i < priv->data.slaves; i++) { - priv = netdev_priv(priv->slaves[i].ndev); - priv->coal_intvl = coal_intvl; - } - } else { - priv->coal_intvl = coal_intvl; - } + cpsw->coal_intvl = coal_intvl; return 0; } static int cpsw_get_sset_count(struct net_device *ndev, int sset) { + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + switch (sset) { case ETH_SS_STATS: - return CPSW_STATS_LEN; + return (CPSW_STATS_COMMON_LEN + + (cpsw->rx_ch_num + cpsw->tx_ch_num) * + CPSW_STATS_CH_LEN); default: return -EOPNOTSUPP; } } +static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir) +{ + int ch_stats_len; + int line; + int i; + + ch_stats_len = CPSW_STATS_CH_LEN * ch_num; + for (i = 0; i < ch_stats_len; i++) { + line = i % CPSW_STATS_CH_LEN; + snprintf(*p, ETH_GSTRING_LEN, + "%s DMA chan %d: %s", rx_dir ? 
"Rx" : "Tx", + i / CPSW_STATS_CH_LEN, + cpsw_gstrings_ch_stats[line].stat_string); + *p += ETH_GSTRING_LEN; + } +} + static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data) { + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); u8 *p = data; int i; switch (stringset) { case ETH_SS_STATS: - for (i = 0; i < CPSW_STATS_LEN; i++) { + for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) { memcpy(p, cpsw_gstrings_stats[i].stat_string, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } + + cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1); + cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0); break; } } @@ -1017,86 +1064,78 @@ static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data) static void cpsw_get_ethtool_stats(struct net_device *ndev, struct ethtool_stats *stats, u64 *data) { - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpdma_chan_stats rx_stats; - struct cpdma_chan_stats tx_stats; - u32 val; u8 *p; - int i; + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + struct cpdma_chan_stats ch_stats; + int i, l, ch; /* Collect Davinci CPDMA stats for Rx and Tx Channel */ - cpdma_chan_get_stats(priv->rxch, &rx_stats); - cpdma_chan_get_stats(priv->txch, &tx_stats); - - for (i = 0; i < CPSW_STATS_LEN; i++) { - switch (cpsw_gstrings_stats[i].type) { - case CPSW_STATS: - val = readl(priv->hw_stats + - cpsw_gstrings_stats[i].stat_offset); - data[i] = val; - break; - - case CPDMA_RX_STATS: - p = (u8 *)&rx_stats + - cpsw_gstrings_stats[i].stat_offset; - data[i] = *(u32 *)p; - break; + for (l = 0; l < CPSW_STATS_COMMON_LEN; l++) + data[l] = readl(cpsw->hw_stats + + cpsw_gstrings_stats[l].stat_offset); + + for (ch = 0; ch < cpsw->rx_ch_num; ch++) { + cpdma_chan_get_stats(cpsw->rxch[ch], &ch_stats); + for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) { + p = (u8 *)&ch_stats + + cpsw_gstrings_ch_stats[i].stat_offset; + data[l] = *(u32 *)p; + } + } - case CPDMA_TX_STATS: - p = (u8 *)&tx_stats + - cpsw_gstrings_stats[i].stat_offset; - data[i] = *(u32 *)p; - break; + for (ch = 0; ch < cpsw->tx_ch_num; ch++) { + cpdma_chan_get_stats(cpsw->txch[ch], &ch_stats); + for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) { + p = (u8 *)&ch_stats + + cpsw_gstrings_ch_stats[i].stat_offset; + data[l] = *(u32 *)p; } } } -static int cpsw_common_res_usage_state(struct cpsw_priv *priv) +static int cpsw_common_res_usage_state(struct cpsw_common *cpsw) { u32 i; u32 usage_count = 0; - if (!priv->data.dual_emac) + if (!cpsw->data.dual_emac) return 0; - for (i = 0; i < priv->data.slaves; i++) - if (priv->slaves[i].open_stat) + for (i = 0; i < cpsw->data.slaves; i++) + if (cpsw->slaves[i].open_stat) usage_count++; return usage_count; } -static inline int cpsw_tx_packet_submit(struct net_device *ndev, - struct cpsw_priv *priv, struct sk_buff *skb) +static inline int cpsw_tx_packet_submit(struct cpsw_priv *priv, + struct sk_buff *skb, + struct cpdma_chan *txch) { - if (!priv->data.dual_emac) - return cpdma_chan_submit(priv->txch, skb, skb->data, - skb->len, 0); + struct cpsw_common *cpsw = priv->cpsw; - if (ndev == cpsw_get_slave_ndev(priv, 0)) - return cpdma_chan_submit(priv->txch, skb, skb->data, - skb->len, 1); - else - return cpdma_chan_submit(priv->txch, skb, skb->data, - skb->len, 2); + return cpdma_chan_submit(txch, skb, skb->data, skb->len, + priv->emac_port + cpsw->data.dual_emac); } static inline void cpsw_add_dual_emac_def_ale_entries( struct cpsw_priv *priv, struct cpsw_slave *slave, u32 slave_port) { + struct cpsw_common *cpsw = priv->cpsw; u32 port_mask = 1 << slave_port | ALE_PORT_HOST; - if (priv->version == 
CPSW_VERSION_1) + if (cpsw->version == CPSW_VERSION_1) slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN); else slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN); - cpsw_ale_add_vlan(priv->ale, slave->port_vlan, port_mask, + cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask, port_mask, port_mask, 0); - cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, + cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast, port_mask, ALE_VLAN, slave->port_vlan, 0); - cpsw_ale_add_ucast(priv->ale, priv->mac_addr, - HOST_PORT_NUM, ALE_VLAN | ALE_SECURE, slave->port_vlan); + cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, + HOST_PORT_NUM, ALE_VLAN | + ALE_SECURE, slave->port_vlan); } static void soft_reset_slave(struct cpsw_slave *slave) @@ -1110,13 +1149,14 @@ static void soft_reset_slave(struct cpsw_slave *slave) static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) { u32 slave_port; + struct cpsw_common *cpsw = priv->cpsw; soft_reset_slave(slave); /* setup priority mapping */ __raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map); - switch (priv->version) { + switch (cpsw->version) { case CPSW_VERSION_1: slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP); break; @@ -1128,17 +1168,17 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) } /* setup max packet size, and mac address */ - __raw_writel(priv->rx_packet_max, &slave->sliver->rx_maxlen); + __raw_writel(cpsw->rx_packet_max, &slave->sliver->rx_maxlen); cpsw_set_slave_mac(slave, priv); slave->mac_control = 0; /* no link yet */ - slave_port = cpsw_get_slave_port(priv, slave->slave_num); + slave_port = cpsw_get_slave_port(slave->slave_num); - if (priv->data.dual_emac) + if (cpsw->data.dual_emac) cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port); else - cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, + cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast, 1 << slave_port, 0, 0, ALE_MCAST_FWD_2); if (slave->data->phy_node) { @@ -1168,81 +1208,121 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) phy_start(slave->phy); /* Configure GMII_SEL register */ - cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface, slave->slave_num); + cpsw_phy_sel(cpsw->dev, slave->phy->interface, slave->slave_num); } static inline void cpsw_add_default_vlan(struct cpsw_priv *priv) { - const int vlan = priv->data.default_vlan; + struct cpsw_common *cpsw = priv->cpsw; + const int vlan = cpsw->data.default_vlan; u32 reg; int i; int unreg_mcast_mask; - reg = (priv->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN : + reg = (cpsw->version == CPSW_VERSION_1) ? 
CPSW1_PORT_VLAN : CPSW2_PORT_VLAN; - writel(vlan, &priv->host_port_regs->port_vlan); + writel(vlan, &cpsw->host_port_regs->port_vlan); - for (i = 0; i < priv->data.slaves; i++) - slave_write(priv->slaves + i, vlan, reg); + for (i = 0; i < cpsw->data.slaves; i++) + slave_write(cpsw->slaves + i, vlan, reg); if (priv->ndev->flags & IFF_ALLMULTI) unreg_mcast_mask = ALE_ALL_PORTS; else unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2; - cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS, + cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS, ALE_ALL_PORTS, ALE_ALL_PORTS, unreg_mcast_mask); } static void cpsw_init_host_port(struct cpsw_priv *priv) { - u32 control_reg; u32 fifo_mode; + u32 control_reg; + struct cpsw_common *cpsw = priv->cpsw; /* soft reset the controller and initialize ale */ - soft_reset("cpsw", &priv->regs->soft_reset); - cpsw_ale_start(priv->ale); + soft_reset("cpsw", &cpsw->regs->soft_reset); + cpsw_ale_start(cpsw->ale); /* switch to vlan unaware mode */ - cpsw_ale_control_set(priv->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, + cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, CPSW_ALE_VLAN_AWARE); - control_reg = readl(&priv->regs->control); + control_reg = readl(&cpsw->regs->control); control_reg |= CPSW_VLAN_AWARE; - writel(control_reg, &priv->regs->control); - fifo_mode = (priv->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE : + writel(control_reg, &cpsw->regs->control); + fifo_mode = (cpsw->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE : CPSW_FIFO_NORMAL_MODE; - writel(fifo_mode, &priv->host_port_regs->tx_in_ctl); + writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl); /* setup host port priority mapping */ __raw_writel(CPDMA_TX_PRIORITY_MAP, - &priv->host_port_regs->cpdma_tx_pri_map); - __raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map); + &cpsw->host_port_regs->cpdma_tx_pri_map); + __raw_writel(0, &cpsw->host_port_regs->cpdma_rx_chan_map); - cpsw_ale_control_set(priv->ale, HOST_PORT_NUM, + cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); - if (!priv->data.dual_emac) { - cpsw_ale_add_ucast(priv->ale, priv->mac_addr, HOST_PORT_NUM, + if (!cpsw->data.dual_emac) { + cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM, 0, 0); - cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, + cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast, ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2); } } -static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv) +static int cpsw_fill_rx_channels(struct cpsw_priv *priv) +{ + struct cpsw_common *cpsw = priv->cpsw; + struct sk_buff *skb; + int ch_buf_num; + int ch, i, ret; + + for (ch = 0; ch < cpsw->rx_ch_num; ch++) { + ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxch[ch]); + for (i = 0; i < ch_buf_num; i++) { + skb = __netdev_alloc_skb_ip_align(priv->ndev, + cpsw->rx_packet_max, + GFP_KERNEL); + if (!skb) { + cpsw_err(priv, ifup, "cannot allocate skb\n"); + return -ENOMEM; + } + + skb_set_queue_mapping(skb, ch); + ret = cpdma_chan_submit(cpsw->rxch[ch], skb, skb->data, + skb_tailroom(skb), 0); + if (ret < 0) { + cpsw_err(priv, ifup, + "cannot submit skb to channel %d rx, error %d\n", + ch, ret); + kfree_skb(skb); + return ret; + } + kmemleak_not_leak(skb); + } + + cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n", + ch, ch_buf_num); + } + + return 0; +} + +static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw) { u32 slave_port; - slave_port = cpsw_get_slave_port(priv, slave->slave_num); + slave_port = cpsw_get_slave_port(slave->slave_num); if (!slave->phy) 
return; phy_stop(slave->phy); phy_disconnect(slave->phy); slave->phy = NULL; - cpsw_ale_control_set(priv->ale, slave_port, + cpsw_ale_control_set(cpsw->ale, slave_port, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); soft_reset_slave(slave); } @@ -1250,115 +1330,111 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv) static int cpsw_ndo_open(struct net_device *ndev) { struct cpsw_priv *priv = netdev_priv(ndev); - int i, ret; + struct cpsw_common *cpsw = priv->cpsw; + int ret; u32 reg; - ret = pm_runtime_get_sync(&priv->pdev->dev); + ret = pm_runtime_get_sync(cpsw->dev); if (ret < 0) { - pm_runtime_put_noidle(&priv->pdev->dev); + pm_runtime_put_noidle(cpsw->dev); return ret; } - if (!cpsw_common_res_usage_state(priv)) - cpsw_intr_disable(priv); + if (!cpsw_common_res_usage_state(cpsw)) + cpsw_intr_disable(cpsw); netif_carrier_off(ndev); - reg = priv->version; + /* Notify the stack of the actual queue counts. */ + ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num); + if (ret) { + dev_err(priv->dev, "cannot set real number of tx queues\n"); + goto err_cleanup; + } + + ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num); + if (ret) { + dev_err(priv->dev, "cannot set real number of rx queues\n"); + goto err_cleanup; + } + + reg = cpsw->version; dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n", CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg), CPSW_RTL_VERSION(reg)); /* initialize host and slave ports */ - if (!cpsw_common_res_usage_state(priv)) + if (!cpsw_common_res_usage_state(cpsw)) cpsw_init_host_port(priv); for_each_slave(priv, cpsw_slave_open, priv); /* Add default VLAN */ - if (!priv->data.dual_emac) + if (!cpsw->data.dual_emac) cpsw_add_default_vlan(priv); else - cpsw_ale_add_vlan(priv->ale, priv->data.default_vlan, + cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan, ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0); - if (!cpsw_common_res_usage_state(priv)) { - struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0); - int buf_num; - + if (!cpsw_common_res_usage_state(cpsw)) { /* setup tx dma to fixed prio and zero offset */ - cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1); - cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0); + cpdma_control_set(cpsw->dma, CPDMA_TX_PRIO_FIXED, 1); + cpdma_control_set(cpsw->dma, CPDMA_RX_BUFFER_OFFSET, 0); /* disable priority elevation */ - __raw_writel(0, &priv->regs->ptype); + __raw_writel(0, &cpsw->regs->ptype); /* enable statistics collection only on all ports */ - __raw_writel(0x7, &priv->regs->stat_port_en); + __raw_writel(0x7, &cpsw->regs->stat_port_en); /* Enable internal fifo flow control */ - writel(0x7, &priv->regs->flow_control); + writel(0x7, &cpsw->regs->flow_control); - napi_enable(&priv_sl0->napi_rx); - napi_enable(&priv_sl0->napi_tx); + napi_enable(&cpsw->napi_rx); + napi_enable(&cpsw->napi_tx); - if (priv_sl0->tx_irq_disabled) { - priv_sl0->tx_irq_disabled = false; - enable_irq(priv->irqs_table[1]); + if (cpsw->tx_irq_disabled) { + cpsw->tx_irq_disabled = false; + enable_irq(cpsw->irqs_table[1]); } - if (priv_sl0->rx_irq_disabled) { - priv_sl0->rx_irq_disabled = false; - enable_irq(priv->irqs_table[0]); + if (cpsw->rx_irq_disabled) { + cpsw->rx_irq_disabled = false; + enable_irq(cpsw->irqs_table[0]); } - buf_num = cpdma_chan_get_rx_buf_num(priv->dma); - for (i = 0; i < buf_num; i++) { - struct sk_buff *skb; + ret = cpsw_fill_rx_channels(priv); + if (ret < 0) + goto err_cleanup; - ret = -ENOMEM; - skb = __netdev_alloc_skb_ip_align(priv->ndev, - priv->rx_packet_max, GFP_KERNEL); - if 
(!skb) - goto err_cleanup; - ret = cpdma_chan_submit(priv->rxch, skb, skb->data, - skb_tailroom(skb), 0); - if (ret < 0) { - kfree_skb(skb); - goto err_cleanup; - } - kmemleak_not_leak(skb); - } - /* continue even if we didn't manage to submit all - * receive descs - */ - cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i); - - if (cpts_register(&priv->pdev->dev, priv->cpts, - priv->data.cpts_clock_mult, - priv->data.cpts_clock_shift)) + if (cpts_register(cpsw->dev, cpsw->cpts, + cpsw->data.cpts_clock_mult, + cpsw->data.cpts_clock_shift)) dev_err(priv->dev, "error registering cpts device\n"); } /* Enable Interrupt pacing if configured */ - if (priv->coal_intvl != 0) { + if (cpsw->coal_intvl != 0) { struct ethtool_coalesce coal; - coal.rx_coalesce_usecs = priv->coal_intvl; + coal.rx_coalesce_usecs = cpsw->coal_intvl; cpsw_set_coalesce(ndev, &coal); } - cpdma_ctlr_start(priv->dma); - cpsw_intr_enable(priv); + cpdma_ctlr_start(cpsw->dma); + cpsw_intr_enable(cpsw); + + if (cpsw->data.dual_emac) + cpsw->slaves[priv->emac_port].open_stat = true; + + netif_tx_start_all_queues(ndev); - if (priv->data.dual_emac) - priv->slaves[priv->emac_port].open_stat = true; return 0; err_cleanup: - cpdma_ctlr_stop(priv->dma); - for_each_slave(priv, cpsw_slave_stop, priv); - pm_runtime_put_sync(&priv->pdev->dev); + cpdma_ctlr_stop(cpsw->dma); + for_each_slave(priv, cpsw_slave_stop, cpsw); + pm_runtime_put_sync(cpsw->dev); netif_carrier_off(priv->ndev); return ret; } @@ -1366,25 +1442,24 @@ err_cleanup: static int cpsw_ndo_stop(struct net_device *ndev) { struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; cpsw_info(priv, ifdown, "shutting down cpsw device\n"); - netif_stop_queue(priv->ndev); + netif_tx_stop_all_queues(priv->ndev); netif_carrier_off(priv->ndev); - if (cpsw_common_res_usage_state(priv) <= 1) { - struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0); - - napi_disable(&priv_sl0->napi_rx); - napi_disable(&priv_sl0->napi_tx); - cpts_unregister(priv->cpts); - cpsw_intr_disable(priv); - cpdma_ctlr_stop(priv->dma); - cpsw_ale_stop(priv->ale); - } - for_each_slave(priv, cpsw_slave_stop, priv); - pm_runtime_put_sync(&priv->pdev->dev); - if (priv->data.dual_emac) - priv->slaves[priv->emac_port].open_stat = false; + if (cpsw_common_res_usage_state(cpsw) <= 1) { + napi_disable(&cpsw->napi_rx); + napi_disable(&cpsw->napi_tx); + cpts_unregister(cpsw->cpts); + cpsw_intr_disable(cpsw); + cpdma_ctlr_stop(cpsw->dma); + cpsw_ale_stop(cpsw->ale); + } + for_each_slave(priv, cpsw_slave_stop, cpsw); + pm_runtime_put_sync(cpsw->dev); + if (cpsw->data.dual_emac) + cpsw->slaves[priv->emac_port].open_stat = false; return 0; } @@ -1392,7 +1467,10 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct cpsw_priv *priv = netdev_priv(ndev); - int ret; + struct cpsw_common *cpsw = priv->cpsw; + struct netdev_queue *txq; + struct cpdma_chan *txch; + int ret, q_idx; netif_trans_update(ndev); @@ -1403,12 +1481,17 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, } if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && - priv->cpts->tx_enable) + cpsw->cpts->tx_enable) skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; skb_tx_timestamp(skb); - ret = cpsw_tx_packet_submit(ndev, priv, skb); + q_idx = skb_get_queue_mapping(skb); + if (q_idx >= cpsw->tx_ch_num) + q_idx = q_idx % cpsw->tx_ch_num; + + txch = cpsw->txch[q_idx]; + ret = cpsw_tx_packet_submit(priv, skb, txch); if (unlikely(ret != 0)) { cpsw_err(priv, tx_err, "desc submit failed\n"); goto 
fail; @@ -1417,24 +1500,27 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, /* If there is no more tx desc left free then we need to * tell the kernel to stop sending us tx frames. */ - if (unlikely(!cpdma_check_free_tx_desc(priv->txch))) - netif_stop_queue(ndev); + if (unlikely(!cpdma_check_free_tx_desc(txch))) { + txq = netdev_get_tx_queue(ndev, q_idx); + netif_tx_stop_queue(txq); + } return NETDEV_TX_OK; fail: ndev->stats.tx_dropped++; - netif_stop_queue(ndev); + txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb)); + netif_tx_stop_queue(txq); return NETDEV_TX_BUSY; } #ifdef CONFIG_TI_CPTS -static void cpsw_hwtstamp_v1(struct cpsw_priv *priv) +static void cpsw_hwtstamp_v1(struct cpsw_common *cpsw) { - struct cpsw_slave *slave = &priv->slaves[priv->data.active_slave]; + struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave]; u32 ts_en, seq_id; - if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) { + if (!cpsw->cpts->tx_enable && !cpsw->cpts->rx_enable) { slave_write(slave, 0, CPSW1_TS_CTL); return; } @@ -1442,10 +1528,10 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv) seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588; ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS; - if (priv->cpts->tx_enable) + if (cpsw->cpts->tx_enable) ts_en |= CPSW_V1_TS_TX_EN; - if (priv->cpts->rx_enable) + if (cpsw->cpts->rx_enable) ts_en |= CPSW_V1_TS_RX_EN; slave_write(slave, ts_en, CPSW1_TS_CTL); @@ -1455,32 +1541,33 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv) static void cpsw_hwtstamp_v2(struct cpsw_priv *priv) { struct cpsw_slave *slave; + struct cpsw_common *cpsw = priv->cpsw; u32 ctrl, mtype; - if (priv->data.dual_emac) - slave = &priv->slaves[priv->emac_port]; + if (cpsw->data.dual_emac) + slave = &cpsw->slaves[priv->emac_port]; else - slave = &priv->slaves[priv->data.active_slave]; + slave = &cpsw->slaves[cpsw->data.active_slave]; ctrl = slave_read(slave, CPSW2_CONTROL); - switch (priv->version) { + switch (cpsw->version) { case CPSW_VERSION_2: ctrl &= ~CTRL_V2_ALL_TS_MASK; - if (priv->cpts->tx_enable) + if (cpsw->cpts->tx_enable) ctrl |= CTRL_V2_TX_TS_BITS; - if (priv->cpts->rx_enable) + if (cpsw->cpts->rx_enable) ctrl |= CTRL_V2_RX_TS_BITS; break; case CPSW_VERSION_3: default: ctrl &= ~CTRL_V3_ALL_TS_MASK; - if (priv->cpts->tx_enable) + if (cpsw->cpts->tx_enable) ctrl |= CTRL_V3_TX_TS_BITS; - if (priv->cpts->rx_enable) + if (cpsw->cpts->rx_enable) ctrl |= CTRL_V3_RX_TS_BITS; break; } @@ -1489,18 +1576,19 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv) slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE); slave_write(slave, ctrl, CPSW2_CONTROL); - __raw_writel(ETH_P_1588, &priv->regs->ts_ltype); + __raw_writel(ETH_P_1588, &cpsw->regs->ts_ltype); } static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) { struct cpsw_priv *priv = netdev_priv(dev); - struct cpts *cpts = priv->cpts; struct hwtstamp_config cfg; + struct cpsw_common *cpsw = priv->cpsw; + struct cpts *cpts = cpsw->cpts; - if (priv->version != CPSW_VERSION_1 && - priv->version != CPSW_VERSION_2 && - priv->version != CPSW_VERSION_3) + if (cpsw->version != CPSW_VERSION_1 && + cpsw->version != CPSW_VERSION_2 && + cpsw->version != CPSW_VERSION_3) return -EOPNOTSUPP; if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) @@ -1540,9 +1628,9 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) cpts->tx_enable = cfg.tx_type == HWTSTAMP_TX_ON; - switch (priv->version) { + switch (cpsw->version) { case CPSW_VERSION_1: - cpsw_hwtstamp_v1(priv); + 
cpsw_hwtstamp_v1(cpsw); break; case CPSW_VERSION_2: case CPSW_VERSION_3: @@ -1557,13 +1645,13 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) { - struct cpsw_priv *priv = netdev_priv(dev); - struct cpts *cpts = priv->cpts; + struct cpsw_common *cpsw = ndev_to_cpsw(dev); + struct cpts *cpts = cpsw->cpts; struct hwtstamp_config cfg; - if (priv->version != CPSW_VERSION_1 && - priv->version != CPSW_VERSION_2 && - priv->version != CPSW_VERSION_3) + if (cpsw->version != CPSW_VERSION_1 && + cpsw->version != CPSW_VERSION_2 && + cpsw->version != CPSW_VERSION_3) return -EOPNOTSUPP; cfg.flags = 0; @@ -1579,7 +1667,8 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd) { struct cpsw_priv *priv = netdev_priv(dev); - int slave_no = cpsw_slave_index(priv); + struct cpsw_common *cpsw = priv->cpsw; + int slave_no = cpsw_slave_index(cpsw, priv); if (!netif_running(dev)) return -EINVAL; @@ -1593,27 +1682,33 @@ static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd) #endif } - if (!priv->slaves[slave_no].phy) + if (!cpsw->slaves[slave_no].phy) return -EOPNOTSUPP; - return phy_mii_ioctl(priv->slaves[slave_no].phy, req, cmd); + return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd); } static void cpsw_ndo_tx_timeout(struct net_device *ndev) { struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int ch; cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n"); ndev->stats.tx_errors++; - cpsw_intr_disable(priv); - cpdma_chan_stop(priv->txch); - cpdma_chan_start(priv->txch); - cpsw_intr_enable(priv); + cpsw_intr_disable(cpsw); + for (ch = 0; ch < cpsw->tx_ch_num; ch++) { + cpdma_chan_stop(cpsw->txch[ch]); + cpdma_chan_start(cpsw->txch[ch]); + } + + cpsw_intr_enable(cpsw); } static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) { struct cpsw_priv *priv = netdev_priv(ndev); struct sockaddr *addr = (struct sockaddr *)p; + struct cpsw_common *cpsw = priv->cpsw; int flags = 0; u16 vid = 0; int ret; @@ -1621,27 +1716,27 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - ret = pm_runtime_get_sync(&priv->pdev->dev); + ret = pm_runtime_get_sync(cpsw->dev); if (ret < 0) { - pm_runtime_put_noidle(&priv->pdev->dev); + pm_runtime_put_noidle(cpsw->dev); return ret; } - if (priv->data.dual_emac) { - vid = priv->slaves[priv->emac_port].port_vlan; + if (cpsw->data.dual_emac) { + vid = cpsw->slaves[priv->emac_port].port_vlan; flags = ALE_VLAN; } - cpsw_ale_del_ucast(priv->ale, priv->mac_addr, HOST_PORT_NUM, + cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM, flags, vid); - cpsw_ale_add_ucast(priv->ale, addr->sa_data, HOST_PORT_NUM, + cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM, flags, vid); memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); for_each_slave(priv, cpsw_set_slave_mac, priv); - pm_runtime_put(&priv->pdev->dev); + pm_runtime_put(cpsw->dev); return 0; } @@ -1649,12 +1744,12 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) #ifdef CONFIG_NET_POLL_CONTROLLER static void cpsw_ndo_poll_controller(struct net_device *ndev) { - struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); - cpsw_intr_disable(priv); - 
cpsw_rx_interrupt(priv->irqs_table[0], priv); - cpsw_tx_interrupt(priv->irqs_table[1], priv); - cpsw_intr_enable(priv); + cpsw_intr_disable(cpsw); + cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw); + cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw); + cpsw_intr_enable(cpsw); } #endif @@ -1664,8 +1759,9 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, int ret; int unreg_mcast_mask = 0; u32 port_mask; + struct cpsw_common *cpsw = priv->cpsw; - if (priv->data.dual_emac) { + if (cpsw->data.dual_emac) { port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST; if (priv->ndev->flags & IFF_ALLMULTI) @@ -1679,27 +1775,27 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2; } - ret = cpsw_ale_add_vlan(priv->ale, vid, port_mask, 0, port_mask, + ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask, unreg_mcast_mask); if (ret != 0) return ret; - ret = cpsw_ale_add_ucast(priv->ale, priv->mac_addr, + ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM, ALE_VLAN, vid); if (ret != 0) goto clean_vid; - ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, + ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast, port_mask, ALE_VLAN, vid, 0); if (ret != 0) goto clean_vlan_ucast; return 0; clean_vlan_ucast: - cpsw_ale_del_ucast(priv->ale, priv->mac_addr, + cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM, ALE_VLAN, vid); clean_vid: - cpsw_ale_del_vlan(priv->ale, vid, 0); + cpsw_ale_del_vlan(cpsw->ale, vid, 0); return ret; } @@ -1707,26 +1803,27 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) { struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; int ret; - if (vid == priv->data.default_vlan) + if (vid == cpsw->data.default_vlan) return 0; - ret = pm_runtime_get_sync(&priv->pdev->dev); + ret = pm_runtime_get_sync(cpsw->dev); if (ret < 0) { - pm_runtime_put_noidle(&priv->pdev->dev); + pm_runtime_put_noidle(cpsw->dev); return ret; } - if (priv->data.dual_emac) { + if (cpsw->data.dual_emac) { /* In dual EMAC, reserved VLAN id should not be used for * creating VLAN interfaces as this can break the dual * EMAC port separation */ int i; - for (i = 0; i < priv->data.slaves; i++) { - if (vid == priv->slaves[i].port_vlan) + for (i = 0; i < cpsw->data.slaves; i++) { + if (vid == cpsw->slaves[i].port_vlan) return -EINVAL; } } @@ -1734,7 +1831,7 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid); ret = cpsw_add_vlan_ale_entry(priv, vid); - pm_runtime_put(&priv->pdev->dev); + pm_runtime_put(cpsw->dev); return ret; } @@ -1742,39 +1839,40 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) { struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; int ret; - if (vid == priv->data.default_vlan) + if (vid == cpsw->data.default_vlan) return 0; - ret = pm_runtime_get_sync(&priv->pdev->dev); + ret = pm_runtime_get_sync(cpsw->dev); if (ret < 0) { - pm_runtime_put_noidle(&priv->pdev->dev); + pm_runtime_put_noidle(cpsw->dev); return ret; } - if (priv->data.dual_emac) { + if (cpsw->data.dual_emac) { int i; - for (i = 0; i < priv->data.slaves; i++) { - if (vid == priv->slaves[i].port_vlan) + for (i = 0; i < cpsw->data.slaves; i++) { + if (vid == cpsw->slaves[i].port_vlan) return -EINVAL; } } dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid); - ret = cpsw_ale_del_vlan(priv->ale, vid, 0); + ret = 
cpsw_ale_del_vlan(cpsw->ale, vid, 0); if (ret != 0) return ret; - ret = cpsw_ale_del_ucast(priv->ale, priv->mac_addr, + ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM, ALE_VLAN, vid); if (ret != 0) return ret; - ret = cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast, + ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast, 0, ALE_VLAN, vid); - pm_runtime_put(&priv->pdev->dev); + pm_runtime_put(cpsw->dev); return ret; } @@ -1797,31 +1895,32 @@ static const struct net_device_ops cpsw_netdev_ops = { static int cpsw_get_regs_len(struct net_device *ndev) { - struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); - return priv->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32); + return cpsw->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32); } static void cpsw_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *p) { - struct cpsw_priv *priv = netdev_priv(ndev); u32 *reg = p; + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); /* update CPSW IP version */ - regs->version = priv->version; + regs->version = cpsw->version; - cpsw_ale_dump(priv->ale, reg); + cpsw_ale_dump(cpsw->ale, reg); } static void cpsw_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { - struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + struct platform_device *pdev = to_platform_device(cpsw->dev); strlcpy(info->driver, "cpsw", sizeof(info->driver)); strlcpy(info->version, "1.0", sizeof(info->version)); - strlcpy(info->bus_info, priv->pdev->name, sizeof(info->bus_info)); + strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info)); } static u32 cpsw_get_msglevel(struct net_device *ndev) @@ -1840,7 +1939,7 @@ static int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info) { #ifdef CONFIG_TI_CPTS - struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | @@ -1849,7 +1948,7 @@ static int cpsw_get_ts_info(struct net_device *ndev, SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE; - info->phc_index = priv->cpts->phc_index; + info->phc_index = cpsw->cpts->phc_index; info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); @@ -1872,10 +1971,11 @@ static int cpsw_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) { struct cpsw_priv *priv = netdev_priv(ndev); - int slave_no = cpsw_slave_index(priv); + struct cpsw_common *cpsw = priv->cpsw; + int slave_no = cpsw_slave_index(cpsw, priv); - if (priv->slaves[slave_no].phy) - return phy_ethtool_gset(priv->slaves[slave_no].phy, ecmd); + if (cpsw->slaves[slave_no].phy) + return phy_ethtool_gset(cpsw->slaves[slave_no].phy, ecmd); else return -EOPNOTSUPP; } @@ -1883,10 +1983,11 @@ static int cpsw_get_settings(struct net_device *ndev, static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) { struct cpsw_priv *priv = netdev_priv(ndev); - int slave_no = cpsw_slave_index(priv); + struct cpsw_common *cpsw = priv->cpsw; + int slave_no = cpsw_slave_index(cpsw, priv); - if (priv->slaves[slave_no].phy) - return phy_ethtool_sset(priv->slaves[slave_no].phy, ecmd); + if (cpsw->slaves[slave_no].phy) + return phy_ethtool_sset(cpsw->slaves[slave_no].phy, ecmd); else return -EOPNOTSUPP; } @@ -1894,22 +1995,24 @@ static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) { struct cpsw_priv 
*priv = netdev_priv(ndev); - int slave_no = cpsw_slave_index(priv); + struct cpsw_common *cpsw = priv->cpsw; + int slave_no = cpsw_slave_index(cpsw, priv); wol->supported = 0; wol->wolopts = 0; - if (priv->slaves[slave_no].phy) - phy_ethtool_get_wol(priv->slaves[slave_no].phy, wol); + if (cpsw->slaves[slave_no].phy) + phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol); } static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) { struct cpsw_priv *priv = netdev_priv(ndev); - int slave_no = cpsw_slave_index(priv); + struct cpsw_common *cpsw = priv->cpsw; + int slave_no = cpsw_slave_index(cpsw, priv); - if (priv->slaves[slave_no].phy) - return phy_ethtool_set_wol(priv->slaves[slave_no].phy, wol); + if (cpsw->slaves[slave_no].phy) + return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol); else return -EOPNOTSUPP; } @@ -1940,12 +2043,13 @@ static int cpsw_set_pauseparam(struct net_device *ndev, static int cpsw_ethtool_op_begin(struct net_device *ndev) { struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; int ret; - ret = pm_runtime_get_sync(&priv->pdev->dev); + ret = pm_runtime_get_sync(cpsw->dev); if (ret < 0) { cpsw_err(priv, drv, "ethtool begin failed %d\n", ret); - pm_runtime_put_noidle(&priv->pdev->dev); + pm_runtime_put_noidle(cpsw->dev); } return ret; @@ -1956,11 +2060,185 @@ static void cpsw_ethtool_op_complete(struct net_device *ndev) struct cpsw_priv *priv = netdev_priv(ndev); int ret; - ret = pm_runtime_put(&priv->pdev->dev); + ret = pm_runtime_put(priv->cpsw->dev); if (ret < 0) cpsw_err(priv, drv, "ethtool complete failed %d\n", ret); } +static void cpsw_get_channels(struct net_device *ndev, + struct ethtool_channels *ch) +{ + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + + ch->max_combined = 0; + ch->max_rx = CPSW_MAX_QUEUES; + ch->max_tx = CPSW_MAX_QUEUES; + ch->max_other = 0; + ch->other_count = 0; + ch->rx_count = cpsw->rx_ch_num; + ch->tx_count = cpsw->tx_ch_num; + ch->combined_count = 0; +} + +static int cpsw_check_ch_settings(struct cpsw_common *cpsw, + struct ethtool_channels *ch) +{ + if (ch->combined_count) + return -EINVAL; + + /* verify we have at least one channel in each direction */ + if (!ch->rx_count || !ch->tx_count) + return -EINVAL; + + if (ch->rx_count > cpsw->data.channels || + ch->tx_count > cpsw->data.channels) + return -EINVAL; + + return 0; +} + +static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx) +{ + int (*poll)(struct napi_struct *, int); + struct cpsw_common *cpsw = priv->cpsw; + void (*handler)(void *, int, int); + struct cpdma_chan **chan; + int ret, *ch; + + if (rx) { + ch = &cpsw->rx_ch_num; + chan = cpsw->rxch; + handler = cpsw_rx_handler; + poll = cpsw_rx_poll; + } else { + ch = &cpsw->tx_ch_num; + chan = cpsw->txch; + handler = cpsw_tx_handler; + poll = cpsw_tx_poll; + } + + while (*ch < ch_num) { + chan[*ch] = cpdma_chan_create(cpsw->dma, *ch, handler, rx); + + if (IS_ERR(chan[*ch])) + return PTR_ERR(chan[*ch]); + + if (!chan[*ch]) + return -EINVAL; + + cpsw_info(priv, ifup, "created new %d %s channel\n", *ch, + (rx ? "rx" : "tx")); + (*ch)++; + } + + while (*ch > ch_num) { + (*ch)--; + + ret = cpdma_chan_destroy(chan[*ch]); + if (ret) + return ret; + + cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch, + (rx ? 
"rx" : "tx")); + } + + return 0; +} + +static int cpsw_update_channels(struct cpsw_priv *priv, + struct ethtool_channels *ch) +{ + int ret; + + ret = cpsw_update_channels_res(priv, ch->rx_count, 1); + if (ret) + return ret; + + ret = cpsw_update_channels_res(priv, ch->tx_count, 0); + if (ret) + return ret; + + return 0; +} + +static int cpsw_set_channels(struct net_device *ndev, + struct ethtool_channels *chs) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + int i, ret; + + ret = cpsw_check_ch_settings(cpsw, chs); + if (ret < 0) + return ret; + + /* Disable NAPI scheduling */ + cpsw_intr_disable(cpsw); + + /* Stop all transmit queues for every network device. + * Disable re-using rx descriptors with dormant_on. + */ + for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) { + if (!(slave->ndev && netif_running(slave->ndev))) + continue; + + netif_tx_stop_all_queues(slave->ndev); + netif_dormant_on(slave->ndev); + } + + /* Handle rest of tx packets and stop cpdma channels */ + cpdma_ctlr_stop(cpsw->dma); + ret = cpsw_update_channels(priv, chs); + if (ret) + goto err; + + for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) { + if (!(slave->ndev && netif_running(slave->ndev))) + continue; + + /* Inform stack about new count of queues */ + ret = netif_set_real_num_tx_queues(slave->ndev, + cpsw->tx_ch_num); + if (ret) { + dev_err(priv->dev, "cannot set real number of tx queues\n"); + goto err; + } + + ret = netif_set_real_num_rx_queues(slave->ndev, + cpsw->rx_ch_num); + if (ret) { + dev_err(priv->dev, "cannot set real number of rx queues\n"); + goto err; + } + + /* Enable rx packets handling */ + netif_dormant_off(slave->ndev); + } + + if (cpsw_common_res_usage_state(cpsw)) { + ret = cpsw_fill_rx_channels(priv); + if (ret) + goto err; + + /* After this receive is started */ + cpdma_ctlr_start(cpsw->dma); + cpsw_intr_enable(cpsw); + } + + /* Resume transmit for every affected interface */ + for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) { + if (!(slave->ndev && netif_running(slave->ndev))) + continue; + netif_tx_start_all_queues(slave->ndev); + } + return 0; +err: + dev_err(priv->dev, "cannot update channels number, closing device\n"); + dev_close(ndev); + return ret; +} + static const struct ethtool_ops cpsw_ethtool_ops = { .get_drvinfo = cpsw_get_drvinfo, .get_msglevel = cpsw_get_msglevel, @@ -1982,14 +2260,16 @@ static const struct ethtool_ops cpsw_ethtool_ops = { .get_regs = cpsw_get_regs, .begin = cpsw_ethtool_op_begin, .complete = cpsw_ethtool_op_complete, + .get_channels = cpsw_get_channels, + .set_channels = cpsw_set_channels, }; -static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv, +static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_common *cpsw, u32 slave_reg_ofs, u32 sliver_reg_ofs) { - void __iomem *regs = priv->regs; + void __iomem *regs = cpsw->regs; int slave_num = slave->slave_num; - struct cpsw_slave_data *data = priv->data.slave_data + slave_num; + struct cpsw_slave_data *data = cpsw->data.slave_data + slave_num; slave->data = data; slave->regs = regs + slave_reg_ofs; @@ -2160,71 +2440,50 @@ no_phy_slave: return 0; } -static int cpsw_probe_dual_emac(struct platform_device *pdev, - struct cpsw_priv *priv) +static int cpsw_probe_dual_emac(struct cpsw_priv *priv) { - struct cpsw_platform_data *data = &priv->data; + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_platform_data *data = &cpsw->data; struct net_device *ndev; 
struct cpsw_priv *priv_sl2; - int ret = 0, i; + int ret = 0; - ndev = alloc_etherdev(sizeof(struct cpsw_priv)); + ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES); if (!ndev) { - dev_err(&pdev->dev, "cpsw: error allocating net_device\n"); + dev_err(cpsw->dev, "cpsw: error allocating net_device\n"); return -ENOMEM; } priv_sl2 = netdev_priv(ndev); - priv_sl2->data = *data; - priv_sl2->pdev = pdev; + priv_sl2->cpsw = cpsw; priv_sl2->ndev = ndev; priv_sl2->dev = &ndev->dev; priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); - priv_sl2->rx_packet_max = max(rx_packet_max, 128); if (is_valid_ether_addr(data->slave_data[1].mac_addr)) { memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr, ETH_ALEN); - dev_info(&pdev->dev, "cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr); + dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n", + priv_sl2->mac_addr); } else { random_ether_addr(priv_sl2->mac_addr); - dev_info(&pdev->dev, "cpsw: Random MACID = %pM\n", priv_sl2->mac_addr); + dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n", + priv_sl2->mac_addr); } memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN); - priv_sl2->slaves = priv->slaves; - priv_sl2->clk = priv->clk; - - priv_sl2->coal_intvl = 0; - priv_sl2->bus_freq_mhz = priv->bus_freq_mhz; - - priv_sl2->regs = priv->regs; - priv_sl2->host_port_regs = priv->host_port_regs; - priv_sl2->wr_regs = priv->wr_regs; - priv_sl2->hw_stats = priv->hw_stats; - priv_sl2->dma = priv->dma; - priv_sl2->txch = priv->txch; - priv_sl2->rxch = priv->rxch; - priv_sl2->ale = priv->ale; priv_sl2->emac_port = 1; - priv->slaves[1].ndev = ndev; - priv_sl2->cpts = priv->cpts; - priv_sl2->version = priv->version; - - for (i = 0; i < priv->num_irqs; i++) { - priv_sl2->irqs_table[i] = priv->irqs_table[i]; - priv_sl2->num_irqs = priv->num_irqs; - } + cpsw->slaves[1].ndev = ndev; ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; ndev->netdev_ops = &cpsw_netdev_ops; ndev->ethtool_ops = &cpsw_ethtool_ops; /* register the network device */ - SET_NETDEV_DEV(ndev, &pdev->dev); + SET_NETDEV_DEV(ndev, cpsw->dev); ret = register_netdev(ndev); if (ret) { - dev_err(&pdev->dev, "cpsw: error registering net device\n"); + dev_err(cpsw->dev, "cpsw: error registering net device\n"); free_netdev(ndev); ret = -ENODEV; } @@ -2272,6 +2531,7 @@ MODULE_DEVICE_TABLE(of, cpsw_of_mtable); static int cpsw_probe(struct platform_device *pdev) { + struct clk *clk; struct cpsw_platform_data *data; struct net_device *ndev; struct cpsw_priv *priv; @@ -2282,10 +2542,14 @@ static int cpsw_probe(struct platform_device *pdev) const struct of_device_id *of_id; struct gpio_descs *mode; u32 slave_offset, sliver_offset, slave_size; + struct cpsw_common *cpsw; int ret = 0, i; int irq; - ndev = alloc_etherdev(sizeof(struct cpsw_priv)); + cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL); + cpsw->dev = &pdev->dev; + + ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES); if (!ndev) { dev_err(&pdev->dev, "error allocating net_device\n"); return -ENOMEM; @@ -2293,13 +2557,13 @@ static int cpsw_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ndev); priv = netdev_priv(ndev); - priv->pdev = pdev; + priv->cpsw = cpsw; priv->ndev = ndev; priv->dev = &ndev->dev; priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); - priv->rx_packet_max = max(rx_packet_max, 128); - priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL); - if (!priv->cpts) { + cpsw->rx_packet_max = max(rx_packet_max, 128); + cpsw->cpts = devm_kzalloc(&pdev->dev, 
sizeof(struct cpts), GFP_KERNEL); + if (!cpsw->cpts) { dev_err(&pdev->dev, "error allocating cpts\n"); ret = -ENOMEM; goto clean_ndev_ret; @@ -2320,12 +2584,14 @@ static int cpsw_probe(struct platform_device *pdev) /* Select default pin state */ pinctrl_pm_select_default_state(&pdev->dev); - if (cpsw_probe_dt(&priv->data, pdev)) { + if (cpsw_probe_dt(&cpsw->data, pdev)) { dev_err(&pdev->dev, "cpsw: platform data missing\n"); ret = -ENODEV; goto clean_runtime_disable_ret; } - data = &priv->data; + data = &cpsw->data; + cpsw->rx_ch_num = 1; + cpsw->tx_ch_num = 1; if (is_valid_ether_addr(data->slave_data[0].mac_addr)) { memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN); @@ -2337,27 +2603,26 @@ static int cpsw_probe(struct platform_device *pdev) memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); - priv->slaves = devm_kzalloc(&pdev->dev, + cpsw->slaves = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_slave) * data->slaves, GFP_KERNEL); - if (!priv->slaves) { + if (!cpsw->slaves) { ret = -ENOMEM; goto clean_runtime_disable_ret; } for (i = 0; i < data->slaves; i++) - priv->slaves[i].slave_num = i; + cpsw->slaves[i].slave_num = i; - priv->slaves[0].ndev = ndev; + cpsw->slaves[0].ndev = ndev; priv->emac_port = 0; - priv->clk = devm_clk_get(&pdev->dev, "fck"); - if (IS_ERR(priv->clk)) { + clk = devm_clk_get(&pdev->dev, "fck"); + if (IS_ERR(clk)) { dev_err(priv->dev, "fck is not found\n"); ret = -ENODEV; goto clean_runtime_disable_ret; } - priv->coal_intvl = 0; - priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000; + cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000; ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ss_regs = devm_ioremap_resource(&pdev->dev, ss_res); @@ -2365,7 +2630,7 @@ static int cpsw_probe(struct platform_device *pdev) ret = PTR_ERR(ss_regs); goto clean_runtime_disable_ret; } - priv->regs = ss_regs; + cpsw->regs = ss_regs; /* Need to enable clocks with runtime PM api to access module * registers @@ -2375,24 +2640,24 @@ static int cpsw_probe(struct platform_device *pdev) pm_runtime_put_noidle(&pdev->dev); goto clean_runtime_disable_ret; } - priv->version = readl(&priv->regs->id_ver); + cpsw->version = readl(&cpsw->regs->id_ver); pm_runtime_put_sync(&pdev->dev); res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - priv->wr_regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(priv->wr_regs)) { - ret = PTR_ERR(priv->wr_regs); + cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(cpsw->wr_regs)) { + ret = PTR_ERR(cpsw->wr_regs); goto clean_runtime_disable_ret; } memset(&dma_params, 0, sizeof(dma_params)); memset(&ale_params, 0, sizeof(ale_params)); - switch (priv->version) { + switch (cpsw->version) { case CPSW_VERSION_1: - priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET; - priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET; - priv->hw_stats = ss_regs + CPSW1_HW_STATS; + cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET; + cpsw->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET; + cpsw->hw_stats = ss_regs + CPSW1_HW_STATS; dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET; dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET; ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET; @@ -2404,9 +2669,9 @@ static int cpsw_probe(struct platform_device *pdev) case CPSW_VERSION_2: case CPSW_VERSION_3: case CPSW_VERSION_4: - priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET; - priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET; - priv->hw_stats = ss_regs + CPSW2_HW_STATS; + cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET; + cpsw->cpts->reg = 
ss_regs + CPSW2_CPTS_OFFSET; + cpsw->hw_stats = ss_regs + CPSW2_HW_STATS; dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET; dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET; ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET; @@ -2417,13 +2682,14 @@ static int cpsw_probe(struct platform_device *pdev) (u32 __force) ss_res->start + CPSW2_BD_OFFSET; break; default: - dev_err(priv->dev, "unknown version 0x%08x\n", priv->version); + dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version); ret = -ENODEV; goto clean_runtime_disable_ret; } - for (i = 0; i < priv->data.slaves; i++) { - struct cpsw_slave *slave = &priv->slaves[i]; - cpsw_slave_init(slave, priv, slave_offset, sliver_offset); + for (i = 0; i < cpsw->data.slaves; i++) { + struct cpsw_slave *slave = &cpsw->slaves[i]; + + cpsw_slave_init(slave, cpsw, slave_offset, sliver_offset); slave_offset += slave_size; sliver_offset += SLIVER_SIZE; } @@ -2443,19 +2709,16 @@ static int cpsw_probe(struct platform_device *pdev) dma_params.has_ext_regs = true; dma_params.desc_hw_addr = dma_params.desc_mem_phys; - priv->dma = cpdma_ctlr_create(&dma_params); - if (!priv->dma) { + cpsw->dma = cpdma_ctlr_create(&dma_params); + if (!cpsw->dma) { dev_err(priv->dev, "error initializing dma\n"); ret = -ENOMEM; goto clean_runtime_disable_ret; } - priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0), - cpsw_tx_handler); - priv->rxch = cpdma_chan_create(priv->dma, rx_chan_num(0), - cpsw_rx_handler); - - if (WARN_ON(!priv->txch || !priv->rxch)) { + cpsw->txch[0] = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0); + cpsw->rxch[0] = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1); + if (WARN_ON(!cpsw->rxch[0] || !cpsw->txch[0])) { dev_err(priv->dev, "error initializing dma channels\n"); ret = -ENOMEM; goto clean_dma_ret; @@ -2466,8 +2729,8 @@ static int cpsw_probe(struct platform_device *pdev) ale_params.ale_entries = data->ale_entries; ale_params.ale_ports = data->slaves; - priv->ale = cpsw_ale_create(&ale_params); - if (!priv->ale) { + cpsw->ale = cpsw_ale_create(&ale_params); + if (!cpsw->ale) { dev_err(priv->dev, "error initializing ale engine\n"); ret = -ENODEV; goto clean_dma_ret; @@ -2484,7 +2747,7 @@ static int cpsw_probe(struct platform_device *pdev) if (of_id) { pdev->id_entry = of_id->data; if (pdev->id_entry->driver_data) - priv->quirk_irq = true; + cpsw->quirk_irq = true; } /* Grab RX and TX IRQs. 
Note that we also have RX_THRESHOLD and @@ -2502,9 +2765,9 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_ale_ret; } - priv->irqs_table[0] = irq; + cpsw->irqs_table[0] = irq; ret = devm_request_irq(&pdev->dev, irq, cpsw_rx_interrupt, - 0, dev_name(&pdev->dev), priv); + 0, dev_name(&pdev->dev), cpsw); if (ret < 0) { dev_err(priv->dev, "error attaching irq (%d)\n", ret); goto clean_ale_ret; @@ -2517,21 +2780,20 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_ale_ret; } - priv->irqs_table[1] = irq; + cpsw->irqs_table[1] = irq; ret = devm_request_irq(&pdev->dev, irq, cpsw_tx_interrupt, - 0, dev_name(&pdev->dev), priv); + 0, dev_name(&pdev->dev), cpsw); if (ret < 0) { dev_err(priv->dev, "error attaching irq (%d)\n", ret); goto clean_ale_ret; } - priv->num_irqs = 2; ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; ndev->netdev_ops = &cpsw_netdev_ops; ndev->ethtool_ops = &cpsw_ethtool_ops; - netif_napi_add(ndev, &priv->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT); - netif_tx_napi_add(ndev, &priv->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT); + netif_napi_add(ndev, &cpsw->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT); + netif_tx_napi_add(ndev, &cpsw->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT); /* register the network device */ SET_NETDEV_DEV(ndev, &pdev->dev); @@ -2545,8 +2807,8 @@ static int cpsw_probe(struct platform_device *pdev) cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n", &ss_res->start, ndev->irq); - if (priv->data.dual_emac) { - ret = cpsw_probe_dual_emac(pdev, priv); + if (cpsw->data.dual_emac) { + ret = cpsw_probe_dual_emac(priv); if (ret) { cpsw_err(priv, probe, "error probe slave 2 emac interface\n"); goto clean_ale_ret; @@ -2556,9 +2818,9 @@ static int cpsw_probe(struct platform_device *pdev) return 0; clean_ale_ret: - cpsw_ale_destroy(priv->ale); + cpsw_ale_destroy(cpsw->ale); clean_dma_ret: - cpdma_ctlr_destroy(priv->dma); + cpdma_ctlr_destroy(cpsw->dma); clean_runtime_disable_ret: pm_runtime_disable(&pdev->dev); clean_ndev_ret: @@ -2569,7 +2831,7 @@ clean_ndev_ret: static int cpsw_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); - struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); int ret; ret = pm_runtime_get_sync(&pdev->dev); @@ -2578,17 +2840,17 @@ static int cpsw_remove(struct platform_device *pdev) return ret; } - if (priv->data.dual_emac) - unregister_netdev(cpsw_get_slave_ndev(priv, 1)); + if (cpsw->data.dual_emac) + unregister_netdev(cpsw->slaves[1].ndev); unregister_netdev(ndev); - cpsw_ale_destroy(priv->ale); - cpdma_ctlr_destroy(priv->dma); + cpsw_ale_destroy(cpsw->ale); + cpdma_ctlr_destroy(cpsw->dma); of_platform_depopulate(&pdev->dev); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - if (priv->data.dual_emac) - free_netdev(cpsw_get_slave_ndev(priv, 1)); + if (cpsw->data.dual_emac) + free_netdev(cpsw->slaves[1].ndev); free_netdev(ndev); return 0; } @@ -2598,14 +2860,14 @@ static int cpsw_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct net_device *ndev = platform_get_drvdata(pdev); - struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); - if (priv->data.dual_emac) { + if (cpsw->data.dual_emac) { int i; - for (i = 0; i < priv->data.slaves; i++) { - if (netif_running(priv->slaves[i].ndev)) - cpsw_ndo_stop(priv->slaves[i].ndev); + for (i = 0; i < cpsw->data.slaves; i++) { + if (netif_running(cpsw->slaves[i].ndev)) + 
cpsw_ndo_stop(cpsw->slaves[i].ndev); } } else { if (netif_running(ndev)) @@ -2613,7 +2875,7 @@ static int cpsw_suspend(struct device *dev) } /* Select sleep pin state */ - pinctrl_pm_select_sleep_state(&pdev->dev); + pinctrl_pm_select_sleep_state(dev); return 0; } @@ -2622,17 +2884,17 @@ static int cpsw_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct net_device *ndev = platform_get_drvdata(pdev); - struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = netdev_priv(ndev); /* Select default pin state */ - pinctrl_pm_select_default_state(&pdev->dev); + pinctrl_pm_select_default_state(dev); - if (priv->data.dual_emac) { + if (cpsw->data.dual_emac) { int i; - for (i = 0; i < priv->data.slaves; i++) { - if (netif_running(priv->slaves[i].ndev)) - cpsw_ndo_open(priv->slaves[i].ndev); + for (i = 0; i < cpsw->data.slaves; i++) { + if (netif_running(cpsw->slaves[i].ndev)) + cpsw_ndo_open(cpsw->slaves[i].ndev); } } else { if (netif_running(ndev)) diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 19e5f32a8a64..c3f35f11a8fd 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -86,7 +86,7 @@ struct cpdma_desc_pool { void __iomem *iomap; /* ioremap map */ void *cpumap; /* dma_alloc map */ int desc_size, mem_size; - int num_desc, used_desc; + int num_desc; struct device *dev; struct gen_pool *gen_pool; }; @@ -104,6 +104,7 @@ struct cpdma_ctlr { struct cpdma_desc_pool *pool; spinlock_t lock; struct cpdma_chan *channels[2 * CPDMA_MAX_CHANNELS]; + int chan_num; }; struct cpdma_chan { @@ -123,6 +124,13 @@ struct cpdma_chan { int int_set, int_clear, td; }; +#define tx_chan_num(chan) (chan) +#define rx_chan_num(chan) ((chan) + CPDMA_MAX_CHANNELS) +#define is_rx_chan(chan) ((chan)->chan_num >= CPDMA_MAX_CHANNELS) +#define is_tx_chan(chan) (!is_rx_chan(chan)) +#define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1)) +#define chan_linear(chan) __chan_linear((chan)->chan_num) + /* The following make access to common cpdma_ctlr params more readable */ #define dmaregs params.dmaregs #define num_chan params.num_chan @@ -148,7 +156,10 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool) if (!pool) return; - WARN_ON(pool->used_desc); + WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool), + "cpdma_desc_pool size %d != avail %d", + gen_pool_size(pool->gen_pool), + gen_pool_avail(pool->gen_pool)); if (pool->cpumap) dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap, pool->phys); @@ -232,21 +243,14 @@ desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma) static struct cpdma_desc __iomem * cpdma_desc_alloc(struct cpdma_desc_pool *pool) { - struct cpdma_desc __iomem *desc = NULL; - - desc = (struct cpdma_desc __iomem *)gen_pool_alloc(pool->gen_pool, - pool->desc_size); - if (desc) - pool->used_desc++; - - return desc; + return (struct cpdma_desc __iomem *) + gen_pool_alloc(pool->gen_pool, pool->desc_size); } static void cpdma_desc_free(struct cpdma_desc_pool *pool, struct cpdma_desc __iomem *desc, int num_desc) { gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size); - pool->used_desc--; } struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params) @@ -260,6 +264,7 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params) ctlr->state = CPDMA_STATE_IDLE; ctlr->params = *params; ctlr->dev = params->dev; + ctlr->chan_num = 0; spin_lock_init(&ctlr->lock); ctlr->pool = 
cpdma_desc_pool_create(ctlr->dev, @@ -336,12 +341,14 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr) } ctlr->state = CPDMA_STATE_TEARDOWN; + spin_unlock_irqrestore(&ctlr->lock, flags); for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { if (ctlr->channels[i]) cpdma_chan_stop(ctlr->channels[i]); } + spin_lock_irqsave(&ctlr->lock, flags); dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff); dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff); @@ -403,13 +410,52 @@ void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value) } EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi); +u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr) +{ + return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED); +} +EXPORT_SYMBOL_GPL(cpdma_ctrl_rxchs_state); + +u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr) +{ + return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED); +} +EXPORT_SYMBOL_GPL(cpdma_ctrl_txchs_state); + +/** + * cpdma_chan_split_pool - Splits ctrl pool between all channels. + * Has to be called under ctlr lock + */ +static void cpdma_chan_split_pool(struct cpdma_ctlr *ctlr) +{ + struct cpdma_desc_pool *pool = ctlr->pool; + struct cpdma_chan *chan; + int ch_desc_num; + int i; + + if (!ctlr->chan_num) + return; + + /* calculate average size of pool slice */ + ch_desc_num = pool->num_desc / ctlr->chan_num; + + /* split ctlr pool */ + for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { + chan = ctlr->channels[i]; + if (chan) + chan->desc_num = ch_desc_num; + } +} + struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, - cpdma_handler_fn handler) + cpdma_handler_fn handler, int rx_type) { + int offset = chan_num * 4; struct cpdma_chan *chan; - int offset = (chan_num % CPDMA_MAX_CHANNELS) * 4; unsigned long flags; + chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num); + if (__chan_linear(chan_num) >= ctlr->num_chan) return NULL; @@ -451,14 +497,25 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, spin_lock_init(&chan->lock); ctlr->channels[chan_num] = chan; + ctlr->chan_num++; + + cpdma_chan_split_pool(ctlr); + spin_unlock_irqrestore(&ctlr->lock, flags); return chan; } EXPORT_SYMBOL_GPL(cpdma_chan_create); -int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr) +int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan) { - return ctlr->pool->num_desc / 2; + unsigned long flags; + int desc_num; + + spin_lock_irqsave(&chan->lock, flags); + desc_num = chan->desc_num; + spin_unlock_irqrestore(&chan->lock, flags); + + return desc_num; } EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num); @@ -475,6 +532,10 @@ int cpdma_chan_destroy(struct cpdma_chan *chan) if (chan->state != CPDMA_STATE_IDLE) cpdma_chan_stop(chan); ctlr->channels[chan->chan_num] = NULL; + ctlr->chan_num--; + + cpdma_chan_split_pool(ctlr); + spin_unlock_irqrestore(&ctlr->lock, flags); return 0; } diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h index 4b46cd6e9a3f..a07b22b12bc1 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.h +++ b/drivers/net/ethernet/ti/davinci_cpdma.h @@ -17,13 +17,6 @@ #define CPDMA_MAX_CHANNELS BITS_PER_LONG -#define tx_chan_num(chan) (chan) -#define rx_chan_num(chan) ((chan) + CPDMA_MAX_CHANNELS) -#define is_rx_chan(chan) ((chan)->chan_num >= CPDMA_MAX_CHANNELS) -#define is_tx_chan(chan) (!is_rx_chan(chan)) -#define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1)) -#define chan_linear(chan) __chan_linear((chan)->chan_num) - #define CPDMA_RX_SOURCE_PORT(__status__) ((__status__ >> 16) & 0x7) #define CPDMA_EOI_RX_THRESH 0x0 @@ -79,8 
+72,8 @@ int cpdma_ctlr_start(struct cpdma_ctlr *ctlr); int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr); struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, - cpdma_handler_fn handler); -int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr); + cpdma_handler_fn handler, int rx_type); +int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan); int cpdma_chan_destroy(struct cpdma_chan *chan); int cpdma_chan_start(struct cpdma_chan *chan); int cpdma_chan_stop(struct cpdma_chan *chan); @@ -94,6 +87,8 @@ int cpdma_chan_process(struct cpdma_chan *chan, int quota); int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable); void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value); int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable); +u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr); +u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr); bool cpdma_check_free_tx_desc(struct cpdma_chan *chan); enum cpdma_control { diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 727a79f3c7dd..2fd94a5bc1f3 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -597,14 +597,14 @@ static u32 hash_get(u8 *addr) } /** - * hash_add - Hash function to add mac addr from hash table + * emac_hash_add - Hash function to add mac addr from hash table * @priv: The DaVinci EMAC private adapter structure * @mac_addr: mac address to delete from hash table * * Adds mac address to the internal hash table * */ -static int hash_add(struct emac_priv *priv, u8 *mac_addr) +static int emac_hash_add(struct emac_priv *priv, u8 *mac_addr) { struct device *emac_dev = &priv->ndev->dev; u32 rc = 0; @@ -613,7 +613,7 @@ static int hash_add(struct emac_priv *priv, u8 *mac_addr) if (hash_value >= EMAC_NUM_MULTICAST_BITS) { if (netif_msg_drv(priv)) { - dev_err(emac_dev, "DaVinci EMAC: hash_add(): Invalid "\ + dev_err(emac_dev, "DaVinci EMAC: emac_hash_add(): Invalid "\ "Hash %08x, should not be greater than %08x", hash_value, (EMAC_NUM_MULTICAST_BITS - 1)); } @@ -639,14 +639,14 @@ static int hash_add(struct emac_priv *priv, u8 *mac_addr) } /** - * hash_del - Hash function to delete mac addr from hash table + * emac_hash_del - Hash function to delete mac addr from hash table * @priv: The DaVinci EMAC private adapter structure * @mac_addr: mac address to delete from hash table * * Removes mac address from the internal hash table * */ -static int hash_del(struct emac_priv *priv, u8 *mac_addr) +static int emac_hash_del(struct emac_priv *priv, u8 *mac_addr) { u32 hash_value; u32 hash_bit; @@ -696,10 +696,10 @@ static void emac_add_mcast(struct emac_priv *priv, u32 action, u8 *mac_addr) switch (action) { case EMAC_MULTICAST_ADD: - update = hash_add(priv, mac_addr); + update = emac_hash_add(priv, mac_addr); break; case EMAC_MULTICAST_DEL: - update = hash_del(priv, mac_addr); + update = emac_hash_del(priv, mac_addr); break; case EMAC_ALL_MULTI_SET: update = 1; @@ -1870,10 +1870,10 @@ static int davinci_emac_probe(struct platform_device *pdev) goto no_pdata; } - priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH), - emac_tx_handler); - priv->rxchan = cpdma_chan_create(priv->dma, rx_chan_num(EMAC_DEF_RX_CH), - emac_rx_handler); + priv->txchan = cpdma_chan_create(priv->dma, EMAC_DEF_TX_CH, + emac_tx_handler, 0); + priv->rxchan = cpdma_chan_create(priv->dma, EMAC_DEF_RX_CH, + emac_rx_handler, 1); if (WARN_ON(!priv->txchan || !priv->rxchan)) { rc = -ENOMEM; goto no_cpdma_chan; diff --git a/drivers/net/fjes/fjes_main.c 
b/drivers/net/fjes/fjes_main.c index 9006877c53f2..e46b1ebbbff4 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -97,7 +97,6 @@ static struct acpi_driver fjes_acpi_driver = { static struct platform_driver fjes_driver = { .driver = { .name = DRV_NAME, - .owner = THIS_MODULE, }, .probe = fjes_probe, .remove = fjes_remove, diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 591af71eae56..284b97b6b258 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -84,8 +84,6 @@ struct ndis_recv_scale_cap { /* NDIS_RECEIVE_SCALE_CAPABILITIES */ #define NDIS_RSS_HASH_SECRET_KEY_MAX_SIZE_REVISION_2 40 #define ITAB_NUM 128 -#define HASH_KEYLEN NDIS_RSS_HASH_SECRET_KEY_MAX_SIZE_REVISION_2 -extern u8 netvsc_hash_key[]; struct ndis_recv_scale_param { /* NDIS_RECEIVE_SCALE_PARAMETERS */ struct ndis_obj_header hdr; @@ -175,7 +173,7 @@ struct rndis_device { struct rndis_message; struct netvsc_device; int netvsc_device_add(struct hv_device *device, void *additional_info); -int netvsc_device_remove(struct hv_device *device); +void netvsc_device_remove(struct hv_device *device); int netvsc_send(struct hv_device *device, struct hv_netvsc_packet *packet, struct rndis_message *rndis_msg, @@ -490,6 +488,7 @@ struct nvsp_2_vsc_capability { u64 sriov:1; u64 ieee8021q:1; u64 correlation_id:1; + u64 teaming:1; }; }; } __packed; @@ -633,12 +632,34 @@ struct multi_send_data { u32 count; /* counter of batched packets */ }; +struct recv_comp_data { + u64 tid; /* transaction id */ + u32 status; +}; + +/* Netvsc Receive Slots Max */ +#define NETVSC_RECVSLOT_MAX (NETVSC_RECEIVE_BUFFER_SIZE / ETH_DATA_LEN + 1) + +struct multi_recv_comp { + void *buf; /* queued receive completions */ + u32 first; /* first data entry */ + u32 next; /* next entry for writing */ +}; + struct netvsc_stats { u64 packets; u64 bytes; struct u64_stats_sync syncp; }; +struct netvsc_ethtool_stats { + unsigned long tx_scattered; + unsigned long tx_no_memory; + unsigned long tx_no_space; + unsigned long tx_too_big; + unsigned long tx_busy; +}; + struct netvsc_reconfig { struct list_head list; u32 event; @@ -668,6 +689,7 @@ struct net_device_context { /* Ethtool settings */ u8 duplex; u32 speed; + struct netvsc_ethtool_stats eth_stats; /* the device is going away */ bool start_remove; @@ -735,6 +757,9 @@ struct netvsc_device { u32 max_pkt; /* max number of pkt in one send, e.g. 8 */ u32 pkt_align; /* alignment bytes, e.g. 8 */ + struct multi_recv_comp mrc[VRSS_CHANNEL_MAX]; + atomic_t num_outstanding_recvs; + atomic_t open_cnt; }; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 410fb8e81376..2a9ccc4d9e3c 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -34,6 +34,89 @@ #include "hyperv_net.h" /* + * An API to support in-place processing of incoming VMBUS packets. 
+ */ +#define VMBUS_PKT_TRAILER 8 + +static struct vmpacket_descriptor * +get_next_pkt_raw(struct vmbus_channel *channel) +{ + struct hv_ring_buffer_info *ring_info = &channel->inbound; + u32 read_loc = ring_info->priv_read_index; + void *ring_buffer = hv_get_ring_buffer(ring_info); + struct vmpacket_descriptor *cur_desc; + u32 packetlen; + u32 dsize = ring_info->ring_datasize; + u32 delta = read_loc - ring_info->ring_buffer->read_index; + u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta); + + if (bytes_avail_toread < sizeof(struct vmpacket_descriptor)) + return NULL; + + if ((read_loc + sizeof(*cur_desc)) > dsize) + return NULL; + + cur_desc = ring_buffer + read_loc; + packetlen = cur_desc->len8 << 3; + + /* + * If the packet under consideration is wrapping around, + * return failure. + */ + if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1)) + return NULL; + + return cur_desc; +} + +/* + * A helper function to step through packets "in-place" + * This API is to be called after each successful call + * get_next_pkt_raw(). + */ +static void put_pkt_raw(struct vmbus_channel *channel, + struct vmpacket_descriptor *desc) +{ + struct hv_ring_buffer_info *ring_info = &channel->inbound; + u32 read_loc = ring_info->priv_read_index; + u32 packetlen = desc->len8 << 3; + u32 dsize = ring_info->ring_datasize; + + BUG_ON((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize); + + /* + * Include the packet trailer. + */ + ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER; +} + +/* + * This call commits the read index and potentially signals the host. + * Here is the pattern for using the "in-place" consumption APIs: + * + * while (get_next_pkt_raw() { + * process the packet "in-place"; + * put_pkt_raw(); + * } + * if (packets processed in place) + * commit_rd_index(); + */ +static void commit_rd_index(struct vmbus_channel *channel) +{ + struct hv_ring_buffer_info *ring_info = &channel->inbound; + /* + * Make sure all reads are done before we update the read index since + * the writer may start writing to the read area once the read index + * is updated. + */ + virt_rmb(); + ring_info->ring_buffer->read_index = ring_info->priv_read_index; + + if (hv_need_to_signal_on_read(ring_info)) + vmbus_set_event(channel); +} + +/* * Switch the data path from the synthetic interface to the VF * interface. 
*/ @@ -59,7 +142,6 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf) VM_PKT_DATA_INBAND, 0); } - static struct netvsc_device *alloc_net_device(void) { struct netvsc_device *net_device; @@ -74,17 +156,26 @@ static struct netvsc_device *alloc_net_device(void) return NULL; } + net_device->mrc[0].buf = vzalloc(NETVSC_RECVSLOT_MAX * + sizeof(struct recv_comp_data)); + init_waitqueue_head(&net_device->wait_drain); net_device->destroy = false; atomic_set(&net_device->open_cnt, 0); net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; + init_completion(&net_device->channel_init_wait); return net_device; } static void free_netvsc_device(struct netvsc_device *nvdev) { + int i; + + for (i = 0; i < VRSS_CHANNEL_MAX; i++) + vfree(nvdev->mrc[i].buf); + kfree(nvdev->cb_buffer); kfree(nvdev); } @@ -107,20 +198,20 @@ static struct netvsc_device *get_inbound_net_device(struct hv_device *device) goto get_in_err; if (net_device->destroy && - atomic_read(&net_device->num_outstanding_sends) == 0) + atomic_read(&net_device->num_outstanding_sends) == 0 && + atomic_read(&net_device->num_outstanding_recvs) == 0) net_device = NULL; get_in_err: return net_device; } - -static int netvsc_destroy_buf(struct hv_device *device) +static void netvsc_destroy_buf(struct hv_device *device) { struct nvsp_message *revoke_packet; - int ret = 0; struct net_device *ndev = hv_get_drvdata(device); struct netvsc_device *net_device = net_device_to_netvsc_device(ndev); + int ret; /* * If we got a section count, it means we received a @@ -150,7 +241,7 @@ static int netvsc_destroy_buf(struct hv_device *device) if (ret != 0) { netdev_err(ndev, "unable to send " "revoke receive buffer to netvsp\n"); - return ret; + return; } } @@ -165,7 +256,7 @@ static int netvsc_destroy_buf(struct hv_device *device) if (ret != 0) { netdev_err(ndev, "unable to teardown receive buffer's gpadl\n"); - return ret; + return; } net_device->recv_buf_gpadl_handle = 0; } @@ -209,7 +300,7 @@ static int netvsc_destroy_buf(struct hv_device *device) if (ret != 0) { netdev_err(ndev, "unable to send " "revoke send buffer to netvsp\n"); - return ret; + return; } } /* Teardown the gpadl on the vsp end */ @@ -223,7 +314,7 @@ static int netvsc_destroy_buf(struct hv_device *device) if (ret != 0) { netdev_err(ndev, "unable to teardown send buffer's gpadl\n"); - return ret; + return; } net_device->send_buf_gpadl_handle = 0; } @@ -233,8 +324,6 @@ static int netvsc_destroy_buf(struct hv_device *device) net_device->send_buf = NULL; } kfree(net_device->send_section_map); - - return ret; } static int netvsc_init_buf(struct hv_device *device) @@ -276,7 +365,6 @@ static int netvsc_init_buf(struct hv_device *device) goto cleanup; } - /* Notify the NetVsp of the gpadl handle */ init_packet = &net_device->channel_init_pkt; @@ -403,7 +491,7 @@ static int netvsc_init_buf(struct hv_device *device) /* Section count is simply the size divided by the section size. 
*/ net_device->send_section_cnt = - net_device->send_buf_size/net_device->send_section_size; + net_device->send_buf_size / net_device->send_section_size; dev_info(&device->device, "Send section size: %d, Section count:%d\n", net_device->send_section_size, net_device->send_section_cnt); @@ -412,8 +500,8 @@ static int netvsc_init_buf(struct hv_device *device) net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG); - net_device->send_section_map = - kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL); + net_device->send_section_map = kcalloc(net_device->map_words, + sizeof(ulong), GFP_KERNEL); if (net_device->send_section_map == NULL) { ret = -ENOMEM; goto cleanup; @@ -428,7 +516,6 @@ exit: return ret; } - /* Negotiate NVSP protocol version */ static int negotiate_nvsp_ver(struct hv_device *device, struct netvsc_device *net_device, @@ -468,9 +555,13 @@ static int negotiate_nvsp_ver(struct hv_device *device, init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN; init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1; - if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) + if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) { init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1; + /* Teaming bit is needed to receive link speed updates */ + init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1; + } + ret = vmbus_sendpacket(device->channel, init_packet, sizeof(struct nvsp_message), (unsigned long)init_packet, @@ -485,9 +576,10 @@ static int netvsc_connect_vsp(struct hv_device *device) struct netvsc_device *net_device; struct nvsp_message *init_packet; int ndis_version; - u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2, + const u32 ver_list[] = { + NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2, NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 }; - int i, num_ver = 4; /* number of different NVSP versions */ + int i; net_device = get_outbound_net_device(device); if (!net_device) @@ -496,7 +588,7 @@ static int netvsc_connect_vsp(struct hv_device *device) init_packet = &net_device->channel_init_pkt; /* Negotiate the latest NVSP protocol supported */ - for (i = num_ver - 1; i >= 0; i--) + for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--) if (negotiate_nvsp_ver(device, net_device, init_packet, ver_list[i]) == 0) { net_device->nvsp_version = ver_list[i]; @@ -555,7 +647,7 @@ static void netvsc_disconnect_vsp(struct hv_device *device) /* * netvsc_device_remove - Callback when the root bus device is removed */ -int netvsc_device_remove(struct hv_device *device) +void netvsc_device_remove(struct hv_device *device) { struct net_device *ndev = hv_get_drvdata(device); struct net_device_context *net_device_ctx = netdev_priv(ndev); @@ -577,10 +669,8 @@ int netvsc_device_remove(struct hv_device *device) /* Release all resources */ vfree(net_device->sub_cb_buf); free_netvsc_device(net_device); - return 0; } - #define RING_AVAIL_PERCENT_HIWATER 20 #define RING_AVAIL_PERCENT_LOWATER 10 @@ -604,72 +694,79 @@ static inline void netvsc_free_send_slot(struct netvsc_device *net_device, sync_change_bit(index, net_device->send_section_map); } +static void netvsc_send_tx_complete(struct netvsc_device *net_device, + struct vmbus_channel *incoming_channel, + struct hv_device *device, + struct vmpacket_descriptor *packet) +{ + struct sk_buff *skb = (struct sk_buff *)(unsigned long)packet->trans_id; + struct net_device *ndev = hv_get_drvdata(device); + struct net_device_context *net_device_ctx = netdev_priv(ndev); + struct vmbus_channel *channel = 
device->channel; + int num_outstanding_sends; + u16 q_idx = 0; + int queue_sends; + + /* Notify the layer above us */ + if (likely(skb)) { + struct hv_netvsc_packet *nvsc_packet + = (struct hv_netvsc_packet *)skb->cb; + u32 send_index = nvsc_packet->send_buf_index; + + if (send_index != NETVSC_INVALID_INDEX) + netvsc_free_send_slot(net_device, send_index); + q_idx = nvsc_packet->q_idx; + channel = incoming_channel; + + dev_kfree_skb_any(skb); + } + + num_outstanding_sends = + atomic_dec_return(&net_device->num_outstanding_sends); + queue_sends = atomic_dec_return(&net_device->queue_sends[q_idx]); + + if (net_device->destroy && num_outstanding_sends == 0) + wake_up(&net_device->wait_drain); + + if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) && + !net_device_ctx->start_remove && + (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER || + queue_sends < 1)) + netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx)); +} + static void netvsc_send_completion(struct netvsc_device *net_device, struct vmbus_channel *incoming_channel, struct hv_device *device, struct vmpacket_descriptor *packet) { struct nvsp_message *nvsp_packet; - struct hv_netvsc_packet *nvsc_packet; struct net_device *ndev = hv_get_drvdata(device); - struct net_device_context *net_device_ctx = netdev_priv(ndev); - u32 send_index; - struct sk_buff *skb; nvsp_packet = (struct nvsp_message *)((unsigned long)packet + - (packet->offset8 << 3)); + (packet->offset8 << 3)); - if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) || - (nvsp_packet->hdr.msg_type == - NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) || - (nvsp_packet->hdr.msg_type == - NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE) || - (nvsp_packet->hdr.msg_type == - NVSP_MSG5_TYPE_SUBCHANNEL)) { + switch (nvsp_packet->hdr.msg_type) { + case NVSP_MSG_TYPE_INIT_COMPLETE: + case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE: + case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE: + case NVSP_MSG5_TYPE_SUBCHANNEL: /* Copy the response back */ memcpy(&net_device->channel_init_pkt, nvsp_packet, sizeof(struct nvsp_message)); complete(&net_device->channel_init_wait); - } else if (nvsp_packet->hdr.msg_type == - NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) { - int num_outstanding_sends; - u16 q_idx = 0; - struct vmbus_channel *channel = device->channel; - int queue_sends; - - /* Get the send context */ - skb = (struct sk_buff *)(unsigned long)packet->trans_id; - - /* Notify the layer above us */ - if (skb) { - nvsc_packet = (struct hv_netvsc_packet *) skb->cb; - send_index = nvsc_packet->send_buf_index; - if (send_index != NETVSC_INVALID_INDEX) - netvsc_free_send_slot(net_device, send_index); - q_idx = nvsc_packet->q_idx; - channel = incoming_channel; - dev_kfree_skb_any(skb); - } - - num_outstanding_sends = - atomic_dec_return(&net_device->num_outstanding_sends); - queue_sends = atomic_dec_return(&net_device-> - queue_sends[q_idx]); + break; - if (net_device->destroy && num_outstanding_sends == 0) - wake_up(&net_device->wait_drain); + case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE: + netvsc_send_tx_complete(net_device, incoming_channel, + device, packet); + break; - if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) && - !net_device_ctx->start_remove && - (hv_ringbuf_avail_percent(&channel->outbound) > - RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) - netif_tx_wake_queue(netdev_get_tx_queue( - ndev, q_idx)); - } else { - netdev_err(ndev, "Unknown send completion packet type- " - "%d received!!\n", nvsp_packet->hdr.msg_type); + default: + netdev_err(ndev, + "Unknown send 
completion type %d received!!\n", + nvsp_packet->hdr.msg_type); } - } static u32 netvsc_get_next_send_section(struct netvsc_device *net_device) @@ -743,7 +840,7 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device, return msg_size; } -static inline int netvsc_send_pkt( +static int netvsc_send_pkt( struct hv_device *device, struct hv_netvsc_packet *packet, struct netvsc_device *net_device, @@ -859,7 +956,7 @@ int netvsc_send(struct hv_device *device, struct sk_buff *skb) { struct netvsc_device *net_device; - int ret = 0, m_ret = 0; + int ret = 0; struct vmbus_channel *out_channel; u16 q_idx = packet->q_idx; u32 pktlen = packet->total_data_buflen, msd_len = 0; @@ -948,8 +1045,8 @@ int netvsc_send(struct hv_device *device, } if (msd_send) { - m_ret = netvsc_send_pkt(device, msd_send, net_device, - NULL, msd_skb); + int m_ret = netvsc_send_pkt(device, msd_send, net_device, + NULL, msd_skb); if (m_ret != 0) { netvsc_free_send_slot(net_device, @@ -968,49 +1065,121 @@ send_now: return ret; } -static void netvsc_send_recv_completion(struct hv_device *device, - struct vmbus_channel *channel, - struct netvsc_device *net_device, - u64 transaction_id, u32 status) +static int netvsc_send_recv_completion(struct vmbus_channel *channel, + u64 transaction_id, u32 status) { struct nvsp_message recvcompMessage; - int retries = 0; int ret; - struct net_device *ndev = hv_get_drvdata(device); recvcompMessage.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE; recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status; -retry_send_cmplt: /* Send the completion */ ret = vmbus_sendpacket(channel, &recvcompMessage, - sizeof(struct nvsp_message), transaction_id, - VM_PKT_COMP, 0); - if (ret == 0) { - /* success */ - /* no-op */ - } else if (ret == -EAGAIN) { - /* no more room...wait a bit and attempt to retry 3 times */ - retries++; - netdev_err(ndev, "unable to send receive completion pkt" - " (tid %llx)...retrying %d\n", transaction_id, retries); - - if (retries < 4) { - udelay(100); - goto retry_send_cmplt; - } else { - netdev_err(ndev, "unable to send receive " - "completion pkt (tid %llx)...give up retrying\n", - transaction_id); - } - } else { - netdev_err(ndev, "unable to send receive " - "completion pkt - %llx\n", transaction_id); + sizeof(struct nvsp_message_header) + sizeof(u32), + transaction_id, VM_PKT_COMP, 0); + + return ret; +} + +static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx, + u32 *filled, u32 *avail) +{ + u32 first = nvdev->mrc[q_idx].first; + u32 next = nvdev->mrc[q_idx].next; + + *filled = (first > next) ? 
NETVSC_RECVSLOT_MAX - first + next : + next - first; + + *avail = NETVSC_RECVSLOT_MAX - *filled - 1; +} + +/* Read the first filled slot, no change to index */ +static inline struct recv_comp_data *read_recv_comp_slot(struct netvsc_device + *nvdev, u16 q_idx) +{ + u32 filled, avail; + + if (!nvdev->mrc[q_idx].buf) + return NULL; + + count_recv_comp_slot(nvdev, q_idx, &filled, &avail); + if (!filled) + return NULL; + + return nvdev->mrc[q_idx].buf + nvdev->mrc[q_idx].first * + sizeof(struct recv_comp_data); +} + +/* Put the first filled slot back to available pool */ +static inline void put_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx) +{ + int num_recv; + + nvdev->mrc[q_idx].first = (nvdev->mrc[q_idx].first + 1) % + NETVSC_RECVSLOT_MAX; + + num_recv = atomic_dec_return(&nvdev->num_outstanding_recvs); + + if (nvdev->destroy && num_recv == 0) + wake_up(&nvdev->wait_drain); +} + +/* Check and send pending recv completions */ +static void netvsc_chk_recv_comp(struct netvsc_device *nvdev, + struct vmbus_channel *channel, u16 q_idx) +{ + struct recv_comp_data *rcd; + int ret; + + while (true) { + rcd = read_recv_comp_slot(nvdev, q_idx); + if (!rcd) + break; + + ret = netvsc_send_recv_completion(channel, rcd->tid, + rcd->status); + if (ret) + break; + + put_recv_comp_slot(nvdev, q_idx); } } +#define NETVSC_RCD_WATERMARK 80 + +/* Get next available slot */ +static inline struct recv_comp_data *get_recv_comp_slot( + struct netvsc_device *nvdev, struct vmbus_channel *channel, u16 q_idx) +{ + u32 filled, avail, next; + struct recv_comp_data *rcd; + + if (!nvdev->recv_section) + return NULL; + + if (!nvdev->mrc[q_idx].buf) + return NULL; + + if (atomic_read(&nvdev->num_outstanding_recvs) > + nvdev->recv_section->num_sub_allocs * NETVSC_RCD_WATERMARK / 100) + netvsc_chk_recv_comp(nvdev, channel, q_idx); + + count_recv_comp_slot(nvdev, q_idx, &filled, &avail); + if (!avail) + return NULL; + + next = nvdev->mrc[q_idx].next; + rcd = nvdev->mrc[q_idx].buf + next * sizeof(struct recv_comp_data); + nvdev->mrc[q_idx].next = (next + 1) % NETVSC_RECVSLOT_MAX; + + atomic_inc(&nvdev->num_outstanding_recvs); + + return rcd; +} + static void netvsc_receive(struct netvsc_device *net_device, struct vmbus_channel *channel, struct hv_device *device, @@ -1025,6 +1194,9 @@ static void netvsc_receive(struct netvsc_device *net_device, int count = 0; struct net_device *ndev = hv_get_drvdata(device); void *data; + int ret; + struct recv_comp_data *rcd; + u16 q_idx = channel->offermsg.offer.sub_channel_index; /* * All inbound packets other than send completion should be xfer page @@ -1069,13 +1241,29 @@ static void netvsc_receive(struct netvsc_device *net_device, /* Pass it to the upper layer */ status = rndis_filter_receive(device, netvsc_packet, &data, channel); + } + if (!net_device->mrc[q_idx].buf) { + ret = netvsc_send_recv_completion(channel, + vmxferpage_packet->d.trans_id, + status); + if (ret) + netdev_err(ndev, "Recv_comp q:%hd, tid:%llx, err:%d\n", + q_idx, vmxferpage_packet->d.trans_id, ret); + return; } - netvsc_send_recv_completion(device, channel, net_device, - vmxferpage_packet->d.trans_id, status); -} + rcd = get_recv_comp_slot(net_device, channel, q_idx); + if (!rcd) { + netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n", + q_idx, vmxferpage_packet->d.trans_id); + return; + } + + rcd->tid = vmxferpage_packet->d.trans_id; + rcd->status = status; +} static void netvsc_send_table(struct hv_device *hdev, struct nvsp_message *nvmsg) @@ -1157,11 +1345,11 @@ static void netvsc_process_raw_pkt(struct 
hv_device *device, } } - void netvsc_channel_cb(void *context) { int ret; struct vmbus_channel *channel = (struct vmbus_channel *)context; + u16 q_idx = channel->offermsg.offer.sub_channel_index; struct hv_device *device; struct netvsc_device *net_device; u32 bytes_recvd; @@ -1213,8 +1401,6 @@ void netvsc_channel_cb(void *context) ndev, request_id, desc); - - } else { /* * We are done for this pass. @@ -1241,7 +1427,8 @@ void netvsc_channel_cb(void *context) if (bufferlen > NETVSC_PACKET_SIZE) kfree(buffer); - return; + + netvsc_chk_recv_comp(net_device, channel, q_idx); } /* @@ -1263,9 +1450,6 @@ int netvsc_device_add(struct hv_device *device, void *additional_info) net_device->ring_size = ring_size; - /* Initialize the NetVSC channel extension */ - init_completion(&net_device->channel_init_wait); - set_per_channel_state(device->channel, net_device->cb_buffer); /* Open the channel */ diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 3ba29fc80d05..2360e704e271 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -40,7 +40,6 @@ #include "hyperv_net.h" - #define RING_SIZE_MIN 64 #define LINKCHANGE_INT (2 * HZ) #define NETVSC_HW_FEATURES (NETIF_F_RXCSUM | \ @@ -358,18 +357,14 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) struct rndis_message *rndis_msg; struct rndis_packet *rndis_pkt; u32 rndis_msg_size; - bool isvlan; - bool linear = false; struct rndis_per_packet_info *ppi; struct ndis_tcp_ip_checksum_info *csum_info; - struct ndis_tcp_lso_info *lso_info; int hdr_offset; u32 net_trans_info; u32 hash; u32 skb_length; struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT]; struct hv_page_buffer *pb = page_buf; - struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats); /* We will atmost need two pages to describe the rndis * header. We can only transmit MAX_PAGE_BUFFER_COUNT number @@ -377,22 +372,20 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) * more pages we try linearizing it. */ -check_size: skb_length = skb->len; num_data_pgs = netvsc_get_slots(skb) + 2; - if (num_data_pgs > MAX_PAGE_BUFFER_COUNT && linear) { - net_alert_ratelimited("packet too big: %u pages (%u bytes)\n", - num_data_pgs, skb->len); - ret = -EFAULT; - goto drop; - } else if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) { - if (skb_linearize(skb)) { - net_alert_ratelimited("failed to linearize skb\n"); - ret = -ENOMEM; + + if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) { + ++net_device_ctx->eth_stats.tx_scattered; + + if (skb_linearize(skb)) + goto no_memory; + + num_data_pgs = netvsc_get_slots(skb) + 2; + if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) { + ++net_device_ctx->eth_stats.tx_too_big; goto drop; } - linear = true; - goto check_size; } /* @@ -401,17 +394,14 @@ check_size: * structure. 
*/ ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE); - if (ret) { - netdev_err(net, "unable to alloc hv_netvsc_packet\n"); - ret = -ENOMEM; - goto drop; - } + if (ret) + goto no_memory; + /* Use the skb control buffer for building up the packet */ BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) > FIELD_SIZEOF(struct sk_buff, cb)); packet = (struct hv_netvsc_packet *)skb->cb; - packet->q_idx = skb_get_queue_mapping(skb); packet->total_data_buflen = skb->len; @@ -420,8 +410,6 @@ check_size: memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE); - isvlan = skb->vlan_tci & VLAN_TAG_PRESENT; - /* Add the rndis header */ rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET; rndis_msg->msg_len = packet->total_data_buflen; @@ -440,7 +428,7 @@ check_size: *(u32 *)((void *)ppi + ppi->ppi_offset) = hash; } - if (isvlan) { + if (skb_vlan_tag_present(skb)) { struct ndis_pkt_8021q_info *vlan; rndis_msg_size += NDIS_VLAN_PPI_SIZE; @@ -461,8 +449,37 @@ check_size: * Setup the sendside checksum offload only if this is not a * GSO packet. */ - if (skb_is_gso(skb)) - goto do_lso; + if (skb_is_gso(skb)) { + struct ndis_tcp_lso_info *lso_info; + + rndis_msg_size += NDIS_LSO_PPI_SIZE; + ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE, + TCP_LARGESEND_PKTINFO); + + lso_info = (struct ndis_tcp_lso_info *)((void *)ppi + + ppi->ppi_offset); + + lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE; + if (net_trans_info & (INFO_IPV4 << 16)) { + lso_info->lso_v2_transmit.ip_version = + NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4; + ip_hdr(skb)->tot_len = 0; + ip_hdr(skb)->check = 0; + tcp_hdr(skb)->check = + ~csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); + } else { + lso_info->lso_v2_transmit.ip_version = + NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6; + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); + } + lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset; + lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size; + goto do_send; + } if ((skb->ip_summed == CHECKSUM_NONE) || (skb->ip_summed == CHECKSUM_UNNECESSARY)) @@ -495,7 +512,7 @@ check_size: ret = skb_cow_head(skb, 0); if (ret) - goto drop; + goto no_memory; uh = udp_hdr(skb); udp_len = ntohs(uh->len); @@ -509,35 +526,6 @@ check_size: csum_info->transmit.udp_checksum = 0; } - goto do_send; - -do_lso: - rndis_msg_size += NDIS_LSO_PPI_SIZE; - ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE, - TCP_LARGESEND_PKTINFO); - - lso_info = (struct ndis_tcp_lso_info *)((void *)ppi + - ppi->ppi_offset); - - lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE; - if (net_trans_info & (INFO_IPV4 << 16)) { - lso_info->lso_v2_transmit.ip_version = - NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4; - ip_hdr(skb)->tot_len = 0; - ip_hdr(skb)->check = 0; - tcp_hdr(skb)->check = - ~csum_tcpudp_magic(ip_hdr(skb)->saddr, - ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); - } else { - lso_info->lso_v2_transmit.ip_version = - NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6; - ipv6_hdr(skb)->payload_len = 0; - tcp_hdr(skb)->check = - ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); - } - lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset; - lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size; do_send: /* Start filling in the page buffers with the rndis hdr */ @@ -550,21 +538,33 @@ do_send: skb_tx_timestamp(skb); ret = netvsc_send(net_device_ctx->device_ctx, packet, rndis_msg, &pb, skb); + if (likely(ret == 0)) { + struct netvsc_stats *tx_stats = 
this_cpu_ptr(net_device_ctx->tx_stats); -drop: - if (ret == 0) { u64_stats_update_begin(&tx_stats->syncp); tx_stats->packets++; tx_stats->bytes += skb_length; u64_stats_update_end(&tx_stats->syncp); - } else { - if (ret != -EAGAIN) { - dev_kfree_skb_any(skb); - net->stats.tx_dropped++; - } + return NETDEV_TX_OK; + } + + if (ret == -EAGAIN) { + ++net_device_ctx->eth_stats.tx_busy; + return NETDEV_TX_BUSY; } - return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK; + if (ret == -ENOSPC) + ++net_device_ctx->eth_stats.tx_no_space; + +drop: + dev_kfree_skb_any(skb); + net->stats.tx_dropped++; + + return NETDEV_TX_OK; + +no_memory: + ++net_device_ctx->eth_stats.tx_no_memory; + goto drop; } /* @@ -579,19 +579,32 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj, struct netvsc_reconfig *event; unsigned long flags; - /* Handle link change statuses only */ + net = hv_get_drvdata(device_obj); + + if (!net) + return; + + ndev_ctx = netdev_priv(net); + + /* Update the physical link speed when changing to another vSwitch */ + if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) { + u32 speed; + + speed = *(u32 *)((void *)indicate + indicate-> + status_buf_offset) / 10000; + ndev_ctx->speed = speed; + return; + } + + /* Handle these link change statuses below */ if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE && indicate->status != RNDIS_STATUS_MEDIA_CONNECT && indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT) return; - net = hv_get_drvdata(device_obj); - - if (!net || net->reg_state != NETREG_REGISTERED) + if (net->reg_state != NETREG_REGISTERED) return; - ndev_ctx = netdev_priv(net); - event = kzalloc(sizeof(*event), GFP_ATOMIC); if (!event) return; @@ -604,7 +617,6 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj, schedule_delayed_work(&ndev_ctx->dwork, 0); } - static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, struct hv_netvsc_packet *packet, struct ndis_tcp_ip_checksum_info *csum_info, @@ -728,8 +740,12 @@ vf_injection_done: static void netvsc_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info) { + struct net_device_context *net_device_ctx = netdev_priv(net); + struct hv_device *dev = net_device_ctx->device_ctx; + strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); + strlcpy(info->bus_info, vmbus_dev_name(dev), sizeof(info->bus_info)); } static void netvsc_get_channels(struct net_device *net, @@ -1005,6 +1021,51 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p) return err; } +static const struct { + char name[ETH_GSTRING_LEN]; + u16 offset; +} netvsc_stats[] = { + { "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) }, + { "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) }, + { "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) }, + { "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) }, + { "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) }, +}; + +static int netvsc_get_sset_count(struct net_device *dev, int string_set) +{ + switch (string_set) { + case ETH_SS_STATS: + return ARRAY_SIZE(netvsc_stats); + default: + return -EINVAL; + } +} + +static void netvsc_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct net_device_context *ndc = netdev_priv(dev); + const void *nds = &ndc->eth_stats; + int i; + + for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) + data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset); +} + +static void 
netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) + memcpy(data + i * ETH_GSTRING_LEN, + netvsc_stats[i].name, ETH_GSTRING_LEN); + break; + } +} + #ifdef CONFIG_NET_POLL_CONTROLLER static void netvsc_poll_controller(struct net_device *net) { @@ -1017,6 +1078,9 @@ static void netvsc_poll_controller(struct net_device *net) static const struct ethtool_ops ethtool_ops = { .get_drvinfo = netvsc_get_drvinfo, .get_link = ethtool_op_get_link, + .get_ethtool_stats = netvsc_get_ethtool_stats, + .get_sset_count = netvsc_get_sset_count, + .get_strings = netvsc_get_strings, .get_channels = netvsc_get_channels, .set_channels = netvsc_set_channels, .get_ts_info = ethtool_op_get_ts_info, @@ -1154,9 +1218,8 @@ static void netvsc_free_netdev(struct net_device *netdev) static struct net_device *get_netvsc_net_device(char *mac) { struct net_device *dev, *found = NULL; - int rtnl_locked; - rtnl_locked = rtnl_trylock(); + ASSERT_RTNL(); for_each_netdev(&init_net, dev) { if (memcmp(dev->dev_addr, mac, ETH_ALEN) == 0) { @@ -1166,8 +1229,6 @@ static struct net_device *get_netvsc_net_device(char *mac) break; } } - if (rtnl_locked) - rtnl_unlock(); return found; } @@ -1261,7 +1322,6 @@ static int netvsc_vf_up(struct net_device *vf_netdev) return NOTIFY_OK; } - static int netvsc_vf_down(struct net_device *vf_netdev) { struct net_device *ndev; @@ -1295,7 +1355,6 @@ static int netvsc_vf_down(struct net_device *vf_netdev) return NOTIFY_OK; } - static int netvsc_unregister_vf(struct net_device *vf_netdev) { struct net_device *ndev; @@ -1337,6 +1396,8 @@ static int netvsc_probe(struct hv_device *dev, netif_carrier_off(net); + netvsc_init_settings(net); + net_device_ctx = netdev_priv(net); net_device_ctx->device_ctx = dev; net_device_ctx->msg_enable = netif_msg_init(debug, default_msg); @@ -1398,8 +1459,6 @@ static int netvsc_probe(struct hv_device *dev, netif_set_real_num_tx_queues(net, nvdev->num_chn); netif_set_real_num_rx_queues(net, nvdev->num_chn); - netvsc_init_settings(net); - ret = register_netdev(net); if (ret != 0) { pr_err("Unable to register netdev.\n"); @@ -1423,7 +1482,6 @@ static int netvsc_remove(struct hv_device *dev) return 0; } - ndev_ctx = netdev_priv(net); net_device = ndev_ctx->nvdev; @@ -1470,7 +1528,6 @@ static struct hv_driver netvsc_drv = { .remove = netvsc_remove, }; - /* * On Hyper-V, every VF interface is matched with a corresponding * synthetic interface. 
The synthetic interface is presented first diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 8e830f741d47..9195d5da8485 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -663,13 +663,14 @@ cleanup: return ret; } -u8 netvsc_hash_key[HASH_KEYLEN] = { +static const u8 netvsc_hash_key[] = { 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa }; +#define HASH_KEYLEN ARRAY_SIZE(netvsc_hash_key) static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue) { @@ -720,7 +721,6 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue) for (i = 0; i < HASH_KEYLEN; i++) keyp[i] = netvsc_hash_key[i]; - ret = rndis_filter_send_request(rdev, request); if (ret != 0) goto cleanup; @@ -738,7 +738,6 @@ cleanup: return ret; } - static int rndis_filter_query_device_link_status(struct rndis_device *dev) { u32 size = sizeof(u32); @@ -752,6 +751,28 @@ static int rndis_filter_query_device_link_status(struct rndis_device *dev) return ret; } +static int rndis_filter_query_link_speed(struct rndis_device *dev) +{ + u32 size = sizeof(u32); + u32 link_speed; + struct net_device_context *ndc; + int ret; + + ret = rndis_filter_query_device(dev, RNDIS_OID_GEN_LINK_SPEED, + &link_speed, &size); + + if (!ret) { + ndc = netdev_priv(dev->ndev); + + /* The link speed reported from host is in 100bps unit, so + * we convert it to Mbps here. + */ + ndc->speed = link_speed / 10000; + } + + return ret; +} + int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter) { struct rndis_request *request; @@ -792,7 +813,6 @@ cleanup: return ret; } - static int rndis_filter_init_device(struct rndis_device *dev) { struct rndis_request *request; @@ -875,11 +895,11 @@ cleanup: /* Wait for all send completions */ wait_event(nvdev->wait_drain, - atomic_read(&nvdev->num_outstanding_sends) == 0); + atomic_read(&nvdev->num_outstanding_sends) == 0 && + atomic_read(&nvdev->num_outstanding_recvs) == 0); if (request) put_rndis_request(dev, request); - return; } static int rndis_filter_open_device(struct rndis_device *dev) @@ -931,6 +951,9 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) set_per_channel_state(new_sc, nvscdev->sub_cb_buf + (chn_index - 1) * NETVSC_PACKET_SIZE); + nvscdev->mrc[chn_index].buf = vzalloc(NETVSC_RECVSLOT_MAX * + sizeof(struct recv_comp_data)); + ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE, nvscdev->ring_size * PAGE_SIZE, NULL, 0, netvsc_channel_cb, new_sc); @@ -946,7 +969,7 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) } int rndis_filter_device_add(struct hv_device *dev, - void *additional_info) + void *additional_info) { int ret; struct net_device *net = hv_get_drvdata(dev); @@ -1028,7 +1051,6 @@ int rndis_filter_device_add(struct hv_device *dev, offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED; - ret = rndis_filter_set_offload_params(net, &offloads); if (ret) goto err_dev_remv; @@ -1044,6 +1066,8 @@ int rndis_filter_device_add(struct hv_device *dev, if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5) return 0; + rndis_filter_query_link_speed(rndis_device); + /* vRSS setup */ memset(&rsscap, 0, rsscap_size); ret = rndis_filter_query_device(rndis_device, @@ -1152,7 +1176,6 @@ void 
rndis_filter_device_remove(struct hv_device *dev) netvsc_device_remove(dev); } - int rndis_filter_open(struct netvsc_device *nvdev) { if (!nvdev) diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 47a64342cc16..1c3e07c3d0b8 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -15,152 +15,218 @@ if PHYLIB config SWPHY bool -comment "MII PHY device drivers" - -config AQUANTIA_PHY - tristate "Drivers for the Aquantia PHYs" - ---help--- - Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405 +comment "MDIO bus device drivers" -config AT803X_PHY - tristate "Drivers for Atheros AT803X PHYs" - ---help--- - Currently supports the AT8030 and AT8035 model +config MDIO_BCM_IPROC + tristate "Broadcom iProc MDIO bus controller" + depends on ARCH_BCM_IPROC || COMPILE_TEST + depends on HAS_IOMEM && OF_MDIO + help + This module provides a driver for the MDIO busses found in the + Broadcom iProc SoC's. -config AMD_PHY - tristate "Drivers for the AMD PHYs" - ---help--- - Currently supports the am79c874 +config MDIO_BCM_UNIMAC + tristate "Broadcom UniMAC MDIO bus controller" + depends on HAS_IOMEM + help + This module provides a driver for the Broadcom UniMAC MDIO busses. + This hardware can be found in the Broadcom GENET Ethernet MAC + controllers as well as some Broadcom Ethernet switches such as the + Starfighter 2 switches. -config MARVELL_PHY - tristate "Drivers for Marvell PHYs" - ---help--- - Currently has a driver for the 88E1011S - -config DAVICOM_PHY - tristate "Drivers for Davicom PHYs" - ---help--- - Currently supports dm9161e and dm9131 +config MDIO_BITBANG + tristate "Bitbanged MDIO buses" + help + This module implements the MDIO bus protocol in software, + for use by low level drivers that export the ability to + drive the relevant pins. -config QSEMI_PHY - tristate "Drivers for Quality Semiconductor PHYs" - ---help--- - Currently supports the qs6612 + If in doubt, say N. -config LXT_PHY - tristate "Drivers for the Intel LXT PHYs" - ---help--- - Currently supports the lxt970, lxt971 +config MDIO_BUS_MUX + tristate + depends on OF_MDIO + help + This module provides a driver framework for MDIO bus + multiplexers which connect one of several child MDIO busses + to a parent bus. Switching between child busses is done by + device specific drivers. -config CICADA_PHY - tristate "Drivers for the Cicada PHYs" - ---help--- - Currently supports the cis8204 +config MDIO_BUS_MUX_BCM_IPROC + tristate "Broadcom iProc based MDIO bus multiplexers" + depends on OF && OF_MDIO && (ARCH_BCM_IPROC || COMPILE_TEST) + select MDIO_BUS_MUX + default ARCH_BCM_IPROC + help + This module provides a driver for MDIO bus multiplexers found in + iProc based Broadcom SoCs. This multiplexer connects one of several + child MDIO bus to a parent bus. Buses could be internal as well as + external and selection logic lies inside the same multiplexer. -config VITESSE_PHY - tristate "Drivers for the Vitesse PHYs" - ---help--- - Currently supports the vsc8244 +config MDIO_BUS_MUX_GPIO + tristate "GPIO controlled MDIO bus multiplexers" + depends on OF_GPIO && OF_MDIO + select MDIO_BUS_MUX + help + This module provides a driver for MDIO bus multiplexers that + are controlled via GPIO lines. The multiplexer connects one of + several child MDIO busses to a parent bus. Child bus + selection is under the control of GPIO lines. 
-config TERANETICS_PHY - tristate "Drivers for the Teranetics PHYs" - ---help--- - Currently supports the Teranetics TN2020 +config MDIO_BUS_MUX_MMIOREG + tristate "MMIO device-controlled MDIO bus multiplexers" + depends on OF_MDIO && HAS_IOMEM + select MDIO_BUS_MUX + help + This module provides a driver for MDIO bus multiplexers that + are controlled via a simple memory-mapped device, like an FPGA. + The multiplexer connects one of several child MDIO busses to a + parent bus. Child bus selection is under the control of one of + the FPGA's registers. -config SMSC_PHY - tristate "Drivers for SMSC PHYs" - ---help--- - Currently supports the LAN83C185, LAN8187 and LAN8700 PHYs + Currently, only 8-bit registers are supported. -config BCM_NET_PHYLIB +config MDIO_CAVIUM tristate -config BROADCOM_PHY - tristate "Drivers for Broadcom PHYs" - select BCM_NET_PHYLIB +config MDIO_GPIO + tristate "GPIO lib-based bitbanged MDIO buses" + depends on MDIO_BITBANG && GPIOLIB ---help--- - Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464, - BCM5481 and BCM5482 PHYs. + Supports GPIO lib-based MDIO busses. -config BCM_CYGNUS_PHY - tristate "Drivers for Broadcom Cygnus SoC internal PHY" - depends on ARCH_BCM_CYGNUS || COMPILE_TEST - depends on MDIO_BCM_IPROC - select BCM_NET_PHYLIB + To compile this driver as a module, choose M here: the module + will be called mdio-gpio. + +config MDIO_HISI_FEMAC + tristate "Hisilicon FEMAC MDIO bus controller" + depends on HAS_IOMEM && OF_MDIO + help + This module provides a driver for the MDIO busses found in the + Hisilicon SoC that have an Fast Ethernet MAC. + +config MDIO_MOXART + tristate "MOXA ART MDIO interface support" + depends on ARCH_MOXART + help + This driver supports the MDIO interface found in the network + interface units of the MOXA ART SoC + +config MDIO_OCTEON + tristate "Octeon and some ThunderX SOCs MDIO buses" + depends on 64BIT + depends on HAS_IOMEM + select MDIO_CAVIUM + help + This module provides a driver for the Octeon and ThunderX MDIO + buses. It is required by the Octeon and ThunderX ethernet device + drivers on some systems. + +config MDIO_SUN4I + tristate "Allwinner sun4i MDIO interface support" + depends on ARCH_SUNXI + help + This driver supports the MDIO interface found in the network + interface units of the Allwinner SoC that have an EMAC (A10, + A12, A10s, etc.) + +config MDIO_THUNDER + tristate "ThunderX SOCs MDIO buses" + depends on 64BIT + depends on PCI + select MDIO_CAVIUM + help + This driver supports the MDIO interfaces found on Cavium + ThunderX SoCs when the MDIO bus device appears as a PCI + device. + +config MDIO_XGENE + tristate "APM X-Gene SoC MDIO bus controller" + help + This module provides a driver for the MDIO busses found in the + APM X-Gene SoC's. + +comment "MII PHY device drivers" + +config AMD_PHY + tristate "AMD PHYs" ---help--- - This PHY driver is for the 1G internal PHYs of the Broadcom - Cygnus Family SoC. + Currently supports the am79c874 - Currently supports internal PHY's used in the BCM11300, - BCM11320, BCM11350, BCM11360, BCM58300, BCM58302, - BCM58303 & BCM58305 Broadcom Cygnus SoCs. 
+config AQUANTIA_PHY + tristate "Aquantia PHYs" + ---help--- + Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405 + +config AT803X_PHY + tristate "AT803X PHYs" + ---help--- + Currently supports the AT8030 and AT8035 model config BCM63XX_PHY - tristate "Drivers for Broadcom 63xx SOCs internal PHY" + tristate "Broadcom 63xx SOCs internal PHY" depends on BCM63XX select BCM_NET_PHYLIB ---help--- Currently supports the 6348 and 6358 PHYs. config BCM7XXX_PHY - tristate "Drivers for Broadcom 7xxx SOCs internal PHYs" + tristate "Broadcom 7xxx SOCs internal PHYs" select BCM_NET_PHYLIB ---help--- Currently supports the BCM7366, BCM7439, BCM7445, and 40nm and 65nm generation of BCM7xxx Set Top Box SoCs. config BCM87XX_PHY - tristate "Driver for Broadcom BCM8706 and BCM8727 PHYs" + tristate "Broadcom BCM8706 and BCM8727 PHYs" help Currently supports the BCM8706 and BCM8727 10G Ethernet PHYs. -config ICPLUS_PHY - tristate "Drivers for ICPlus PHYs" +config BCM_CYGNUS_PHY + tristate "Broadcom Cygnus SoC internal PHY" + depends on ARCH_BCM_CYGNUS || COMPILE_TEST + depends on MDIO_BCM_IPROC + select BCM_NET_PHYLIB ---help--- - Currently supports the IP175C and IP1001 PHYs. + This PHY driver is for the 1G internal PHYs of the Broadcom + Cygnus Family SoC. -config REALTEK_PHY - tristate "Drivers for Realtek PHYs" - ---help--- - Supports the Realtek 821x PHY. + Currently supports internal PHY's used in the BCM11300, + BCM11320, BCM11350, BCM11360, BCM58300, BCM58302, + BCM58303 & BCM58305 Broadcom Cygnus SoCs. -config NATIONAL_PHY - tristate "Drivers for National Semiconductor PHYs" - ---help--- - Currently supports the DP83865 PHY. +config BCM_NET_PHYLIB + tristate -config STE10XP - tristate "Driver for STMicroelectronics STe10Xp PHYs" +config BROADCOM_PHY + tristate "Broadcom PHYs" + select BCM_NET_PHYLIB ---help--- - This is the driver for the STe100p and STe101p PHYs. + Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464, + BCM5481 and BCM5482 PHYs. -config LSI_ET1011C_PHY - tristate "Driver for LSI ET1011C PHY" +config CICADA_PHY + tristate "Cicada PHYs" ---help--- - Supports the LSI ET1011C PHY. + Currently supports the cis8204 -config MICREL_PHY - tristate "Driver for Micrel PHYs" +config DAVICOM_PHY + tristate "Davicom PHYs" ---help--- - Supports the KSZ9021, VSC8201, KS8001 PHYs. + Currently supports dm9161e and dm9131 config DP83848_PHY - tristate "Driver for Texas Instruments DP83848 PHY" + tristate "Texas Instruments DP83848 PHY" ---help--- Supports the DP83848 PHY. config DP83867_PHY - tristate "Drivers for Texas Instruments DP83867 Gigabit PHY" + tristate "Texas Instruments DP83867 Gigabit PHY" ---help--- Currently supports the DP83867 PHY. -config MICROCHIP_PHY - tristate "Drivers for Microchip PHYs" - help - Supports the LAN88XX PHYs. - config FIXED_PHY - tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" + tristate "MDIO Bus/PHY emulation with fixed speed/link PHYs" depends on PHYLIB select SWPHY ---help--- @@ -169,143 +235,90 @@ config FIXED_PHY Currently tested with mpc866ads and mpc8349e-mitx. -config MDIO_BITBANG - tristate "Support for bitbanged MDIO buses" - help - This module implements the MDIO bus protocol in software, - for use by low level drivers that export the ability to - drive the relevant pins. - - If in doubt, say N. - -config MDIO_GPIO - tristate "Support for GPIO lib-based bitbanged MDIO buses" - depends on MDIO_BITBANG && GPIOLIB +config ICPLUS_PHY + tristate "ICPlus PHYs" ---help--- - Supports GPIO lib-based MDIO busses. 
- - To compile this driver as a module, choose M here: the module - will be called mdio-gpio. - -config MDIO_CAVIUM - tristate - -config MDIO_OCTEON - tristate "Support for MDIO buses on Octeon and some ThunderX SOCs" - depends on 64BIT - depends on HAS_IOMEM - select MDIO_CAVIUM - help - This module provides a driver for the Octeon and ThunderX MDIO - buses. It is required by the Octeon and ThunderX ethernet device - drivers on some systems. + Currently supports the IP175C and IP1001 PHYs. -config MDIO_THUNDER - tristate "Support for MDIO buses on ThunderX SOCs" - depends on 64BIT - depends on PCI - select MDIO_CAVIUM - help - This driver supports the MDIO interfaces found on Cavium - ThunderX SoCs when the MDIO bus device appears as a PCI - device. +config INTEL_XWAY_PHY + tristate "Intel XWAY PHYs" + ---help--- + Supports the Intel XWAY (former Lantiq) 11G and 22E PHYs. + These PHYs are marked as standalone chips under the names + PEF 7061, PEF 7071 and PEF 7072 or integrated into the Intel + SoCs xRX200, xRX300, xRX330, xRX350 and xRX550. +config LSI_ET1011C_PHY + tristate "LSI ET1011C PHY" + ---help--- + Supports the LSI ET1011C PHY. -config MDIO_SUN4I - tristate "Allwinner sun4i MDIO interface support" - depends on ARCH_SUNXI - help - This driver supports the MDIO interface found in the network - interface units of the Allwinner SoC that have an EMAC (A10, - A12, A10s, etc.) +config LXT_PHY + tristate "Intel LXT PHYs" + ---help--- + Currently supports the lxt970, lxt971 -config MDIO_MOXART - tristate "MOXA ART MDIO interface support" - depends on ARCH_MOXART - help - This driver supports the MDIO interface found in the network - interface units of the MOXA ART SoC +config MARVELL_PHY + tristate "Marvell PHYs" + ---help--- + Currently has a driver for the 88E1011S -config MDIO_BUS_MUX - tristate - depends on OF_MDIO - help - This module provides a driver framework for MDIO bus - multiplexers which connect one of several child MDIO busses - to a parent bus. Switching between child busses is done by - device specific drivers. +config MICREL_PHY + tristate "Micrel PHYs" + ---help--- + Supports the KSZ9021, VSC8201, KS8001 PHYs. -config MDIO_BUS_MUX_GPIO - tristate "Support for GPIO controlled MDIO bus multiplexers" - depends on OF_GPIO && OF_MDIO - select MDIO_BUS_MUX +config MICROCHIP_PHY + tristate "Microchip PHYs" help - This module provides a driver for MDIO bus multiplexers that - are controlled via GPIO lines. The multiplexer connects one of - several child MDIO busses to a parent bus. Child bus - selection is under the control of GPIO lines. + Supports the LAN88XX PHYs. -config MDIO_BUS_MUX_MMIOREG - tristate "Support for MMIO device-controlled MDIO bus multiplexers" - depends on OF_MDIO && HAS_IOMEM - select MDIO_BUS_MUX - help - This module provides a driver for MDIO bus multiplexers that - are controlled via a simple memory-mapped device, like an FPGA. - The multiplexer connects one of several child MDIO busses to a - parent bus. Child bus selection is under the control of one of - the FPGA's registers. +config MICROSEMI_PHY + tristate "Microsemi PHYs" + ---help--- + Currently supports the VSC8531 and VSC8541 PHYs - Currently, only 8-bit registers are supported. +config NATIONAL_PHY + tristate "National Semiconductor PHYs" + ---help--- + Currently supports the DP83865 PHY. 
-config MDIO_BUS_MUX_BCM_IPROC - tristate "Support for iProc based MDIO bus multiplexers" - depends on OF && OF_MDIO && (ARCH_BCM_IPROC || COMPILE_TEST) - select MDIO_BUS_MUX - default ARCH_BCM_IPROC - help - This module provides a driver for MDIO bus multiplexers found in - iProc based Broadcom SoCs. This multiplexer connects one of several - child MDIO bus to a parent bus. Buses could be internal as well as - external and selection logic lies inside the same multiplexer. +config QSEMI_PHY + tristate "Quality Semiconductor PHYs" + ---help--- + Currently supports the qs6612 -config MDIO_BCM_UNIMAC - tristate "Broadcom UniMAC MDIO bus controller" - depends on HAS_IOMEM - help - This module provides a driver for the Broadcom UniMAC MDIO busses. - This hardware can be found in the Broadcom GENET Ethernet MAC - controllers as well as some Broadcom Ethernet switches such as the - Starfighter 2 switches. +config REALTEK_PHY + tristate "Realtek PHYs" + ---help--- + Supports the Realtek 821x PHY. -config MDIO_BCM_IPROC - tristate "Broadcom iProc MDIO bus controller" - depends on ARCH_BCM_IPROC || COMPILE_TEST - depends on HAS_IOMEM && OF_MDIO - help - This module provides a driver for the MDIO busses found in the - Broadcom iProc SoC's. +config SMSC_PHY + tristate "SMSC PHYs" + ---help--- + Currently supports the LAN83C185, LAN8187 and LAN8700 PHYs -config INTEL_XWAY_PHY - tristate "Driver for Intel XWAY PHYs" +config STE10XP + tristate "STMicroelectronics STe10Xp PHYs" ---help--- - Supports the Intel XWAY (former Lantiq) 11G and 22E PHYs. - These PHYs are marked as standalone chips under the names - PEF 7061, PEF 7071 and PEF 7072 or integrated into the Intel - SoCs xRX200, xRX300, xRX330, xRX350 and xRX550. + This is the driver for the STe100p and STe101p PHYs. -config MDIO_HISI_FEMAC - tristate "Hisilicon FEMAC MDIO bus controller" - depends on HAS_IOMEM && OF_MDIO - help - This module provides a driver for the MDIO busses found in the - Hisilicon SoC that have an Fast Ethernet MAC. +config TERANETICS_PHY + tristate "Teranetics PHYs" + ---help--- + Currently supports the Teranetics TN2020 -config MDIO_XGENE - tristate "APM X-Gene SoC MDIO bus controller" - help - This module provides a driver for the MDIO busses found in the - APM X-Gene SoC's. +config VITESSE_PHY + tristate "Vitesse PHYs" + ---help--- + Currently supports the vsc8244 + +config XILINX_GMII2RGMII + tristate "Xilinx GMII2RGMII converter driver" + ---help--- + This driver supports the Xilinx GMII to RGMII IP core. It provides + the Reduced Gigabit Media Independent Interface (RGMII) between + Ethernet physical media devices and the Gigabit Ethernet controller.
endif # PHYLIB diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 534dfa74d5a2..e58667d111e7 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -1,51 +1,55 @@ -# Makefile for Linux PHY drivers +# Makefile for Linux PHY drivers and MDIO bus drivers libphy-y := phy.o phy_device.o mdio_bus.o mdio_device.o libphy-$(CONFIG_SWPHY) += swphy.o obj-$(CONFIG_PHYLIB) += libphy.o + +obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o +obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o +obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o +obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o +obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o +obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o +obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o +obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o +obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o +obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o +obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o +obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o +obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o +obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o +obj-$(CONFIG_MDIO_XGENE) += mdio-xgene.o + +obj-$(CONFIG_AMD_PHY) += amd.o obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o -obj-$(CONFIG_MARVELL_PHY) += marvell.o -obj-$(CONFIG_DAVICOM_PHY) += davicom.o -obj-$(CONFIG_CICADA_PHY) += cicada.o -obj-$(CONFIG_LXT_PHY) += lxt.o -obj-$(CONFIG_QSEMI_PHY) += qsemi.o -obj-$(CONFIG_SMSC_PHY) += smsc.o -obj-$(CONFIG_TERANETICS_PHY) += teranetics.o -obj-$(CONFIG_VITESSE_PHY) += vitesse.o -obj-$(CONFIG_BCM_NET_PHYLIB) += bcm-phy-lib.o -obj-$(CONFIG_BROADCOM_PHY) += broadcom.o +obj-$(CONFIG_AT803X_PHY) += at803x.o obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o obj-$(CONFIG_BCM7XXX_PHY) += bcm7xxx.o obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o obj-$(CONFIG_BCM_CYGNUS_PHY) += bcm-cygnus.o -obj-$(CONFIG_ICPLUS_PHY) += icplus.o -obj-$(CONFIG_REALTEK_PHY) += realtek.o -obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o -obj-$(CONFIG_FIXED_PHY) += fixed_phy.o -obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o -obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o -obj-$(CONFIG_NATIONAL_PHY) += national.o +obj-$(CONFIG_BCM_NET_PHYLIB) += bcm-phy-lib.o +obj-$(CONFIG_BROADCOM_PHY) += broadcom.o +obj-$(CONFIG_CICADA_PHY) += cicada.o +obj-$(CONFIG_DAVICOM_PHY) += davicom.o obj-$(CONFIG_DP83640_PHY) += dp83640.o obj-$(CONFIG_DP83848_PHY) += dp83848.o obj-$(CONFIG_DP83867_PHY) += dp83867.o -obj-$(CONFIG_STE10XP) += ste10Xp.o -obj-$(CONFIG_MICREL_PHY) += micrel.o -obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o -obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o -obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o +obj-$(CONFIG_FIXED_PHY) += fixed_phy.o +obj-$(CONFIG_ICPLUS_PHY) += icplus.o +obj-$(CONFIG_INTEL_XWAY_PHY) += intel-xway.o +obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o +obj-$(CONFIG_LXT_PHY) += lxt.o +obj-$(CONFIG_MARVELL_PHY) += marvell.o obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o -obj-$(CONFIG_AT803X_PHY) += at803x.o -obj-$(CONFIG_AMD_PHY) += amd.o -obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o -obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o -obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o -obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o -obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o -obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o -obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o +obj-$(CONFIG_MICREL_PHY) += micrel.o obj-$(CONFIG_MICROCHIP_PHY) += microchip.o -obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o -obj-$(CONFIG_INTEL_XWAY_PHY) += intel-xway.o -obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o -obj-$(CONFIG_MDIO_XGENE) += 
mdio-xgene.o +obj-$(CONFIG_MICROSEMI_PHY) += mscc.o +obj-$(CONFIG_NATIONAL_PHY) += national.o +obj-$(CONFIG_QSEMI_PHY) += qsemi.o +obj-$(CONFIG_REALTEK_PHY) += realtek.o +obj-$(CONFIG_SMSC_PHY) += smsc.o +obj-$(CONFIG_STE10XP) += ste10Xp.o +obj-$(CONFIG_TERANETICS_PHY) += teranetics.o +obj-$(CONFIG_VITESSE_PHY) += vitesse.o +obj-$(CONFIG_XILINX_GMII2RGMII) += xilinx_gmii2rgmii.o diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c new file mode 100644 index 000000000000..ad33390b382a --- /dev/null +++ b/drivers/net/phy/mscc.c @@ -0,0 +1,161 @@ +/* + * Driver for Microsemi VSC85xx PHYs + * + * Author: Nagaraju Lakkaraju + * License: Dual MIT/GPL + * Copyright (c) 2016 Microsemi Corporation + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mdio.h> +#include <linux/mii.h> +#include <linux/phy.h> + +enum rgmii_rx_clock_delay { + RGMII_RX_CLK_DELAY_0_2_NS = 0, + RGMII_RX_CLK_DELAY_0_8_NS = 1, + RGMII_RX_CLK_DELAY_1_1_NS = 2, + RGMII_RX_CLK_DELAY_1_7_NS = 3, + RGMII_RX_CLK_DELAY_2_0_NS = 4, + RGMII_RX_CLK_DELAY_2_3_NS = 5, + RGMII_RX_CLK_DELAY_2_6_NS = 6, + RGMII_RX_CLK_DELAY_3_4_NS = 7 +}; + +#define MII_VSC85XX_INT_MASK 25 +#define MII_VSC85XX_INT_MASK_MASK 0xa000 +#define MII_VSC85XX_INT_STATUS 26 + +#define MSCC_EXT_PAGE_ACCESS 31 +#define MSCC_PHY_PAGE_STANDARD 0x0000 /* Standard registers */ +#define MSCC_PHY_PAGE_EXTENDED_2 0x0002 /* Extended reg - page 2 */ + +/* Extended Page 2 Registers */ +#define MSCC_PHY_RGMII_CNTL 20 +#define RGMII_RX_CLK_DELAY_MASK 0x0070 +#define RGMII_RX_CLK_DELAY_POS 4 + +/* Microsemi PHY ID's */ +#define PHY_ID_VSC8531 0x00070570 +#define PHY_ID_VSC8541 0x00070770 + +static int vsc85xx_phy_page_set(struct phy_device *phydev, u8 page) +{ + int rc; + + rc = phy_write(phydev, MSCC_EXT_PAGE_ACCESS, page); + return rc; +} + +static int vsc85xx_default_config(struct phy_device *phydev) +{ + int rc; + u16 reg_val; + + mutex_lock(&phydev->lock); + rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2); + if (rc != 0) + goto out_unlock; + + reg_val = phy_read(phydev, MSCC_PHY_RGMII_CNTL); + reg_val &= ~(RGMII_RX_CLK_DELAY_MASK); + reg_val |= (RGMII_RX_CLK_DELAY_1_1_NS << RGMII_RX_CLK_DELAY_POS); + phy_write(phydev, MSCC_PHY_RGMII_CNTL, reg_val); + rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD); + +out_unlock: + mutex_unlock(&phydev->lock); + + return rc; +} + +static int vsc85xx_config_init(struct phy_device *phydev) +{ + int rc; + + rc = vsc85xx_default_config(phydev); + if (rc) + return rc; + rc = genphy_config_init(phydev); + + return rc; +} + +static int vsc85xx_ack_interrupt(struct phy_device *phydev) +{ + int rc = 0; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + rc = phy_read(phydev, MII_VSC85XX_INT_STATUS); + + return (rc < 0) ? 
rc : 0; +} + +static int vsc85xx_config_intr(struct phy_device *phydev) +{ + int rc; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + rc = phy_write(phydev, MII_VSC85XX_INT_MASK, + MII_VSC85XX_INT_MASK_MASK); + } else { + rc = phy_write(phydev, MII_VSC85XX_INT_MASK, 0); + if (rc < 0) + return rc; + rc = phy_read(phydev, MII_VSC85XX_INT_STATUS); + } + + return rc; +} + +/* Microsemi VSC85xx PHYs */ +static struct phy_driver vsc85xx_driver[] = { +{ + .phy_id = PHY_ID_VSC8531, + .name = "Microsemi VSC8531", + .phy_id_mask = 0xfffffff0, + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .soft_reset = &genphy_soft_reset, + .config_init = &vsc85xx_config_init, + .config_aneg = &genphy_config_aneg, + .aneg_done = &genphy_aneg_done, + .read_status = &genphy_read_status, + .ack_interrupt = &vsc85xx_ack_interrupt, + .config_intr = &vsc85xx_config_intr, + .suspend = &genphy_suspend, + .resume = &genphy_resume, +}, +{ + .phy_id = PHY_ID_VSC8541, + .name = "Microsemi VSC8541 SyncE", + .phy_id_mask = 0xfffffff0, + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .soft_reset = &genphy_soft_reset, + .config_init = &vsc85xx_config_init, + .config_aneg = &genphy_config_aneg, + .aneg_done = &genphy_aneg_done, + .read_status = &genphy_read_status, + .ack_interrupt = &vsc85xx_ack_interrupt, + .config_intr = &vsc85xx_config_intr, + .suspend = &genphy_suspend, + .resume = &genphy_resume, +} + +}; + +module_phy_driver(vsc85xx_driver); + +static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = { + { PHY_ID_VSC8531, 0xfffffff0, }, + { PHY_ID_VSC8541, 0xfffffff0, }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, vsc85xx_tbl); + +MODULE_DESCRIPTION("Microsemi VSC85xx PHY driver"); +MODULE_AUTHOR("Nagaraju Lakkaraju"); +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c new file mode 100644 index 000000000000..d15dd3938ba8 --- /dev/null +++ b/drivers/net/phy/xilinx_gmii2rgmii.c @@ -0,0 +1,112 @@ +/* Xilinx GMII2RGMII Converter driver + * + * Copyright (C) 2016 Xilinx, Inc. + * Copyright (C) 2016 Andrew Lunn <andrew@lunn.ch> + * + * Author: Andrew Lunn <andrew@lunn.ch> + * Author: Kedareswara rao Appana <appanad@xilinx.com> + * + * Description: + * This driver is developed for Xilinx GMII2RGMII Converter + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/mii.h> +#include <linux/mdio.h> +#include <linux/phy.h> +#include <linux/of_mdio.h> + +#define XILINX_GMII2RGMII_REG 0x10 +#define XILINX_GMII2RGMII_SPEED_MASK (BMCR_SPEED1000 | BMCR_SPEED100) + +struct gmii2rgmii { + struct phy_device *phy_dev; + struct phy_driver *phy_drv; + struct phy_driver conv_phy_drv; + int addr; +}; + +static int xgmiitorgmii_read_status(struct phy_device *phydev) +{ + struct gmii2rgmii *priv = phydev->priv; + u16 val = 0; + + priv->phy_drv->read_status(phydev); + + val = mdiobus_read(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG); + val &= XILINX_GMII2RGMII_SPEED_MASK; + + if (phydev->speed == SPEED_1000) + val |= BMCR_SPEED1000; + else if (phydev->speed == SPEED_100) + val |= BMCR_SPEED100; + else + val |= BMCR_SPEED10; + + mdiobus_write(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG, val); + + return 0; +} + +static int xgmiitorgmii_probe(struct mdio_device *mdiodev) +{ + struct device *dev = &mdiodev->dev; + struct device_node *np = dev->of_node, *phy_node; + struct gmii2rgmii *priv; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + phy_node = of_parse_phandle(np, "phy-handle", 0); + if (!phy_node) { + dev_err(dev, "Couldn't parse phy-handle\n"); + return -ENODEV; + } + + priv->phy_dev = of_phy_find_device(phy_node); + of_node_put(phy_node); + if (!priv->phy_dev) { + dev_info(dev, "Couldn't find phydev\n"); + return -EPROBE_DEFER; + } + + priv->addr = mdiodev->addr; + priv->phy_drv = priv->phy_dev->drv; + memcpy(&priv->conv_phy_drv, priv->phy_dev->drv, + sizeof(struct phy_driver)); + priv->conv_phy_drv.read_status = xgmiitorgmii_read_status; + priv->phy_dev->priv = priv; + priv->phy_dev->drv = &priv->conv_phy_drv; + + return 0; +} + +static const struct of_device_id xgmiitorgmii_of_match[] = { + { .compatible = "xlnx,gmii-to-rgmii-1.0" }, + {}, +}; +MODULE_DEVICE_TABLE(of, xgmiitorgmii_of_match); + +static struct mdio_driver xgmiitorgmii_driver = { + .probe = xgmiitorgmii_probe, + .mdiodrv.driver = { + .name = "xgmiitorgmii", + .of_match_table = xgmiitorgmii_of_match, + }, +}; + +mdio_module_driver(xgmiitorgmii_driver); + +MODULE_DESCRIPTION("Xilinx GMII2RGMII converter driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index f226db4616b7..70cfa06ccd40 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -1103,6 +1103,15 @@ static int ppp_nl_newlink(struct net *src_net, struct net_device *dev, } conf.file = file; + + /* Don't use device name generated by the rtnetlink layer when ifname + * isn't specified. Let ppp_dev_configure() set the device name using + * the PPP unit identifier as suffix (i.e. ppp<unit_id>). This allows + * userspace to infer the device name using the PPPIOCGUNIT ioctl.
+ */ + if (!tb[IFLA_IFNAME]) + conf.ifname_is_set = false; + err = ppp_dev_configure(src_net, dev, &conf); out_unlock: diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index ae0905ed4a32..1951b1085cb8 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -37,6 +37,7 @@ #include <net/icmp.h> #include <net/route.h> #include <net/gre.h> +#include <net/pptp.h> #include <linux/uaccess.h> @@ -53,41 +54,6 @@ static struct proto pptp_sk_proto __read_mostly; static const struct ppp_channel_ops pptp_chan_ops; static const struct proto_ops pptp_ops; -#define PPP_LCP_ECHOREQ 0x09 -#define PPP_LCP_ECHOREP 0x0A -#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP) - -#define MISSING_WINDOW 20 -#define WRAPPED(curseq, lastseq)\ - ((((curseq) & 0xffffff00) == 0) &&\ - (((lastseq) & 0xffffff00) == 0xffffff00)) - -#define PPTP_GRE_PROTO 0x880B -#define PPTP_GRE_VER 0x1 - -#define PPTP_GRE_FLAG_C 0x80 -#define PPTP_GRE_FLAG_R 0x40 -#define PPTP_GRE_FLAG_K 0x20 -#define PPTP_GRE_FLAG_S 0x10 -#define PPTP_GRE_FLAG_A 0x80 - -#define PPTP_GRE_IS_C(f) ((f)&PPTP_GRE_FLAG_C) -#define PPTP_GRE_IS_R(f) ((f)&PPTP_GRE_FLAG_R) -#define PPTP_GRE_IS_K(f) ((f)&PPTP_GRE_FLAG_K) -#define PPTP_GRE_IS_S(f) ((f)&PPTP_GRE_FLAG_S) -#define PPTP_GRE_IS_A(f) ((f)&PPTP_GRE_FLAG_A) - -#define PPTP_HEADER_OVERHEAD (2+sizeof(struct pptp_gre_header)) -struct pptp_gre_header { - u8 flags; - u8 ver; - __be16 protocol; - __be16 payload_len; - __be16 call_id; - __be32 seq; - __be32 ack; -} __packed; - static struct pppox_sock *lookup_chan(u16 call_id, __be32 s_addr) { struct pppox_sock *sock; @@ -240,16 +206,14 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb) skb_push(skb, header_len); hdr = (struct pptp_gre_header *)(skb->data); - hdr->flags = PPTP_GRE_FLAG_K; - hdr->ver = PPTP_GRE_VER; - hdr->protocol = htons(PPTP_GRE_PROTO); - hdr->call_id = htons(opt->dst_addr.call_id); + hdr->gre_hd.flags = GRE_KEY | GRE_VERSION_1 | GRE_SEQ; + hdr->gre_hd.protocol = GRE_PROTO_PPP; + hdr->call_id = htons(opt->dst_addr.call_id); - hdr->flags |= PPTP_GRE_FLAG_S; - hdr->seq = htonl(++opt->seq_sent); + hdr->seq = htonl(++opt->seq_sent); if (opt->ack_sent != seq_recv) { /* send ack with this message */ - hdr->ver |= PPTP_GRE_FLAG_A; + hdr->gre_hd.flags |= GRE_ACK; hdr->ack = htonl(seq_recv); opt->ack_sent = seq_recv; } @@ -312,7 +276,7 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb) headersize = sizeof(*header); /* test if acknowledgement present */ - if (PPTP_GRE_IS_A(header->ver)) { + if (GRE_IS_ACK(header->gre_hd.flags)) { __u32 ack; if (!pskb_may_pull(skb, headersize)) @@ -320,7 +284,7 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb) header = (struct pptp_gre_header *)(skb->data); /* ack in different place if S = 0 */ - ack = PPTP_GRE_IS_S(header->flags) ? header->ack : header->seq; + ack = GRE_IS_SEQ(header->gre_hd.flags) ? 
header->ack : header->seq; ack = ntohl(ack); @@ -333,7 +297,7 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb) headersize -= sizeof(header->ack); } /* test if payload present */ - if (!PPTP_GRE_IS_S(header->flags)) + if (!GRE_IS_SEQ(header->gre_hd.flags)) goto drop; payload_len = ntohs(header->payload_len); @@ -394,11 +358,11 @@ static int pptp_rcv(struct sk_buff *skb) header = (struct pptp_gre_header *)skb->data; - if (ntohs(header->protocol) != PPTP_GRE_PROTO || /* PPTP-GRE protocol for PPTP */ - PPTP_GRE_IS_C(header->flags) || /* flag C should be clear */ - PPTP_GRE_IS_R(header->flags) || /* flag R should be clear */ - !PPTP_GRE_IS_K(header->flags) || /* flag K should be set */ - (header->flags&0xF) != 0) /* routing and recursion ctrl = 0 */ + if (header->gre_hd.protocol != GRE_PROTO_PPP || /* PPTP-GRE protocol for PPTP */ + GRE_IS_CSUM(header->gre_hd.flags) || /* flag CSUM should be clear */ + GRE_IS_ROUTING(header->gre_hd.flags) || /* flag ROUTING should be clear */ + !GRE_IS_KEY(header->gre_hd.flags) || /* flag KEY should be set */ + (header->gre_hd.flags & GRE_FLAGS)) /* flag Recursion Ctrl should be clear */ /* if invalid, discard this packet */ goto drop; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 6f9df375c5d4..8093e39ae263 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -731,14 +731,9 @@ static int update_filter(struct tap_filter *filter, void __user *arg) } alen = ETH_ALEN * uf.count; - addr = kmalloc(alen, GFP_KERNEL); - if (!addr) - return -ENOMEM; - - if (copy_from_user(addr, arg + sizeof(uf), alen)) { - err = -EFAULT; - goto done; - } + addr = memdup_user(arg + sizeof(uf), alen); + if (IS_ERR(addr)) + return PTR_ERR(addr); /* The filter is updated without holding any locks. Which is * perfectly safe. 
We disable it first and in the worst @@ -758,7 +753,7 @@ static int update_filter(struct tap_filter *filter, void __user *arg) for (; n < uf.count; n++) { if (!is_multicast_ether_addr(addr[n].u)) { err = 0; /* no filter */ - goto done; + goto free_addr; } addr_hash_set(filter->mask, addr[n].u); } @@ -774,8 +769,7 @@ static int update_filter(struct tap_filter *filter, void __user *arg) /* Return the number of exact filters */ err = nexact; - -done: +free_addr: kfree(addr); return err; } diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 4b4458616693..c5544d36c54f 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -2300,10 +2300,8 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, serial->rx_data_length = rx_size; for (i = 0; i < serial->num_rx_urbs; i++) { serial->rx_urb[i] = usb_alloc_urb(0, GFP_KERNEL); - if (!serial->rx_urb[i]) { - dev_err(dev, "Could not allocate urb?\n"); + if (!serial->rx_urb[i]) goto exit; - } serial->rx_urb[i]->transfer_buffer = NULL; serial->rx_urb[i]->transfer_buffer_length = 0; serial->rx_data[i] = kzalloc(serial->rx_data_length, @@ -2314,10 +2312,8 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, /* TX, allocate urb and initialize */ serial->tx_urb = usb_alloc_urb(0, GFP_KERNEL); - if (!serial->tx_urb) { - dev_err(dev, "Could not allocate urb?\n"); + if (!serial->tx_urb) goto exit; - } serial->tx_urb->transfer_buffer = NULL; serial->tx_urb->transfer_buffer_length = 0; /* prepare our TX buffer */ @@ -2555,20 +2551,16 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface, /* start allocating */ for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) { hso_net->mux_bulk_rx_urb_pool[i] = usb_alloc_urb(0, GFP_KERNEL); - if (!hso_net->mux_bulk_rx_urb_pool[i]) { - dev_err(&interface->dev, "Could not allocate rx urb\n"); + if (!hso_net->mux_bulk_rx_urb_pool[i]) goto exit; - } hso_net->mux_bulk_rx_buf_pool[i] = kzalloc(MUX_BULK_RX_BUF_SIZE, GFP_KERNEL); if (!hso_net->mux_bulk_rx_buf_pool[i]) goto exit; } hso_net->mux_bulk_tx_urb = usb_alloc_urb(0, GFP_KERNEL); - if (!hso_net->mux_bulk_tx_urb) { - dev_err(&interface->dev, "Could not allocate tx urb\n"); + if (!hso_net->mux_bulk_tx_urb) goto exit; - } hso_net->mux_bulk_tx_buf = kzalloc(MUX_BULK_TX_BUF_SIZE, GFP_KERNEL); if (!hso_net->mux_bulk_tx_buf) goto exit; @@ -2787,10 +2779,8 @@ struct hso_shared_int *hso_create_shared_int(struct usb_interface *interface) } mux->shared_intr_urb = usb_alloc_urb(0, GFP_KERNEL); - if (!mux->shared_intr_urb) { - dev_err(&interface->dev, "Could not allocate intr urb?\n"); + if (!mux->shared_intr_urb) goto exit; - } mux->shared_intr_buf = kzalloc(le16_to_cpu(mux->intr_endp->wMaxPacketSize), GFP_KERNEL); diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 6a9d474b08b2..432b8a3ae354 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -3002,10 +3002,8 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev) gso_skb: urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!urb) { - netif_dbg(dev, tx_err, dev->net, "no urb\n"); + if (!urb) goto drop; - } entry = (struct skb_data *)skb->cb; entry->urb = urb; diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 3bfb59209326..d5071e364d40 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -2062,11 +2062,8 @@ int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype, cmd, reqtype, value, index, size); urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!urb) { - netdev_err(dev->net, 
"Error allocating URB in" - " %s!\n", __func__); + if (!urb) goto fail; - } if (data) { buf = kmemdup(data, size, GFP_ATOMIC); diff --git a/drivers/net/veth.c b/drivers/net/veth.c index f37a6e61d4ad..4bda502254fb 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -313,7 +313,7 @@ static const struct net_device_ops veth_netdev_ops = { }; #define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \ - NETIF_F_RXCSUM | NETIF_F_HIGHDMA | \ + NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \ NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \ NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX ) diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 4244b9d4418e..2fd93b4c759a 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1641,7 +1641,7 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, } } -void +static void vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter) { int i; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index c0dda6fc0921..3f7e0d2dd21a 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -27,7 +27,6 @@ #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/vxlan.h> -#include <net/protocol.h> #if IS_ENABLED(CONFIG_IPV6) #include <net/ip6_tunnel.h> diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 6f044450b702..5fbf83d5aa57 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -162,7 +162,7 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) ALIGNMENT_OF_UCC_HDLC_PRAM); if (priv->ucc_pram_offset < 0) { - dev_err(priv->dev, "Can not allocate MURAM for hdlc prameter.\n"); + dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n"); ret = -ENOMEM; goto free_tx_bd; } diff --git a/drivers/net/wimax/i2400m/usb-notif.c b/drivers/net/wimax/i2400m/usb-notif.c index fc1355d98bc6..5d429f816125 100644 --- a/drivers/net/wimax/i2400m/usb-notif.c +++ b/drivers/net/wimax/i2400m/usb-notif.c @@ -206,7 +206,6 @@ int i2400mu_notification_setup(struct i2400mu *i2400mu) i2400mu->notif_urb = usb_alloc_urb(0, GFP_KERNEL); if (!i2400mu->notif_urb) { ret = -ENOMEM; - dev_err(dev, "notification: cannot allocate URB\n"); goto error_alloc_urb; } epd = usb_get_epd(i2400mu->usb_iface, diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index 8aded24bcdf4..7a60d2e652da 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -706,10 +706,8 @@ static int ar5523_alloc_rx_bufs(struct ar5523 *ar) data->ar = ar; data->urb = usb_alloc_urb(0, GFP_KERNEL); - if (!data->urb) { - ar5523_err(ar, "could not allocate rx data urb\n"); + if (!data->urb) goto err; - } list_add_tail(&data->list, &ar->rx_data_free); atomic_inc(&ar->rx_data_free_cnt); } @@ -824,7 +822,6 @@ static void ar5523_tx_work_locked(struct ar5523 *ar) urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { - ar5523_err(ar, "Failed to allocate TX urb\n"); ieee80211_free_txskb(ar->hw, skb); continue; } @@ -949,10 +946,8 @@ static int ar5523_alloc_tx_cmd(struct ar5523 *ar) init_completion(&cmd->done); cmd->urb_tx = usb_alloc_urb(0, GFP_KERNEL); - if (!cmd->urb_tx) { - ar5523_err(ar, "could not allocate urb\n"); + if (!cmd->urb_tx) return -ENOMEM; - } cmd->buf_tx = usb_alloc_coherent(ar->dev, AR5523_MAX_TXCMDSZ, GFP_KERNEL, &cmd->urb_tx->transfer_dma); diff --git 
a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index 98b15a9a2779..fa26619a7945 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -1099,15 +1099,11 @@ struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo, devinfo->tx_freecount = ntxq; devinfo->ctl_urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!devinfo->ctl_urb) { - brcmf_err("usb_alloc_urb (ctl) failed\n"); + if (!devinfo->ctl_urb) goto error; - } devinfo->bulk_urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!devinfo->bulk_urb) { - brcmf_err("usb_alloc_urb (bulk) failed\n"); + if (!devinfo->bulk_urb) goto error; - } return &devinfo->bus_pub; diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c index 56f109bc8394..bca6935a94db 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c @@ -1613,10 +1613,8 @@ static int ezusb_probe(struct usb_interface *interface, } upriv->read_urb = usb_alloc_urb(0, GFP_KERNEL); - if (!upriv->read_urb) { - err("No free urbs available"); + if (!upriv->read_urb) goto error; - } if (le16_to_cpu(ep->wMaxPacketSize) != 64) pr_warn("bulk in: wMaxPacketSize!= 64\n"); if (ep->bEndpointAddress != (2 | USB_DIR_IN)) diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c index 799a2efe5793..e0ade40d9497 100644 --- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c +++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c @@ -198,22 +198,16 @@ static int if_usb_probe(struct usb_interface *intf, } cardp->rx_urb = usb_alloc_urb(0, GFP_KERNEL); - if (!cardp->rx_urb) { - lbtf_deb_usbd(&udev->dev, "Rx URB allocation failed\n"); + if (!cardp->rx_urb) goto dealloc; - } cardp->tx_urb = usb_alloc_urb(0, GFP_KERNEL); - if (!cardp->tx_urb) { - lbtf_deb_usbd(&udev->dev, "Tx URB allocation failed\n"); + if (!cardp->tx_urb) goto dealloc; - } cardp->cmd_urb = usb_alloc_urb(0, GFP_KERNEL); - if (!cardp->cmd_urb) { - lbtf_deb_usbd(&udev->dev, "Cmd URB allocation failed\n"); + if (!cardp->cmd_urb) goto dealloc; - } cardp->ep_out_buf = kmalloc(MRVDRV_ETH_TX_PACKET_BUFFER_SIZE, GFP_KERNEL); diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index 0857575c5c39..3bd04f52f369 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -657,11 +657,8 @@ static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter) card->tx_cmd.ep = card->tx_cmd_ep; card->tx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL); - if (!card->tx_cmd.urb) { - mwifiex_dbg(adapter, ERROR, - "tx_cmd.urb allocation failed\n"); + if (!card->tx_cmd.urb) return -ENOMEM; - } for (i = 0; i < MWIFIEX_TX_DATA_PORT; i++) { port = &card->port[i]; @@ -677,11 +674,8 @@ static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter) port->tx_data_list[j].ep = port->tx_data_ep; port->tx_data_list[j].urb = usb_alloc_urb(0, GFP_KERNEL); - if (!port->tx_data_list[j].urb) { - mwifiex_dbg(adapter, ERROR, - "urb allocation failed\n"); + if (!port->tx_data_list[j].urb) return -ENOMEM; - } } } @@ -697,10 +691,8 @@ static int mwifiex_usb_rx_init(struct mwifiex_adapter *adapter) card->rx_cmd.ep = card->rx_cmd_ep; card->rx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL); - if (!card->rx_cmd.urb) { - mwifiex_dbg(adapter, ERROR, "rx_cmd.urb allocation failed\n"); + if 
(!card->rx_cmd.urb) return -ENOMEM; - } card->rx_cmd.skb = dev_alloc_skb(MWIFIEX_RX_CMD_BUF_SIZE); if (!card->rx_cmd.skb) @@ -714,11 +706,8 @@ static int mwifiex_usb_rx_init(struct mwifiex_adapter *adapter) card->rx_data_list[i].ep = card->rx_data_ep; card->rx_data_list[i].urb = usb_alloc_urb(0, GFP_KERNEL); - if (!card->rx_data_list[i].urb) { - mwifiex_dbg(adapter, ERROR, - "rx_data_list[] urb allocation failed\n"); + if (!card->rx_data_list[i].urb) return -1; - } if (mwifiex_usb_submit_rx_urb(&card->rx_data_list[i], MWIFIEX_RX_DATA_BUF_SIZE)) return -1; diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 41617b7b0822..32aa5c1d070a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -739,11 +739,8 @@ static int _rtl_usb_receive(struct ieee80211_hw *hw) for (i = 0; i < rtlusb->rx_urb_num; i++) { err = -ENOMEM; urb = usb_alloc_urb(0, GFP_KERNEL); - if (!urb) { - RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, - "Failed to alloc URB!!\n"); + if (!urb) goto err_out; - } err = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL); if (err < 0) { @@ -907,15 +904,12 @@ static void _rtl_tx_complete(struct urb *urb) static struct urb *_rtl_usb_tx_urb_setup(struct ieee80211_hw *hw, struct sk_buff *skb, u32 ep_num) { - struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); struct urb *_urb; WARN_ON(NULL == skb); _urb = usb_alloc_urb(0, GFP_ATOMIC); if (!_urb) { - RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, - "Can't allocate URB for bulk out!\n"); kfree_skb(skb); return NULL; } diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 84d6cbdd11b2..3a562683603c 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -412,4 +412,8 @@ u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len, void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb); +#ifdef CONFIG_DEBUG_FS +void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m); +#endif + #endif /* __XEN_NETBACK__COMMON_H__ */ diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c index fb87cb39a56b..e8c5dddc54ba 100644 --- a/drivers/net/xen-netback/hash.c +++ b/drivers/net/xen-netback/hash.c @@ -32,15 +32,6 @@ #include <linux/vmalloc.h> #include <linux/rculist.h> -static void xenvif_del_hash(struct rcu_head *rcu) -{ - struct xenvif_hash_cache_entry *entry; - - entry = container_of(rcu, struct xenvif_hash_cache_entry, rcu); - - kfree(entry); -} - static void xenvif_add_hash(struct xenvif *vif, const u8 *tag, unsigned int len, u32 val) { @@ -76,7 +67,7 @@ static void xenvif_add_hash(struct xenvif *vif, const u8 *tag, if (++vif->hash.cache.count > xenvif_hash_cache_size) { list_del_rcu(&oldest->link); vif->hash.cache.count--; - call_rcu(&oldest->rcu, xenvif_del_hash); + kfree_rcu(oldest, rcu); } } @@ -114,7 +105,7 @@ static void xenvif_flush_hash(struct xenvif *vif) list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) { list_del_rcu(&entry->link); vif->hash.cache.count--; - call_rcu(&entry->rcu, xenvif_del_hash); + kfree_rcu(entry, rcu); } spin_unlock_irqrestore(&vif->hash.cache.lock, flags); @@ -369,6 +360,74 @@ u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len, return XEN_NETIF_CTRL_STATUS_SUCCESS; } +#ifdef CONFIG_DEBUG_FS +void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m) +{ + unsigned int i; + + switch (vif->hash.alg) { + case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ: + 
seq_puts(m, "Hash Algorithm: TOEPLITZ\n"); + break; + + case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE: + seq_puts(m, "Hash Algorithm: NONE\n"); + /* FALLTHRU */ + default: + return; + } + + if (vif->hash.flags) { + seq_puts(m, "\nHash Flags:\n"); + + if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4) + seq_puts(m, "- IPv4\n"); + if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP) + seq_puts(m, "- IPv4 + TCP\n"); + if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6) + seq_puts(m, "- IPv6\n"); + if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP) + seq_puts(m, "- IPv6 + TCP\n"); + } + + seq_puts(m, "\nHash Key:\n"); + + for (i = 0; i < XEN_NETBK_MAX_HASH_KEY_SIZE; ) { + unsigned int j, n; + + n = 8; + if (i + n >= XEN_NETBK_MAX_HASH_KEY_SIZE) + n = XEN_NETBK_MAX_HASH_KEY_SIZE - i; + + seq_printf(m, "[%2u - %2u]: ", i, i + n - 1); + + for (j = 0; j < n; j++, i++) + seq_printf(m, "%02x ", vif->hash.key[i]); + + seq_puts(m, "\n"); + } + + if (vif->hash.size != 0) { + seq_puts(m, "\nHash Mapping:\n"); + + for (i = 0; i < vif->hash.size; ) { + unsigned int j, n; + + n = 8; + if (i + n >= vif->hash.size) + n = vif->hash.size - i; + + seq_printf(m, "[%4u - %4u]: ", i, i + n - 1); + + for (j = 0; j < n; j++, i++) + seq_printf(m, "%4u ", vif->hash.mapping[i]); + + seq_puts(m, "\n"); + } + } +} +#endif /* CONFIG_DEBUG_FS */ + void xenvif_init_hash(struct xenvif *vif) { if (xenvif_hash_cache_size == 0) diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 6a31f2610c23..bacf6e0c12b9 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -165,7 +165,7 @@ xenvif_write_io_ring(struct file *filp, const char __user *buf, size_t count, return count; } -static int xenvif_dump_open(struct inode *inode, struct file *filp) +static int xenvif_io_ring_open(struct inode *inode, struct file *filp) { int ret; void *queue = NULL; @@ -179,13 +179,35 @@ static int xenvif_dump_open(struct inode *inode, struct file *filp) static const struct file_operations xenvif_dbg_io_ring_ops_fops = { .owner = THIS_MODULE, - .open = xenvif_dump_open, + .open = xenvif_io_ring_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = xenvif_write_io_ring, }; +static int xenvif_read_ctrl(struct seq_file *m, void *v) +{ + struct xenvif *vif = m->private; + + xenvif_dump_hash_info(vif, m); + + return 0; +} + +static int xenvif_ctrl_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, xenvif_read_ctrl, inode->i_private); +} + +static const struct file_operations xenvif_dbg_ctrl_ops_fops = { + .owner = THIS_MODULE, + .open = xenvif_ctrl_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + static void xenvif_debugfs_addif(struct xenvif *vif) { struct dentry *pfile; @@ -210,6 +232,17 @@ static void xenvif_debugfs_addif(struct xenvif *vif) pr_warn("Creation of io_ring file returned %ld!\n", PTR_ERR(pfile)); } + + if (vif->ctrl_task) { + pfile = debugfs_create_file("ctrl", + S_IRUSR, + vif->xenvif_dbg_root, + vif, + &xenvif_dbg_ctrl_ops_fops); + if (IS_ERR_OR_NULL(pfile)) + pr_warn("Creation of ctrl file returned %ld!\n", + PTR_ERR(pfile)); + } } else netdev_warn(vif->dev, "Creation of vif debugfs dir returned %ld!\n", diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c index e13a4ab46977..1fde9c824948 100644 --- a/drivers/staging/octeon/ethernet-mdio.c +++ b/drivers/staging/octeon/ethernet-mdio.c @@ -34,48 +34,23 @@ static void cvm_oct_get_drvinfo(struct 
net_device *dev, strlcpy(info->bus_info, "Builtin", sizeof(info->bus_info)); } -static int cvm_oct_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct octeon_ethernet *priv = netdev_priv(dev); - - if (priv->phydev) - return phy_ethtool_gset(priv->phydev, cmd); - - return -EINVAL; -} - -static int cvm_oct_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct octeon_ethernet *priv = netdev_priv(dev); - - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - if (priv->phydev) - return phy_ethtool_sset(priv->phydev, cmd); - - return -EINVAL; -} - static int cvm_oct_nway_reset(struct net_device *dev) { - struct octeon_ethernet *priv = netdev_priv(dev); - if (!capable(CAP_NET_ADMIN)) return -EPERM; - if (priv->phydev) - return phy_start_aneg(priv->phydev); + if (dev->phydev) + return phy_start_aneg(dev->phydev); return -EINVAL; } const struct ethtool_ops cvm_oct_ethtool_ops = { .get_drvinfo = cvm_oct_get_drvinfo, - .get_settings = cvm_oct_get_settings, - .set_settings = cvm_oct_set_settings, .nway_reset = cvm_oct_nway_reset, .get_link = ethtool_op_get_link, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; /** @@ -88,15 +63,13 @@ const struct ethtool_ops cvm_oct_ethtool_ops = { */ int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct octeon_ethernet *priv = netdev_priv(dev); - if (!netif_running(dev)) return -EINVAL; - if (!priv->phydev) + if (!dev->phydev) return -EINVAL; - return phy_mii_ioctl(priv->phydev, rq, cmd); + return phy_mii_ioctl(dev->phydev, rq, cmd); } void cvm_oct_note_carrier(struct octeon_ethernet *priv, @@ -119,9 +92,9 @@ void cvm_oct_adjust_link(struct net_device *dev) cvmx_helper_link_info_t link_info; link_info.u64 = 0; - link_info.s.link_up = priv->phydev->link ? 1 : 0; - link_info.s.full_duplex = priv->phydev->duplex ? 1 : 0; - link_info.s.speed = priv->phydev->speed; + link_info.s.link_up = dev->phydev->link ? 1 : 0; + link_info.s.full_duplex = dev->phydev->duplex ? 
1 : 0; + link_info.s.speed = dev->phydev->speed; priv->link_info = link_info.u64; /* @@ -130,8 +103,8 @@ void cvm_oct_adjust_link(struct net_device *dev) if (priv->poll) priv->poll(dev); - if (priv->last_link != priv->phydev->link) { - priv->last_link = priv->phydev->link; + if (priv->last_link != dev->phydev->link) { + priv->last_link = dev->phydev->link; cvmx_helper_link_set(priv->port, link_info); cvm_oct_note_carrier(priv, link_info); } @@ -151,9 +124,8 @@ int cvm_oct_common_stop(struct net_device *dev) priv->poll = NULL; - if (priv->phydev) - phy_disconnect(priv->phydev); - priv->phydev = NULL; + if (dev->phydev) + phy_disconnect(dev->phydev); if (priv->last_link) { link_info.u64 = 0; @@ -176,6 +148,7 @@ int cvm_oct_phy_setup_device(struct net_device *dev) { struct octeon_ethernet *priv = netdev_priv(dev); struct device_node *phy_node; + struct phy_device *phydev = NULL; if (!priv->of_node) goto no_phy; @@ -193,14 +166,14 @@ int cvm_oct_phy_setup_device(struct net_device *dev) if (!phy_node) goto no_phy; - priv->phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0, - PHY_INTERFACE_MODE_GMII); + phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0, + PHY_INTERFACE_MODE_GMII); - if (!priv->phydev) + if (!phydev) return -ENODEV; priv->last_link = 0; - phy_start_aneg(priv->phydev); + phy_start_aneg(phydev); return 0; no_phy: diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c index 91b148cfcbdb..48846dffc8e1 100644 --- a/drivers/staging/octeon/ethernet-rgmii.c +++ b/drivers/staging/octeon/ethernet-rgmii.c @@ -145,7 +145,7 @@ int cvm_oct_rgmii_open(struct net_device *dev) if (ret) return ret; - if (priv->phydev) { + if (dev->phydev) { /* * In phydev mode, we need still periodic polling for the * preamble error checking, and we also need to call this diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c index e9cd5f242921..45d576361319 100644 --- a/drivers/staging/octeon/ethernet.c +++ b/drivers/staging/octeon/ethernet.c @@ -457,10 +457,8 @@ int cvm_oct_common_init(struct net_device *dev) void cvm_oct_common_uninit(struct net_device *dev) { - struct octeon_ethernet *priv = netdev_priv(dev); - - if (priv->phydev) - phy_disconnect(priv->phydev); + if (dev->phydev) + phy_disconnect(dev->phydev); } int cvm_oct_common_open(struct net_device *dev, @@ -484,10 +482,10 @@ int cvm_oct_common_open(struct net_device *dev, if (octeon_is_simulation()) return 0; - if (priv->phydev) { - int r = phy_read_status(priv->phydev); + if (dev->phydev) { + int r = phy_read_status(dev->phydev); - if (r == 0 && priv->phydev->link == 0) + if (r == 0 && dev->phydev->link == 0) netif_carrier_off(dev); cvm_oct_adjust_link(dev); } else { diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h index 6275c15e0035..d533aefe085a 100644 --- a/drivers/staging/octeon/octeon-ethernet.h +++ b/drivers/staging/octeon/octeon-ethernet.h @@ -40,7 +40,6 @@ struct octeon_ethernet { struct sk_buff_head tx_free_list[16]; /* Device statistics */ struct net_device_stats stats; - struct phy_device *phydev; unsigned int last_speed; unsigned int last_link; /* Last negotiated link state */ diff --git a/fs/proc/generic.c b/fs/proc/generic.c index c633476616e0..bca66d83a765 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -390,6 +390,8 @@ static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent, atomic_set(&ent->count, 1); spin_lock_init(&ent->pde_unload_lock); 
INIT_LIST_HEAD(&ent->pde_openers); + proc_set_user(ent, (*parent)->uid, (*parent)->gid); + out: return ent; } diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c index c8bbc68cdb05..7ae6b1da7cab 100644 --- a/fs/proc/proc_net.c +++ b/fs/proc/proc_net.c @@ -21,6 +21,7 @@ #include <linux/bitops.h> #include <linux/mount.h> #include <linux/nsproxy.h> +#include <linux/uidgid.h> #include <net/net_namespace.h> #include <linux/seq_file.h> @@ -185,6 +186,8 @@ const struct file_operations proc_net_operations = { static __net_init int proc_net_ns_init(struct net *net) { struct proc_dir_entry *netd, *net_statd; + kuid_t uid; + kgid_t gid; int err; err = -ENOMEM; @@ -199,6 +202,16 @@ static __net_init int proc_net_ns_init(struct net *net) netd->parent = &proc_root; memcpy(netd->name, "net", 4); + uid = make_kuid(net->user_ns, 0); + if (!uid_valid(uid)) + uid = netd->uid; + + gid = make_kgid(net->user_ns, 0); + if (!gid_valid(gid)) + gid = netd->gid; + + proc_set_user(netd, uid, gid); + err = -EEXIST; net_statd = proc_net_mkdir(net, "stat", netd); if (!net_statd) diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 1b93650dda2f..2ed3d71d4767 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -430,6 +430,7 @@ static int sysctl_perm(struct ctl_table_header *head, struct ctl_table *table, i static struct inode *proc_sys_make_inode(struct super_block *sb, struct ctl_table_header *head, struct ctl_table *table) { + struct ctl_table_root *root = head->root; struct inode *inode; struct proc_inode *ei; @@ -457,6 +458,10 @@ static struct inode *proc_sys_make_inode(struct super_block *sb, if (is_empty_dir(head)) make_empty_dir_inode(inode); } + + if (root->set_ownership) + root->set_ownership(head, table, &inode->i_uid, &inode->i_gid); + out: return inode; } diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 3db25df396cb..8eeedb2db924 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -205,6 +205,9 @@ struct bcma_host_ops { #define BCMA_PKG_ID_BCM4709 0 #define BCMA_CHIP_ID_BCM47094 53030 #define BCMA_CHIP_ID_BCM53018 53018 +#define BCMA_CHIP_ID_BCM53573 53573 +#define BCMA_PKG_ID_BCM53573 0 +#define BCMA_PKG_ID_BCM47189 1 /* Board types (on PCI usually equals to the subsystem dev id) */ /* BCM4313 */ diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h index ebd5c1fcdea4..c607fce6aadd 100644 --- a/include/linux/bcma/bcma_regs.h +++ b/include/linux/bcma/bcma_regs.h @@ -23,6 +23,7 @@ #define BCMA_CLKCTLST_4328A0_HAVEALP 0x00020000 /* 4328a0 has reversed bits */ /* Agent registers (common for every core) */ +#define BCMA_OOB_SEL_OUT_A30 0x0100 #define BCMA_IOCTL 0x0408 /* IO control */ #define BCMA_IOCTL_CLK 0x0001 #define BCMA_IOCTL_FGC 0x0002 diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 984f73b719a9..a4414a11eea7 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -497,6 +497,23 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp, return cgrp->ancestor_ids[ancestor->level] == ancestor->id; } +/** + * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry + * @task: the task to be tested + * @ancestor: possible ancestor of @task's cgroup + * + * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor. + * It follows all the same rules as cgroup_is_descendant, and only applies + * to the default hierarchy. 
+ */ +static inline bool task_under_cgroup_hierarchy(struct task_struct *task, + struct cgroup *ancestor) +{ + struct css_set *cset = task_css_set(task); + + return cgroup_is_descendant(cset->dfl_cgrp, ancestor); +} + /* no synchronization, the result can only be used as a hint */ static inline bool cgroup_is_populated(struct cgroup *cgrp) { @@ -557,6 +574,7 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp) #else /* !CONFIG_CGROUPS */ struct cgroup_subsys_state; +struct cgroup; static inline void css_put(struct cgroup_subsys_state *css) {} static inline int cgroup_attach_task_all(struct task_struct *from, @@ -574,6 +592,11 @@ static inline void cgroup_free(struct task_struct *p) {} static inline int cgroup_init_early(void) { return 0; } static inline int cgroup_init(void) { return 0; } +static inline bool task_under_cgroup_hierarchy(struct task_struct *task, + struct cgroup *ancestor) +{ + return true; +} #endif /* !CONFIG_CGROUPS */ /* diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index b10954a66939..b01c8c3dd531 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -1114,6 +1114,13 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver, const char *mod_name); void vmbus_driver_unregister(struct hv_driver *hv_driver); +static inline const char *vmbus_dev_name(const struct hv_device *device_obj) +{ + const struct kobject *kobj = &device_obj->device.kobj; + + return kobj->name; +} + void vmbus_hvsock_device_unregister(struct vmbus_channel *channel); int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, @@ -1422,88 +1429,4 @@ static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi) return false; } -/* - * An API to support in-place processing of incoming VMBUS packets. - */ -#define VMBUS_PKT_TRAILER 8 - -static inline struct vmpacket_descriptor * -get_next_pkt_raw(struct vmbus_channel *channel) -{ - struct hv_ring_buffer_info *ring_info = &channel->inbound; - u32 read_loc = ring_info->priv_read_index; - void *ring_buffer = hv_get_ring_buffer(ring_info); - struct vmpacket_descriptor *cur_desc; - u32 packetlen; - u32 dsize = ring_info->ring_datasize; - u32 delta = read_loc - ring_info->ring_buffer->read_index; - u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta); - - if (bytes_avail_toread < sizeof(struct vmpacket_descriptor)) - return NULL; - - if ((read_loc + sizeof(*cur_desc)) > dsize) - return NULL; - - cur_desc = ring_buffer + read_loc; - packetlen = cur_desc->len8 << 3; - - /* - * If the packet under consideration is wrapping around, - * return failure. - */ - if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1)) - return NULL; - - return cur_desc; -} - -/* - * A helper function to step through packets "in-place" - * This API is to be called after each successful call - * get_next_pkt_raw(). - */ -static inline void put_pkt_raw(struct vmbus_channel *channel, - struct vmpacket_descriptor *desc) -{ - struct hv_ring_buffer_info *ring_info = &channel->inbound; - u32 read_loc = ring_info->priv_read_index; - u32 packetlen = desc->len8 << 3; - u32 dsize = ring_info->ring_datasize; - - if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize) - BUG(); - /* - * Include the packet trailer. - */ - ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER; -} - -/* - * This call commits the read index and potentially signals the host. 
- * Here is the pattern for using the "in-place" consumption APIs: - * - * while (get_next_pkt_raw() { - * process the packet "in-place"; - * put_pkt_raw(); - * } - * if (packets processed in place) - * commit_rd_index(); - */ -static inline void commit_rd_index(struct vmbus_channel *channel) -{ - struct hv_ring_buffer_info *ring_info = &channel->inbound; - /* - * Make sure all reads are done before we update the read index since - * the writer may start writing to the read area once the read index - * is updated. - */ - virt_rmb(); - ring_info->ring_buffer->read_index = ring_info->priv_read_index; - - if (hv_need_to_signal_on_read(ring_info)) - vmbus_set_event(channel); -} - - #endif /* _HYPERV_H */ diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index a5f6ce6b578c..49d4aef1f789 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -81,6 +81,7 @@ static inline bool is_vlan_dev(const struct net_device *dev) #define skb_vlan_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT) #define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT) #define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK) +#define skb_vlan_tag_get_prio(__skb) ((__skb)->vlan_tci & VLAN_PRIO_MASK) /** * struct vlan_pcpu_stats - VLAN percpu rx/tx stats diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index 2566f6d6444f..7c3c0d3aca37 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h @@ -170,12 +170,12 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd, int mlx5_init_cq_table(struct mlx5_core_dev *dev); void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev); int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, - struct mlx5_create_cq_mbox_in *in, int inlen); + u32 *in, int inlen); int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, - struct mlx5_query_cq_mbox_out *out); + u32 *out, int outlen); int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, - struct mlx5_modify_cq_mbox_in *in, int in_sz); + u32 *in, int inlen); int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u16 cq_period, u16 cq_max_count); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 0b6d15cddb2f..77c141797152 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -198,19 +198,6 @@ enum { }; enum { - MLX5_ACCESS_MODE_PA = 0, - MLX5_ACCESS_MODE_MTT = 1, - MLX5_ACCESS_MODE_KLM = 2 -}; - -enum { - MLX5_MKEY_REMOTE_INVAL = 1 << 24, - MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29, - MLX5_MKEY_BSF_EN = 1 << 30, - MLX5_MKEY_LEN64 = 1 << 31, -}; - -enum { MLX5_EN_RD = (u64)1, MLX5_EN_WR = (u64)2 }; @@ -411,33 +398,6 @@ enum { MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16 }; -struct mlx5_inbox_hdr { - __be16 opcode; - u8 rsvd[4]; - __be16 opmod; -}; - -struct mlx5_outbox_hdr { - u8 status; - u8 rsvd[3]; - __be32 syndrome; -}; - -struct mlx5_cmd_query_adapter_mbox_in { - struct mlx5_inbox_hdr hdr; - u8 rsvd[8]; -}; - -struct mlx5_cmd_query_adapter_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd0[24]; - u8 intapin; - u8 rsvd1[13]; - __be16 vsd_vendor_id; - u8 vsd[208]; - u8 vsd_psid[16]; -}; - enum mlx5_odp_transport_cap_bits { MLX5_ODP_SUPPORT_SEND = 1 << 31, MLX5_ODP_SUPPORT_RECV = 1 << 30, @@ -455,30 +415,6 @@ struct mlx5_odp_caps { char reserved2[0xe4]; }; -struct mlx5_cmd_init_hca_mbox_in { - struct mlx5_inbox_hdr hdr; - u8 rsvd0[2]; - __be16 
profile; - u8 rsvd1[4]; -}; - -struct mlx5_cmd_init_hca_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd[8]; -}; - -struct mlx5_cmd_teardown_hca_mbox_in { - struct mlx5_inbox_hdr hdr; - u8 rsvd0[2]; - __be16 profile; - u8 rsvd1[4]; -}; - -struct mlx5_cmd_teardown_hca_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd[8]; -}; - struct mlx5_cmd_layout { u8 type; u8 rsvd0[3]; @@ -494,7 +430,6 @@ struct mlx5_cmd_layout { u8 status_own; }; - struct health_buffer { __be32 assert_var[5]; __be32 rsvd0[3]; @@ -856,245 +791,15 @@ struct mlx5_cqe128 { struct mlx5_cqe64 cqe64; }; -struct mlx5_srq_ctx { - u8 state_log_sz; - u8 rsvd0[3]; - __be32 flags_xrcd; - __be32 pgoff_cqn; - u8 rsvd1[4]; - u8 log_pg_sz; - u8 rsvd2[7]; - __be32 pd; - __be16 lwm; - __be16 wqe_cnt; - u8 rsvd3[8]; - __be64 db_record; -}; - -struct mlx5_create_srq_mbox_in { - struct mlx5_inbox_hdr hdr; - __be32 input_srqn; - u8 rsvd0[4]; - struct mlx5_srq_ctx ctx; - u8 rsvd1[208]; - __be64 pas[0]; -}; - -struct mlx5_create_srq_mbox_out { - struct mlx5_outbox_hdr hdr; - __be32 srqn; - u8 rsvd[4]; -}; - -struct mlx5_destroy_srq_mbox_in { - struct mlx5_inbox_hdr hdr; - __be32 srqn; - u8 rsvd[4]; -}; - -struct mlx5_destroy_srq_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd[8]; -}; - -struct mlx5_query_srq_mbox_in { - struct mlx5_inbox_hdr hdr; - __be32 srqn; - u8 rsvd0[4]; -}; - -struct mlx5_query_srq_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd0[8]; - struct mlx5_srq_ctx ctx; - u8 rsvd1[32]; - __be64 pas[0]; -}; - -struct mlx5_arm_srq_mbox_in { - struct mlx5_inbox_hdr hdr; - __be32 srqn; - __be16 rsvd; - __be16 lwm; -}; - -struct mlx5_arm_srq_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd[8]; -}; - -struct mlx5_cq_context { - u8 status; - u8 cqe_sz_flags; - u8 st; - u8 rsvd3; - u8 rsvd4[6]; - __be16 page_offset; - __be32 log_sz_usr_page; - __be16 cq_period; - __be16 cq_max_count; - __be16 rsvd20; - __be16 c_eqn; - u8 log_pg_sz; - u8 rsvd25[7]; - __be32 last_notified_index; - __be32 solicit_producer_index; - __be32 consumer_counter; - __be32 producer_counter; - u8 rsvd48[8]; - __be64 db_record_addr; -}; - -struct mlx5_create_cq_mbox_in { - struct mlx5_inbox_hdr hdr; - __be32 input_cqn; - u8 rsvdx[4]; - struct mlx5_cq_context ctx; - u8 rsvd6[192]; - __be64 pas[0]; -}; - -struct mlx5_create_cq_mbox_out { - struct mlx5_outbox_hdr hdr; - __be32 cqn; - u8 rsvd0[4]; -}; - -struct mlx5_destroy_cq_mbox_in { - struct mlx5_inbox_hdr hdr; - __be32 cqn; - u8 rsvd0[4]; -}; - -struct mlx5_destroy_cq_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd0[8]; -}; - -struct mlx5_query_cq_mbox_in { - struct mlx5_inbox_hdr hdr; - __be32 cqn; - u8 rsvd0[4]; -}; - -struct mlx5_query_cq_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd0[8]; - struct mlx5_cq_context ctx; - u8 rsvd6[16]; - __be64 pas[0]; -}; - -struct mlx5_modify_cq_mbox_in { - struct mlx5_inbox_hdr hdr; - __be32 cqn; - __be32 field_select; - struct mlx5_cq_context ctx; - u8 rsvd[192]; - __be64 pas[0]; -}; - -struct mlx5_modify_cq_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd[8]; -}; - -struct mlx5_enable_hca_mbox_in { - struct mlx5_inbox_hdr hdr; - u8 rsvd[8]; -}; - -struct mlx5_enable_hca_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd[8]; -}; - -struct mlx5_disable_hca_mbox_in { - struct mlx5_inbox_hdr hdr; - u8 rsvd[8]; -}; - -struct mlx5_disable_hca_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd[8]; -}; - -struct mlx5_eq_context { - u8 status; - u8 ec_oi; - u8 st; - u8 rsvd2[7]; - __be16 page_pffset; - __be32 log_sz_usr_page; - u8 rsvd3[7]; - u8 intr; - u8 
log_page_size; - u8 rsvd4[15]; - __be32 consumer_counter; - __be32 produser_counter; - u8 rsvd5[16]; -}; - -struct mlx5_create_eq_mbox_in { - struct mlx5_inbox_hdr hdr; - u8 rsvd0[3]; - u8 input_eqn; - u8 rsvd1[4]; - struct mlx5_eq_context ctx; - u8 rsvd2[8]; - __be64 events_mask; - u8 rsvd3[176]; - __be64 pas[0]; -}; - -struct mlx5_create_eq_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd0[3]; - u8 eq_number; - u8 rsvd1[4]; -}; - -struct mlx5_destroy_eq_mbox_in { - struct mlx5_inbox_hdr hdr; - u8 rsvd0[3]; - u8 eqn; - u8 rsvd1[4]; -}; - -struct mlx5_destroy_eq_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd[8]; -}; - -struct mlx5_map_eq_mbox_in { - struct mlx5_inbox_hdr hdr; - __be64 mask; - u8 mu; - u8 rsvd0[2]; - u8 eqn; - u8 rsvd1[24]; -}; - -struct mlx5_map_eq_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd[8]; -}; - -struct mlx5_query_eq_mbox_in { - struct mlx5_inbox_hdr hdr; - u8 rsvd0[3]; - u8 eqn; - u8 rsvd1[4]; -}; - -struct mlx5_query_eq_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd[8]; - struct mlx5_eq_context ctx; +enum { + MLX5_MKEY_STATUS_FREE = 1 << 6, }; enum { - MLX5_MKEY_STATUS_FREE = 1 << 6, + MLX5_MKEY_REMOTE_INVAL = 1 << 24, + MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29, + MLX5_MKEY_BSF_EN = 1 << 30, + MLX5_MKEY_LEN64 = 1 << 31, }; struct mlx5_mkey_seg { @@ -1119,134 +824,12 @@ struct mlx5_mkey_seg { u8 rsvd4[4]; }; -struct mlx5_query_special_ctxs_mbox_in { - struct mlx5_inbox_hdr hdr; - u8 rsvd[8]; -}; - -struct mlx5_query_special_ctxs_mbox_out { - struct mlx5_outbox_hdr hdr; - __be32 dump_fill_mkey; - __be32 reserved_lkey; -}; - -struct mlx5_create_mkey_mbox_in { - struct mlx5_inbox_hdr hdr; - __be32 input_mkey_index; - __be32 flags; - struct mlx5_mkey_seg seg; - u8 rsvd1[16]; - __be32 xlat_oct_act_size; - __be32 rsvd2; - u8 rsvd3[168]; - __be64 pas[0]; -}; - -struct mlx5_create_mkey_mbox_out { - struct mlx5_outbox_hdr hdr; - __be32 mkey; - u8 rsvd[4]; -}; - -struct mlx5_destroy_mkey_mbox_in { - struct mlx5_inbox_hdr hdr; - __be32 mkey; - u8 rsvd[4]; -}; - -struct mlx5_destroy_mkey_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd[8]; -}; - -struct mlx5_query_mkey_mbox_in { - struct mlx5_inbox_hdr hdr; - __be32 mkey; -}; - -struct mlx5_query_mkey_mbox_out { - struct mlx5_outbox_hdr hdr; - __be64 pas[0]; -}; - -struct mlx5_modify_mkey_mbox_in { - struct mlx5_inbox_hdr hdr; - __be32 mkey; - __be64 pas[0]; -}; - -struct mlx5_modify_mkey_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd[8]; -}; - -struct mlx5_dump_mkey_mbox_in { - struct mlx5_inbox_hdr hdr; -}; - -struct mlx5_dump_mkey_mbox_out { - struct mlx5_outbox_hdr hdr; - __be32 mkey; -}; - -struct mlx5_mad_ifc_mbox_in { - struct mlx5_inbox_hdr hdr; - __be16 remote_lid; - u8 rsvd0; - u8 port; - u8 rsvd1[4]; - u8 data[256]; -}; - -struct mlx5_mad_ifc_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd[8]; - u8 data[256]; -}; - -struct mlx5_access_reg_mbox_in { - struct mlx5_inbox_hdr hdr; - u8 rsvd0[2]; - __be16 register_id; - __be32 arg; - __be32 data[0]; -}; - -struct mlx5_access_reg_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd[8]; - __be32 data[0]; -}; - #define MLX5_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90) enum { MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0 }; -struct mlx5_allocate_psv_in { - struct mlx5_inbox_hdr hdr; - __be32 npsv_pd; - __be32 rsvd_psv0; -}; - -struct mlx5_allocate_psv_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd[8]; - __be32 psv_idx[4]; -}; - -struct mlx5_destroy_psv_in { - struct mlx5_inbox_hdr hdr; - __be32 psv_number; - u8 rsvd[4]; -}; - -struct mlx5_destroy_psv_out { 
- struct mlx5_outbox_hdr hdr; - u8 rsvd[8]; -}; - enum { VPORT_STATE_DOWN = 0x0, VPORT_STATE_UP = 0x1, @@ -1381,6 +964,18 @@ enum mlx5_cap_type { #define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap) +#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \ + MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap) + +#define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \ + MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap) + +#define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \ + MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap) + +#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \ + MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap) + #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ MLX5_GET(flow_table_eswitch_cap, \ mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index ccea6fb16482..5cb9fa7aec61 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -49,10 +49,6 @@ #include <linux/mlx5/srq.h> enum { - MLX5_RQ_BITMASK_VSD = 1 << 1, -}; - -enum { MLX5_BOARD_ID_LEN = 64, MLX5_MAX_NAME_LEN = 16, }; @@ -481,6 +477,7 @@ struct mlx5_fc_stats { }; struct mlx5_eswitch; +struct mlx5_lag; struct mlx5_rl_entry { u32 rate; @@ -554,6 +551,7 @@ struct mlx5_priv { struct mlx5_flow_steering *steering; struct mlx5_eswitch *eswitch; struct mlx5_core_sriov sriov; + struct mlx5_lag *lag; unsigned long pci_dev_data; struct mlx5_fc_stats fc_stats; struct mlx5_rl_table rl_table; @@ -771,14 +769,15 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev); void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); void mlx5_cmd_use_events(struct mlx5_core_dev *dev); void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); -int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr); -int mlx5_cmd_status_to_err_v2(void *ptr); -int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type); + int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size, mlx5_cmd_cbk_t callback, void *context); +void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome); + +int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type); int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn); int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); @@ -807,15 +806,18 @@ int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, u16 lwm, int is_srq); void mlx5_init_mkey_table(struct mlx5_core_dev *dev); void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev); +int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev, + struct mlx5_core_mkey *mkey, + u32 *in, int inlen, + u32 *out, int outlen, + mlx5_cmd_cbk_t callback, void *context); int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, - struct mlx5_create_mkey_mbox_in *in, int inlen, - mlx5_cmd_cbk_t callback, void *context, - struct mlx5_create_mkey_mbox_out *out); + u32 *in, int inlen); int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey); int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, - struct mlx5_query_mkey_mbox_out *out, int outlen); + u32 *out, int outlen); int mlx5_core_dump_fill_mkey(struct 
mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey, u32 *mkey); int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); @@ -865,7 +867,7 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, - struct mlx5_query_eq_mbox_out *out, int outlen); + u32 *out, int outlen); int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev); void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); @@ -942,6 +944,11 @@ int mlx5_register_interface(struct mlx5_interface *intf); void mlx5_unregister_interface(struct mlx5_interface *intf); int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); +int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev); +int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev); +bool mlx5_lag_is_active(struct mlx5_core_dev *dev); +struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev); + struct mlx5_profile { u64 mask; u8 log_max_qp; diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index e036d6030867..93ebc5e21334 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -54,6 +54,7 @@ static inline void build_leftovers_ft_param(int *priority, enum mlx5_flow_namespace_type { MLX5_FLOW_NAMESPACE_BYPASS, + MLX5_FLOW_NAMESPACE_LAG, MLX5_FLOW_NAMESPACE_OFFLOADS, MLX5_FLOW_NAMESPACE_ETHTOOL, MLX5_FLOW_NAMESPACE_KERNEL, @@ -62,6 +63,8 @@ enum mlx5_flow_namespace_type { MLX5_FLOW_NAMESPACE_FDB, MLX5_FLOW_NAMESPACE_ESW_EGRESS, MLX5_FLOW_NAMESPACE_ESW_INGRESS, + MLX5_FLOW_NAMESPACE_SNIFFER_RX, + MLX5_FLOW_NAMESPACE_SNIFFER_TX, }; struct mlx5_flow_table; @@ -106,6 +109,9 @@ mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, int prio, int num_flow_table_entries, u32 level, u16 vport); +struct mlx5_flow_table *mlx5_create_lag_demux_flow_table( + struct mlx5_flow_namespace *ns, + int prio, u32 level); int mlx5_destroy_flow_table(struct mlx5_flow_table *ft); /* inbox should be set with the following values: diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 21bc4557b67a..73a720f74a69 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -152,7 +152,7 @@ enum { MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804, MLX5_CMD_OP_ACCESS_REG = 0x805, MLX5_CMD_OP_ATTACH_TO_MCG = 0x806, - MLX5_CMD_OP_DETTACH_FROM_MCG = 0x807, + MLX5_CMD_OP_DETACH_FROM_MCG = 0x807, MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a, MLX5_CMD_OP_MAD_IFC = 0x50d, MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b, @@ -174,6 +174,12 @@ enum { MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b, MLX5_CMD_OP_SET_WOL_ROL = 0x830, MLX5_CMD_OP_QUERY_WOL_ROL = 0x831, + MLX5_CMD_OP_CREATE_LAG = 0x840, + MLX5_CMD_OP_MODIFY_LAG = 0x841, + MLX5_CMD_OP_QUERY_LAG = 0x842, + MLX5_CMD_OP_DESTROY_LAG = 0x843, + MLX5_CMD_OP_CREATE_VPORT_LAG = 0x844, + MLX5_CMD_OP_DESTROY_VPORT_LAG = 0x845, MLX5_CMD_OP_CREATE_TIR = 0x900, MLX5_CMD_OP_MODIFY_TIR = 0x901, MLX5_CMD_OP_DESTROY_TIR = 0x902, @@ -212,6 +218,8 @@ enum { MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a, MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b, MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c, + MLX5_CMD_OP_ALLOC_ENCAP_HEADER = 0x93d, + MLX5_CMD_OP_DEALLOC_ENCAP_HEADER = 0x93e, MLX5_CMD_OP_MAX }; @@ -281,7 +289,9 @@ struct mlx5_ifc_flow_table_prop_layout_bits { u8 modify_root[0x1]; u8 identified_miss_table_mode[0x1]; u8 
flow_table_modify[0x1]; - u8 reserved_at_7[0x19]; + u8 encap[0x1]; + u8 decap[0x1]; + u8 reserved_at_9[0x17]; u8 reserved_at_20[0x2]; u8 log_max_ft_size[0x6]; @@ -473,7 +483,9 @@ struct mlx5_ifc_ads_bits { struct mlx5_ifc_flow_table_nic_cap_bits { u8 nic_rx_multi_path_tirs[0x1]; - u8 reserved_at_1[0x1ff]; + u8 nic_rx_multi_path_tirs_fts[0x1]; + u8 allow_sniffer_and_nic_rx_shared_tir[0x1]; + u8 reserved_at_3[0x1fd]; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive; @@ -512,7 +524,15 @@ struct mlx5_ifc_e_switch_cap_bits { u8 nic_vport_node_guid_modify[0x1]; u8 nic_vport_port_guid_modify[0x1]; - u8 reserved_at_20[0x7e0]; + u8 vxlan_encap_decap[0x1]; + u8 nvgre_encap_decap[0x1]; + u8 reserved_at_22[0x9]; + u8 log_max_encap_headers[0x5]; + u8 reserved_2b[0x6]; + u8 max_encap_header_size[0xa]; + + u8 reserved_40[0x7c0]; + }; struct mlx5_ifc_qos_cap_bits { @@ -767,7 +787,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 out_of_seq_cnt[0x1]; u8 vport_counters[0x1]; u8 retransmission_q_counters[0x1]; - u8 reserved_at_183[0x3]; + u8 reserved_at_183[0x1]; + u8 modify_rq_counter_set_id[0x1]; + u8 reserved_at_185[0x1]; u8 max_qp_cnt[0xa]; u8 pkey_table_size[0x10]; @@ -870,7 +892,10 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 pad_tx_eth_packet[0x1]; u8 reserved_at_263[0x8]; u8 log_bf_reg_size[0x5]; - u8 reserved_at_270[0x10]; + + u8 reserved_at_270[0xb]; + u8 lag_master[0x1]; + u8 num_lag_ports[0x4]; u8 reserved_at_280[0x10]; u8 max_wqe_sz_sq[0x10]; @@ -1904,7 +1929,7 @@ enum { struct mlx5_ifc_qpc_bits { u8 state[0x4]; - u8 reserved_at_4[0x4]; + u8 lag_tx_port_affinity[0x4]; u8 st[0x8]; u8 reserved_at_10[0x3]; u8 pm_state[0x2]; @@ -1966,7 +1991,10 @@ struct mlx5_ifc_qpc_bits { u8 reserved_at_3e0[0x8]; u8 cqn_snd[0x18]; - u8 reserved_at_400[0x40]; + u8 reserved_at_400[0x8]; + u8 deth_sqpn[0x18]; + + u8 reserved_at_420[0x20]; u8 reserved_at_440[0x8]; u8 last_acked_psn[0x18]; @@ -2064,6 +2092,8 @@ enum { MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4, MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8, + MLX5_FLOW_CONTEXT_ACTION_ENCAP = 0x10, + MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20, }; struct mlx5_ifc_flow_context_bits { @@ -2083,7 +2113,9 @@ struct mlx5_ifc_flow_context_bits { u8 reserved_at_a0[0x8]; u8 flow_counter_list_size[0x18]; - u8 reserved_at_c0[0x140]; + u8 encap_id[0x20]; + + u8 reserved_at_e0[0x120]; struct mlx5_ifc_fte_match_param_bits match_value; @@ -2146,7 +2178,11 @@ struct mlx5_ifc_traffic_counter_bits { }; struct mlx5_ifc_tisc_bits { - u8 reserved_at_0[0xc]; + u8 strict_lag_tx_port_affinity[0x1]; + u8 reserved_at_1[0x3]; + u8 lag_tx_port_affinity[0x04]; + + u8 reserved_at_8[0x4]; u8 prio[0x4]; u8 reserved_at_10[0x10]; @@ -2808,7 +2844,7 @@ struct mlx5_ifc_xrqc_bits { struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context; - u8 reserved_at_180[0x180]; + u8 reserved_at_180[0x200]; struct mlx5_ifc_wq_bits wq; }; @@ -3489,7 +3525,7 @@ struct mlx5_ifc_query_special_contexts_out_bits { u8 syndrome[0x20]; - u8 reserved_at_40[0x20]; + u8 dump_fill_mkey[0x20]; u8 resd_lkey[0x20]; }; @@ -4213,6 +4249,85 @@ struct mlx5_ifc_query_eq_in_bits { u8 reserved_at_60[0x20]; }; +struct mlx5_ifc_encap_header_in_bits { + u8 reserved_at_0[0x5]; + u8 header_type[0x3]; + u8 reserved_at_8[0xe]; + u8 encap_header_size[0xa]; + + u8 reserved_at_20[0x10]; + u8 encap_header[2][0x8]; + + u8 more_encap_header[0][0x8]; +}; + +struct mlx5_ifc_query_encap_header_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 
reserved_at_40[0xa0]; + + struct mlx5_ifc_encap_header_in_bits encap_header[0]; +}; + +struct mlx5_ifc_query_encap_header_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 encap_id[0x20]; + + u8 reserved_at_60[0xa0]; +}; + +struct mlx5_ifc_alloc_encap_header_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 encap_id[0x20]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_alloc_encap_header_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0xa0]; + + struct mlx5_ifc_encap_header_in_bits encap_header; +}; + +struct mlx5_ifc_dealloc_encap_header_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_dealloc_encap_header_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_20[0x10]; + u8 op_mod[0x10]; + + u8 encap_id[0x20]; + + u8 reserved_60[0x20]; +}; + struct mlx5_ifc_query_dct_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -4517,7 +4632,9 @@ struct mlx5_ifc_modify_tis_out_bits { struct mlx5_ifc_modify_tis_bitmask_bits { u8 reserved_at_0[0x20]; - u8 reserved_at_20[0x1f]; + u8 reserved_at_20[0x1d]; + u8 lag_tx_port_affinity[0x1]; + u8 strict_lag_tx_port_affinity[0x1]; u8 prio[0x1]; }; @@ -4652,6 +4769,11 @@ struct mlx5_ifc_modify_rq_out_bits { u8 reserved_at_40[0x40]; }; +enum { + MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1, + MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_MODIFY_RQ_COUNTER_SET_ID = 1ULL << 3, +}; + struct mlx5_ifc_modify_rq_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; @@ -4721,7 +4843,7 @@ struct mlx5_ifc_modify_nic_vport_field_select_bits { u8 reserved_at_0[0x16]; u8 node_guid[0x1]; u8 port_guid[0x1]; - u8 reserved_at_18[0x1]; + u8 min_inline[0x1]; u8 mtu[0x1]; u8 change_event[0x1]; u8 promisc[0x1]; @@ -6099,7 +6221,9 @@ struct mlx5_ifc_create_flow_table_in_bits { u8 reserved_at_a0[0x20]; - u8 reserved_at_c0[0x4]; + u8 encap_en[0x1]; + u8 decap_en[0x1]; + u8 reserved_at_c2[0x2]; u8 table_miss_mode[0x4]; u8 level[0x8]; u8 reserved_at_d0[0x8]; @@ -6108,7 +6232,10 @@ struct mlx5_ifc_create_flow_table_in_bits { u8 reserved_at_e0[0x8]; u8 table_miss_id[0x18]; - u8 reserved_at_100[0x100]; + u8 reserved_at_100[0x8]; + u8 lag_master_next_table_id[0x18]; + + u8 reserved_at_120[0x80]; }; struct mlx5_ifc_create_flow_group_out_bits { @@ -7562,7 +7689,8 @@ struct mlx5_ifc_set_flow_table_root_in_bits { }; enum { - MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID = 0x1, + MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID = (1UL << 0), + MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID = (1UL << 15), }; struct mlx5_ifc_modify_flow_table_out_bits { @@ -7601,7 +7729,10 @@ struct mlx5_ifc_modify_flow_table_in_bits { u8 reserved_at_e0[0x8]; u8 table_miss_id[0x18]; - u8 reserved_at_100[0x100]; + u8 reserved_at_100[0x8]; + u8 lag_master_next_table_id[0x18]; + + u8 reserved_at_120[0x80]; }; struct mlx5_ifc_ets_tcn_config_reg_bits { @@ -7709,4 +7840,134 @@ struct mlx5_ifc_dcbx_param_bits { u8 error[0x8]; u8 reserved_at_a0[0x160]; }; + +struct mlx5_ifc_lagc_bits { + u8 reserved_at_0[0x1d]; + u8 lag_state[0x3]; + + u8 reserved_at_20[0x14]; + u8 tx_remap_affinity_2[0x4]; + u8 reserved_at_38[0x4]; + u8 tx_remap_affinity_1[0x4]; +}; + +struct mlx5_ifc_create_lag_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_create_lag_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 
reserved_at_20[0x10]; + u8 op_mod[0x10]; + + struct mlx5_ifc_lagc_bits ctx; +}; + +struct mlx5_ifc_modify_lag_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_modify_lag_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x20]; + u8 field_select[0x20]; + + struct mlx5_ifc_lagc_bits ctx; +}; + +struct mlx5_ifc_query_lag_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_lagc_bits ctx; +}; + +struct mlx5_ifc_query_lag_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_destroy_lag_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_destroy_lag_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_create_vport_lag_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_create_vport_lag_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_destroy_vport_lag_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_destroy_vport_lag_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + #endif /* MLX5_IFC_H */ diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index e3012cc64b8a..b3065acd20b4 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -61,6 +61,39 @@ enum mlx5_an_status { #define MLX5_I2C_ADDR_HIGH 0x51 #define MLX5_EEPROM_PAGE_LENGTH 256 +enum mlx5e_link_mode { + MLX5E_1000BASE_CX_SGMII = 0, + MLX5E_1000BASE_KX = 1, + MLX5E_10GBASE_CX4 = 2, + MLX5E_10GBASE_KX4 = 3, + MLX5E_10GBASE_KR = 4, + MLX5E_20GBASE_KR2 = 5, + MLX5E_40GBASE_CR4 = 6, + MLX5E_40GBASE_KR4 = 7, + MLX5E_56GBASE_R4 = 8, + MLX5E_10GBASE_CR = 12, + MLX5E_10GBASE_SR = 13, + MLX5E_10GBASE_ER = 14, + MLX5E_40GBASE_SR4 = 15, + MLX5E_40GBASE_LR4 = 16, + MLX5E_50GBASE_SR2 = 18, + MLX5E_100GBASE_CR4 = 20, + MLX5E_100GBASE_SR4 = 21, + MLX5E_100GBASE_KR4 = 22, + MLX5E_100GBASE_LR4 = 23, + MLX5E_100BASE_TX = 24, + MLX5E_1000BASE_T = 25, + MLX5E_10GBASE_T = 26, + MLX5E_25GBASE_CR = 27, + MLX5E_25GBASE_KR = 28, + MLX5E_25GBASE_SR = 29, + MLX5E_50GBASE_CR2 = 30, + MLX5E_50GBASE_KR2 = 31, + MLX5E_LINK_MODES_NUMBER, +}; + +#define MLX5E_PROT_MASK(link_mode) (1 << link_mode) + int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, int ptys_size, int proto_mask, u8 local_port); @@ -70,9 +103,10 @@ int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev, u32 *proto_admin, int proto_mask); int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, u8 *link_width_oper, u8 local_port); -int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev, - u8 *proto_oper, int proto_mask, - u8 local_port); +int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev, + u8 *proto_oper, u8 local_port); +int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev, + u32 *proto_oper, u8 local_port); 
int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable, u32 proto_admin, int proto_mask); void mlx5_toggle_port_link(struct mlx5_core_dev *dev); diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 7879bf411891..0aacb2a7480d 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -123,12 +123,13 @@ enum { }; enum { - MLX5_NON_ZERO_RQ = 0 << 24, - MLX5_SRQ_RQ = 1 << 24, - MLX5_CRQ_RQ = 2 << 24, - MLX5_ZERO_LEN_RQ = 3 << 24 + MLX5_NON_ZERO_RQ = 0x0, + MLX5_SRQ_RQ = 0x1, + MLX5_CRQ_RQ = 0x2, + MLX5_ZERO_LEN_RQ = 0x3 }; +/* TODO REM */ enum { /* params1 */ MLX5_QP_BIT_SRE = 1 << 15, @@ -178,12 +179,6 @@ enum { }; enum { - MLX5_QP_LAT_SENSITIVE = 1 << 28, - MLX5_QP_BLOCK_MCAST = 1 << 30, - MLX5_QP_ENABLE_SIG = 1 << 31, -}; - -enum { MLX5_RCV_DBR = 0, MLX5_SND_DBR = 1, }; @@ -484,6 +479,7 @@ struct mlx5_qp_path { u8 rmac[6]; }; +/* FIXME: use mlx5_ifc.h qpc */ struct mlx5_qp_context { __be32 flags; __be32 flags_pd; @@ -525,99 +521,6 @@ struct mlx5_qp_context { u8 rsvd1[24]; }; -struct mlx5_create_qp_mbox_in { - struct mlx5_inbox_hdr hdr; - __be32 input_qpn; - u8 rsvd0[4]; - __be32 opt_param_mask; - u8 rsvd1[4]; - struct mlx5_qp_context ctx; - u8 rsvd3[16]; - __be64 pas[0]; -}; - -struct mlx5_create_qp_mbox_out { - struct mlx5_outbox_hdr hdr; - __be32 qpn; - u8 rsvd0[4]; -}; - -struct mlx5_destroy_qp_mbox_in { - struct mlx5_inbox_hdr hdr; - __be32 qpn; - u8 rsvd0[4]; -}; - -struct mlx5_destroy_qp_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd0[8]; -}; - -struct mlx5_modify_qp_mbox_in { - struct mlx5_inbox_hdr hdr; - __be32 qpn; - u8 rsvd0[4]; - __be32 optparam; - u8 rsvd1[4]; - struct mlx5_qp_context ctx; - u8 rsvd2[16]; -}; - -struct mlx5_modify_qp_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd0[8]; -}; - -struct mlx5_query_qp_mbox_in { - struct mlx5_inbox_hdr hdr; - __be32 qpn; - u8 rsvd[4]; -}; - -struct mlx5_query_qp_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd1[8]; - __be32 optparam; - u8 rsvd0[4]; - struct mlx5_qp_context ctx; - u8 rsvd2[16]; - __be64 pas[0]; -}; - -struct mlx5_conf_sqp_mbox_in { - struct mlx5_inbox_hdr hdr; - __be32 qpn; - u8 rsvd[3]; - u8 type; -}; - -struct mlx5_conf_sqp_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd[8]; -}; - -struct mlx5_alloc_xrcd_mbox_in { - struct mlx5_inbox_hdr hdr; - u8 rsvd[8]; -}; - -struct mlx5_alloc_xrcd_mbox_out { - struct mlx5_outbox_hdr hdr; - __be32 xrcdn; - u8 rsvd[4]; -}; - -struct mlx5_dealloc_xrcd_mbox_in { - struct mlx5_inbox_hdr hdr; - __be32 xrcdn; - u8 rsvd[4]; -}; - -struct mlx5_dealloc_xrcd_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd[8]; -}; - static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn) { return radix_tree_lookup(&dev->priv.qp_table.tree, qpn); @@ -628,28 +531,17 @@ static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev, return radix_tree_lookup(&dev->priv.mkey_table.tree, key); } -struct mlx5_page_fault_resume_mbox_in { - struct mlx5_inbox_hdr hdr; - __be32 flags_qpn; - u8 reserved[4]; -}; - -struct mlx5_page_fault_resume_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd[8]; -}; - int mlx5_core_create_qp(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, - struct mlx5_create_qp_mbox_in *in, + u32 *in, int inlen); -int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation, - struct mlx5_modify_qp_mbox_in *in, int sqd_event, +int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode, + u32 opt_param_mask, void *qpc, struct mlx5_core_qp *qp); int mlx5_core_destroy_qp(struct 
mlx5_core_dev *dev, struct mlx5_core_qp *qp); int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, - struct mlx5_query_qp_mbox_out *out, int outlen); + u32 *out, int outlen); int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn); int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn); diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index e087b7d047ac..451b0bde9083 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -45,6 +45,8 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u16 vport, u8 *addr); void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline); +int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev, + u16 vport, u8 min_inline); int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, u16 vport, u8 *addr); int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu); diff --git a/include/linux/net.h b/include/linux/net.h index b9f0ff4d489c..cd0c8bd0a1de 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -25,6 +25,7 @@ #include <linux/kmemcheck.h> #include <linux/rcupdate.h> #include <linux/once.h> +#include <linux/fs.h> #include <uapi/linux/net.h> @@ -128,6 +129,9 @@ struct page; struct sockaddr; struct msghdr; struct module; +struct sk_buff; +typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *, + unsigned int, size_t); struct proto_ops { int family; @@ -186,6 +190,8 @@ struct proto_ops { struct pipe_inode_info *pipe, size_t len, unsigned int flags); int (*set_peek_off)(struct sock *sk, int val); int (*peek_len)(struct socket *sock); + int (*read_sock)(struct sock *sk, read_descriptor_t *desc, + sk_read_actor_t recv_actor); }; #define DECLARE_SOCKADDR(type, dst, src) \ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3a788bf0affd..d122be9345c7 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -52,6 +52,7 @@ #include <uapi/linux/netdevice.h> #include <uapi/linux/if_bonding.h> #include <uapi/linux/pkt_cls.h> +#include <linux/hashtable.h> struct netpoll_info; struct device; @@ -1561,8 +1562,6 @@ enum netdev_priv_flags { * * @xps_maps: XXX: need comments on this one * - * @offload_fwd_mark: Offload device fwding mark - * * @watchdog_timeo: Represents the timeout that is used by * the watchdog (see dev_watchdog()) * @watchdog_timer: List of timers @@ -1800,6 +1799,9 @@ struct net_device { unsigned int num_tx_queues; unsigned int real_num_tx_queues; struct Qdisc *qdisc; +#ifdef CONFIG_NET_SCHED + DECLARE_HASHTABLE (qdisc_hash, 4); +#endif unsigned long tx_queue_len; spinlock_t tx_global_lock; int watchdog_timeo; @@ -1810,9 +1812,6 @@ struct net_device { #ifdef CONFIG_NET_CLS_ACT struct tcf_proto __rcu *egress_cl_list; #endif -#ifdef CONFIG_NET_SWITCHDEV - u32 offload_fwd_mark; -#endif /* These may be needed for future network-power-down code. */ struct timer_list watchdog_timer; diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 40c0ada01806..70b30e4d3cc4 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -5,28 +5,77 @@ * (GPL) Version 2, available from the file COPYING in the main directory of * this source tree. 
*/ +#ifndef _COMMON_HSI_H +#define _COMMON_HSI_H +#include <linux/types.h> +#include <asm/byteorder.h> +#include <linux/bitops.h> +#include <linux/slab.h> + +/* dma_addr_t manip */ +#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x)) +#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x)) +#define DMA_REGPAIR_LE(x, val) do { \ + (x).hi = DMA_HI_LE((val)); \ + (x).lo = DMA_LO_LE((val)); \ + } while (0) + +#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo)) +#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64) +#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo)) +#define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair)) #ifndef __COMMON_HSI__ #define __COMMON_HSI__ -#define CORE_SPQE_PAGE_SIZE_BYTES 4096 #define X_FINAL_CLEANUP_AGG_INT 1 + +#define EVENT_RING_PAGE_SIZE_BYTES 4096 + #define NUM_OF_GLOBAL_QUEUES 128 +#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64 + +#define ISCSI_CDU_TASK_SEG_TYPE 0 +#define RDMA_CDU_TASK_SEG_TYPE 1 + +#define FW_ASSERT_GENERAL_ATTN_IDX 32 + +#define MAX_PINNED_CCFC 32 /* Queue Zone sizes in bytes */ #define TSTORM_QZONE_SIZE 8 -#define MSTORM_QZONE_SIZE 0 +#define MSTORM_QZONE_SIZE 16 #define USTORM_QZONE_SIZE 8 #define XSTORM_QZONE_SIZE 8 #define YSTORM_QZONE_SIZE 0 #define PSTORM_QZONE_SIZE 0 -#define ETH_MAX_NUM_RX_QUEUES_PER_VF 16 +#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7 +#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT 16 +#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48 +#define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112 + +/********************************/ +/* CORE (LIGHT L2) FW CONSTANTS */ +/********************************/ + +#define CORE_LL2_MAX_RAMROD_PER_CON 8 +#define CORE_LL2_TX_BD_PAGE_SIZE_BYTES 4096 +#define CORE_LL2_RX_BD_PAGE_SIZE_BYTES 4096 +#define CORE_LL2_RX_CQE_PAGE_SIZE_BYTES 4096 +#define CORE_LL2_RX_NUM_NEXT_PAGE_BDS 1 + +#define CORE_LL2_TX_MAX_BDS_PER_PACKET 12 + +#define CORE_SPQE_PAGE_SIZE_BYTES 4096 + +#define MAX_NUM_LL2_RX_QUEUES 32 +#define MAX_NUM_LL2_TX_STATS_COUNTERS 32 #define FW_MAJOR_VERSION 8 #define FW_MINOR_VERSION 10 -#define FW_REVISION_VERSION 5 +#define FW_REVISION_VERSION 10 #define FW_ENGINEERING_VERSION 0 /***********************/ @@ -83,6 +132,17 @@ #define NUM_OF_LCIDS (320) #define NUM_OF_LTIDS (320) +/* Clock values */ +#define MASTER_CLK_FREQ_E4 (375e6) +#define STORM_CLK_FREQ_E4 (1000e6) +#define CLK25M_CLK_FREQ_E4 (25e6) + +/* Global PXP windows (GTT) */ +#define NUM_OF_GTT 19 +#define GTT_DWORD_SIZE_BITS 10 +#define GTT_BYTE_SIZE_BITS (GTT_DWORD_SIZE_BITS + 2) +#define GTT_DWORD_SIZE BIT(GTT_DWORD_SIZE_BITS) + /*****************/ /* CDU CONSTANTS */ /*****************/ @@ -90,6 +150,8 @@ #define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17) #define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff) +#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12) +#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff) /*****************/ /* DQ CONSTANTS */ /*****************/ @@ -115,6 +177,11 @@ #define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 #define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 #define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 +#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 +#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6 +#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 /* UCM agg val selection (HW) */ #define DQ_UCM_AGG_VAL_SEL_WORD0 0 @@ -159,13 +226,16 @@ #define 
DQ_XCM_AGG_FLG_SHIFT_CF23 7 /* XCM agg counter flag selection */ -#define DQ_XCM_CORE_DQ_CF_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF18) -#define DQ_XCM_CORE_TERMINATE_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF19) -#define DQ_XCM_CORE_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22) -#define DQ_XCM_ETH_DQ_CF_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF18) -#define DQ_XCM_ETH_TERMINATE_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF19) -#define DQ_XCM_ETH_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22) -#define DQ_XCM_ETH_TPH_EN_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF23) +#define DQ_XCM_CORE_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18) +#define DQ_XCM_CORE_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_CORE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ETH_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18) +#define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) +#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) /* UCM agg counter flag selection (HW) */ #define DQ_UCM_AGG_FLG_SHIFT_CF0 0 @@ -178,9 +248,45 @@ #define DQ_UCM_AGG_FLG_SHIFT_RULE1EN 7 /* UCM agg counter flag selection (FW) */ -#define DQ_UCM_ETH_PMD_TX_ARM_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF4) -#define DQ_UCM_ETH_PMD_RX_ARM_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF5) - +#define DQ_UCM_ETH_PMD_TX_ARM_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4) +#define DQ_UCM_ETH_PMD_RX_ARM_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5) +#define DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4) +#define DQ_UCM_ROCE_CQ_ARM_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5) + +/* TCM agg counter flag selection (HW) */ +#define DQ_TCM_AGG_FLG_SHIFT_CF0 0 +#define DQ_TCM_AGG_FLG_SHIFT_CF1 1 +#define DQ_TCM_AGG_FLG_SHIFT_CF2 2 +#define DQ_TCM_AGG_FLG_SHIFT_CF3 3 +#define DQ_TCM_AGG_FLG_SHIFT_CF4 4 +#define DQ_TCM_AGG_FLG_SHIFT_CF5 5 +#define DQ_TCM_AGG_FLG_SHIFT_CF6 6 +#define DQ_TCM_AGG_FLG_SHIFT_CF7 7 +/* TCM agg counter flag selection (FW) */ +#define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) +#define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) + +/* PWM address mapping */ +#define DQ_PWM_OFFSET_DPM_BASE 0x0 +#define DQ_PWM_OFFSET_DPM_END 0x27 +#define DQ_PWM_OFFSET_XCM16_BASE 0x40 +#define DQ_PWM_OFFSET_XCM32_BASE 0x44 +#define DQ_PWM_OFFSET_UCM16_BASE 0x48 +#define DQ_PWM_OFFSET_UCM32_BASE 0x4C +#define DQ_PWM_OFFSET_UCM16_4 0x50 +#define DQ_PWM_OFFSET_TCM16_BASE 0x58 +#define DQ_PWM_OFFSET_TCM32_BASE 0x5C +#define DQ_PWM_OFFSET_XCM_FLAGS 0x68 +#define DQ_PWM_OFFSET_UCM_FLAGS 0x69 +#define DQ_PWM_OFFSET_TCM_FLAGS 0x6B + +#define DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD (DQ_PWM_OFFSET_XCM16_BASE + 2) +#define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT (DQ_PWM_OFFSET_UCM32_BASE) +#define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_16BIT (DQ_PWM_OFFSET_UCM16_4) +#define DQ_PWM_OFFSET_UCM_RDMA_INT_TIMEOUT (DQ_PWM_OFFSET_UCM16_BASE + 2) +#define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS (DQ_PWM_OFFSET_UCM_FLAGS) +#define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1) +#define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3) #define DQ_REGION_SHIFT (12) /* DPM */ @@ -214,15 +320,17 @@ */ #define CM_TX_PQ_BASE 0x200 +/* number of global Vport/QCN rate limiters */ +#define MAX_QM_GLOBAL_RLS 256 /* QM registers data */ #define QM_LINE_CRD_REG_WIDTH 16 -#define QM_LINE_CRD_REG_SIGN_BIT (1 << (QM_LINE_CRD_REG_WIDTH - 1)) 
+#define QM_LINE_CRD_REG_SIGN_BIT BIT((QM_LINE_CRD_REG_WIDTH - 1)) #define QM_BYTE_CRD_REG_WIDTH 24 -#define QM_BYTE_CRD_REG_SIGN_BIT (1 << (QM_BYTE_CRD_REG_WIDTH - 1)) +#define QM_BYTE_CRD_REG_SIGN_BIT BIT((QM_BYTE_CRD_REG_WIDTH - 1)) #define QM_WFQ_CRD_REG_WIDTH 32 -#define QM_WFQ_CRD_REG_SIGN_BIT (1 << (QM_WFQ_CRD_REG_WIDTH - 1)) +#define QM_WFQ_CRD_REG_SIGN_BIT BIT((QM_WFQ_CRD_REG_WIDTH - 1)) #define QM_RL_CRD_REG_WIDTH 32 -#define QM_RL_CRD_REG_SIGN_BIT (1 << (QM_RL_CRD_REG_WIDTH - 1)) +#define QM_RL_CRD_REG_SIGN_BIT BIT((QM_RL_CRD_REG_WIDTH - 1)) /*****************/ /* CAU CONSTANTS */ @@ -287,6 +395,17 @@ /* PXP CONSTANTS */ /*****************/ +/* Bars for Blocks */ +#define PXP_BAR_GRC 0 +#define PXP_BAR_TSDM 0 +#define PXP_BAR_USDM 0 +#define PXP_BAR_XSDM 0 +#define PXP_BAR_MSDM 0 +#define PXP_BAR_YSDM 0 +#define PXP_BAR_PSDM 0 +#define PXP_BAR_IGU 0 +#define PXP_BAR_DQ 1 + /* PTT and GTT */ #define PXP_NUM_PF_WINDOWS 12 #define PXP_PER_PF_ENTRY_SIZE 8 @@ -334,6 +453,52 @@ (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \ PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1) +/* PF BAR */ +#define PXP_BAR0_START_GRC 0x0000 +#define PXP_BAR0_GRC_LENGTH 0x1C00000 +#define PXP_BAR0_END_GRC (PXP_BAR0_START_GRC + \ + PXP_BAR0_GRC_LENGTH - 1) + +#define PXP_BAR0_START_IGU 0x1C00000 +#define PXP_BAR0_IGU_LENGTH 0x10000 +#define PXP_BAR0_END_IGU (PXP_BAR0_START_IGU + \ + PXP_BAR0_IGU_LENGTH - 1) + +#define PXP_BAR0_START_TSDM 0x1C80000 +#define PXP_BAR0_SDM_LENGTH 0x40000 +#define PXP_BAR0_SDM_RESERVED_LENGTH 0x40000 +#define PXP_BAR0_END_TSDM (PXP_BAR0_START_TSDM + \ + PXP_BAR0_SDM_LENGTH - 1) + +#define PXP_BAR0_START_MSDM 0x1D00000 +#define PXP_BAR0_END_MSDM (PXP_BAR0_START_MSDM + \ + PXP_BAR0_SDM_LENGTH - 1) + +#define PXP_BAR0_START_USDM 0x1D80000 +#define PXP_BAR0_END_USDM (PXP_BAR0_START_USDM + \ + PXP_BAR0_SDM_LENGTH - 1) + +#define PXP_BAR0_START_XSDM 0x1E00000 +#define PXP_BAR0_END_XSDM (PXP_BAR0_START_XSDM + \ + PXP_BAR0_SDM_LENGTH - 1) + +#define PXP_BAR0_START_YSDM 0x1E80000 +#define PXP_BAR0_END_YSDM (PXP_BAR0_START_YSDM + \ + PXP_BAR0_SDM_LENGTH - 1) + +#define PXP_BAR0_START_PSDM 0x1F00000 +#define PXP_BAR0_END_PSDM (PXP_BAR0_START_PSDM + \ + PXP_BAR0_SDM_LENGTH - 1) + +#define PXP_BAR0_FIRST_INVALID_ADDRESS (PXP_BAR0_END_PSDM + 1) + +/* VF BAR */ +#define PXP_VF_BAR0 0 + +#define PXP_VF_BAR0_START_GRC 0x3E00 +#define PXP_VF_BAR0_GRC_LENGTH 0x200 +#define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \ + PXP_VF_BAR0_GRC_LENGTH - 1) #define PXP_VF_BAR0_START_IGU 0 #define PXP_VF_BAR0_IGU_LENGTH 0x3000 @@ -399,6 +564,20 @@ #define PXP_NUM_ILT_RECORDS_BB 7600 #define PXP_NUM_ILT_RECORDS_K2 11000 #define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2) +#define PXP_QUEUES_ZONE_MAX_NUM 320 +/*****************/ +/* PRM CONSTANTS */ +/*****************/ +#define PRM_DMA_PAD_BYTES_NUM 2 +/******************/ +/* SDMs CONSTANTS */ +/******************/ +#define SDM_OP_GEN_TRIG_NONE 0 +#define SDM_OP_GEN_TRIG_WAKE_THREAD 1 +#define SDM_OP_GEN_TRIG_AGG_INT 2 +#define SDM_OP_GEN_TRIG_LOADER 4 +#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6 +#define SDM_OP_GEN_TRIG_RELEASE_THREAD 7 #define SDM_COMP_TYPE_NONE 0 #define SDM_COMP_TYPE_WAKE_THREAD 1 @@ -424,6 +603,8 @@ /* PRS CONSTANTS */ /*****************/ +#define PRS_GFT_CAM_LINES_NO_MATCH 31 + /* Async data KCQ CQE */ struct async_data { __le32 cid; @@ -440,20 +621,6 @@ struct coalescing_timeset { #define COALESCING_TIMESET_VALID_SHIFT 7 }; -struct common_prs_pf_msg_info { - __le32 value; -#define 
COMMON_PRS_PF_MSG_INFO_NPAR_DEFAULT_PF_MASK 0x1 -#define COMMON_PRS_PF_MSG_INFO_NPAR_DEFAULT_PF_SHIFT 0 -#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_1_MASK 0x1 -#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_1_SHIFT 1 -#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_2_MASK 0x1 -#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_2_SHIFT 2 -#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_3_MASK 0x1 -#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_3_SHIFT 3 -#define COMMON_PRS_PF_MSG_INFO_RESERVED_MASK 0xFFFFFFF -#define COMMON_PRS_PF_MSG_INFO_RESERVED_SHIFT 4 -}; - struct common_queue_zone { __le16 ring_drv_data_consumer; __le16 reserved; @@ -473,6 +640,19 @@ struct vf_pf_channel_eqe_data { struct regpair msg_addr; }; +struct iscsi_eqe_data { + __le32 cid; + __le16 conn_id; + u8 error_code; + u8 error_pdu_opcode_reserved; +#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F +#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_SHIFT 0 +#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_VALID_MASK 0x1 +#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_VALID_SHIFT 6 +#define ISCSI_EQE_DATA_RESERVED0_MASK 0x1 +#define ISCSI_EQE_DATA_RESERVED0_SHIFT 7 +}; + struct malicious_vf_eqe_data { u8 vf_id; u8 err_id; @@ -488,6 +668,7 @@ struct initial_cleanup_eqe_data { union event_ring_data { u8 bytes[8]; struct vf_pf_channel_eqe_data vf_pf_channel; + struct iscsi_eqe_data iscsi_info; struct malicious_vf_eqe_data malicious_vf; struct initial_cleanup_eqe_data vf_init_cleanup; }; @@ -616,6 +797,52 @@ enum db_dest { MAX_DB_DEST }; +/* Enum of doorbell DPM types */ +enum db_dpm_type { + DPM_LEGACY, + DPM_ROCE, + DPM_L2_INLINE, + DPM_L2_BD, + MAX_DB_DPM_TYPE +}; + +/* Structure for doorbell data, in L2 DPM mode, for 1st db in a DPM burst */ +struct db_l2_dpm_data { + __le16 icid; + __le16 bd_prod; + __le32 params; +#define DB_L2_DPM_DATA_SIZE_MASK 0x3F +#define DB_L2_DPM_DATA_SIZE_SHIFT 0 +#define DB_L2_DPM_DATA_DPM_TYPE_MASK 0x3 +#define DB_L2_DPM_DATA_DPM_TYPE_SHIFT 6 +#define DB_L2_DPM_DATA_NUM_BDS_MASK 0xFF +#define DB_L2_DPM_DATA_NUM_BDS_SHIFT 8 +#define DB_L2_DPM_DATA_PKT_SIZE_MASK 0x7FF +#define DB_L2_DPM_DATA_PKT_SIZE_SHIFT 16 +#define DB_L2_DPM_DATA_RESERVED0_MASK 0x1 +#define DB_L2_DPM_DATA_RESERVED0_SHIFT 27 +#define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7 +#define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28 +#define DB_L2_DPM_DATA_RESERVED1_MASK 0x1 +#define DB_L2_DPM_DATA_RESERVED1_SHIFT 31 +}; + +/* Structure for SGE in a DPM doorbell of type DPM_L2_BD */ +struct db_l2_dpm_sge { + struct regpair addr; + __le16 nbytes; + __le16 bitfields; +#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF +#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0 +#define DB_L2_DPM_SGE_RESERVED0_MASK 0x3 +#define DB_L2_DPM_SGE_RESERVED0_SHIFT 9 +#define DB_L2_DPM_SGE_ST_VALID_MASK 0x1 +#define DB_L2_DPM_SGE_ST_VALID_SHIFT 11 +#define DB_L2_DPM_SGE_RESERVED1_MASK 0xF +#define DB_L2_DPM_SGE_RESERVED1_SHIFT 12 + __le32 reserved2; +}; + /* Structure for doorbell address, in legacy mode */ struct db_legacy_addr { __le32 addr; @@ -627,6 +854,49 @@ struct db_legacy_addr { #define DB_LEGACY_ADDR_ICID_SHIFT 5 }; +/* Structure for doorbell address, in PWM mode */ +struct db_pwm_addr { + __le32 addr; +#define DB_PWM_ADDR_RESERVED0_MASK 0x7 +#define DB_PWM_ADDR_RESERVED0_SHIFT 0 +#define DB_PWM_ADDR_OFFSET_MASK 0x7F +#define DB_PWM_ADDR_OFFSET_SHIFT 3 +#define DB_PWM_ADDR_WID_MASK 0x3 +#define DB_PWM_ADDR_WID_SHIFT 10 +#define DB_PWM_ADDR_DPI_MASK 0xFFFF +#define DB_PWM_ADDR_DPI_SHIFT 12 +#define DB_PWM_ADDR_RESERVED1_MASK 0xF +#define DB_PWM_ADDR_RESERVED1_SHIFT 28 +}; + +/* Parameters to RoCE firmware, passed in EDPM doorbell */ +struct 
db_roce_dpm_params { + __le32 params; +#define DB_ROCE_DPM_PARAMS_SIZE_MASK 0x3F +#define DB_ROCE_DPM_PARAMS_SIZE_SHIFT 0 +#define DB_ROCE_DPM_PARAMS_DPM_TYPE_MASK 0x3 +#define DB_ROCE_DPM_PARAMS_DPM_TYPE_SHIFT 6 +#define DB_ROCE_DPM_PARAMS_OPCODE_MASK 0xFF +#define DB_ROCE_DPM_PARAMS_OPCODE_SHIFT 8 +#define DB_ROCE_DPM_PARAMS_WQE_SIZE_MASK 0x7FF +#define DB_ROCE_DPM_PARAMS_WQE_SIZE_SHIFT 16 +#define DB_ROCE_DPM_PARAMS_RESERVED0_MASK 0x1 +#define DB_ROCE_DPM_PARAMS_RESERVED0_SHIFT 27 +#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 +#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_SHIFT 28 +#define DB_ROCE_DPM_PARAMS_S_FLG_MASK 0x1 +#define DB_ROCE_DPM_PARAMS_S_FLG_SHIFT 29 +#define DB_ROCE_DPM_PARAMS_RESERVED1_MASK 0x3 +#define DB_ROCE_DPM_PARAMS_RESERVED1_SHIFT 30 +}; + +/* Structure for doorbell data, in ROCE DPM mode, for 1st db in a DPM burst */ +struct db_roce_dpm_data { + __le16 icid; + __le16 prod_val; + struct db_roce_dpm_params params; +}; + /* Igu interrupt command */ enum igu_int_cmd { IGU_INT_ENABLE = 0, @@ -764,6 +1034,19 @@ struct pxp_ptt_entry { struct pxp_pretend_cmd pretend; }; +/* VF Zone A Permission Register. */ +struct pxp_vf_zone_a_permission { + __le32 control; +#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF +#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0 +#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1 +#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8 +#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F +#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9 +#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF +#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16 +}; + /* RSS hash type */ struct rdif_task_context { __le32 initial_ref_tag; @@ -831,6 +1114,7 @@ struct rdif_task_context { __le32 reserved2; }; +/* RSS hash type */ enum rss_hash_type { RSS_HASH_TYPE_DEFAULT = 0, RSS_HASH_TYPE_IPV4 = 1, @@ -942,7 +1226,7 @@ struct tdif_task_context { }; struct timers_context { - __le32 logical_client0; + __le32 logical_client_0; #define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0xFFFFFFF #define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0 #define TIMERS_CONTEXT_VALIDLC0_MASK 0x1 @@ -951,7 +1235,7 @@ struct timers_context { #define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29 #define TIMERS_CONTEXT_RESERVED0_MASK 0x3 #define TIMERS_CONTEXT_RESERVED0_SHIFT 30 - __le32 logical_client1; + __le32 logical_client_1; #define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0xFFFFFFF #define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0 #define TIMERS_CONTEXT_VALIDLC1_MASK 0x1 @@ -960,7 +1244,7 @@ struct timers_context { #define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29 #define TIMERS_CONTEXT_RESERVED1_MASK 0x3 #define TIMERS_CONTEXT_RESERVED1_SHIFT 30 - __le32 logical_client2; + __le32 logical_client_2; #define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0xFFFFFFF #define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0 #define TIMERS_CONTEXT_VALIDLC2_MASK 0x1 @@ -978,3 +1262,4 @@ struct timers_context { #define TIMERS_CONTEXT_RESERVED3_SHIFT 29 }; #endif /* __COMMON_HSI__ */ +#endif diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h index b5ebc697d05f..1aa0727c4136 100644 --- a/include/linux/qed/eth_common.h +++ b/include/linux/qed/eth_common.h @@ -13,9 +13,12 @@ /* ETH FW CONSTANTS */ /********************/ #define ETH_HSI_VER_MAJOR 3 -#define ETH_HSI_VER_MINOR 0 -#define ETH_CACHE_LINE_SIZE 64 +#define ETH_HSI_VER_MINOR 10 + +#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5 +#define ETH_CACHE_LINE_SIZE 64 +#define ETH_RX_CQE_GAP 32 #define ETH_MAX_RAMROD_PER_CON 8 #define ETH_TX_BD_PAGE_SIZE_BYTES 4096 #define 
ETH_RX_BD_PAGE_SIZE_BYTES 4096 @@ -24,15 +27,25 @@ #define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1 #define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18 +#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255 #define ETH_TX_MAX_LSO_HDR_NBD 4 #define ETH_TX_MIN_BDS_PER_LSO_PKT 3 #define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3 #define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2 #define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2 -#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 12 + 8)) +#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8)) #define ETH_TX_MAX_LSO_HDR_BYTES 510 +#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1) +#define ETH_TX_LSO_WINDOW_MIN_LEN 9700 +#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000 +#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320 +#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF #define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS +#define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE \ + (ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS / 2) +#define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \ + (ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4) /* Maximum number of buffers, used for RX packet placement */ #define ETH_RX_MAX_BUFF_PER_PKT 5 @@ -59,6 +72,8 @@ #define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6 #define ETH_TPA_CQE_END_LEN_LIST_SIZE 4 +/* Control frame check constants */ +#define ETH_CTL_FRAME_ETH_TYPE_NUM 4 struct eth_tx_1st_bd_flags { u8 bitfields; @@ -82,10 +97,10 @@ struct eth_tx_1st_bd_flags { /* The parsing information data fo rthe first tx bd of a given packet. */ struct eth_tx_data_1st_bd { - __le16 vlan; - u8 nbds; - struct eth_tx_1st_bd_flags bd_flags; - __le16 bitfields; + __le16 vlan; + u8 nbds; + struct eth_tx_1st_bd_flags bd_flags; + __le16 bitfields; #define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1 #define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0 #define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1 @@ -96,7 +111,7 @@ struct eth_tx_data_1st_bd { /* The parsing information data for the second tx bd of a given packet. */ struct eth_tx_data_2nd_bd { - __le16 tunn_ip_size; + __le16 tunn_ip_size; __le16 bitfields1; #define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF #define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0 @@ -125,9 +140,14 @@ struct eth_tx_data_2nd_bd { #define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13 }; +/* Firmware data for L2-EDPM packet. */ +struct eth_edpm_fw_data { + struct eth_tx_data_1st_bd data_1st_bd; + struct eth_tx_data_2nd_bd data_2nd_bd; + __le32 reserved; +}; + struct eth_fast_path_cqe_fw_debug { - u8 reserved0; - u8 reserved1; __le16 reserved2; }; @@ -148,6 +168,17 @@ struct eth_tunnel_parsing_flags { #define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7 }; +/* PMD flow control bits */ +struct eth_pmd_flow_flags { + u8 flags; +#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1 +#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0 +#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1 +#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1 +#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F +#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2 +}; + /* Regular ETH Rx FP CQE. 
*/ struct eth_fast_path_rx_reg_cqe { u8 type; @@ -166,64 +197,63 @@ struct eth_fast_path_rx_reg_cqe { u8 placement_offset; struct eth_tunnel_parsing_flags tunnel_pars_flags; u8 bd_num; - u8 reserved[7]; + u8 reserved[9]; struct eth_fast_path_cqe_fw_debug fw_debug; u8 reserved1[3]; - u8 flags; -#define ETH_FAST_PATH_RX_REG_CQE_VALID_MASK 0x1 -#define ETH_FAST_PATH_RX_REG_CQE_VALID_SHIFT 0 -#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_MASK 0x1 -#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_SHIFT 1 -#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_MASK 0x3F -#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_SHIFT 2 + struct eth_pmd_flow_flags pmd_flags; }; /* TPA-continue ETH Rx FP CQE. */ struct eth_fast_path_rx_tpa_cont_cqe { - u8 type; - u8 tpa_agg_index; - __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE]; - u8 reserved[5]; - u8 reserved1; - __le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE]; + u8 type; + u8 tpa_agg_index; + __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE]; + u8 reserved; + u8 reserved1; + __le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE]; + u8 reserved3[3]; + struct eth_pmd_flow_flags pmd_flags; }; /* TPA-end ETH Rx FP CQE. */ struct eth_fast_path_rx_tpa_end_cqe { - u8 type; - u8 tpa_agg_index; - __le16 total_packet_len; - u8 num_of_bds; - u8 end_reason; - __le16 num_of_coalesced_segs; - __le32 ts_delta; - __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE]; - u8 reserved1[3]; - u8 reserved2; - __le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE]; + u8 type; + u8 tpa_agg_index; + __le16 total_packet_len; + u8 num_of_bds; + u8 end_reason; + __le16 num_of_coalesced_segs; + __le32 ts_delta; + __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE]; + __le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE]; + __le16 reserved1; + u8 reserved2; + struct eth_pmd_flow_flags pmd_flags; }; /* TPA-start ETH Rx FP CQE. 
*/ struct eth_fast_path_rx_tpa_start_cqe { - u8 type; - u8 bitfields; + u8 type; + u8 bitfields; #define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7 #define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0 #define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF #define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3 #define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1 #define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7 - __le16 seg_len; + __le16 seg_len; struct parsing_and_err_flags pars_flags; - __le16 vlan_tag; - __le32 rss_hash; - __le16 len_on_first_bd; - u8 placement_offset; + __le16 vlan_tag; + __le32 rss_hash; + __le16 len_on_first_bd; + u8 placement_offset; struct eth_tunnel_parsing_flags tunnel_pars_flags; - u8 tpa_agg_index; - u8 header_len; - __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE]; + u8 tpa_agg_index; + u8 header_len; + __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE]; struct eth_fast_path_cqe_fw_debug fw_debug; + u8 reserved; + struct eth_pmd_flow_flags pmd_flags; }; /* The L4 pseudo checksum mode for Ethernet */ @@ -245,15 +275,7 @@ struct eth_slow_path_rx_cqe { u8 reserved[25]; __le16 echo; u8 reserved1; - u8 flags; -/* for PMD mode - valid indication */ -#define ETH_SLOW_PATH_RX_CQE_VALID_MASK 0x1 -#define ETH_SLOW_PATH_RX_CQE_VALID_SHIFT 0 -/* for PMD mode - valid toggle indication */ -#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_MASK 0x1 -#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_SHIFT 1 -#define ETH_SLOW_PATH_RX_CQE_RESERVED2_MASK 0x3F -#define ETH_SLOW_PATH_RX_CQE_RESERVED2_SHIFT 2 + struct eth_pmd_flow_flags pmd_flags; }; /* union for all ETH Rx CQE types */ @@ -276,6 +298,11 @@ enum eth_rx_cqe_type { MAX_ETH_RX_CQE_TYPE }; +struct eth_rx_pmd_cqe { + union eth_rx_cqe cqe; + u8 reserved[ETH_RX_CQE_GAP]; +}; + enum eth_rx_tunn_type { ETH_RX_NO_TUNN, ETH_RX_TUNN_GENEVE, @@ -313,8 +340,8 @@ struct eth_tx_2nd_bd { /* The parsing information data for the third tx bd of a given packet. 
*/ struct eth_tx_data_3rd_bd { - __le16 lso_mss; - __le16 bitfields; + __le16 lso_mss; + __le16 bitfields; #define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF #define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0 #define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF @@ -323,8 +350,8 @@ struct eth_tx_data_3rd_bd { #define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8 #define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F #define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9 - u8 tunn_l4_hdr_start_offset_w; - u8 tunn_hdr_size_w; + u8 tunn_l4_hdr_start_offset_w; + u8 tunn_hdr_size_w; }; /* The third tx bd of a given packet */ @@ -355,10 +382,10 @@ struct eth_tx_bd { }; union eth_tx_bd_types { - struct eth_tx_1st_bd first_bd; - struct eth_tx_2nd_bd second_bd; - struct eth_tx_3rd_bd third_bd; - struct eth_tx_bd reg_bd; + struct eth_tx_1st_bd first_bd; + struct eth_tx_2nd_bd second_bd; + struct eth_tx_3rd_bd third_bd; + struct eth_tx_bd reg_bd; }; /* Mstorm Queue Zone */ @@ -389,8 +416,8 @@ struct eth_db_data { #define ETH_DB_DATA_RESERVED_SHIFT 5 #define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3 #define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6 - u8 agg_flags; - __le16 bd_prod; + u8 agg_flags; + __le16 bd_prod; }; #endif /* __ETH_COMMON__ */ diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h index b3c0feb15ae9..8f64b1223c2f 100644 --- a/include/linux/qed/iscsi_common.h +++ b/include/linux/qed/iscsi_common.h @@ -311,7 +311,7 @@ struct iscsi_login_req_hdr { #define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0 #define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF #define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24 - __le32 isid_TABC; + __le32 isid_tabc; __le16 tsih; __le16 isid_d; __le32 itt; @@ -464,7 +464,7 @@ struct iscsi_login_response_hdr { #define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 #define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF #define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 - __le32 isid_TABC; + __le32 isid_tabc; __le16 tsih; __le16 isid_d; __le32 itt; @@ -688,8 +688,7 @@ union iscsi_cqe { enum iscsi_cqes_type { ISCSI_CQE_TYPE_SOLICITED = 1, ISCSI_CQE_TYPE_UNSOLICITED, - ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE - , + ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE, ISCSI_CQE_TYPE_TASK_CLEANUP, ISCSI_CQE_TYPE_DUMMY, MAX_ISCSI_CQES_TYPE @@ -769,9 +768,9 @@ enum iscsi_eqe_opcode { ISCSI_EVENT_TYPE_UPDATE_CONN, ISCSI_EVENT_TYPE_CLEAR_SQ, ISCSI_EVENT_TYPE_TERMINATE_CONN, + ISCSI_EVENT_TYPE_MAC_UPDATE_CONN, ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE, ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE, - RESERVED8, RESERVED9, ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10, ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD, @@ -867,6 +866,7 @@ enum iscsi_ramrod_cmd_id { ISCSI_RAMROD_CMD_ID_UPDATE_CONN = 4, ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5, ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6, + ISCSI_RAMROD_CMD_ID_MAC_UPDATE = 7, MAX_ISCSI_RAMROD_CMD_ID }; @@ -883,6 +883,16 @@ union iscsi_seq_num { __le16 r2t_sn; }; +struct iscsi_spe_conn_mac_update { + struct iscsi_slow_path_hdr hdr; + __le16 conn_id; + __le32 fw_cid; + __le16 remote_mac_addr_lo; + __le16 remote_mac_addr_mid; + __le16 remote_mac_addr_hi; + u8 reserved0[2]; +}; + struct iscsi_spe_conn_offload { struct iscsi_slow_path_hdr hdr; __le16 conn_id; @@ -1302,14 +1312,6 @@ struct mstorm_iscsi_stats_drv { struct regpair iscsi_rx_dropped_pdus_task_not_valid; }; -struct ooo_opaque { - __le32 cid; - u8 drop_isle; - u8 drop_size; - u8 ooo_opcode; - u8 ooo_isle; -}; - struct pstorm_iscsi_stats_drv { struct regpair iscsi_tx_bytes_cnt; struct regpair iscsi_tx_packet_cnt; diff --git a/include/linux/qed/qed_chain.h 
b/include/linux/qed/qed_chain.h index 7e441bdeabdc..72d88cf3ca25 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -16,19 +16,6 @@ #include <linux/slab.h> #include <linux/qed/common_hsi.h> -/* dma_addr_t manip */ -#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x)) -#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x)) -#define DMA_REGPAIR_LE(x, val) do { \ - (x).hi = DMA_HI_LE((val)); \ - (x).lo = DMA_LO_LE((val)); \ - } while (0) - -#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo)) -#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64) -#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo)) -#define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair)) - enum qed_chain_mode { /* Each Page contains a next pointer at its end */ QED_CHAIN_MODE_NEXT_PTR, diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index 4475a9d8ae15..33c24ebc9b7f 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -23,6 +23,9 @@ struct qed_dev_eth_info { u8 port_mac[ETH_ALEN]; u8 num_vlan_filters; + + /* Legacy VF - this affects the datapath, so qede has to know */ + bool is_legacy; }; struct qed_update_vport_rss_params { diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index d6c4177df7cb..d8dc5c2243d5 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -276,6 +276,21 @@ enum qed_protocol { QED_PROTOCOL_ISCSI, }; +enum qed_link_mode_bits { + QED_LM_FIBRE_BIT = BIT(0), + QED_LM_Autoneg_BIT = BIT(1), + QED_LM_Asym_Pause_BIT = BIT(2), + QED_LM_Pause_BIT = BIT(3), + QED_LM_1000baseT_Half_BIT = BIT(4), + QED_LM_1000baseT_Full_BIT = BIT(5), + QED_LM_10000baseKR_Full_BIT = BIT(6), + QED_LM_25000baseKR_Full_BIT = BIT(7), + QED_LM_40000baseLR4_Full_BIT = BIT(8), + QED_LM_50000baseKR2_Full_BIT = BIT(9), + QED_LM_100000baseKR4_Full_BIT = BIT(10), + QED_LM_COUNT = 11 +}; + struct qed_link_params { bool link_up; @@ -303,9 +318,11 @@ struct qed_link_params { struct qed_link_output { bool link_up; - u32 supported_caps; /* In SUPPORTED defs */ - u32 advertised_caps; /* In ADVERTISED defs */ - u32 lp_caps; /* In ADVERTISED defs */ + /* In QED_LM_* defs */ + u32 supported_caps; + u32 advertised_caps; + u32 lp_caps; + u32 speed; /* In Mb/s */ u8 duplex; /* In DUPLEX defs */ u8 port; /* In PORT defs */ diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h index accba0e6b704..dc3889d1bbe6 100644 --- a/include/linux/qed/tcp_common.h +++ b/include/linux/qed/tcp_common.h @@ -11,6 +11,14 @@ #define TCP_INVALID_TIMEOUT_VAL -1 +struct ooo_opaque { + __le32 cid; + u8 drop_isle; + u8 drop_size; + u8 ooo_opcode; + u8 ooo_isle; +}; + enum tcp_connect_mode { TCP_CONNECT_ACTIVE, TCP_CONNECT_PASSIVE, @@ -18,14 +26,10 @@ enum tcp_connect_mode { }; struct tcp_init_params { - __le32 max_cwnd; - __le16 dup_ack_threshold; + __le32 two_msl_timer; __le16 tx_sws_timer; - __le16 min_rto; - __le16 min_rto_rt; - __le16 max_rto; u8 maxfinrt; - u8 reserved[1]; + u8 reserved[9]; }; enum tcp_ip_version { diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 3eef0802a0cd..8b72ee710f95 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -173,7 +173,7 @@ struct rhashtable_walker { struct rhashtable_iter { struct rhashtable *ht; struct rhash_head *p; - struct rhashtable_walker *walker; + struct rhashtable_walker walker; unsigned int slot; unsigned int skip; }; @@ -346,8 +346,8 @@ struct bucket_table 
*rhashtable_insert_slow(struct rhashtable *ht, struct bucket_table *old_tbl); int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl); -int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter, - gfp_t gfp); +void rhashtable_walk_enter(struct rhashtable *ht, + struct rhashtable_iter *iter); void rhashtable_walk_exit(struct rhashtable_iter *iter); int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU); void *rhashtable_walk_next(struct rhashtable_iter *iter); @@ -906,4 +906,12 @@ static inline int rhashtable_replace_fast( return err; } +/* Obsolete function, do not use in new code. */ +static inline int rhashtable_walk_init(struct rhashtable *ht, + struct rhashtable_iter *iter, gfp_t gfp) +{ + rhashtable_walk_enter(ht, iter); + return 0; +} + #endif /* _LINUX_RHASHTABLE_H */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 0f665cb26b50..cfb7219be665 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -612,7 +612,6 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1, * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS * @napi_id: id of the NAPI struct this skb came from * @secmark: security marking - * @offload_fwd_mark: fwding offload mark * @mark: Generic packet mark * @vlan_proto: vlan encapsulation protocol * @vlan_tci: vlan tag control information @@ -730,7 +729,10 @@ struct sk_buff { __u8 ipvs_property:1; __u8 inner_protocol_type:1; __u8 remcsum_offload:1; - /* 3 or 5 bit hole */ +#ifdef CONFIG_NET_SWITCHDEV + __u8 offload_fwd_mark:1; +#endif + /* 2, 4 or 5 bit hole */ #ifdef CONFIG_NET_SCHED __u16 tc_index; /* traffic control index */ @@ -757,14 +759,9 @@ struct sk_buff { unsigned int sender_cpu; }; #endif - union { #ifdef CONFIG_NETWORK_SECMARK - __u32 secmark; -#endif -#ifdef CONFIG_NET_SWITCHDEV - __u32 offload_fwd_mark; + __u32 secmark; #endif - }; union { __u32 mark; @@ -2295,7 +2292,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) int ___pskb_trim(struct sk_buff *skb, unsigned int len); -static inline void __skb_trim(struct sk_buff *skb, unsigned int len) +static inline void __skb_set_length(struct sk_buff *skb, unsigned int len) { if (unlikely(skb_is_nonlinear(skb))) { WARN_ON(1); @@ -2305,6 +2302,11 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len) skb_set_tail_pointer(skb, len); } +static inline void __skb_trim(struct sk_buff *skb, unsigned int len) +{ + __skb_set_length(skb, len); +} + void skb_trim(struct sk_buff *skb, unsigned int len); static inline int __pskb_trim(struct sk_buff *skb, unsigned int len) @@ -2335,6 +2337,20 @@ static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len) BUG_ON(err); } +static inline int __skb_grow(struct sk_buff *skb, unsigned int len) +{ + unsigned int diff = len - skb->len; + + if (skb_tailroom(skb) < diff) { + int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb), + GFP_ATOMIC); + if (ret) + return ret; + } + __skb_set_length(skb, len); + return 0; +} + /** * skb_orphan - orphan a buffer * @skb: buffer to orphan @@ -2938,6 +2954,21 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) return __pskb_trim(skb, len); } +static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len) +{ + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->ip_summed = CHECKSUM_NONE; + __skb_trim(skb, len); + return 0; +} + +static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len) +{ + if (skb->ip_summed == 
CHECKSUM_COMPLETE) + skb->ip_summed = CHECKSUM_NONE; + return __skb_grow(skb, len); +} + #define skb_queue_walk(queue, skb) \ for (skb = (queue)->next; \ skb != (struct sk_buff *)(queue); \ @@ -3726,6 +3757,13 @@ static inline bool skb_is_gso_v6(const struct sk_buff *skb) return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; } +static inline void skb_gso_reset(struct sk_buff *skb) +{ + skb_shinfo(skb)->gso_size = 0; + skb_shinfo(skb)->gso_segs = 0; + skb_shinfo(skb)->gso_type = 0; +} + void __skb_warn_lro_forwarding(const struct sk_buff *skb); static inline bool skb_warn_if_lro(const struct sk_buff *skb) diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index a4f7203a9017..ecc3e07c6e63 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -25,6 +25,7 @@ #include <linux/rcupdate.h> #include <linux/wait.h> #include <linux/rbtree.h> +#include <linux/uidgid.h> #include <uapi/linux/sysctl.h> /* For the /proc/sys support */ @@ -159,6 +160,9 @@ struct ctl_table_root { struct ctl_table_set default_set; struct ctl_table_set *(*lookup)(struct ctl_table_root *root, struct nsproxy *namespaces); + void (*set_ownership)(struct ctl_table_header *head, + struct ctl_table *table, + kuid_t *uid, kgid_t *gid); int (*permissions)(struct ctl_table_header *head, struct ctl_table *table); }; diff --git a/include/net/devlink.h b/include/net/devlink.h index c99ffe8cef3c..211bd3c37028 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -50,7 +50,6 @@ struct devlink_sb_pool_info { }; struct devlink_ops { - size_t priv_size; int (*port_type_set)(struct devlink_port *devlink_port, enum devlink_port_type port_type); int (*port_split)(struct devlink *devlink, unsigned int port_index, diff --git a/include/net/dsa.h b/include/net/dsa.h index 2217a3f817f8..2ebeba44a461 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -165,9 +165,9 @@ struct dsa_switch { struct dsa_chip_data *cd; /* - * The used switch driver. + * The switch operations. */ - struct dsa_switch_driver *drv; + struct dsa_switch_ops *ops; /* * An array of which element [a] indicates which port on this @@ -236,17 +236,18 @@ struct switchdev_obj; struct switchdev_obj_port_fdb; struct switchdev_obj_port_vlan; -struct dsa_switch_driver { +struct dsa_switch_ops { struct list_head list; - enum dsa_tag_protocol tag_protocol; - /* * Probing and setup. 
*/ const char *(*probe)(struct device *dsa_dev, struct device *host_dev, int sw_addr, void **priv); + + enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds); + int (*setup)(struct dsa_switch *ds); int (*set_addr)(struct dsa_switch *ds, u8 *addr); u32 (*get_phy_flags)(struct dsa_switch *ds, int port); @@ -370,8 +371,8 @@ struct dsa_switch_driver { int (*cb)(struct switchdev_obj *obj)); }; -void register_switch_driver(struct dsa_switch_driver *type); -void unregister_switch_driver(struct dsa_switch_driver *type); +void register_switch_driver(struct dsa_switch_ops *type); +void unregister_switch_driver(struct dsa_switch_ops *type); struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev); static inline void *ds_to_priv(struct dsa_switch *ds) @@ -386,4 +387,18 @@ static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst) void dsa_unregister_switch(struct dsa_switch *ds); int dsa_register_switch(struct dsa_switch *ds, struct device_node *np); +#ifdef CONFIG_PM_SLEEP +int dsa_switch_suspend(struct dsa_switch *ds); +int dsa_switch_resume(struct dsa_switch *ds); +#else +static inline int dsa_switch_suspend(struct dsa_switch *ds) +{ + return 0; +} +static inline int dsa_switch_resume(struct dsa_switch *ds) +{ + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + #endif diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index d3d60dccd19f..f266b512c3bd 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -32,8 +32,13 @@ struct flow_dissector_key_basic { }; struct flow_dissector_key_tags { - u32 vlan_id:12, - flow_label:20; + u32 flow_label; +}; + +struct flow_dissector_key_vlan { + u16 vlan_id:12, + vlan_priority:3; + u16 padding; }; struct flow_dissector_key_keyid { @@ -119,7 +124,7 @@ enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */ FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */ FLOW_DISSECTOR_KEY_TIPC_ADDRS, /* struct flow_dissector_key_tipc_addrs */ - FLOW_DISSECTOR_KEY_VLANID, /* struct flow_dissector_key_flow_tags */ + FLOW_DISSECTOR_KEY_VLAN, /* struct flow_dissector_key_flow_vlan */ FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_flow_tags */ FLOW_DISSECTOR_KEY_GRE_KEYID, /* struct flow_dissector_key_keyid */ FLOW_DISSECTOR_KEY_MPLS_ENTROPY, /* struct flow_dissector_key_keyid */ @@ -148,6 +153,7 @@ struct flow_keys { #define FLOW_KEYS_HASH_START_FIELD basic struct flow_dissector_key_basic basic; struct flow_dissector_key_tags tags; + struct flow_dissector_key_vlan vlan; struct flow_dissector_key_keyid keyid; struct flow_dissector_key_ports ports; struct flow_dissector_key_addrs addrs; diff --git a/include/net/gre.h b/include/net/gre.h index 73ea256eb7d7..d25d836c129b 100644 --- a/include/net/gre.h +++ b/include/net/gre.h @@ -7,7 +7,15 @@ struct gre_base_hdr { __be16 flags; __be16 protocol; -}; +} __packed; + +struct gre_full_hdr { + struct gre_base_hdr fixed_header; + __be16 csum; + __be16 reserved1; + __be32 key; + __be32 seq; +} __packed; #define GRE_HEADER_SECTION 4 #define GREPROTO_CISCO 0 diff --git a/include/net/kcm.h b/include/net/kcm.h index 2840b5825dcc..2a8965819db0 100644 --- a/include/net/kcm.h +++ b/include/net/kcm.h @@ -13,6 +13,7 @@ #include <linux/skbuff.h> #include <net/sock.h> +#include <net/strparser.h> #include <uapi/linux/kcm.h> extern unsigned int kcm_net_id; @@ -21,16 +22,8 @@ extern unsigned int kcm_net_id; #define KCM_STATS_INCR(stat) ((stat)++) struct kcm_psock_stats { - unsigned long long rx_msgs; - unsigned long 
long rx_bytes; unsigned long long tx_msgs; unsigned long long tx_bytes; - unsigned int rx_aborts; - unsigned int rx_mem_fail; - unsigned int rx_need_more_hdr; - unsigned int rx_msg_too_big; - unsigned int rx_msg_timeouts; - unsigned int rx_bad_hdr_len; unsigned long long reserved; unsigned long long unreserved; unsigned int tx_aborts; @@ -64,13 +57,6 @@ struct kcm_tx_msg { struct sk_buff *last_skb; }; -struct kcm_rx_msg { - int full_len; - int accum_len; - int offset; - int early_eaten; -}; - /* Socket structure for KCM client sockets */ struct kcm_sock { struct sock sk; @@ -87,6 +73,7 @@ struct kcm_sock { struct work_struct tx_work; struct list_head wait_psock_list; struct sk_buff *seq_skb; + u32 tx_stopped : 1; /* Don't use bit fields here, these are set under different locks */ bool tx_wait; @@ -104,11 +91,11 @@ struct bpf_prog; /* Structure for an attached lower socket */ struct kcm_psock { struct sock *sk; + struct strparser strp; struct kcm_mux *mux; int index; u32 tx_stopped : 1; - u32 rx_stopped : 1; u32 done : 1; u32 unattaching : 1; @@ -121,18 +108,12 @@ struct kcm_psock { struct kcm_psock_stats stats; /* Receive */ - struct sk_buff *rx_skb_head; - struct sk_buff **rx_skb_nextp; - struct sk_buff *ready_rx_msg; struct list_head psock_ready_list; - struct work_struct rx_work; - struct delayed_work rx_delayed_work; struct bpf_prog *bpf_prog; struct kcm_sock *rx_kcm; unsigned long long saved_rx_bytes; unsigned long long saved_rx_msgs; - struct timer_list rx_msg_timer; - unsigned int rx_need_bytes; + struct sk_buff *ready_rx_msg; /* Transmit */ struct kcm_sock *tx_kcm; @@ -146,6 +127,7 @@ struct kcm_net { struct mutex mutex; struct kcm_psock_stats aggregate_psock_stats; struct kcm_mux_stats aggregate_mux_stats; + struct strp_aggr_stats aggregate_strp_stats; struct list_head mux_list; int count; }; @@ -163,6 +145,7 @@ struct kcm_mux { struct kcm_mux_stats stats; struct kcm_psock_stats aggregate_psock_stats; + struct strp_aggr_stats aggregate_strp_stats; /* Receive */ spinlock_t rx_lock ____cacheline_aligned_in_smp; @@ -190,14 +173,6 @@ static inline void aggregate_psock_stats(struct kcm_psock_stats *stats, /* Save psock statistics in the mux when psock is being unattached. 
*/ #define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += stats->_stat) - SAVE_PSOCK_STATS(rx_msgs); - SAVE_PSOCK_STATS(rx_bytes); - SAVE_PSOCK_STATS(rx_aborts); - SAVE_PSOCK_STATS(rx_mem_fail); - SAVE_PSOCK_STATS(rx_need_more_hdr); - SAVE_PSOCK_STATS(rx_msg_too_big); - SAVE_PSOCK_STATS(rx_msg_timeouts); - SAVE_PSOCK_STATS(rx_bad_hdr_len); SAVE_PSOCK_STATS(tx_msgs); SAVE_PSOCK_STATS(tx_bytes); SAVE_PSOCK_STATS(reserved); diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index d061ffeb1e71..7adf4386ac8f 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -40,7 +40,6 @@ struct netns_ipv4 { #ifdef CONFIG_IP_MULTIPLE_TABLES struct fib_rules_ops *rules_ops; bool fib_has_custom_rules; - struct fib_table __rcu *fib_local; struct fib_table __rcu *fib_main; struct fib_table __rcu *fib_default; #endif diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index c99508d426cc..a459be5fe1c2 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -69,17 +69,19 @@ struct tcf_exts { int police; }; -static inline void tcf_exts_init(struct tcf_exts *exts, int action, int police) +static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police) { #ifdef CONFIG_NET_CLS_ACT exts->type = 0; exts->nr_actions = 0; exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *), GFP_KERNEL); - WARN_ON(!exts->actions); /* TODO: propagate the error to callers */ + if (!exts->actions) + return -ENOMEM; #endif exts->action = action; exts->police = police; + return 0; } /** diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 7caa99b482c6..cd334c9584e9 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -90,8 +90,8 @@ int unregister_qdisc(struct Qdisc_ops *qops); void qdisc_get_default(char *id, size_t len); int qdisc_set_default(const char *id); -void qdisc_list_add(struct Qdisc *q); -void qdisc_list_del(struct Qdisc *q); +void qdisc_hash_add(struct Qdisc *q); +void qdisc_hash_del(struct Qdisc *q); struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle); struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle); struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, diff --git a/include/net/pptp.h b/include/net/pptp.h new file mode 100644 index 000000000000..92e9f1fe2628 --- /dev/null +++ b/include/net/pptp.h @@ -0,0 +1,23 @@ +#ifndef _NET_PPTP_H +#define _NET_PPTP_H + +#define PPP_LCP_ECHOREQ 0x09 +#define PPP_LCP_ECHOREP 0x0A +#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP) + +#define MISSING_WINDOW 20 +#define WRAPPED(curseq, lastseq)\ + ((((curseq) & 0xffffff00) == 0) &&\ + (((lastseq) & 0xffffff00) == 0xffffff00)) + +#define PPTP_HEADER_OVERHEAD (2+sizeof(struct pptp_gre_header)) +struct pptp_gre_header { + struct gre_base_hdr gre_hd; + __be16 payload_len; + __be16 call_id; + __be32 seq; + __be32 ack; +} __packed; + + +#endif diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 909aff2db2b3..52a2015667b4 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -61,7 +61,7 @@ struct Qdisc { u32 limit; const struct Qdisc_ops *ops; struct qdisc_size_table __rcu *stab; - struct list_head list; + struct hlist_node hash; u32 handle; u32 parent; void *u32_node; @@ -592,7 +592,7 @@ static inline void qdisc_qstats_drop(struct Qdisc *sch) static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch) { - qstats_drop_inc(this_cpu_ptr(sch->cpu_qstats)); + this_cpu_inc(sch->cpu_qstats->drops); } static inline void qdisc_qstats_overlimit(struct 
Qdisc *sch) diff --git a/include/net/sock.h b/include/net/sock.h index ff5be7e8ddea..c797c57f4d9f 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1020,7 +1020,6 @@ struct proto { void (*unhash)(struct sock *sk); void (*rehash)(struct sock *sk); int (*get_port)(struct sock *sk, unsigned short snum); - void (*clear_sk)(struct sock *sk, int size); /* Keeping track of sockets in use */ #ifdef CONFIG_PROC_FS @@ -1114,6 +1113,16 @@ static inline bool sk_stream_is_writeable(const struct sock *sk) sk_stream_memory_free(sk); } +static inline int sk_under_cgroup_hierarchy(struct sock *sk, + struct cgroup *ancestor) +{ +#ifdef CONFIG_SOCK_CGROUP_DATA + return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data), + ancestor); +#else + return -ENOTSUPP; +#endif +} static inline bool sk_has_memory_pressure(const struct sock *sk) { @@ -1232,8 +1241,6 @@ static inline int __sk_prot_rehash(struct sock *sk) return sk->sk_prot->hash(sk); } -void sk_prot_clear_portaddr_nulls(struct sock *sk, int size); - /* About 10 seconds */ #define SOCK_DESTROY_TIME (10*HZ) diff --git a/include/net/strparser.h b/include/net/strparser.h new file mode 100644 index 000000000000..0c28ad97c52f --- /dev/null +++ b/include/net/strparser.h @@ -0,0 +1,142 @@ +/* + * Stream Parser + * + * Copyright (c) 2016 Tom Herbert <tom@herbertland.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + */ + +#ifndef __NET_STRPARSER_H_ +#define __NET_STRPARSER_H_ + +#include <linux/skbuff.h> +#include <net/sock.h> + +#define STRP_STATS_ADD(stat, count) ((stat) += (count)) +#define STRP_STATS_INCR(stat) ((stat)++) + +struct strp_stats { + unsigned long long rx_msgs; + unsigned long long rx_bytes; + unsigned int rx_mem_fail; + unsigned int rx_need_more_hdr; + unsigned int rx_msg_too_big; + unsigned int rx_msg_timeouts; + unsigned int rx_bad_hdr_len; +}; + +struct strp_aggr_stats { + unsigned long long rx_msgs; + unsigned long long rx_bytes; + unsigned int rx_mem_fail; + unsigned int rx_need_more_hdr; + unsigned int rx_msg_too_big; + unsigned int rx_msg_timeouts; + unsigned int rx_bad_hdr_len; + unsigned int rx_aborts; + unsigned int rx_interrupted; + unsigned int rx_unrecov_intr; +}; + +struct strparser; + +/* Callbacks are called with lock held for the attached socket */ +struct strp_callbacks { + int (*parse_msg)(struct strparser *strp, struct sk_buff *skb); + void (*rcv_msg)(struct strparser *strp, struct sk_buff *skb); + int (*read_sock_done)(struct strparser *strp, int err); + void (*abort_parser)(struct strparser *strp, int err); +}; + +struct strp_rx_msg { + int full_len; + int offset; +}; + +static inline struct strp_rx_msg *strp_rx_msg(struct sk_buff *skb) +{ + return (struct strp_rx_msg *)((void *)skb->cb + + offsetof(struct qdisc_skb_cb, data)); +} + +/* Structure for an attached lower socket */ +struct strparser { + struct sock *sk; + + u32 rx_stopped : 1; + u32 rx_paused : 1; + u32 rx_aborted : 1; + u32 rx_interrupted : 1; + u32 rx_unrecov_intr : 1; + + struct sk_buff **rx_skb_nextp; + struct timer_list rx_msg_timer; + struct sk_buff *rx_skb_head; + unsigned int rx_need_bytes; + struct delayed_work rx_delayed_work; + struct work_struct rx_work; + struct strp_stats stats; + struct strp_callbacks cb; +}; + +/* Must be called with lock held for attached socket */ +static inline void strp_pause(struct strparser *strp) +{ + strp->rx_paused = 1; +} + +/* May be called without 
holding lock for attached socket */ +void strp_unpause(struct strparser *strp); + +static inline void save_strp_stats(struct strparser *strp, + struct strp_aggr_stats *agg_stats) +{ + /* Save psock statistics in the mux when psock is being unattached. */ + +#define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += \ + strp->stats._stat) + SAVE_PSOCK_STATS(rx_msgs); + SAVE_PSOCK_STATS(rx_bytes); + SAVE_PSOCK_STATS(rx_mem_fail); + SAVE_PSOCK_STATS(rx_need_more_hdr); + SAVE_PSOCK_STATS(rx_msg_too_big); + SAVE_PSOCK_STATS(rx_msg_timeouts); + SAVE_PSOCK_STATS(rx_bad_hdr_len); +#undef SAVE_PSOCK_STATS + + if (strp->rx_aborted) + agg_stats->rx_aborts++; + if (strp->rx_interrupted) + agg_stats->rx_interrupted++; + if (strp->rx_unrecov_intr) + agg_stats->rx_unrecov_intr++; +} + +static inline void aggregate_strp_stats(struct strp_aggr_stats *stats, + struct strp_aggr_stats *agg_stats) +{ +#define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += stats->_stat) + SAVE_PSOCK_STATS(rx_msgs); + SAVE_PSOCK_STATS(rx_bytes); + SAVE_PSOCK_STATS(rx_mem_fail); + SAVE_PSOCK_STATS(rx_need_more_hdr); + SAVE_PSOCK_STATS(rx_msg_too_big); + SAVE_PSOCK_STATS(rx_msg_timeouts); + SAVE_PSOCK_STATS(rx_bad_hdr_len); + SAVE_PSOCK_STATS(rx_aborts); + SAVE_PSOCK_STATS(rx_interrupted); + SAVE_PSOCK_STATS(rx_unrecov_intr); +#undef SAVE_PSOCK_STATS + +} + +void strp_done(struct strparser *strp); +void strp_stop(struct strparser *strp); +void strp_check_rcv(struct strparser *strp); +int strp_init(struct strparser *strp, struct sock *csk, + struct strp_callbacks *cb); +void strp_data_ready(struct strparser *strp); + +#endif /* __NET_STRPARSER_H_ */ diff --git a/include/net/switchdev.h b/include/net/switchdev.h index 62f6a967a1b7..82f5e0462021 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -347,12 +347,6 @@ static inline int switchdev_port_fdb_dump(struct sk_buff *skb, return idx; } -static inline void switchdev_port_fwd_mark_set(struct net_device *dev, - struct net_device *group_dev, - bool joining) -{ -} - static inline bool switchdev_port_same_parent_id(struct net_device *a, struct net_device *b) { diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h index e29f52e8bdf1..6b835889ea30 100644 --- a/include/net/tc_act/tc_vlan.h +++ b/include/net/tc_act/tc_vlan.h @@ -20,6 +20,7 @@ struct tcf_vlan { int tcfv_action; u16 tcfv_push_vid; __be16 tcfv_push_proto; + u8 tcfv_push_prio; }; #define to_vlan(a) ((struct tcf_vlan *)a) diff --git a/include/net/tcp.h b/include/net/tcp.h index 7717302cab91..d6ae36512429 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -227,10 +227,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); #define TFO_SERVER_COOKIE_NOT_REQD 0x200 /* Force enable TFO on all listeners, i.e., not requiring the - * TCP_FASTOPEN socket option. SOCKOPT1/2 determine how to set max_qlen. + * TCP_FASTOPEN socket option. 
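The stream parser added above (net/strparser.h) is driven entirely through strp_init() and the strp_callbacks table; the KCM rework elsewhere in this series is its first in-tree user. Below is a minimal, hedged sketch of a consumer that frames messages with a 2-byte big-endian length prefix — the my_* names are placeholders, and the parse_msg return convention (full message length, 0 for "need more data", negative to abort) is assumed from that KCM conversion rather than spelled out in this header.

#include <linux/skbuff.h>
#include <net/strparser.h>

/* Hypothetical framing: each message begins with a 2-byte big-endian
 * length field covering the payload that follows it.
 */
static int my_parse_msg(struct strparser *strp, struct sk_buff *skb)
{
	struct strp_rx_msg *rxm = strp_rx_msg(skb);
	__be16 hdr;

	/* Length field not fully received yet: ask for more data. */
	if (skb_copy_bits(skb, rxm->offset, &hdr, sizeof(hdr)) < 0)
		return 0;

	return sizeof(hdr) + be16_to_cpu(hdr);
}

static void my_rcv_msg(struct strparser *strp, struct sk_buff *skb)
{
	/* skb now carries exactly one complete message. */
	kfree_skb(skb);
}

static struct strp_callbacks my_strp_cb = {
	.parse_msg = my_parse_msg,
	.rcv_msg   = my_rcv_msg,
};

/* csk is the already-connected lower socket being attached. */
static int my_attach(struct strparser *strp, struct sock *csk)
{
	int err = strp_init(strp, csk, &my_strp_cb);

	if (!err)
		strp_check_rcv(strp);	/* parse anything already queued */
	return err;
}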
*/ #define TFO_SERVER_WO_SOCKOPT1 0x400 -#define TFO_SERVER_WO_SOCKOPT2 0x800 extern struct inet_timewait_death_row tcp_death_row; @@ -604,8 +603,6 @@ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize) void tcp_get_info(struct sock *, struct tcp_info *); /* Read 'sendfile()'-style from a TCP socket */ -typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *, - unsigned int, size_t); int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, sk_read_actor_t recv_actor); @@ -1164,6 +1161,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp) } bool tcp_prequeue(struct sock *sk, struct sk_buff *skb); +bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb); #undef STATE_TRACE @@ -1853,6 +1851,8 @@ static inline int tcp_inq(struct sock *sk) return answ; } +int tcp_peek_len(struct socket *sock); + static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb) { u16 segs_in; diff --git a/include/net/udp.h b/include/net/udp.h index 8894d7144189..ea53a87d880f 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -251,6 +251,7 @@ int udp_get_port(struct sock *sk, unsigned short snum, int (*saddr_cmp)(const struct sock *, const struct sock *)); void udp_err(struct sk_buff *, u32); +int udp_abort(struct sock *sk, int err); int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); int udp_push_pending_frames(struct sock *sk); void udp_flush_pending_frames(struct sock *sk); diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h new file mode 100644 index 000000000000..15283ee3e41a --- /dev/null +++ b/include/trace/events/rxrpc.h @@ -0,0 +1,56 @@ +/* AF_RXRPC tracepoints + * + * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM rxrpc + +#if !defined(_TRACE_RXRPC_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_RXRPC_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(rxrpc_skb, + TP_PROTO(struct sk_buff *skb, int op, int usage, int mod_count, + const void *where), + + TP_ARGS(skb, op, usage, mod_count, where), + + TP_STRUCT__entry( + __field(struct sk_buff *, skb ) + __field(int, op ) + __field(int, usage ) + __field(int, mod_count ) + __field(const void *, where ) + ), + + TP_fast_assign( + __entry->skb = skb; + __entry->op = op; + __entry->usage = usage; + __entry->mod_count = mod_count; + __entry->where = where; + ), + + TP_printk("s=%p %s u=%d m=%d p=%pSR", + __entry->skb, + (__entry->op == 0 ? "NEW" : + __entry->op == 1 ? "SEE" : + __entry->op == 2 ? "GET" : + __entry->op == 3 ? 
"FRE" : + "PUR"), + __entry->usage, + __entry->mod_count, + __entry->where) + ); + +#endif /* _TRACE_RXRPC_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h index 0fbf6fd4711b..734fe83ab645 100644 --- a/include/uapi/linux/batman_adv.h +++ b/include/uapi/linux/batman_adv.h @@ -23,6 +23,42 @@ #define BATADV_NL_MCAST_GROUP_TPMETER "tpmeter" /** + * enum batadv_tt_client_flags - TT client specific flags + * @BATADV_TT_CLIENT_DEL: the client has to be deleted from the table + * @BATADV_TT_CLIENT_ROAM: the client roamed to/from another node and the new + * update telling its new real location has not been received/sent yet + * @BATADV_TT_CLIENT_WIFI: this client is connected through a wifi interface. + * This information is used by the "AP Isolation" feature + * @BATADV_TT_CLIENT_ISOLA: this client is considered "isolated". This + * information is used by the Extended Isolation feature + * @BATADV_TT_CLIENT_NOPURGE: this client should never be removed from the table + * @BATADV_TT_CLIENT_NEW: this client has been added to the local table but has + * not been announced yet + * @BATADV_TT_CLIENT_PENDING: this client is marked for removal but it is kept + * in the table for one more originator interval for consistency purposes + * @BATADV_TT_CLIENT_TEMP: this global client has been detected to be part of + * the network but no nnode has already announced it + * + * Bits from 0 to 7 are called _remote flags_ because they are sent on the wire. + * Bits from 8 to 15 are called _local flags_ because they are used for local + * computations only. + * + * Bits from 4 to 7 - a subset of remote flags - are ensured to be in sync with + * the other nodes in the network. To achieve this goal these flags are included + * in the TT CRC computation. 
+ */ +enum batadv_tt_client_flags { + BATADV_TT_CLIENT_DEL = (1 << 0), + BATADV_TT_CLIENT_ROAM = (1 << 1), + BATADV_TT_CLIENT_WIFI = (1 << 4), + BATADV_TT_CLIENT_ISOLA = (1 << 5), + BATADV_TT_CLIENT_NOPURGE = (1 << 8), + BATADV_TT_CLIENT_NEW = (1 << 9), + BATADV_TT_CLIENT_PENDING = (1 << 10), + BATADV_TT_CLIENT_TEMP = (1 << 11), +}; + +/** * enum batadv_nl_attrs - batman-adv netlink attributes * * @BATADV_ATTR_UNSPEC: unspecified attribute to catch errors @@ -40,6 +76,26 @@ * @BATADV_ATTR_TPMETER_BYTES: amount of acked bytes during run * @BATADV_ATTR_TPMETER_COOKIE: session cookie to match tp_meter session * @BATADV_ATTR_PAD: attribute used for padding for 64-bit alignment + * @BATADV_ATTR_ACTIVE: Flag indicating if the hard interface is active + * @BATADV_ATTR_TT_ADDRESS: Client MAC address + * @BATADV_ATTR_TT_TTVN: Translation table version + * @BATADV_ATTR_TT_LAST_TTVN: Previous translation table version + * @BATADV_ATTR_TT_CRC32: CRC32 over translation table + * @BATADV_ATTR_TT_VID: VLAN ID + * @BATADV_ATTR_TT_FLAGS: Translation table client flags + * @BATADV_ATTR_FLAG_BEST: Flags indicating entry is the best + * @BATADV_ATTR_LAST_SEEN_MSECS: Time in milliseconds since last seen + * @BATADV_ATTR_NEIGH_ADDRESS: Neighbour MAC address + * @BATADV_ATTR_TQ: TQ to neighbour + * @BATADV_ATTR_THROUGHPUT: Estimated throughput to Neighbour + * @BATADV_ATTR_BANDWIDTH_UP: Reported uplink bandwidth + * @BATADV_ATTR_BANDWIDTH_DOWN: Reported downlink bandwidth + * @BATADV_ATTR_ROUTER: Gateway router MAC address + * @BATADV_ATTR_BLA_OWN: Flag indicating own originator + * @BATADV_ATTR_BLA_ADDRESS: Bridge loop avoidance claim MAC address + * @BATADV_ATTR_BLA_VID: BLA VLAN ID + * @BATADV_ATTR_BLA_BACKBONE: BLA gateway originator MAC address + * @BATADV_ATTR_BLA_CRC: BLA CRC * @__BATADV_ATTR_AFTER_LAST: internal use * @NUM_BATADV_ATTR: total number of batadv_nl_attrs available * @BATADV_ATTR_MAX: highest attribute number currently defined @@ -60,6 +116,26 @@ enum batadv_nl_attrs { BATADV_ATTR_TPMETER_BYTES, BATADV_ATTR_TPMETER_COOKIE, BATADV_ATTR_PAD, + BATADV_ATTR_ACTIVE, + BATADV_ATTR_TT_ADDRESS, + BATADV_ATTR_TT_TTVN, + BATADV_ATTR_TT_LAST_TTVN, + BATADV_ATTR_TT_CRC32, + BATADV_ATTR_TT_VID, + BATADV_ATTR_TT_FLAGS, + BATADV_ATTR_FLAG_BEST, + BATADV_ATTR_LAST_SEEN_MSECS, + BATADV_ATTR_NEIGH_ADDRESS, + BATADV_ATTR_TQ, + BATADV_ATTR_THROUGHPUT, + BATADV_ATTR_BANDWIDTH_UP, + BATADV_ATTR_BANDWIDTH_DOWN, + BATADV_ATTR_ROUTER, + BATADV_ATTR_BLA_OWN, + BATADV_ATTR_BLA_ADDRESS, + BATADV_ATTR_BLA_VID, + BATADV_ATTR_BLA_BACKBONE, + BATADV_ATTR_BLA_CRC, /* add attributes above here, update the policy in netlink.c */ __BATADV_ATTR_AFTER_LAST, NUM_BATADV_ATTR = __BATADV_ATTR_AFTER_LAST, @@ -73,6 +149,15 @@ enum batadv_nl_attrs { * @BATADV_CMD_GET_MESH_INFO: Query basic information about batman-adv device * @BATADV_CMD_TP_METER: Start a tp meter session * @BATADV_CMD_TP_METER_CANCEL: Cancel a tp meter session + * @BATADV_CMD_GET_ROUTING_ALGOS: Query the list of routing algorithms. 
+ * @BATADV_CMD_GET_HARDIFS: Query list of hard interfaces + * @BATADV_CMD_GET_TRANSTABLE_LOCAL: Query list of local translations + * @BATADV_CMD_GET_TRANSTABLE_GLOBAL Query list of global translations + * @BATADV_CMD_GET_ORIGINATORS: Query list of originators + * @BATADV_CMD_GET_NEIGHBORS: Query list of neighbours + * @BATADV_CMD_GET_GATEWAYS: Query list of gateways + * @BATADV_CMD_GET_BLA_CLAIM: Query list of bridge loop avoidance claims + * @BATADV_CMD_GET_BLA_BACKBONE: Query list of bridge loop avoidance backbones * @__BATADV_CMD_AFTER_LAST: internal use * @BATADV_CMD_MAX: highest used command number */ @@ -81,6 +166,15 @@ enum batadv_nl_commands { BATADV_CMD_GET_MESH_INFO, BATADV_CMD_TP_METER, BATADV_CMD_TP_METER_CANCEL, + BATADV_CMD_GET_ROUTING_ALGOS, + BATADV_CMD_GET_HARDIFS, + BATADV_CMD_GET_TRANSTABLE_LOCAL, + BATADV_CMD_GET_TRANSTABLE_GLOBAL, + BATADV_CMD_GET_ORIGINATORS, + BATADV_CMD_GET_NEIGHBORS, + BATADV_CMD_GET_GATEWAYS, + BATADV_CMD_GET_BLA_CLAIM, + BATADV_CMD_GET_BLA_BACKBONE, /* add new commands above here */ __BATADV_CMD_AFTER_LAST, BATADV_CMD_MAX = __BATADV_CMD_AFTER_LAST - 1 diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 9e5fc168c8a3..e4c5a1baa993 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -375,6 +375,28 @@ enum bpf_func_id { */ BPF_FUNC_probe_write_user, + /** + * bpf_current_task_under_cgroup(map, index) - Check cgroup2 membership of current task + * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type + * @index: index of the cgroup in the bpf_map + * Return: + * == 0 current failed the cgroup2 descendant test + * == 1 current succeeded the cgroup2 descendant test + * < 0 error + */ + BPF_FUNC_current_task_under_cgroup, + + /** + * bpf_skb_change_tail(skb, len, flags) + * The helper will resize the skb to the given new size, + * to be used f.e. with control messages. + * @skb: pointer to skb + * @len: new skb length + * @flags: reserved + * Return: 0 on success or negative error + */ + BPF_FUNC_skb_change_tail, + __BPF_FUNC_MAX_ID, }; diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index b8f38e84d93a..099a4200732c 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -1362,7 +1362,14 @@ enum ethtool_link_mode_bit_indices { ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38, ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39, - ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT = 40, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT = 40, + ETHTOOL_LINK_MODE_1000baseX_Full_BIT = 41, + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT = 42, + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT = 43, + ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44, + ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45, + ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46, + /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit * 31. 
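The bpf.h helper documentation above is terse, so a small program helps. The sketch below, in the samples/bpf style, gates a kprobe program on cgroup2 membership via bpf_current_task_under_cgroup(); the map name, section names and the helper stub pulled in through bpf_helpers.h are assumptions for illustration, not something defined by this series' text.

#include <uapi/linux/bpf.h>
#include <uapi/linux/ptrace.h>
#include "bpf_helpers.h"	/* samples/bpf helper stubs, assumed to declare
				 * bpf_current_task_under_cgroup() */

struct bpf_map_def SEC("maps") my_cgrp_map = {
	.type        = BPF_MAP_TYPE_CGROUP_ARRAY,
	.key_size    = sizeof(__u32),
	.value_size  = sizeof(__u32),
	.max_entries = 1,
};

SEC("kprobe/sys_write")
int my_trace_write(struct pt_regs *ctx)
{
	/* Returns 1 if current is a descendant of the cgroup stored at
	 * index 0, 0 if it is not, and a negative error otherwise
	 * (empty slot, interrupt context, index out of range).
	 */
	if (bpf_current_task_under_cgroup(&my_cgrp_map, 0) != 1)
		return 0;

	/* ... current task is inside the target cgroup2; do the work ... */
	return 0;
}

char _license[] SEC("license") = "GPL";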
Please do NOT define any SUPPORTED_* or ADVERTISED_* @@ -1371,7 +1378,7 @@ enum ethtool_link_mode_bit_indices { */ __ETHTOOL_LINK_MODE_LAST - = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, + = ETHTOOL_LINK_MODE_10000baseER_Full_BIT, }; #define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \ diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index c186f64fffca..ab92bca6d448 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -140,7 +140,7 @@ struct bridge_vlan_xstats { __u64 tx_bytes; __u64 tx_packets; __u16 vid; - __u16 pad1; + __u16 flags; __u32 pad2; }; diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h index 777b6cdb1b7b..9865c8caedde 100644 --- a/include/uapi/linux/if_tunnel.h +++ b/include/uapi/linux/if_tunnel.h @@ -27,9 +27,22 @@ #define GRE_SEQ __cpu_to_be16(0x1000) #define GRE_STRICT __cpu_to_be16(0x0800) #define GRE_REC __cpu_to_be16(0x0700) -#define GRE_FLAGS __cpu_to_be16(0x00F8) +#define GRE_ACK __cpu_to_be16(0x0080) +#define GRE_FLAGS __cpu_to_be16(0x0078) #define GRE_VERSION __cpu_to_be16(0x0007) +#define GRE_IS_CSUM(f) ((f) & GRE_CSUM) +#define GRE_IS_ROUTING(f) ((f) & GRE_ROUTING) +#define GRE_IS_KEY(f) ((f) & GRE_KEY) +#define GRE_IS_SEQ(f) ((f) & GRE_SEQ) +#define GRE_IS_STRICT(f) ((f) & GRE_STRICT) +#define GRE_IS_REC(f) ((f) & GRE_REC) +#define GRE_IS_ACK(f) ((f) & GRE_ACK) + +#define GRE_VERSION_1 __cpu_to_be16(0x0001) +#define GRE_PROTO_PPP __cpu_to_be16(0x880b) +#define GRE_PPTP_KEY_MASK __cpu_to_be32(0xffff) + struct ip_tunnel_parm { char name[IFNAMSIZ]; int link; diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h index abbd1dc5d683..5581206a08ae 100644 --- a/include/uapi/linux/inet_diag.h +++ b/include/uapi/linux/inet_diag.h @@ -73,6 +73,7 @@ enum { INET_DIAG_BC_S_COND, INET_DIAG_BC_D_COND, INET_DIAG_BC_DEV_COND, /* u32 ifindex */ + INET_DIAG_BC_MARK_COND, }; struct inet_diag_hostcond { @@ -82,6 +83,11 @@ struct inet_diag_hostcond { __be32 addr[0]; }; +struct inet_diag_markcond { + __u32 mark; + __u32 mask; +}; + /* Base info structure. It contains socket identity (addrs/ports/cookie) * and, alas, the information shown by netstat. */ struct inet_diag_msg { diff --git a/include/uapi/linux/mii.h b/include/uapi/linux/mii.h index 237fac4bc17b..15d8510cdae0 100644 --- a/include/uapi/linux/mii.h +++ b/include/uapi/linux/mii.h @@ -48,6 +48,7 @@ #define BMCR_SPEED100 0x2000 /* Select 100Mbps */ #define BMCR_LOOPBACK 0x4000 /* TXD loopback bits */ #define BMCR_RESET 0x8000 /* Reset to default state */ +#define BMCR_SPEED10 0x0000 /* Select 10Mbps */ /* Basic mode status register. 
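One practical use of the GRE flag helpers and version constants added to if_tunnel.h above — combined with the gre_full_hdr/pptp_gre_header layouts introduced earlier in this diff — is sizing a GRE header from its flag word. A hedged sketch follows; the helper and its error handling are illustrative only.

#include <linux/errno.h>
#include <linux/if_tunnel.h>
#include <net/gre.h>
#include <net/pptp.h>

/* Return the on-wire GRE header length implied by the flags, or a
 * negative error for variants this sketch does not handle.
 */
static int my_gre_hdr_len(const struct gre_base_hdr *greh)
{
	int len = sizeof(*greh);

	if ((greh->flags & GRE_VERSION) == GRE_VERSION_1) {
		/* PPTP-style GRE: key (payload_len + call_id) is always
		 * present, seq and ack only when their bits are set.
		 */
		if (greh->protocol != GRE_PROTO_PPP)
			return -EINVAL;
		len = sizeof(struct pptp_gre_header);
		if (!GRE_IS_SEQ(greh->flags))
			len -= sizeof(__be32);
		if (!GRE_IS_ACK(greh->flags))
			len -= sizeof(__be32);
		return len;
	}

	/* Classic (version 0) GRE: source routing not handled here. */
	if (GRE_IS_ROUTING(greh->flags))
		return -EINVAL;
	if (GRE_IS_CSUM(greh->flags))
		len += 4;	/* checksum + reserved1 */
	if (GRE_IS_KEY(greh->flags))
		len += 4;
	if (GRE_IS_SEQ(greh->flags))
		len += 4;
	return len;
}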
*/ #define BMSR_ERCAP 0x0001 /* Ext-reg capability */ diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index d1c1ccaba787..51b5b247fb5a 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -428,6 +428,9 @@ enum { TCA_FLOWER_KEY_UDP_DST, /* be16 */ TCA_FLOWER_FLAGS, + TCA_FLOWER_KEY_VLAN_ID, + TCA_FLOWER_KEY_VLAN_PRIO, + TCA_FLOWER_KEY_VLAN_ETH_TYPE, __TCA_FLOWER_MAX, }; diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index 25a9ad8bcef1..e7a31f830690 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -235,6 +235,7 @@ enum LINUX_MIB_TCPSPURIOUSRTOS, /* TCPSpuriousRTOs */ LINUX_MIB_TCPMD5NOTFOUND, /* TCPMD5NotFound */ LINUX_MIB_TCPMD5UNEXPECTED, /* TCPMD5Unexpected */ + LINUX_MIB_TCPMD5FAILURE, /* TCPMD5Failure */ LINUX_MIB_SACKSHIFTED, LINUX_MIB_SACKMERGED, LINUX_MIB_SACKSHIFTFALLBACK, diff --git a/include/uapi/linux/tc_act/tc_vlan.h b/include/uapi/linux/tc_act/tc_vlan.h index 31151ff6264f..be72b6e3843b 100644 --- a/include/uapi/linux/tc_act/tc_vlan.h +++ b/include/uapi/linux/tc_act/tc_vlan.h @@ -29,6 +29,7 @@ enum { TCA_VLAN_PUSH_VLAN_ID, TCA_VLAN_PUSH_VLAN_PROTOCOL, TCA_VLAN_PAD, + TCA_VLAN_PUSH_VLAN_PRIORITY, __TCA_VLAN_MAX, }; #define TCA_VLAN_MAX (__TCA_VLAN_MAX - 1) diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h index 5f3f6d09fb79..f9edd20fe9ba 100644 --- a/include/uapi/linux/tipc_netlink.h +++ b/include/uapi/linux/tipc_netlink.h @@ -59,6 +59,9 @@ enum { TIPC_NL_MON_SET, TIPC_NL_MON_GET, TIPC_NL_MON_PEER_GET, + TIPC_NL_PEER_REMOVE, + TIPC_NL_BEARER_ADD, + TIPC_NL_UDP_GET_REMOTEIP, __TIPC_NL_CMD_MAX, TIPC_NL_CMD_MAX = __TIPC_NL_CMD_MAX - 1 @@ -98,6 +101,7 @@ enum { TIPC_NLA_UDP_UNSPEC, TIPC_NLA_UDP_LOCAL, /* sockaddr_storage */ TIPC_NLA_UDP_REMOTE, /* sockaddr_storage */ + TIPC_NLA_UDP_MULTI_REMOTEIP, /* flag */ __TIPC_NLA_UDP_MAX, TIPC_NLA_UDP_MAX = __TIPC_NLA_UDP_MAX - 1 diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 633a650d7aeb..a2ac051c342f 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -538,7 +538,7 @@ static int __init register_perf_event_array_map(void) } late_initcall(register_perf_event_array_map); -#ifdef CONFIG_SOCK_CGROUP_DATA +#ifdef CONFIG_CGROUPS static void *cgroup_fd_array_get_ptr(struct bpf_map *map, struct file *map_file /* not used */, int fd) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index daea765d72e6..abb61f3f6900 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -930,14 +930,14 @@ static int check_func_arg(struct verifier_env *env, u32 regno, enum bpf_arg_type arg_type, struct bpf_call_arg_meta *meta) { - struct reg_state *reg = env->cur_state.regs + regno; - enum bpf_reg_type expected_type; + struct reg_state *regs = env->cur_state.regs, *reg = ®s[regno]; + enum bpf_reg_type expected_type, type = reg->type; int err = 0; if (arg_type == ARG_DONTCARE) return 0; - if (reg->type == NOT_INIT) { + if (type == NOT_INIT) { verbose("R%d !read_ok\n", regno); return -EACCES; } @@ -950,16 +950,29 @@ static int check_func_arg(struct verifier_env *env, u32 regno, return 0; } + if (type == PTR_TO_PACKET && !may_write_pkt_data(env->prog->type)) { + verbose("helper access to the packet is not allowed for clsact\n"); + return -EACCES; + } + if (arg_type == ARG_PTR_TO_MAP_KEY || arg_type == ARG_PTR_TO_MAP_VALUE) { expected_type = PTR_TO_STACK; + if (type != PTR_TO_PACKET && type != expected_type) + goto err_type; } else if (arg_type == ARG_CONST_STACK_SIZE || arg_type == 
ARG_CONST_STACK_SIZE_OR_ZERO) { expected_type = CONST_IMM; + if (type != expected_type) + goto err_type; } else if (arg_type == ARG_CONST_MAP_PTR) { expected_type = CONST_PTR_TO_MAP; + if (type != expected_type) + goto err_type; } else if (arg_type == ARG_PTR_TO_CTX) { expected_type = PTR_TO_CTX; + if (type != expected_type) + goto err_type; } else if (arg_type == ARG_PTR_TO_STACK || arg_type == ARG_PTR_TO_RAW_STACK) { expected_type = PTR_TO_STACK; @@ -967,20 +980,16 @@ static int check_func_arg(struct verifier_env *env, u32 regno, * passed in as argument, it's a CONST_IMM type. Final test * happens during stack boundary checking. */ - if (reg->type == CONST_IMM && reg->imm == 0) - expected_type = CONST_IMM; + if (type == CONST_IMM && reg->imm == 0) + /* final test in check_stack_boundary() */; + else if (type != PTR_TO_PACKET && type != expected_type) + goto err_type; meta->raw_mode = arg_type == ARG_PTR_TO_RAW_STACK; } else { verbose("unsupported arg_type %d\n", arg_type); return -EFAULT; } - if (reg->type != expected_type) { - verbose("R%d type=%s expected=%s\n", regno, - reg_type_str[reg->type], reg_type_str[expected_type]); - return -EACCES; - } - if (arg_type == ARG_CONST_MAP_PTR) { /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ meta->map_ptr = reg->map_ptr; @@ -998,8 +1007,13 @@ static int check_func_arg(struct verifier_env *env, u32 regno, verbose("invalid map_ptr to access map->key\n"); return -EACCES; } - err = check_stack_boundary(env, regno, meta->map_ptr->key_size, - false, NULL); + if (type == PTR_TO_PACKET) + err = check_packet_access(env, regno, 0, + meta->map_ptr->key_size); + else + err = check_stack_boundary(env, regno, + meta->map_ptr->key_size, + false, NULL); } else if (arg_type == ARG_PTR_TO_MAP_VALUE) { /* bpf_map_xxx(..., map_ptr, ..., value) call: * check [value, value + map->value_size) validity @@ -1009,9 +1023,13 @@ static int check_func_arg(struct verifier_env *env, u32 regno, verbose("invalid map_ptr to access map->value\n"); return -EACCES; } - err = check_stack_boundary(env, regno, - meta->map_ptr->value_size, - false, NULL); + if (type == PTR_TO_PACKET) + err = check_packet_access(env, regno, 0, + meta->map_ptr->value_size); + else + err = check_stack_boundary(env, regno, + meta->map_ptr->value_size, + false, NULL); } else if (arg_type == ARG_CONST_STACK_SIZE || arg_type == ARG_CONST_STACK_SIZE_OR_ZERO) { bool zero_size_allowed = (arg_type == ARG_CONST_STACK_SIZE_OR_ZERO); @@ -1025,11 +1043,18 @@ static int check_func_arg(struct verifier_env *env, u32 regno, verbose("ARG_CONST_STACK_SIZE cannot be first argument\n"); return -EACCES; } - err = check_stack_boundary(env, regno - 1, reg->imm, - zero_size_allowed, meta); + if (regs[regno - 1].type == PTR_TO_PACKET) + err = check_packet_access(env, regno - 1, 0, reg->imm); + else + err = check_stack_boundary(env, regno - 1, reg->imm, + zero_size_allowed, meta); } return err; +err_type: + verbose("R%d type=%s expected=%s\n", regno, + reg_type_str[type], reg_type_str[expected_type]); + return -EACCES; } static int check_map_func_compatibility(struct bpf_map *map, int func_id) @@ -1053,7 +1078,8 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id) goto error; break; case BPF_MAP_TYPE_CGROUP_ARRAY: - if (func_id != BPF_FUNC_skb_under_cgroup) + if (func_id != BPF_FUNC_skb_under_cgroup && + func_id != BPF_FUNC_current_task_under_cgroup) goto error; break; default: @@ -1075,6 +1101,7 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id) if (map->map_type != 
BPF_MAP_TYPE_STACK_TRACE) goto error; break; + case BPF_FUNC_current_task_under_cgroup: case BPF_FUNC_skb_under_cgroup: if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) goto error; diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index b20438fdb029..ad35213b8405 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -376,6 +376,34 @@ static const struct bpf_func_proto bpf_get_current_task_proto = { .ret_type = RET_INTEGER, }; +static u64 bpf_current_task_under_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) +{ + struct bpf_map *map = (struct bpf_map *)(long)r1; + struct bpf_array *array = container_of(map, struct bpf_array, map); + struct cgroup *cgrp; + u32 idx = (u32)r2; + + if (unlikely(in_interrupt())) + return -EINVAL; + + if (unlikely(idx >= array->map.max_entries)) + return -E2BIG; + + cgrp = READ_ONCE(array->ptrs[idx]); + if (unlikely(!cgrp)) + return -EAGAIN; + + return task_under_cgroup_hierarchy(current, cgrp); +} + +static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = { + .func = bpf_current_task_under_cgroup, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_ANYTHING, +}; + static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id) { switch (func_id) { @@ -407,6 +435,10 @@ static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id) return &bpf_perf_event_read_proto; case BPF_FUNC_probe_write_user: return bpf_get_probe_write_proto(); + case BPF_FUNC_current_task_under_cgroup: + return &bpf_current_task_under_cgroup_proto; + case BPF_FUNC_get_prandom_u32: + return &bpf_get_prandom_u32_proto; default: return NULL; } diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 56054e541a0f..4320b92ef696 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -490,10 +490,9 @@ exit: EXPORT_SYMBOL_GPL(rhashtable_insert_slow); /** - * rhashtable_walk_init - Initialise an iterator + * rhashtable_walk_enter - Initialise an iterator * @ht: Table to walk over * @iter: Hash table Iterator - * @gfp: GFP flags for allocations * * This function prepares a hash table walk. * @@ -508,30 +507,22 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_slow); * This function may sleep so you must not call it from interrupt * context or with spin locks held. * - * You must call rhashtable_walk_exit if this function returns - * successfully. + * You must call rhashtable_walk_exit after this function returns. 
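Since rhashtable_walk_enter() replaces rhashtable_walk_init() and the walker is now embedded in the iterator, starting a walk can no longer fail with -ENOMEM. A minimal sketch of the updated calling pattern (the table and the per-object handling are placeholders):

#include <linux/err.h>
#include <linux/rhashtable.h>

static void my_walk(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	void *obj;

	rhashtable_walk_enter(ht, &iter);	/* may sleep, never fails */

	rhashtable_walk_start(&iter);		/* enters RCU; -EAGAIN here
						 * only signals a resize */
	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* table resized, keep going */
			break;
		}
		/* ... use obj ... */
	}
	rhashtable_walk_stop(&iter);		/* leaves RCU */

	rhashtable_walk_exit(&iter);		/* still required afterwards */
}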
*/ -int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter, - gfp_t gfp) +void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter) { iter->ht = ht; iter->p = NULL; iter->slot = 0; iter->skip = 0; - iter->walker = kmalloc(sizeof(*iter->walker), gfp); - if (!iter->walker) - return -ENOMEM; - spin_lock(&ht->lock); - iter->walker->tbl = + iter->walker.tbl = rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock)); - list_add(&iter->walker->list, &iter->walker->tbl->walkers); + list_add(&iter->walker.list, &iter->walker.tbl->walkers); spin_unlock(&ht->lock); - - return 0; } -EXPORT_SYMBOL_GPL(rhashtable_walk_init); +EXPORT_SYMBOL_GPL(rhashtable_walk_enter); /** * rhashtable_walk_exit - Free an iterator @@ -542,10 +533,9 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_init); void rhashtable_walk_exit(struct rhashtable_iter *iter) { spin_lock(&iter->ht->lock); - if (iter->walker->tbl) - list_del(&iter->walker->list); + if (iter->walker.tbl) + list_del(&iter->walker.list); spin_unlock(&iter->ht->lock); - kfree(iter->walker); } EXPORT_SYMBOL_GPL(rhashtable_walk_exit); @@ -571,12 +561,12 @@ int rhashtable_walk_start(struct rhashtable_iter *iter) rcu_read_lock(); spin_lock(&ht->lock); - if (iter->walker->tbl) - list_del(&iter->walker->list); + if (iter->walker.tbl) + list_del(&iter->walker.list); spin_unlock(&ht->lock); - if (!iter->walker->tbl) { - iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht); + if (!iter->walker.tbl) { + iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht); return -EAGAIN; } @@ -598,7 +588,7 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_start); */ void *rhashtable_walk_next(struct rhashtable_iter *iter) { - struct bucket_table *tbl = iter->walker->tbl; + struct bucket_table *tbl = iter->walker.tbl; struct rhashtable *ht = iter->ht; struct rhash_head *p = iter->p; @@ -631,8 +621,8 @@ next: /* Ensure we see any new tables. 
*/ smp_rmb(); - iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht); - if (iter->walker->tbl) { + iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht); + if (iter->walker.tbl) { iter->slot = 0; iter->skip = 0; return ERR_PTR(-EAGAIN); @@ -652,7 +642,7 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU) { struct rhashtable *ht; - struct bucket_table *tbl = iter->walker->tbl; + struct bucket_table *tbl = iter->walker.tbl; if (!tbl) goto out; @@ -661,9 +651,9 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter) spin_lock(&ht->lock); if (tbl->rehash < tbl->size) - list_add(&iter->walker->list, &tbl->walkers); + list_add(&iter->walker.list, &tbl->walkers); else - iter->walker->tbl = NULL; + iter->walker.tbl = NULL; spin_unlock(&ht->lock); iter->p = NULL; diff --git a/net/Kconfig b/net/Kconfig index c2cdbce629bd..7b6cd340b72b 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -369,6 +369,7 @@ source "net/irda/Kconfig" source "net/bluetooth/Kconfig" source "net/rxrpc/Kconfig" source "net/kcm/Kconfig" +source "net/strparser/Kconfig" config FIB_RULES bool diff --git a/net/Makefile b/net/Makefile index 9bd20bb86cc6..4cafaa2b4667 100644 --- a/net/Makefile +++ b/net/Makefile @@ -35,6 +35,7 @@ obj-$(CONFIG_BT) += bluetooth/ obj-$(CONFIG_SUNRPC) += sunrpc/ obj-$(CONFIG_AF_RXRPC) += rxrpc/ obj-$(CONFIG_AF_KCM) += kcm/ +obj-$(CONFIG_STREAM_PARSER) += strparser/ obj-$(CONFIG_ATM) += atm/ obj-$(CONFIG_L2TP) += l2tp/ obj-$(CONFIG_DECNET) += decnet/ diff --git a/net/atm/mpc.c b/net/atm/mpc.c index 0e982222d425..3b3b1a292ec8 100644 --- a/net/atm/mpc.c +++ b/net/atm/mpc.c @@ -1007,7 +1007,7 @@ static int mpoa_event_listener(struct notifier_block *mpoa_notifier, if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; - if (dev->name == NULL || strncmp(dev->name, "lec", 3)) + if (strncmp(dev->name, "lec", 3)) return NOTIFY_DONE; /* we are only interested in lec:s */ switch (event) { diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig index 833bb145ba3c..f20742cbae6d 100644 --- a/net/batman-adv/Kconfig +++ b/net/batman-adv/Kconfig @@ -73,10 +73,21 @@ config BATMAN_ADV_MCAST reduce the air overhead while improving the reliability of multicast messages. -config BATMAN_ADV_DEBUG - bool "B.A.T.M.A.N. debugging" +config BATMAN_ADV_DEBUGFS + bool "batman-adv debugfs entries" depends on BATMAN_ADV depends on DEBUG_FS + default y + help + Enable this to export routing related debug tables via debugfs. + The information for each soft-interface and used hard-interface can be + found under batman_adv/ + + If unsure, say Y. + +config BATMAN_ADV_DEBUG + bool "B.A.T.M.A.N. debugging" + depends on BATMAN_ADV_DEBUGFS help This is an option for use by developers; most people should say N here. 
This enables compilation of support for diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile index a83fc6c58d19..f724d3c98a81 100644 --- a/net/batman-adv/Makefile +++ b/net/batman-adv/Makefile @@ -24,14 +24,14 @@ batman-adv-$(CONFIG_BATMAN_ADV_BATMAN_V) += bat_v_elp.o batman-adv-$(CONFIG_BATMAN_ADV_BATMAN_V) += bat_v_ogm.o batman-adv-y += bitarray.o batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o -batman-adv-$(CONFIG_DEBUG_FS) += debugfs.o +batman-adv-$(CONFIG_BATMAN_ADV_DEBUGFS) += debugfs.o batman-adv-$(CONFIG_BATMAN_ADV_DAT) += distributed-arp-table.o batman-adv-y += fragmentation.o batman-adv-y += gateway_client.o batman-adv-y += gateway_common.o batman-adv-y += hard-interface.o batman-adv-y += hash.o -batman-adv-y += icmp_socket.o +batman-adv-$(CONFIG_BATMAN_ADV_DEBUGFS) += icmp_socket.o batman-adv-$(CONFIG_BATMAN_ADV_DEBUG) += log.o batman-adv-y += main.o batman-adv-$(CONFIG_BATMAN_ADV_MCAST) += multicast.o diff --git a/net/batman-adv/bat_algo.c b/net/batman-adv/bat_algo.c index 81dbbf569bd4..623d04302aa2 100644 --- a/net/batman-adv/bat_algo.c +++ b/net/batman-adv/bat_algo.c @@ -20,12 +20,18 @@ #include <linux/errno.h> #include <linux/list.h> #include <linux/moduleparam.h> +#include <linux/netlink.h> #include <linux/printk.h> #include <linux/seq_file.h> +#include <linux/skbuff.h> #include <linux/stddef.h> #include <linux/string.h> +#include <net/genetlink.h> +#include <net/netlink.h> +#include <uapi/linux/batman_adv.h> #include "bat_algo.h" +#include "netlink.h" char batadv_routing_algo[20] = "BATMAN_IV"; static struct hlist_head batadv_algo_list; @@ -95,6 +101,7 @@ int batadv_algo_select(struct batadv_priv *bat_priv, char *name) return 0; } +#ifdef CONFIG_BATMAN_ADV_DEBUGFS int batadv_algo_seq_print_text(struct seq_file *seq, void *offset) { struct batadv_algo_ops *bat_algo_ops; @@ -107,6 +114,7 @@ int batadv_algo_seq_print_text(struct seq_file *seq, void *offset) return 0; } +#endif static int batadv_param_set_ra(const char *val, const struct kernel_param *kp) { @@ -138,3 +146,65 @@ static struct kparam_string batadv_param_string_ra = { module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra, 0644); + +/** + * batadv_algo_dump_entry - fill in information about one supported routing + * algorithm + * @msg: netlink message to be sent back + * @portid: Port to reply to + * @seq: Sequence number of message + * @bat_algo_ops: Algorithm to be dumped + * + * Return: Error number, or 0 on success + */ +static int batadv_algo_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, + struct batadv_algo_ops *bat_algo_ops) +{ + void *hdr; + + hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, + NLM_F_MULTI, BATADV_CMD_GET_ROUTING_ALGOS); + if (!hdr) + return -EMSGSIZE; + + if (nla_put_string(msg, BATADV_ATTR_ALGO_NAME, bat_algo_ops->name)) + goto nla_put_failure; + + genlmsg_end(msg, hdr); + return 0; + + nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +/** + * batadv_algo_dump - fill in information about supported routing + * algorithms + * @msg: netlink message to be sent back + * @cb: Parameters to the netlink request + * + * Return: Length of reply message. 
+ */ +int batadv_algo_dump(struct sk_buff *msg, struct netlink_callback *cb) +{ + int portid = NETLINK_CB(cb->skb).portid; + struct batadv_algo_ops *bat_algo_ops; + int skip = cb->args[0]; + int i = 0; + + hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) { + if (i++ < skip) + continue; + + if (batadv_algo_dump_entry(msg, portid, cb->nlh->nlmsg_seq, + bat_algo_ops)) { + i--; + break; + } + } + + cb->args[0] = i; + + return msg->len; +} diff --git a/net/batman-adv/bat_algo.h b/net/batman-adv/bat_algo.h index 860d773dd8fa..3b5b69cdd12b 100644 --- a/net/batman-adv/bat_algo.h +++ b/net/batman-adv/bat_algo.h @@ -22,7 +22,9 @@ #include <linux/types.h> +struct netlink_callback; struct seq_file; +struct sk_buff; extern char batadv_routing_algo[]; extern struct list_head batadv_hardif_list; @@ -31,5 +33,6 @@ void batadv_algo_init(void); int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops); int batadv_algo_select(struct batadv_priv *bat_priv, char *name); int batadv_algo_seq_print_text(struct seq_file *seq, void *offset); +int batadv_algo_dump(struct sk_buff *msg, struct netlink_callback *cb); #endif /* _NET_BATMAN_ADV_BAT_ALGO_H_ */ diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 19b0abd6c640..e2d18d0b1f06 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -35,6 +35,7 @@ #include <linux/list.h> #include <linux/lockdep.h> #include <linux/netdevice.h> +#include <linux/netlink.h> #include <linux/pkt_sched.h> #include <linux/printk.h> #include <linux/random.h> @@ -48,12 +49,17 @@ #include <linux/string.h> #include <linux/types.h> #include <linux/workqueue.h> +#include <net/genetlink.h> +#include <net/netlink.h> +#include <uapi/linux/batman_adv.h> #include "bat_algo.h" #include "bitarray.h" +#include "gateway_client.h" #include "hard-interface.h" #include "hash.h" #include "log.h" +#include "netlink.h" #include "network-coding.h" #include "originator.h" #include "packet.h" @@ -318,17 +324,18 @@ batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr) if (!orig_node->bat_iv.bcast_own_sum) goto free_orig_node; + kref_get(&orig_node->refcount); hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig, batadv_choose_orig, orig_node, &orig_node->hash_entry); if (hash_added != 0) - goto free_orig_node; + goto free_orig_node_hash; return orig_node; -free_orig_node: - /* free twice, as batadv_orig_node_new sets refcount to 2 */ +free_orig_node_hash: batadv_orig_node_put(orig_node); +free_orig_node: batadv_orig_node_put(orig_node); return NULL; @@ -528,36 +535,25 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet, static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet) { struct net_device *soft_iface; - struct batadv_priv *bat_priv; - struct batadv_hard_iface *primary_if = NULL; if (!forw_packet->if_incoming) { pr_err("Error - can't forward packet: incoming iface not specified\n"); - goto out; + return; } soft_iface = forw_packet->if_incoming->soft_iface; - bat_priv = netdev_priv(soft_iface); if (WARN_ON(!forw_packet->if_outgoing)) - goto out; + return; if (WARN_ON(forw_packet->if_outgoing->soft_iface != soft_iface)) - goto out; + return; if (forw_packet->if_incoming->if_status != BATADV_IF_ACTIVE) - goto out; - - primary_if = batadv_primary_if_get_selected(bat_priv); - if (!primary_if) - goto out; + return; /* only for one specific outgoing interface */ batadv_iv_ogm_send_to_if(forw_packet, forw_packet->if_outgoing); - -out: - if (primary_if) - 
batadv_hardif_put(primary_if); } /** @@ -685,19 +681,12 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff, struct batadv_forw_packet *forw_packet_aggr; unsigned char *skb_buff; unsigned int skb_size; + atomic_t *queue_left = own_packet ? NULL : &bat_priv->batman_queue_left; - /* own packet should always be scheduled */ - if (!own_packet) { - if (!batadv_atomic_dec_not_zero(&bat_priv->batman_queue_left)) { - batadv_dbg(BATADV_DBG_BATMAN, bat_priv, - "batman packet queue full\n"); - return; - } - } - - forw_packet_aggr = kmalloc(sizeof(*forw_packet_aggr), GFP_ATOMIC); + forw_packet_aggr = batadv_forw_packet_alloc(if_incoming, if_outgoing, + queue_left, bat_priv); if (!forw_packet_aggr) - goto out_nomem; + return; if (atomic_read(&bat_priv->aggregated_ogms) && packet_len < BATADV_MAX_AGGREGATION_BYTES) @@ -708,8 +697,11 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff, skb_size += ETH_HLEN; forw_packet_aggr->skb = netdev_alloc_skb_ip_align(NULL, skb_size); - if (!forw_packet_aggr->skb) - goto out_free_forw_packet; + if (!forw_packet_aggr->skb) { + batadv_forw_packet_free(forw_packet_aggr); + return; + } + forw_packet_aggr->skb->priority = TC_PRIO_CONTROL; skb_reserve(forw_packet_aggr->skb, ETH_HLEN); @@ -717,12 +709,7 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff, forw_packet_aggr->packet_len = packet_len; memcpy(skb_buff, packet_buff, packet_len); - kref_get(&if_incoming->refcount); - kref_get(&if_outgoing->refcount); forw_packet_aggr->own = own_packet; - forw_packet_aggr->if_incoming = if_incoming; - forw_packet_aggr->if_outgoing = if_outgoing; - forw_packet_aggr->num_packets = 0; forw_packet_aggr->direct_link_flags = BATADV_NO_FLAGS; forw_packet_aggr->send_time = send_time; @@ -741,13 +728,6 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff, queue_delayed_work(batadv_event_workqueue, &forw_packet_aggr->delayed_work, send_time - jiffies); - - return; -out_free_forw_packet: - kfree(forw_packet_aggr); -out_nomem: - if (!own_packet) - atomic_inc(&bat_priv->batman_queue_left); } /* aggregate a new packet into the existing ogm packet */ @@ -1830,10 +1810,6 @@ static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work) batadv_iv_ogm_schedule(forw_packet->if_incoming); out: - /* don't count own packet */ - if (!forw_packet->own) - atomic_inc(&bat_priv->batman_queue_left); - batadv_forw_packet_free(forw_packet); } @@ -1879,6 +1855,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb, return NET_RX_SUCCESS; } +#ifdef CONFIG_BATMAN_ADV_DEBUGFS /** * batadv_iv_ogm_orig_print_neigh - print neighbors for the originator table * @orig_node: the orig_node for which the neighbors are printed @@ -1976,8 +1953,239 @@ next: if (batman_count == 0) seq_puts(seq, "No batman nodes in range ...\n"); } +#endif + +/** + * batadv_iv_ogm_neigh_get_tq_avg - Get the TQ average for a neighbour on a + * given outgoing interface. + * @neigh_node: Neighbour of interest + * @if_outgoing: Outgoing interface of interest + * @tq_avg: Pointer of where to store the TQ average + * + * Return: False if no average TQ available, otherwise true. 
+ */ +static bool +batadv_iv_ogm_neigh_get_tq_avg(struct batadv_neigh_node *neigh_node, + struct batadv_hard_iface *if_outgoing, + u8 *tq_avg) +{ + struct batadv_neigh_ifinfo *n_ifinfo; + + n_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing); + if (!n_ifinfo) + return false; + + *tq_avg = n_ifinfo->bat_iv.tq_avg; + batadv_neigh_ifinfo_put(n_ifinfo); + + return true; +} + +/** + * batadv_iv_ogm_orig_dump_subentry - Dump an originator subentry into a + * message + * @msg: Netlink message to dump into + * @portid: Port making netlink request + * @seq: Sequence number of netlink message + * @bat_priv: The bat priv with all the soft interface information + * @if_outgoing: Limit dump to entries with this outgoing interface + * @orig_node: Originator to dump + * @neigh_node: Single hops neighbour + * @best: Is the best originator + * + * Return: Error code, or 0 on success + */ +static int +batadv_iv_ogm_orig_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq, + struct batadv_priv *bat_priv, + struct batadv_hard_iface *if_outgoing, + struct batadv_orig_node *orig_node, + struct batadv_neigh_node *neigh_node, + bool best) +{ + void *hdr; + u8 tq_avg; + unsigned int last_seen_msecs; + + last_seen_msecs = jiffies_to_msecs(jiffies - orig_node->last_seen); + + if (!batadv_iv_ogm_neigh_get_tq_avg(neigh_node, if_outgoing, &tq_avg)) + return 0; + + if (if_outgoing != BATADV_IF_DEFAULT && + if_outgoing != neigh_node->if_incoming) + return 0; + + hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, + NLM_F_MULTI, BATADV_CMD_GET_ORIGINATORS); + if (!hdr) + return -ENOBUFS; + + if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN, + orig_node->orig) || + nla_put(msg, BATADV_ATTR_NEIGH_ADDRESS, ETH_ALEN, + neigh_node->addr) || + nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX, + neigh_node->if_incoming->net_dev->ifindex) || + nla_put_u8(msg, BATADV_ATTR_TQ, tq_avg) || + nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, + last_seen_msecs)) + goto nla_put_failure; + + if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST)) + goto nla_put_failure; + + genlmsg_end(msg, hdr); + return 0; + + nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +/** + * batadv_iv_ogm_orig_dump_entry - Dump an originator entry into a message + * @msg: Netlink message to dump into + * @portid: Port making netlink request + * @seq: Sequence number of netlink message + * @bat_priv: The bat priv with all the soft interface information + * @if_outgoing: Limit dump to entries with this outgoing interface + * @orig_node: Originator to dump + * @sub_s: Number of sub entries to skip + * + * This function assumes the caller holds rcu_read_lock(). 
+ * + * Return: Error code, or 0 on success + */ +static int +batadv_iv_ogm_orig_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, + struct batadv_priv *bat_priv, + struct batadv_hard_iface *if_outgoing, + struct batadv_orig_node *orig_node, int *sub_s) +{ + struct batadv_neigh_node *neigh_node_best; + struct batadv_neigh_node *neigh_node; + int sub = 0; + bool best; + u8 tq_avg_best; + + neigh_node_best = batadv_orig_router_get(orig_node, if_outgoing); + if (!neigh_node_best) + goto out; + + if (!batadv_iv_ogm_neigh_get_tq_avg(neigh_node_best, if_outgoing, + &tq_avg_best)) + goto out; + + if (tq_avg_best == 0) + goto out; + + hlist_for_each_entry_rcu(neigh_node, &orig_node->neigh_list, list) { + if (sub++ < *sub_s) + continue; + + best = (neigh_node == neigh_node_best); + + if (batadv_iv_ogm_orig_dump_subentry(msg, portid, seq, + bat_priv, if_outgoing, + orig_node, neigh_node, + best)) { + batadv_neigh_node_put(neigh_node_best); + + *sub_s = sub - 1; + return -EMSGSIZE; + } + } + + out: + if (neigh_node_best) + batadv_neigh_node_put(neigh_node_best); + + *sub_s = 0; + return 0; +} /** + * batadv_iv_ogm_orig_dump_bucket - Dump an originator bucket into a + * message + * @msg: Netlink message to dump into + * @portid: Port making netlink request + * @seq: Sequence number of netlink message + * @bat_priv: The bat priv with all the soft interface information + * @if_outgoing: Limit dump to entries with this outgoing interface + * @head: Bucket to be dumped + * @idx_s: Number of entries to be skipped + * @sub: Number of sub entries to be skipped + * + * Return: Error code, or 0 on success + */ +static int +batadv_iv_ogm_orig_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq, + struct batadv_priv *bat_priv, + struct batadv_hard_iface *if_outgoing, + struct hlist_head *head, int *idx_s, int *sub) +{ + struct batadv_orig_node *orig_node; + int idx = 0; + + rcu_read_lock(); + hlist_for_each_entry_rcu(orig_node, head, hash_entry) { + if (idx++ < *idx_s) + continue; + + if (batadv_iv_ogm_orig_dump_entry(msg, portid, seq, bat_priv, + if_outgoing, orig_node, + sub)) { + rcu_read_unlock(); + *idx_s = idx - 1; + return -EMSGSIZE; + } + } + rcu_read_unlock(); + + *idx_s = 0; + *sub = 0; + return 0; +} + +/** + * batadv_iv_ogm_orig_dump - Dump the originators into a message + * @msg: Netlink message to dump into + * @cb: Control block containing additional options + * @bat_priv: The bat priv with all the soft interface information + * @if_outgoing: Limit dump to entries with this outgoing interface + */ +static void +batadv_iv_ogm_orig_dump(struct sk_buff *msg, struct netlink_callback *cb, + struct batadv_priv *bat_priv, + struct batadv_hard_iface *if_outgoing) +{ + struct batadv_hashtable *hash = bat_priv->orig_hash; + struct hlist_head *head; + int bucket = cb->args[0]; + int idx = cb->args[1]; + int sub = cb->args[2]; + int portid = NETLINK_CB(cb->skb).portid; + + while (bucket < hash->size) { + head = &hash->table[bucket]; + + if (batadv_iv_ogm_orig_dump_bucket(msg, portid, + cb->nlh->nlmsg_seq, + bat_priv, if_outgoing, head, + &idx, &sub)) + break; + + bucket++; + } + + cb->args[0] = bucket; + cb->args[1] = idx; + cb->args[2] = sub; +} + +#ifdef CONFIG_BATMAN_ADV_DEBUGFS +/** * batadv_iv_hardif_neigh_print - print a single hop neighbour node * @seq: neighbour table seq_file struct * @hardif_neigh: hardif neighbour information @@ -2027,37 +2235,43 @@ static void batadv_iv_neigh_print(struct batadv_priv *bat_priv, if (batman_count == 0) seq_puts(seq, "No batman nodes in range ...\n"); } +#endif 
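The .dump handlers added above (batadv_iv_ogm_orig_dump, batadv_iv_ogm_neigh_dump and the gateway equivalents further down) are driven by the generic netlink family "batadv" and use cb->args[] (bucket/entry/sub-entry) to resume a dump that did not fit into one message. For illustration only -- this is not part of the patch -- a minimal userspace sketch that walks the originator table of bat0 could look like the following, assuming libnl-3-genl and an installed uapi <linux/batman_adv.h>:

/* Minimal sketch: dump BATADV_CMD_GET_ORIGINATORS for bat0 via libnl-3.
 * Illustrative only; error handling trimmed, "bat0" assumed to exist.
 */
#include <net/if.h>
#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/batman_adv.h>

static int orig_cb(struct nl_msg *msg, void *arg)
{
	struct nlattr *attrs[BATADV_ATTR_MAX + 1];

	if (genlmsg_parse(nlmsg_hdr(msg), 0, attrs, BATADV_ATTR_MAX, NULL))
		return NL_SKIP;

	if (attrs[BATADV_ATTR_ORIG_ADDRESS] && attrs[BATADV_ATTR_TQ]) {
		unsigned char *orig = nla_data(attrs[BATADV_ATTR_ORIG_ADDRESS]);

		printf("%02x:%02x:%02x:%02x:%02x:%02x tq=%u\n",
		       orig[0], orig[1], orig[2], orig[3], orig[4], orig[5],
		       nla_get_u8(attrs[BATADV_ATTR_TQ]));
	}
	return NL_OK;
}

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, BATADV_NL_NAME);

	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, NLM_F_DUMP,
		    BATADV_CMD_GET_ORIGINATORS, 1);
	nla_put_u32(msg, BATADV_ATTR_MESH_IFINDEX, if_nametoindex("bat0"));

	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, orig_cb, NULL);
	nl_send_auto(sk, msg);
	nl_recvmsgs_default(sk);

	nlmsg_free(msg);
	nl_socket_free(sk);
	return 0;
}

The same flow applies to BATADV_CMD_GET_NEIGHBORS and BATADV_CMD_GET_GATEWAYS; only the command and the attributes read in the callback change, and the kernel side resumes from cb->args[] exactly as in the functions above.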
/** - * batadv_iv_ogm_neigh_cmp - compare the metrics of two neighbors + * batadv_iv_ogm_neigh_diff - calculate tq difference of two neighbors * @neigh1: the first neighbor object of the comparison * @if_outgoing1: outgoing interface for the first neighbor * @neigh2: the second neighbor object of the comparison * @if_outgoing2: outgoing interface for the second neighbor + * @diff: pointer to integer receiving the calculated difference * - * Return: a value less, equal to or greater than 0 if the metric via neigh1 is - * lower, the same as or higher than the metric via neigh2 + * The content of *@diff is only valid when this function returns true. + * It is less, equal to or greater than 0 if the metric via neigh1 is lower, + * the same as or higher than the metric via neigh2 + * + * Return: true when the difference could be calculated, false otherwise */ -static int batadv_iv_ogm_neigh_cmp(struct batadv_neigh_node *neigh1, - struct batadv_hard_iface *if_outgoing1, - struct batadv_neigh_node *neigh2, - struct batadv_hard_iface *if_outgoing2) +static bool batadv_iv_ogm_neigh_diff(struct batadv_neigh_node *neigh1, + struct batadv_hard_iface *if_outgoing1, + struct batadv_neigh_node *neigh2, + struct batadv_hard_iface *if_outgoing2, + int *diff) { struct batadv_neigh_ifinfo *neigh1_ifinfo, *neigh2_ifinfo; u8 tq1, tq2; - int diff; + bool ret = true; neigh1_ifinfo = batadv_neigh_ifinfo_get(neigh1, if_outgoing1); neigh2_ifinfo = batadv_neigh_ifinfo_get(neigh2, if_outgoing2); if (!neigh1_ifinfo || !neigh2_ifinfo) { - diff = 0; + ret = false; goto out; } tq1 = neigh1_ifinfo->bat_iv.tq_avg; tq2 = neigh2_ifinfo->bat_iv.tq_avg; - diff = tq1 - tq2; + *diff = (int)tq1 - (int)tq2; out: if (neigh1_ifinfo) @@ -2065,6 +2279,162 @@ out: if (neigh2_ifinfo) batadv_neigh_ifinfo_put(neigh2_ifinfo); + return ret; +} + +/** + * batadv_iv_ogm_neigh_dump_neigh - Dump a neighbour into a netlink message + * @msg: Netlink message to dump into + * @portid: Port making netlink request + * @seq: Sequence number of netlink message + * @hardif_neigh: Neighbour to be dumped + * + * Return: Error code, or 0 on success + */ +static int +batadv_iv_ogm_neigh_dump_neigh(struct sk_buff *msg, u32 portid, u32 seq, + struct batadv_hardif_neigh_node *hardif_neigh) +{ + void *hdr; + unsigned int last_seen_msecs; + + last_seen_msecs = jiffies_to_msecs(jiffies - hardif_neigh->last_seen); + + hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, + NLM_F_MULTI, BATADV_CMD_GET_NEIGHBORS); + if (!hdr) + return -ENOBUFS; + + if (nla_put(msg, BATADV_ATTR_NEIGH_ADDRESS, ETH_ALEN, + hardif_neigh->addr) || + nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX, + hardif_neigh->if_incoming->net_dev->ifindex) || + nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, + last_seen_msecs)) + goto nla_put_failure; + + genlmsg_end(msg, hdr); + return 0; + + nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +/** + * batadv_iv_ogm_neigh_dump_hardif - Dump the neighbours of a hard interface + * into a message + * @msg: Netlink message to dump into + * @portid: Port making netlink request + * @seq: Sequence number of netlink message + * @bat_priv: The bat priv with all the soft interface information + * @hard_iface: Hard interface to dump the neighbours for + * @idx_s: Number of entries to skip + * + * This function assumes the caller holds rcu_read_lock(). 
+ * + * Return: Error code, or 0 on success + */ +static int +batadv_iv_ogm_neigh_dump_hardif(struct sk_buff *msg, u32 portid, u32 seq, + struct batadv_priv *bat_priv, + struct batadv_hard_iface *hard_iface, + int *idx_s) +{ + struct batadv_hardif_neigh_node *hardif_neigh; + int idx = 0; + + hlist_for_each_entry_rcu(hardif_neigh, + &hard_iface->neigh_list, list) { + if (idx++ < *idx_s) + continue; + + if (batadv_iv_ogm_neigh_dump_neigh(msg, portid, seq, + hardif_neigh)) { + *idx_s = idx - 1; + return -EMSGSIZE; + } + } + + *idx_s = 0; + return 0; +} + +/** + * batadv_iv_ogm_neigh_dump - Dump the neighbours into a message + * @msg: Netlink message to dump into + * @cb: Control block containing additional options + * @bat_priv: The bat priv with all the soft interface information + * @single_hardif: Limit dump to this hard interfaace + */ +static void +batadv_iv_ogm_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb, + struct batadv_priv *bat_priv, + struct batadv_hard_iface *single_hardif) +{ + struct batadv_hard_iface *hard_iface; + int i_hardif = 0; + int i_hardif_s = cb->args[0]; + int idx = cb->args[1]; + int portid = NETLINK_CB(cb->skb).portid; + + rcu_read_lock(); + if (single_hardif) { + if (i_hardif_s == 0) { + if (batadv_iv_ogm_neigh_dump_hardif(msg, portid, + cb->nlh->nlmsg_seq, + bat_priv, + single_hardif, + &idx) == 0) + i_hardif++; + } + } else { + list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, + list) { + if (hard_iface->soft_iface != bat_priv->soft_iface) + continue; + + if (i_hardif++ < i_hardif_s) + continue; + + if (batadv_iv_ogm_neigh_dump_hardif(msg, portid, + cb->nlh->nlmsg_seq, + bat_priv, + hard_iface, &idx)) { + i_hardif--; + break; + } + } + } + rcu_read_unlock(); + + cb->args[0] = i_hardif; + cb->args[1] = idx; +} + +/** + * batadv_iv_ogm_neigh_cmp - compare the metrics of two neighbors + * @neigh1: the first neighbor object of the comparison + * @if_outgoing1: outgoing interface for the first neighbor + * @neigh2: the second neighbor object of the comparison + * @if_outgoing2: outgoing interface for the second neighbor + * + * Return: a value less, equal to or greater than 0 if the metric via neigh1 is + * lower, the same as or higher than the metric via neigh2 + */ +static int batadv_iv_ogm_neigh_cmp(struct batadv_neigh_node *neigh1, + struct batadv_hard_iface *if_outgoing1, + struct batadv_neigh_node *neigh2, + struct batadv_hard_iface *if_outgoing2) +{ + bool ret; + int diff; + + ret = batadv_iv_ogm_neigh_diff(neigh1, if_outgoing1, neigh2, + if_outgoing2, &diff); + if (!ret) + return 0; + return diff; } @@ -2085,36 +2455,341 @@ batadv_iv_ogm_neigh_is_sob(struct batadv_neigh_node *neigh1, struct batadv_neigh_node *neigh2, struct batadv_hard_iface *if_outgoing2) { - struct batadv_neigh_ifinfo *neigh1_ifinfo, *neigh2_ifinfo; - u8 tq1, tq2; bool ret; + int diff; - neigh1_ifinfo = batadv_neigh_ifinfo_get(neigh1, if_outgoing1); - neigh2_ifinfo = batadv_neigh_ifinfo_get(neigh2, if_outgoing2); + ret = batadv_iv_ogm_neigh_diff(neigh1, if_outgoing1, neigh2, + if_outgoing2, &diff); + if (!ret) + return false; - /* we can't say that the metric is better */ - if (!neigh1_ifinfo || !neigh2_ifinfo) { - ret = false; + ret = diff > -BATADV_TQ_SIMILARITY_THRESHOLD; + return ret; +} + +static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface) +{ + /* begin scheduling originator messages on that interface */ + batadv_iv_ogm_schedule(hard_iface); +} + +static struct batadv_gw_node * +batadv_iv_gw_get_best_gw_node(struct batadv_priv *bat_priv) +{ 
+ struct batadv_neigh_node *router; + struct batadv_neigh_ifinfo *router_ifinfo; + struct batadv_gw_node *gw_node, *curr_gw = NULL; + u64 max_gw_factor = 0; + u64 tmp_gw_factor = 0; + u8 max_tq = 0; + u8 tq_avg; + struct batadv_orig_node *orig_node; + + rcu_read_lock(); + hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) { + orig_node = gw_node->orig_node; + router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT); + if (!router) + continue; + + router_ifinfo = batadv_neigh_ifinfo_get(router, + BATADV_IF_DEFAULT); + if (!router_ifinfo) + goto next; + + if (!kref_get_unless_zero(&gw_node->refcount)) + goto next; + + tq_avg = router_ifinfo->bat_iv.tq_avg; + + switch (atomic_read(&bat_priv->gw.sel_class)) { + case 1: /* fast connection */ + tmp_gw_factor = tq_avg * tq_avg; + tmp_gw_factor *= gw_node->bandwidth_down; + tmp_gw_factor *= 100 * 100; + tmp_gw_factor >>= 18; + + if ((tmp_gw_factor > max_gw_factor) || + ((tmp_gw_factor == max_gw_factor) && + (tq_avg > max_tq))) { + if (curr_gw) + batadv_gw_node_put(curr_gw); + curr_gw = gw_node; + kref_get(&curr_gw->refcount); + } + break; + + default: /* 2: stable connection (use best statistic) + * 3: fast-switch (use best statistic but change as + * soon as a better gateway appears) + * XX: late-switch (use best statistic but change as + * soon as a better gateway appears which has + * $routing_class more tq points) + */ + if (tq_avg > max_tq) { + if (curr_gw) + batadv_gw_node_put(curr_gw); + curr_gw = gw_node; + kref_get(&curr_gw->refcount); + } + break; + } + + if (tq_avg > max_tq) + max_tq = tq_avg; + + if (tmp_gw_factor > max_gw_factor) + max_gw_factor = tmp_gw_factor; + + batadv_gw_node_put(gw_node); + +next: + batadv_neigh_node_put(router); + if (router_ifinfo) + batadv_neigh_ifinfo_put(router_ifinfo); + } + rcu_read_unlock(); + + return curr_gw; +} + +static bool batadv_iv_gw_is_eligible(struct batadv_priv *bat_priv, + struct batadv_orig_node *curr_gw_orig, + struct batadv_orig_node *orig_node) +{ + struct batadv_neigh_ifinfo *router_orig_ifinfo = NULL; + struct batadv_neigh_ifinfo *router_gw_ifinfo = NULL; + struct batadv_neigh_node *router_gw = NULL; + struct batadv_neigh_node *router_orig = NULL; + u8 gw_tq_avg, orig_tq_avg; + bool ret = false; + + /* dynamic re-election is performed only on fast or late switch */ + if (atomic_read(&bat_priv->gw.sel_class) <= 2) + return false; + + router_gw = batadv_orig_router_get(curr_gw_orig, BATADV_IF_DEFAULT); + if (!router_gw) { + ret = true; goto out; } - tq1 = neigh1_ifinfo->bat_iv.tq_avg; - tq2 = neigh2_ifinfo->bat_iv.tq_avg; - ret = (tq1 - tq2) > -BATADV_TQ_SIMILARITY_THRESHOLD; + router_gw_ifinfo = batadv_neigh_ifinfo_get(router_gw, + BATADV_IF_DEFAULT); + if (!router_gw_ifinfo) { + ret = true; + goto out; + } + + router_orig = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT); + if (!router_orig) + goto out; + + router_orig_ifinfo = batadv_neigh_ifinfo_get(router_orig, + BATADV_IF_DEFAULT); + if (!router_orig_ifinfo) + goto out; + + gw_tq_avg = router_gw_ifinfo->bat_iv.tq_avg; + orig_tq_avg = router_orig_ifinfo->bat_iv.tq_avg; + + /* the TQ value has to be better */ + if (orig_tq_avg < gw_tq_avg) + goto out; + /* if the routing class is greater than 3 the value tells us how much + * greater the TQ value of the new gateway must be + */ + if ((atomic_read(&bat_priv->gw.sel_class) > 3) && + (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw.sel_class))) + goto out; + + batadv_dbg(BATADV_DBG_BATMAN, bat_priv, + "Restarting gateway selection: better gateway found (tq curr: 
%i, tq new: %i)\n", + gw_tq_avg, orig_tq_avg); + + ret = true; out: - if (neigh1_ifinfo) - batadv_neigh_ifinfo_put(neigh1_ifinfo); - if (neigh2_ifinfo) - batadv_neigh_ifinfo_put(neigh2_ifinfo); + if (router_gw_ifinfo) + batadv_neigh_ifinfo_put(router_gw_ifinfo); + if (router_orig_ifinfo) + batadv_neigh_ifinfo_put(router_orig_ifinfo); + if (router_gw) + batadv_neigh_node_put(router_gw); + if (router_orig) + batadv_neigh_node_put(router_orig); return ret; } -static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface) +#ifdef CONFIG_BATMAN_ADV_DEBUGFS +/* fails if orig_node has no router */ +static int batadv_iv_gw_write_buffer_text(struct batadv_priv *bat_priv, + struct seq_file *seq, + const struct batadv_gw_node *gw_node) { - /* begin scheduling originator messages on that interface */ - batadv_iv_ogm_schedule(hard_iface); + struct batadv_gw_node *curr_gw; + struct batadv_neigh_node *router; + struct batadv_neigh_ifinfo *router_ifinfo = NULL; + int ret = -1; + + router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT); + if (!router) + goto out; + + router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT); + if (!router_ifinfo) + goto out; + + curr_gw = batadv_gw_get_selected_gw_node(bat_priv); + + seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %u.%u/%u.%u MBit\n", + (curr_gw == gw_node ? "=>" : " "), + gw_node->orig_node->orig, + router_ifinfo->bat_iv.tq_avg, router->addr, + router->if_incoming->net_dev->name, + gw_node->bandwidth_down / 10, + gw_node->bandwidth_down % 10, + gw_node->bandwidth_up / 10, + gw_node->bandwidth_up % 10); + ret = seq_has_overflowed(seq) ? -1 : 0; + + if (curr_gw) + batadv_gw_node_put(curr_gw); +out: + if (router_ifinfo) + batadv_neigh_ifinfo_put(router_ifinfo); + if (router) + batadv_neigh_node_put(router); + return ret; +} + +static void batadv_iv_gw_print(struct batadv_priv *bat_priv, + struct seq_file *seq) +{ + struct batadv_gw_node *gw_node; + int gw_count = 0; + + seq_puts(seq, + " Gateway (#/255) Nexthop [outgoingIF]: advertised uplink bandwidth\n"); + + rcu_read_lock(); + hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) { + /* fails if orig_node has no router */ + if (batadv_iv_gw_write_buffer_text(bat_priv, seq, gw_node) < 0) + continue; + + gw_count++; + } + rcu_read_unlock(); + + if (gw_count == 0) + seq_puts(seq, "No gateways in range ...\n"); +} +#endif + +/** + * batadv_iv_gw_dump_entry - Dump a gateway into a message + * @msg: Netlink message to dump into + * @portid: Port making netlink request + * @seq: Sequence number of netlink message + * @bat_priv: The bat priv with all the soft interface information + * @gw_node: Gateway to be dumped + * + * Return: Error code, or 0 on success + */ +static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, + struct batadv_priv *bat_priv, + struct batadv_gw_node *gw_node) +{ + struct batadv_neigh_ifinfo *router_ifinfo = NULL; + struct batadv_neigh_node *router; + struct batadv_gw_node *curr_gw; + int ret = -EINVAL; + void *hdr; + + router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT); + if (!router) + goto out; + + router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT); + if (!router_ifinfo) + goto out; + + curr_gw = batadv_gw_get_selected_gw_node(bat_priv); + + hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, + NLM_F_MULTI, BATADV_CMD_GET_GATEWAYS); + if (!hdr) { + ret = -ENOBUFS; + goto out; + } + + ret = -EMSGSIZE; + + if (curr_gw == gw_node) + if (nla_put_flag(msg, BATADV_ATTR_FLAG_BEST)) { + 
genlmsg_cancel(msg, hdr); + goto out; + } + + if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN, + gw_node->orig_node->orig) || + nla_put_u8(msg, BATADV_ATTR_TQ, router_ifinfo->bat_iv.tq_avg) || + nla_put(msg, BATADV_ATTR_ROUTER, ETH_ALEN, + router->addr) || + nla_put_string(msg, BATADV_ATTR_HARD_IFNAME, + router->if_incoming->net_dev->name) || + nla_put_u32(msg, BATADV_ATTR_BANDWIDTH_DOWN, + gw_node->bandwidth_down) || + nla_put_u32(msg, BATADV_ATTR_BANDWIDTH_UP, + gw_node->bandwidth_up)) { + genlmsg_cancel(msg, hdr); + goto out; + } + + genlmsg_end(msg, hdr); + ret = 0; + +out: + if (router_ifinfo) + batadv_neigh_ifinfo_put(router_ifinfo); + if (router) + batadv_neigh_node_put(router); + return ret; +} + +/** + * batadv_iv_gw_dump - Dump gateways into a message + * @msg: Netlink message to dump into + * @cb: Control block containing additional options + * @bat_priv: The bat priv with all the soft interface information + */ +static void batadv_iv_gw_dump(struct sk_buff *msg, struct netlink_callback *cb, + struct batadv_priv *bat_priv) +{ + int portid = NETLINK_CB(cb->skb).portid; + struct batadv_gw_node *gw_node; + int idx_skip = cb->args[0]; + int idx = 0; + + rcu_read_lock(); + hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) { + if (idx++ < idx_skip) + continue; + + if (batadv_iv_gw_dump_entry(msg, portid, cb->nlh->nlmsg_seq, + bat_priv, gw_node)) { + idx_skip = idx - 1; + goto unlock; + } + } + + idx_skip = idx; +unlock: + rcu_read_unlock(); + + cb->args[0] = idx_skip; } static struct batadv_algo_ops batadv_batman_iv __read_mostly = { @@ -2129,14 +2804,28 @@ static struct batadv_algo_ops batadv_batman_iv __read_mostly = { .neigh = { .cmp = batadv_iv_ogm_neigh_cmp, .is_similar_or_better = batadv_iv_ogm_neigh_is_sob, +#ifdef CONFIG_BATMAN_ADV_DEBUGFS .print = batadv_iv_neigh_print, +#endif + .dump = batadv_iv_ogm_neigh_dump, }, .orig = { +#ifdef CONFIG_BATMAN_ADV_DEBUGFS .print = batadv_iv_ogm_orig_print, +#endif + .dump = batadv_iv_ogm_orig_dump, .free = batadv_iv_ogm_orig_free, .add_if = batadv_iv_ogm_orig_add_if, .del_if = batadv_iv_ogm_orig_del_if, }, + .gw = { + .get_best_gw_node = batadv_iv_gw_get_best_gw_node, + .is_eligible = batadv_iv_gw_is_eligible, +#ifdef CONFIG_BATMAN_ADV_DEBUGFS + .print = batadv_iv_gw_print, +#endif + .dump = batadv_iv_gw_dump, + }, }; int __init batadv_iv_init(void) diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c index 0366cbf5e444..e79f6f01182e 100644 --- a/net/batman-adv/bat_v.c +++ b/net/batman-adv/bat_v.c @@ -21,24 +21,38 @@ #include <linux/atomic.h> #include <linux/bug.h> #include <linux/cache.h> +#include <linux/errno.h> +#include <linux/if_ether.h> #include <linux/init.h> #include <linux/jiffies.h> +#include <linux/kernel.h> +#include <linux/kref.h> #include <linux/netdevice.h> +#include <linux/netlink.h> #include <linux/rculist.h> #include <linux/rcupdate.h> #include <linux/seq_file.h> #include <linux/stddef.h> #include <linux/types.h> #include <linux/workqueue.h> +#include <net/genetlink.h> +#include <net/netlink.h> +#include <uapi/linux/batman_adv.h> #include "bat_algo.h" #include "bat_v_elp.h" #include "bat_v_ogm.h" +#include "gateway_client.h" +#include "gateway_common.h" #include "hard-interface.h" #include "hash.h" +#include "log.h" +#include "netlink.h" #include "originator.h" #include "packet.h" +struct sk_buff; + static void batadv_v_iface_activate(struct batadv_hard_iface *hard_iface) { struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); @@ -115,6 +129,7 @@ batadv_v_hardif_neigh_init(struct 
batadv_hardif_neigh_node *hardif_neigh) batadv_v_elp_throughput_metric_update); } +#ifdef CONFIG_BATMAN_ADV_DEBUGFS /** * batadv_v_orig_print_neigh - print neighbors for the originator table * @orig_node: the orig_node for which the neighbors are printed @@ -198,8 +213,142 @@ static void batadv_v_neigh_print(struct batadv_priv *bat_priv, if (batman_count == 0) seq_puts(seq, "No batman nodes in range ...\n"); } +#endif + +/** + * batadv_v_neigh_dump_neigh - Dump a neighbour into a message + * @msg: Netlink message to dump into + * @portid: Port making netlink request + * @seq: Sequence number of netlink message + * @hardif_neigh: Neighbour to dump + * + * Return: Error code, or 0 on success + */ +static int +batadv_v_neigh_dump_neigh(struct sk_buff *msg, u32 portid, u32 seq, + struct batadv_hardif_neigh_node *hardif_neigh) +{ + void *hdr; + unsigned int last_seen_msecs; + u32 throughput; + + last_seen_msecs = jiffies_to_msecs(jiffies - hardif_neigh->last_seen); + throughput = ewma_throughput_read(&hardif_neigh->bat_v.throughput); + throughput = throughput * 100; + + hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, NLM_F_MULTI, + BATADV_CMD_GET_NEIGHBORS); + if (!hdr) + return -ENOBUFS; + + if (nla_put(msg, BATADV_ATTR_NEIGH_ADDRESS, ETH_ALEN, + hardif_neigh->addr) || + nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX, + hardif_neigh->if_incoming->net_dev->ifindex) || + nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, + last_seen_msecs) || + nla_put_u32(msg, BATADV_ATTR_THROUGHPUT, throughput)) + goto nla_put_failure; + + genlmsg_end(msg, hdr); + return 0; + + nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} /** + * batadv_v_neigh_dump_hardif - Dump the neighbours of a hard interface into + * a message + * @msg: Netlink message to dump into + * @portid: Port making netlink request + * @seq: Sequence number of netlink message + * @bat_priv: The bat priv with all the soft interface information + * @hard_iface: The hard interface to be dumped + * @idx_s: Entries to be skipped + * + * This function assumes the caller holds rcu_read_lock(). 
+ * + * Return: Error code, or 0 on success + */ +static int +batadv_v_neigh_dump_hardif(struct sk_buff *msg, u32 portid, u32 seq, + struct batadv_priv *bat_priv, + struct batadv_hard_iface *hard_iface, + int *idx_s) +{ + struct batadv_hardif_neigh_node *hardif_neigh; + int idx = 0; + + hlist_for_each_entry_rcu(hardif_neigh, + &hard_iface->neigh_list, list) { + if (idx++ < *idx_s) + continue; + + if (batadv_v_neigh_dump_neigh(msg, portid, seq, hardif_neigh)) { + *idx_s = idx - 1; + return -EMSGSIZE; + } + } + + *idx_s = 0; + return 0; +} + +/** + * batadv_v_neigh_dump - Dump the neighbours of a hard interface into a + * message + * @msg: Netlink message to dump into + * @cb: Control block containing additional options + * @bat_priv: The bat priv with all the soft interface information + * @single_hardif: Limit dumping to this hard interface + */ +static void +batadv_v_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb, + struct batadv_priv *bat_priv, + struct batadv_hard_iface *single_hardif) +{ + struct batadv_hard_iface *hard_iface; + int i_hardif = 0; + int i_hardif_s = cb->args[0]; + int idx = cb->args[1]; + int portid = NETLINK_CB(cb->skb).portid; + + rcu_read_lock(); + if (single_hardif) { + if (i_hardif_s == 0) { + if (batadv_v_neigh_dump_hardif(msg, portid, + cb->nlh->nlmsg_seq, + bat_priv, single_hardif, + &idx) == 0) + i_hardif++; + } + } else { + list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { + if (hard_iface->soft_iface != bat_priv->soft_iface) + continue; + + if (i_hardif++ < i_hardif_s) + continue; + + if (batadv_v_neigh_dump_hardif(msg, portid, + cb->nlh->nlmsg_seq, + bat_priv, hard_iface, + &idx)) { + i_hardif--; + break; + } + } + } + rcu_read_unlock(); + + cb->args[0] = i_hardif; + cb->args[1] = idx; +} + +#ifdef CONFIG_BATMAN_ADV_DEBUGFS +/** * batadv_v_orig_print - print the originator table * @bat_priv: the bat priv with all the soft interface information * @seq: debugfs table seq_file struct @@ -265,6 +414,205 @@ next: if (batman_count == 0) seq_puts(seq, "No batman nodes in range ...\n"); } +#endif + +/** + * batadv_v_orig_dump_subentry - Dump an originator subentry into a + * message + * @msg: Netlink message to dump into + * @portid: Port making netlink request + * @seq: Sequence number of netlink message + * @bat_priv: The bat priv with all the soft interface information + * @if_outgoing: Limit dump to entries with this outgoing interface + * @orig_node: Originator to dump + * @neigh_node: Single hops neighbour + * @best: Is the best originator + * + * Return: Error code, or 0 on success + */ +static int +batadv_v_orig_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq, + struct batadv_priv *bat_priv, + struct batadv_hard_iface *if_outgoing, + struct batadv_orig_node *orig_node, + struct batadv_neigh_node *neigh_node, + bool best) +{ + struct batadv_neigh_ifinfo *n_ifinfo; + unsigned int last_seen_msecs; + u32 throughput; + void *hdr; + + n_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing); + if (!n_ifinfo) + return 0; + + throughput = n_ifinfo->bat_v.throughput * 100; + + batadv_neigh_ifinfo_put(n_ifinfo); + + last_seen_msecs = jiffies_to_msecs(jiffies - orig_node->last_seen); + + if (if_outgoing != BATADV_IF_DEFAULT && + if_outgoing != neigh_node->if_incoming) + return 0; + + hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, NLM_F_MULTI, + BATADV_CMD_GET_ORIGINATORS); + if (!hdr) + return -ENOBUFS; + + if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN, orig_node->orig) || + nla_put(msg, 
BATADV_ATTR_NEIGH_ADDRESS, ETH_ALEN, + neigh_node->addr) || + nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX, + neigh_node->if_incoming->net_dev->ifindex) || + nla_put_u32(msg, BATADV_ATTR_THROUGHPUT, throughput) || + nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, + last_seen_msecs)) + goto nla_put_failure; + + if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST)) + goto nla_put_failure; + + genlmsg_end(msg, hdr); + return 0; + + nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +/** + * batadv_v_orig_dump_entry - Dump an originator entry into a message + * @msg: Netlink message to dump into + * @portid: Port making netlink request + * @seq: Sequence number of netlink message + * @bat_priv: The bat priv with all the soft interface information + * @if_outgoing: Limit dump to entries with this outgoing interface + * @orig_node: Originator to dump + * @sub_s: Number of sub entries to skip + * + * This function assumes the caller holds rcu_read_lock(). + * + * Return: Error code, or 0 on success + */ +static int +batadv_v_orig_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, + struct batadv_priv *bat_priv, + struct batadv_hard_iface *if_outgoing, + struct batadv_orig_node *orig_node, int *sub_s) +{ + struct batadv_neigh_node *neigh_node_best; + struct batadv_neigh_node *neigh_node; + int sub = 0; + bool best; + + neigh_node_best = batadv_orig_router_get(orig_node, if_outgoing); + if (!neigh_node_best) + goto out; + + hlist_for_each_entry_rcu(neigh_node, &orig_node->neigh_list, list) { + if (sub++ < *sub_s) + continue; + + best = (neigh_node == neigh_node_best); + + if (batadv_v_orig_dump_subentry(msg, portid, seq, bat_priv, + if_outgoing, orig_node, + neigh_node, best)) { + batadv_neigh_node_put(neigh_node_best); + + *sub_s = sub - 1; + return -EMSGSIZE; + } + } + + out: + if (neigh_node_best) + batadv_neigh_node_put(neigh_node_best); + + *sub_s = 0; + return 0; +} + +/** + * batadv_v_orig_dump_bucket - Dump an originator bucket into a + * message + * @msg: Netlink message to dump into + * @portid: Port making netlink request + * @seq: Sequence number of netlink message + * @bat_priv: The bat priv with all the soft interface information + * @if_outgoing: Limit dump to entries with this outgoing interface + * @head: Bucket to be dumped + * @idx_s: Number of entries to be skipped + * @sub: Number of sub entries to be skipped + * + * Return: Error code, or 0 on success + */ +static int +batadv_v_orig_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq, + struct batadv_priv *bat_priv, + struct batadv_hard_iface *if_outgoing, + struct hlist_head *head, int *idx_s, int *sub) +{ + struct batadv_orig_node *orig_node; + int idx = 0; + + rcu_read_lock(); + hlist_for_each_entry_rcu(orig_node, head, hash_entry) { + if (idx++ < *idx_s) + continue; + + if (batadv_v_orig_dump_entry(msg, portid, seq, bat_priv, + if_outgoing, orig_node, sub)) { + rcu_read_unlock(); + *idx_s = idx - 1; + return -EMSGSIZE; + } + } + rcu_read_unlock(); + + *idx_s = 0; + *sub = 0; + return 0; +} + +/** + * batadv_v_orig_dump - Dump the originators into a message + * @msg: Netlink message to dump into + * @cb: Control block containing additional options + * @bat_priv: The bat priv with all the soft interface information + * @if_outgoing: Limit dump to entries with this outgoing interface + */ +static void +batadv_v_orig_dump(struct sk_buff *msg, struct netlink_callback *cb, + struct batadv_priv *bat_priv, + struct batadv_hard_iface *if_outgoing) +{ + struct batadv_hashtable *hash = bat_priv->orig_hash; + struct 
hlist_head *head; + int bucket = cb->args[0]; + int idx = cb->args[1]; + int sub = cb->args[2]; + int portid = NETLINK_CB(cb->skb).portid; + + while (bucket < hash->size) { + head = &hash->table[bucket]; + + if (batadv_v_orig_dump_bucket(msg, portid, + cb->nlh->nlmsg_seq, + bat_priv, if_outgoing, head, &idx, + &sub)) + break; + + bucket++; + } + + cb->args[0] = bucket; + cb->args[1] = idx; + cb->args[2] = sub; +} static int batadv_v_neigh_cmp(struct batadv_neigh_node *neigh1, struct batadv_hard_iface *if_outgoing1, @@ -320,6 +668,365 @@ err_ifinfo1: return ret; } +static ssize_t batadv_v_store_sel_class(struct batadv_priv *bat_priv, + char *buff, size_t count) +{ + u32 old_class, class; + + if (!batadv_parse_throughput(bat_priv->soft_iface, buff, + "B.A.T.M.A.N. V GW selection class", + &class)) + return -EINVAL; + + old_class = atomic_read(&bat_priv->gw.sel_class); + atomic_set(&bat_priv->gw.sel_class, class); + + if (old_class != class) + batadv_gw_reselect(bat_priv); + + return count; +} + +static ssize_t batadv_v_show_sel_class(struct batadv_priv *bat_priv, char *buff) +{ + u32 class = atomic_read(&bat_priv->gw.sel_class); + + return sprintf(buff, "%u.%u MBit\n", class / 10, class % 10); +} + +/** + * batadv_v_gw_throughput_get - retrieve the GW-bandwidth for a given GW + * @gw_node: the GW to retrieve the metric for + * @bw: the pointer where the metric will be stored. The metric is computed as + * the minimum between the GW advertised throughput and the path throughput to + * it in the mesh + * + * Return: 0 on success, -1 on failure + */ +static int batadv_v_gw_throughput_get(struct batadv_gw_node *gw_node, u32 *bw) +{ + struct batadv_neigh_ifinfo *router_ifinfo = NULL; + struct batadv_orig_node *orig_node; + struct batadv_neigh_node *router; + int ret = -1; + + orig_node = gw_node->orig_node; + router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT); + if (!router) + goto out; + + router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT); + if (!router_ifinfo) + goto out; + + /* the GW metric is computed as the minimum between the path throughput + * to reach the GW itself and the advertised bandwidth. 
+ * This gives us an approximation of the effective throughput that the + * client can expect via this particular GW node + */ + *bw = router_ifinfo->bat_v.throughput; + *bw = min_t(u32, *bw, gw_node->bandwidth_down); + + ret = 0; +out: + if (router) + batadv_neigh_node_put(router); + if (router_ifinfo) + batadv_neigh_ifinfo_put(router_ifinfo); + + return ret; +} + +/** + * batadv_v_gw_get_best_gw_node - retrieve the best GW node + * @bat_priv: the bat priv with all the soft interface information + * + * Return: the GW node having the best GW-metric, NULL if no GW is known + */ +static struct batadv_gw_node * +batadv_v_gw_get_best_gw_node(struct batadv_priv *bat_priv) +{ + struct batadv_gw_node *gw_node, *curr_gw = NULL; + u32 max_bw = 0, bw; + + rcu_read_lock(); + hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) { + if (!kref_get_unless_zero(&gw_node->refcount)) + continue; + + if (batadv_v_gw_throughput_get(gw_node, &bw) < 0) + goto next; + + if (curr_gw && (bw <= max_bw)) + goto next; + + if (curr_gw) + batadv_gw_node_put(curr_gw); + + curr_gw = gw_node; + kref_get(&curr_gw->refcount); + max_bw = bw; + +next: + batadv_gw_node_put(gw_node); + } + rcu_read_unlock(); + + return curr_gw; +} + +/** + * batadv_v_gw_is_eligible - check if a originator would be selected as GW + * @bat_priv: the bat priv with all the soft interface information + * @curr_gw_orig: originator representing the currently selected GW + * @orig_node: the originator representing the new candidate + * + * Return: true if orig_node can be selected as current GW, false otherwise + */ +static bool batadv_v_gw_is_eligible(struct batadv_priv *bat_priv, + struct batadv_orig_node *curr_gw_orig, + struct batadv_orig_node *orig_node) +{ + struct batadv_gw_node *curr_gw = NULL, *orig_gw = NULL; + u32 gw_throughput, orig_throughput, threshold; + bool ret = false; + + threshold = atomic_read(&bat_priv->gw.sel_class); + + curr_gw = batadv_gw_node_get(bat_priv, curr_gw_orig); + if (!curr_gw) { + ret = true; + goto out; + } + + if (batadv_v_gw_throughput_get(curr_gw, &gw_throughput) < 0) { + ret = true; + goto out; + } + + orig_gw = batadv_gw_node_get(bat_priv, orig_node); + if (!orig_node) + goto out; + + if (batadv_v_gw_throughput_get(orig_gw, &orig_throughput) < 0) + goto out; + + if (orig_throughput < gw_throughput) + goto out; + + if ((orig_throughput - gw_throughput) < threshold) + goto out; + + batadv_dbg(BATADV_DBG_BATMAN, bat_priv, + "Restarting gateway selection: better gateway found (throughput curr: %u, throughput new: %u)\n", + gw_throughput, orig_throughput); + + ret = true; +out: + if (curr_gw) + batadv_gw_node_put(curr_gw); + if (orig_gw) + batadv_gw_node_put(orig_gw); + + return ret; +} + +#ifdef CONFIG_BATMAN_ADV_DEBUGFS +/* fails if orig_node has no router */ +static int batadv_v_gw_write_buffer_text(struct batadv_priv *bat_priv, + struct seq_file *seq, + const struct batadv_gw_node *gw_node) +{ + struct batadv_gw_node *curr_gw; + struct batadv_neigh_node *router; + struct batadv_neigh_ifinfo *router_ifinfo = NULL; + int ret = -1; + + router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT); + if (!router) + goto out; + + router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT); + if (!router_ifinfo) + goto out; + + curr_gw = batadv_gw_get_selected_gw_node(bat_priv); + + seq_printf(seq, "%s %pM (%9u.%1u) %pM [%10s]: %u.%u/%u.%u MBit\n", + (curr_gw == gw_node ? 
"=>" : " "), + gw_node->orig_node->orig, + router_ifinfo->bat_v.throughput / 10, + router_ifinfo->bat_v.throughput % 10, router->addr, + router->if_incoming->net_dev->name, + gw_node->bandwidth_down / 10, + gw_node->bandwidth_down % 10, + gw_node->bandwidth_up / 10, + gw_node->bandwidth_up % 10); + ret = seq_has_overflowed(seq) ? -1 : 0; + + if (curr_gw) + batadv_gw_node_put(curr_gw); +out: + if (router_ifinfo) + batadv_neigh_ifinfo_put(router_ifinfo); + if (router) + batadv_neigh_node_put(router); + return ret; +} + +/** + * batadv_v_gw_print - print the gateway list + * @bat_priv: the bat priv with all the soft interface information + * @seq: gateway table seq_file struct + */ +static void batadv_v_gw_print(struct batadv_priv *bat_priv, + struct seq_file *seq) +{ + struct batadv_gw_node *gw_node; + int gw_count = 0; + + seq_puts(seq, + " Gateway ( throughput) Nexthop [outgoingIF]: advertised uplink bandwidth\n"); + + rcu_read_lock(); + hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) { + /* fails if orig_node has no router */ + if (batadv_v_gw_write_buffer_text(bat_priv, seq, gw_node) < 0) + continue; + + gw_count++; + } + rcu_read_unlock(); + + if (gw_count == 0) + seq_puts(seq, "No gateways in range ...\n"); +} +#endif + +/** + * batadv_v_gw_dump_entry - Dump a gateway into a message + * @msg: Netlink message to dump into + * @portid: Port making netlink request + * @seq: Sequence number of netlink message + * @bat_priv: The bat priv with all the soft interface information + * @gw_node: Gateway to be dumped + * + * Return: Error code, or 0 on success + */ +static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, + struct batadv_priv *bat_priv, + struct batadv_gw_node *gw_node) +{ + struct batadv_neigh_ifinfo *router_ifinfo = NULL; + struct batadv_neigh_node *router; + struct batadv_gw_node *curr_gw; + int ret = -EINVAL; + void *hdr; + + router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT); + if (!router) + goto out; + + router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT); + if (!router_ifinfo) + goto out; + + curr_gw = batadv_gw_get_selected_gw_node(bat_priv); + + hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, + NLM_F_MULTI, BATADV_CMD_GET_GATEWAYS); + if (!hdr) { + ret = -ENOBUFS; + goto out; + } + + ret = -EMSGSIZE; + + if (curr_gw == gw_node) { + if (nla_put_flag(msg, BATADV_ATTR_FLAG_BEST)) { + genlmsg_cancel(msg, hdr); + goto out; + } + } + + if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN, + gw_node->orig_node->orig)) { + genlmsg_cancel(msg, hdr); + goto out; + } + + if (nla_put_u32(msg, BATADV_ATTR_THROUGHPUT, + router_ifinfo->bat_v.throughput)) { + genlmsg_cancel(msg, hdr); + goto out; + } + + if (nla_put(msg, BATADV_ATTR_ROUTER, ETH_ALEN, router->addr)) { + genlmsg_cancel(msg, hdr); + goto out; + } + + if (nla_put_string(msg, BATADV_ATTR_HARD_IFNAME, + router->if_incoming->net_dev->name)) { + genlmsg_cancel(msg, hdr); + goto out; + } + + if (nla_put_u32(msg, BATADV_ATTR_BANDWIDTH_DOWN, + gw_node->bandwidth_down)) { + genlmsg_cancel(msg, hdr); + goto out; + } + + if (nla_put_u32(msg, BATADV_ATTR_BANDWIDTH_UP, gw_node->bandwidth_up)) { + genlmsg_cancel(msg, hdr); + goto out; + } + + genlmsg_end(msg, hdr); + ret = 0; + +out: + if (router_ifinfo) + batadv_neigh_ifinfo_put(router_ifinfo); + if (router) + batadv_neigh_node_put(router); + return ret; +} + +/** + * batadv_v_gw_dump - Dump gateways into a message + * @msg: Netlink message to dump into + * @cb: Control block containing additional options + * 
@bat_priv: The bat priv with all the soft interface information + */ +static void batadv_v_gw_dump(struct sk_buff *msg, struct netlink_callback *cb, + struct batadv_priv *bat_priv) +{ + int portid = NETLINK_CB(cb->skb).portid; + struct batadv_gw_node *gw_node; + int idx_skip = cb->args[0]; + int idx = 0; + + rcu_read_lock(); + hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) { + if (idx++ < idx_skip) + continue; + + if (batadv_v_gw_dump_entry(msg, portid, cb->nlh->nlmsg_seq, + bat_priv, gw_node)) { + idx_skip = idx - 1; + goto unlock; + } + } + + idx_skip = idx; +unlock: + rcu_read_unlock(); + + cb->args[0] = idx_skip; +} + static struct batadv_algo_ops batadv_batman_v __read_mostly = { .name = "BATMAN_V", .iface = { @@ -333,10 +1040,26 @@ static struct batadv_algo_ops batadv_batman_v __read_mostly = { .hardif_init = batadv_v_hardif_neigh_init, .cmp = batadv_v_neigh_cmp, .is_similar_or_better = batadv_v_neigh_is_sob, +#ifdef CONFIG_BATMAN_ADV_DEBUGFS .print = batadv_v_neigh_print, +#endif + .dump = batadv_v_neigh_dump, }, .orig = { +#ifdef CONFIG_BATMAN_ADV_DEBUGFS .print = batadv_v_orig_print, +#endif + .dump = batadv_v_orig_dump, + }, + .gw = { + .store_sel_class = batadv_v_store_sel_class, + .show_sel_class = batadv_v_show_sel_class, + .get_best_gw_node = batadv_v_gw_get_best_gw_node, + .is_eligible = batadv_v_gw_is_eligible, +#ifdef CONFIG_BATMAN_ADV_DEBUGFS + .print = batadv_v_gw_print, +#endif + .dump = batadv_v_gw_dump, }, }; @@ -363,7 +1086,16 @@ void batadv_v_hardif_init(struct batadv_hard_iface *hard_iface) */ int batadv_v_mesh_init(struct batadv_priv *bat_priv) { - return batadv_v_ogm_init(bat_priv); + int ret = 0; + + ret = batadv_v_ogm_init(bat_priv); + if (ret < 0) + return ret; + + /* set default throughput difference threshold to 5Mbps */ + atomic_set(&bat_priv->gw.sel_class, 50); + + return 0; } /** diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index 6fbba4eb0617..1aeeadca620c 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -73,13 +73,12 @@ struct batadv_orig_node *batadv_v_ogm_orig_get(struct batadv_priv *bat_priv, if (!orig_node) return NULL; + kref_get(&orig_node->refcount); hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig, batadv_choose_orig, orig_node, &orig_node->hash_entry); if (hash_added != 0) { - /* orig_node->refcounter is initialised to 2 by - * batadv_orig_node_new() - */ + /* remove refcnt for newly created orig_node and hash entry */ batadv_orig_node_put(orig_node); batadv_orig_node_put(orig_node); orig_node = NULL; diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index ad2ffe16d29f..e7f690b571ea 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -35,6 +35,7 @@ #include <linux/list.h> #include <linux/lockdep.h> #include <linux/netdevice.h> +#include <linux/netlink.h> #include <linux/rculist.h> #include <linux/rcupdate.h> #include <linux/seq_file.h> @@ -45,12 +46,18 @@ #include <linux/string.h> #include <linux/workqueue.h> #include <net/arp.h> +#include <net/genetlink.h> +#include <net/netlink.h> +#include <net/sock.h> +#include <uapi/linux/batman_adv.h> #include "hard-interface.h" #include "hash.h" #include "log.h" +#include "netlink.h" #include "originator.h" #include "packet.h" +#include "soft-interface.h" #include "sysfs.h" #include "translation-table.h" @@ -519,11 +526,9 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig, atomic_set(&entry->wait_periods, 0); 
ether_addr_copy(entry->orig, orig); INIT_WORK(&entry->report_work, batadv_bla_loopdetect_report); - - /* one for the hash, one for returning */ kref_init(&entry->refcount); - kref_get(&entry->refcount); + kref_get(&entry->refcount); hash_added = batadv_hash_add(bat_priv->bla.backbone_hash, batadv_compare_backbone_gw, batadv_choose_backbone_gw, entry, @@ -711,12 +716,13 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv, claim->lasttime = jiffies; kref_get(&backbone_gw->refcount); claim->backbone_gw = backbone_gw; - kref_init(&claim->refcount); - kref_get(&claim->refcount); + batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n", mac, BATADV_PRINT_VID(vid)); + + kref_get(&claim->refcount); hash_added = batadv_hash_add(bat_priv->bla.claim_hash, batadv_compare_claim, batadv_choose_claim, claim, @@ -1148,7 +1154,7 @@ static bool batadv_bla_process_claim(struct batadv_priv *bat_priv, /* Let the loopdetect frames on the mesh in any case. */ if (bla_dst->type == BATADV_CLAIM_TYPE_LOOPDETECT) - return 0; + return false; /* check if it is a claim frame. */ ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst, @@ -1990,6 +1996,7 @@ out: return ret; } +#ifdef CONFIG_BATMAN_ADV_DEBUGFS /** * batadv_bla_claim_table_seq_print_text - print the claim table in a seq file * @seq: seq file to print on @@ -2050,8 +2057,172 @@ out: batadv_hardif_put(primary_if); return 0; } +#endif + +/** + * batadv_bla_claim_dump_entry - dump one entry of the claim table + * to a netlink socket + * @msg: buffer for the message + * @portid: netlink port + * @seq: Sequence number of netlink message + * @primary_if: primary interface + * @claim: entry to dump + * + * Return: 0 or error code. + */ +static int +batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, + struct batadv_hard_iface *primary_if, + struct batadv_bla_claim *claim) +{ + u8 *primary_addr = primary_if->net_dev->dev_addr; + u16 backbone_crc; + bool is_own; + void *hdr; + int ret = -EINVAL; + + hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, + NLM_F_MULTI, BATADV_CMD_GET_BLA_CLAIM); + if (!hdr) { + ret = -ENOBUFS; + goto out; + } + + is_own = batadv_compare_eth(claim->backbone_gw->orig, + primary_addr); + + spin_lock_bh(&claim->backbone_gw->crc_lock); + backbone_crc = claim->backbone_gw->crc; + spin_unlock_bh(&claim->backbone_gw->crc_lock); + + if (is_own) + if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) { + genlmsg_cancel(msg, hdr); + goto out; + } + + if (nla_put(msg, BATADV_ATTR_BLA_ADDRESS, ETH_ALEN, claim->addr) || + nla_put_u16(msg, BATADV_ATTR_BLA_VID, claim->vid) || + nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN, + claim->backbone_gw->orig) || + nla_put_u16(msg, BATADV_ATTR_BLA_CRC, + backbone_crc)) { + genlmsg_cancel(msg, hdr); + goto out; + } + + genlmsg_end(msg, hdr); + ret = 0; + +out: + return ret; +} + +/** + * batadv_bla_claim_dump_bucket - dump one bucket of the claim table + * to a netlink socket + * @msg: buffer for the message + * @portid: netlink port + * @seq: Sequence number of netlink message + * @primary_if: primary interface + * @head: bucket to dump + * @idx_skip: How many entries to skip + * + * Return: always 0. 
+ */ +static int +batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq, + struct batadv_hard_iface *primary_if, + struct hlist_head *head, int *idx_skip) +{ + struct batadv_bla_claim *claim; + int idx = 0; + + rcu_read_lock(); + hlist_for_each_entry_rcu(claim, head, hash_entry) { + if (idx++ < *idx_skip) + continue; + if (batadv_bla_claim_dump_entry(msg, portid, seq, + primary_if, claim)) { + *idx_skip = idx - 1; + goto unlock; + } + } + + *idx_skip = idx; +unlock: + rcu_read_unlock(); + return 0; +} /** + * batadv_bla_claim_dump - dump claim table to a netlink socket + * @msg: buffer for the message + * @cb: callback structure containing arguments + * + * Return: message length. + */ +int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb) +{ + struct batadv_hard_iface *primary_if = NULL; + int portid = NETLINK_CB(cb->skb).portid; + struct net *net = sock_net(cb->skb->sk); + struct net_device *soft_iface; + struct batadv_hashtable *hash; + struct batadv_priv *bat_priv; + int bucket = cb->args[0]; + struct hlist_head *head; + int idx = cb->args[1]; + int ifindex; + int ret = 0; + + ifindex = batadv_netlink_get_ifindex(cb->nlh, + BATADV_ATTR_MESH_IFINDEX); + if (!ifindex) + return -EINVAL; + + soft_iface = dev_get_by_index(net, ifindex); + if (!soft_iface || !batadv_softif_is_valid(soft_iface)) { + ret = -ENODEV; + goto out; + } + + bat_priv = netdev_priv(soft_iface); + hash = bat_priv->bla.claim_hash; + + primary_if = batadv_primary_if_get_selected(bat_priv); + if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) { + ret = -ENOENT; + goto out; + } + + while (bucket < hash->size) { + head = &hash->table[bucket]; + + if (batadv_bla_claim_dump_bucket(msg, portid, + cb->nlh->nlmsg_seq, + primary_if, head, &idx)) + break; + bucket++; + } + + cb->args[0] = bucket; + cb->args[1] = idx; + + ret = msg->len; + +out: + if (primary_if) + batadv_hardif_put(primary_if); + + if (soft_iface) + dev_put(soft_iface); + + return ret; +} + +#ifdef CONFIG_BATMAN_ADV_DEBUGFS +/** * batadv_bla_backbone_table_seq_print_text - print the backbone table in a seq * file * @seq: seq file to print on @@ -2114,3 +2285,168 @@ out: batadv_hardif_put(primary_if); return 0; } +#endif + +/** + * batadv_bla_backbone_dump_entry - dump one entry of the backbone table + * to a netlink socket + * @msg: buffer for the message + * @portid: netlink port + * @seq: Sequence number of netlink message + * @primary_if: primary interface + * @backbone_gw: entry to dump + * + * Return: 0 or error code. 
+ */ +static int +batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, + struct batadv_hard_iface *primary_if, + struct batadv_bla_backbone_gw *backbone_gw) +{ + u8 *primary_addr = primary_if->net_dev->dev_addr; + u16 backbone_crc; + bool is_own; + int msecs; + void *hdr; + int ret = -EINVAL; + + hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, + NLM_F_MULTI, BATADV_CMD_GET_BLA_BACKBONE); + if (!hdr) { + ret = -ENOBUFS; + goto out; + } + + is_own = batadv_compare_eth(backbone_gw->orig, primary_addr); + + spin_lock_bh(&backbone_gw->crc_lock); + backbone_crc = backbone_gw->crc; + spin_unlock_bh(&backbone_gw->crc_lock); + + msecs = jiffies_to_msecs(jiffies - backbone_gw->lasttime); + + if (is_own) + if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) { + genlmsg_cancel(msg, hdr); + goto out; + } + + if (nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN, + backbone_gw->orig) || + nla_put_u16(msg, BATADV_ATTR_BLA_VID, backbone_gw->vid) || + nla_put_u16(msg, BATADV_ATTR_BLA_CRC, + backbone_crc) || + nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, msecs)) { + genlmsg_cancel(msg, hdr); + goto out; + } + + genlmsg_end(msg, hdr); + ret = 0; + +out: + return ret; +} + +/** + * batadv_bla_backbone_dump_bucket - dump one bucket of the backbone table + * to a netlink socket + * @msg: buffer for the message + * @portid: netlink port + * @seq: Sequence number of netlink message + * @primary_if: primary interface + * @head: bucket to dump + * @idx_skip: How many entries to skip + * + * Return: always 0. + */ +static int +batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq, + struct batadv_hard_iface *primary_if, + struct hlist_head *head, int *idx_skip) +{ + struct batadv_bla_backbone_gw *backbone_gw; + int idx = 0; + + rcu_read_lock(); + hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) { + if (idx++ < *idx_skip) + continue; + if (batadv_bla_backbone_dump_entry(msg, portid, seq, + primary_if, backbone_gw)) { + *idx_skip = idx - 1; + goto unlock; + } + } + + *idx_skip = idx; +unlock: + rcu_read_unlock(); + return 0; +} + +/** + * batadv_bla_backbone_dump - dump backbone table to a netlink socket + * @msg: buffer for the message + * @cb: callback structure containing arguments + * + * Return: message length. 
+ */ +int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb) +{ + struct batadv_hard_iface *primary_if = NULL; + int portid = NETLINK_CB(cb->skb).portid; + struct net *net = sock_net(cb->skb->sk); + struct net_device *soft_iface; + struct batadv_hashtable *hash; + struct batadv_priv *bat_priv; + int bucket = cb->args[0]; + struct hlist_head *head; + int idx = cb->args[1]; + int ifindex; + int ret = 0; + + ifindex = batadv_netlink_get_ifindex(cb->nlh, + BATADV_ATTR_MESH_IFINDEX); + if (!ifindex) + return -EINVAL; + + soft_iface = dev_get_by_index(net, ifindex); + if (!soft_iface || !batadv_softif_is_valid(soft_iface)) { + ret = -ENODEV; + goto out; + } + + bat_priv = netdev_priv(soft_iface); + hash = bat_priv->bla.backbone_hash; + + primary_if = batadv_primary_if_get_selected(bat_priv); + if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) { + ret = -ENOENT; + goto out; + } + + while (bucket < hash->size) { + head = &hash->table[bucket]; + + if (batadv_bla_backbone_dump_bucket(msg, portid, + cb->nlh->nlmsg_seq, + primary_if, head, &idx)) + break; + bucket++; + } + + cb->args[0] = bucket; + cb->args[1] = idx; + + ret = msg->len; + +out: + if (primary_if) + batadv_hardif_put(primary_if); + + if (soft_iface) + dev_put(soft_iface); + + return ret; +} diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h index 0f01daeb359e..1ae93e46fb98 100644 --- a/net/batman-adv/bridge_loop_avoidance.h +++ b/net/batman-adv/bridge_loop_avoidance.h @@ -23,6 +23,7 @@ #include <linux/types.h> struct net_device; +struct netlink_callback; struct seq_file; struct sk_buff; @@ -35,8 +36,10 @@ bool batadv_bla_is_backbone_gw(struct sk_buff *skb, struct batadv_orig_node *orig_node, int hdr_size); int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset); +int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb); int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset); +int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb); bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig, unsigned short vid); bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, @@ -47,7 +50,7 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv, void batadv_bla_status_update(struct net_device *net_dev); int batadv_bla_init(struct batadv_priv *bat_priv); void batadv_bla_free(struct batadv_priv *bat_priv); - +int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb); #define BATADV_BLA_CRC_INIT 0 #else /* ifdef CONFIG_BATMAN_ADV_BLA */ @@ -112,6 +115,18 @@ static inline void batadv_bla_free(struct batadv_priv *bat_priv) { } +static inline int batadv_bla_claim_dump(struct sk_buff *msg, + struct netlink_callback *cb) +{ + return -EOPNOTSUPP; +} + +static inline int batadv_bla_backbone_dump(struct sk_buff *msg, + struct netlink_callback *cb) +{ + return -EOPNOTSUPP; +} + #endif /* ifdef CONFIG_BATMAN_ADV_BLA */ #endif /* ifndef _NET_BATMAN_ADV_BLA_H_ */ diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c index 1d68b6e63b96..b4ffba7dd583 100644 --- a/net/batman-adv/debugfs.c +++ b/net/batman-adv/debugfs.c @@ -31,6 +31,7 @@ #include <linux/stddef.h> #include <linux/stringify.h> #include <linux/sysfs.h> +#include <net/net_namespace.h> #include "bat_algo.h" #include "bridge_loop_avoidance.h" @@ -305,12 +306,16 @@ void batadv_debugfs_destroy(void) */ int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface) { + 
struct net *net = dev_net(hard_iface->net_dev); struct batadv_debuginfo **bat_debug; struct dentry *file; if (!batadv_debugfs) goto out; + if (net != &init_net) + return 0; + hard_iface->debug_dir = debugfs_create_dir(hard_iface->net_dev->name, batadv_debugfs); if (!hard_iface->debug_dir) @@ -341,6 +346,11 @@ out: */ void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface) { + struct net *net = dev_net(hard_iface->net_dev); + + if (net != &init_net) + return; + if (batadv_debugfs) { debugfs_remove_recursive(hard_iface->debug_dir); hard_iface->debug_dir = NULL; @@ -351,11 +361,15 @@ int batadv_debugfs_add_meshif(struct net_device *dev) { struct batadv_priv *bat_priv = netdev_priv(dev); struct batadv_debuginfo **bat_debug; + struct net *net = dev_net(dev); struct dentry *file; if (!batadv_debugfs) goto out; + if (net != &init_net) + return 0; + bat_priv->debug_dir = debugfs_create_dir(dev->name, batadv_debugfs); if (!bat_priv->debug_dir) goto out; @@ -392,6 +406,10 @@ out: void batadv_debugfs_del_meshif(struct net_device *dev) { struct batadv_priv *bat_priv = netdev_priv(dev); + struct net *net = dev_net(dev); + + if (net != &init_net) + return; batadv_debug_log_cleanup(bat_priv); diff --git a/net/batman-adv/debugfs.h b/net/batman-adv/debugfs.h index 1ab4e2e63afc..c68ff3dcb926 100644 --- a/net/batman-adv/debugfs.h +++ b/net/batman-adv/debugfs.h @@ -26,7 +26,7 @@ struct net_device; #define BATADV_DEBUGFS_SUBDIR "batman_adv" -#if IS_ENABLED(CONFIG_DEBUG_FS) +#if IS_ENABLED(CONFIG_BATMAN_ADV_DEBUGFS) void batadv_debugfs_init(void); void batadv_debugfs_destroy(void); diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index b1cc8bfe11ac..e257efdc5d03 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@ -343,8 +343,8 @@ static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip, ether_addr_copy(dat_entry->mac_addr, mac_addr); dat_entry->last_update = jiffies; kref_init(&dat_entry->refcount); - kref_get(&dat_entry->refcount); + kref_get(&dat_entry->refcount); hash_added = batadv_hash_add(bat_priv->dat.hash, batadv_compare_dat, batadv_hash_dat, dat_entry, &dat_entry->hash_entry); @@ -795,6 +795,7 @@ void batadv_dat_free(struct batadv_priv *bat_priv) batadv_dat_hash_free(bat_priv); } +#ifdef CONFIG_BATMAN_ADV_DEBUGFS /** * batadv_dat_cache_seq_print_text - print the local DAT hash table * @seq: seq file to print on @@ -846,6 +847,7 @@ out: batadv_hardif_put(primary_if); return 0; } +#endif /** * batadv_arp_get_type - parse an ARP packet and gets the type diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index 63a805d3f96e..de055d64debe 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -20,6 +20,7 @@ #include <linux/atomic.h> #include <linux/byteorder/generic.h> +#include <linux/errno.h> #include <linux/etherdevice.h> #include <linux/fs.h> #include <linux/if_ether.h> @@ -31,6 +32,7 @@ #include <linux/kref.h> #include <linux/list.h> #include <linux/netdevice.h> +#include <linux/netlink.h> #include <linux/rculist.h> #include <linux/rcupdate.h> #include <linux/seq_file.h> @@ -39,13 +41,17 @@ #include <linux/spinlock.h> #include <linux/stddef.h> #include <linux/udp.h> +#include <net/sock.h> +#include <uapi/linux/batman_adv.h> #include "gateway_common.h" #include "hard-interface.h" #include "log.h" +#include "netlink.h" #include "originator.h" #include "packet.h" #include "routing.h" +#include "soft-interface.h" #include 
"sysfs.h" #include "translation-table.h" @@ -80,12 +86,12 @@ static void batadv_gw_node_release(struct kref *ref) * batadv_gw_node_put - decrement the gw_node refcounter and possibly release it * @gw_node: gateway node to free */ -static void batadv_gw_node_put(struct batadv_gw_node *gw_node) +void batadv_gw_node_put(struct batadv_gw_node *gw_node) { kref_put(&gw_node->refcount, batadv_gw_node_release); } -static struct batadv_gw_node * +struct batadv_gw_node * batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv) { struct batadv_gw_node *gw_node; @@ -164,86 +170,6 @@ void batadv_gw_reselect(struct batadv_priv *bat_priv) atomic_set(&bat_priv->gw.reselect, 1); } -static struct batadv_gw_node * -batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv) -{ - struct batadv_neigh_node *router; - struct batadv_neigh_ifinfo *router_ifinfo; - struct batadv_gw_node *gw_node, *curr_gw = NULL; - u64 max_gw_factor = 0; - u64 tmp_gw_factor = 0; - u8 max_tq = 0; - u8 tq_avg; - struct batadv_orig_node *orig_node; - - rcu_read_lock(); - hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) { - orig_node = gw_node->orig_node; - router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT); - if (!router) - continue; - - router_ifinfo = batadv_neigh_ifinfo_get(router, - BATADV_IF_DEFAULT); - if (!router_ifinfo) - goto next; - - if (!kref_get_unless_zero(&gw_node->refcount)) - goto next; - - tq_avg = router_ifinfo->bat_iv.tq_avg; - - switch (atomic_read(&bat_priv->gw.sel_class)) { - case 1: /* fast connection */ - tmp_gw_factor = tq_avg * tq_avg; - tmp_gw_factor *= gw_node->bandwidth_down; - tmp_gw_factor *= 100 * 100; - tmp_gw_factor >>= 18; - - if ((tmp_gw_factor > max_gw_factor) || - ((tmp_gw_factor == max_gw_factor) && - (tq_avg > max_tq))) { - if (curr_gw) - batadv_gw_node_put(curr_gw); - curr_gw = gw_node; - kref_get(&curr_gw->refcount); - } - break; - - default: /* 2: stable connection (use best statistic) - * 3: fast-switch (use best statistic but change as - * soon as a better gateway appears) - * XX: late-switch (use best statistic but change as - * soon as a better gateway appears which has - * $routing_class more tq points) - */ - if (tq_avg > max_tq) { - if (curr_gw) - batadv_gw_node_put(curr_gw); - curr_gw = gw_node; - kref_get(&curr_gw->refcount); - } - break; - } - - if (tq_avg > max_tq) - max_tq = tq_avg; - - if (tmp_gw_factor > max_gw_factor) - max_gw_factor = tmp_gw_factor; - - batadv_gw_node_put(gw_node); - -next: - batadv_neigh_node_put(router); - if (router_ifinfo) - batadv_neigh_ifinfo_put(router_ifinfo); - } - rcu_read_unlock(); - - return curr_gw; -} - /** * batadv_gw_check_client_stop - check if client mode has been switched off * @bat_priv: the bat priv with all the soft interface information @@ -287,12 +213,19 @@ void batadv_gw_election(struct batadv_priv *bat_priv) if (atomic_read(&bat_priv->gw.mode) != BATADV_GW_MODE_CLIENT) goto out; + if (!bat_priv->algo_ops->gw.get_best_gw_node) + goto out; + curr_gw = batadv_gw_get_selected_gw_node(bat_priv); if (!batadv_atomic_dec_not_zero(&bat_priv->gw.reselect) && curr_gw) goto out; - next_gw = batadv_gw_get_best_gw_node(bat_priv); + /* if gw.reselect is set to 1 it means that a previous call to + * gw.is_eligible() said that we have a new best GW, therefore it can + * now be picked from the list and selected + */ + next_gw = bat_priv->algo_ops->gw.get_best_gw_node(bat_priv); if (curr_gw == next_gw) goto out; @@ -360,70 +293,31 @@ out: void batadv_gw_check_election(struct batadv_priv *bat_priv, struct batadv_orig_node 
*orig_node) { - struct batadv_neigh_ifinfo *router_orig_tq = NULL; - struct batadv_neigh_ifinfo *router_gw_tq = NULL; struct batadv_orig_node *curr_gw_orig; - struct batadv_neigh_node *router_gw = NULL; - struct batadv_neigh_node *router_orig = NULL; - u8 gw_tq_avg, orig_tq_avg; + + /* abort immediately if the routing algorithm does not support gateway + * election + */ + if (!bat_priv->algo_ops->gw.is_eligible) + return; curr_gw_orig = batadv_gw_get_selected_orig(bat_priv); if (!curr_gw_orig) goto reselect; - router_gw = batadv_orig_router_get(curr_gw_orig, BATADV_IF_DEFAULT); - if (!router_gw) - goto reselect; - - router_gw_tq = batadv_neigh_ifinfo_get(router_gw, - BATADV_IF_DEFAULT); - if (!router_gw_tq) - goto reselect; - /* this node already is the gateway */ if (curr_gw_orig == orig_node) goto out; - router_orig = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT); - if (!router_orig) + if (!bat_priv->algo_ops->gw.is_eligible(bat_priv, curr_gw_orig, + orig_node)) goto out; - router_orig_tq = batadv_neigh_ifinfo_get(router_orig, - BATADV_IF_DEFAULT); - if (!router_orig_tq) - goto out; - - gw_tq_avg = router_gw_tq->bat_iv.tq_avg; - orig_tq_avg = router_orig_tq->bat_iv.tq_avg; - - /* the TQ value has to be better */ - if (orig_tq_avg < gw_tq_avg) - goto out; - - /* if the routing class is greater than 3 the value tells us how much - * greater the TQ value of the new gateway must be - */ - if ((atomic_read(&bat_priv->gw.sel_class) > 3) && - (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw.sel_class))) - goto out; - - batadv_dbg(BATADV_DBG_BATMAN, bat_priv, - "Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i)\n", - gw_tq_avg, orig_tq_avg); - reselect: batadv_gw_reselect(bat_priv); out: if (curr_gw_orig) batadv_orig_node_put(curr_gw_orig); - if (router_gw) - batadv_neigh_node_put(router_gw); - if (router_orig) - batadv_neigh_node_put(router_orig); - if (router_gw_tq) - batadv_neigh_ifinfo_put(router_gw_tq); - if (router_orig_tq) - batadv_neigh_ifinfo_put(router_orig_tq); } /** @@ -445,14 +339,15 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv, if (!gw_node) return; - kref_get(&orig_node->refcount); + kref_init(&gw_node->refcount); INIT_HLIST_NODE(&gw_node->list); + kref_get(&orig_node->refcount); gw_node->orig_node = orig_node; gw_node->bandwidth_down = ntohl(gateway->bandwidth_down); gw_node->bandwidth_up = ntohl(gateway->bandwidth_up); - kref_init(&gw_node->refcount); spin_lock_bh(&bat_priv->gw.list_lock); + kref_get(&gw_node->refcount); hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.list); spin_unlock_bh(&bat_priv->gw.list_lock); @@ -463,6 +358,9 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv, ntohl(gateway->bandwidth_down) % 10, ntohl(gateway->bandwidth_up) / 10, ntohl(gateway->bandwidth_up) % 10); + + /* don't return reference to new gw_node */ + batadv_gw_node_put(gw_node); } /** @@ -472,9 +370,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv, * * Return: gateway node if found or NULL otherwise. 
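With the hardcoded TQ-based selection removed here, gateway election is delegated to the routing algorithm through the new algo_ops->gw hooks: get_best_gw_node() picks the gateway and is_eligible() decides whether a newly seen originator justifies a re-election. A rough sketch, not part of this patch, of how an algorithm could populate these hooks (my_algo_* is hypothetical; the real B.A.T.M.A.N. IV/V implementations are wired up elsewhere in this series):

#include "bat_algo.h"
#include "types.h"

static struct batadv_gw_node *
my_algo_gw_get_best(struct batadv_priv *bat_priv)
{
	/* walk bat_priv->gw.list, pick the preferred gateway and return it
	 * with a reference taken, or NULL if none qualifies
	 */
	return NULL;
}

static bool my_algo_gw_is_eligible(struct batadv_priv *bat_priv,
				   struct batadv_orig_node *curr_gw_orig,
				   struct batadv_orig_node *orig_node)
{
	/* return true when orig_node improves enough over curr_gw_orig to
	 * justify triggering a re-election
	 */
	return false;
}

static struct batadv_algo_ops my_algo_ops = {
	.name = "MY_ALGO",
	.gw = {
		.get_best_gw_node = my_algo_gw_get_best,
		.is_eligible = my_algo_gw_is_eligible,
	},
};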
*/ -static struct batadv_gw_node * -batadv_gw_node_get(struct batadv_priv *bat_priv, - struct batadv_orig_node *orig_node) +struct batadv_gw_node *batadv_gw_node_get(struct batadv_priv *bat_priv, + struct batadv_orig_node *orig_node) { struct batadv_gw_node *gw_node_tmp, *gw_node = NULL; @@ -585,81 +482,87 @@ void batadv_gw_node_free(struct batadv_priv *bat_priv) spin_unlock_bh(&bat_priv->gw.list_lock); } -/* fails if orig_node has no router */ -static int batadv_write_buffer_text(struct batadv_priv *bat_priv, - struct seq_file *seq, - const struct batadv_gw_node *gw_node) +#ifdef CONFIG_BATMAN_ADV_DEBUGFS +int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset) { - struct batadv_gw_node *curr_gw; - struct batadv_neigh_node *router; - struct batadv_neigh_ifinfo *router_ifinfo = NULL; - int ret = -1; + struct net_device *net_dev = (struct net_device *)seq->private; + struct batadv_priv *bat_priv = netdev_priv(net_dev); + struct batadv_hard_iface *primary_if; - router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT); - if (!router) - goto out; + primary_if = batadv_seq_print_text_primary_if_get(seq); + if (!primary_if) + return 0; - router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT); - if (!router_ifinfo) - goto out; + seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n", + BATADV_SOURCE_VERSION, primary_if->net_dev->name, + primary_if->net_dev->dev_addr, net_dev->name, + bat_priv->algo_ops->name); - curr_gw = batadv_gw_get_selected_gw_node(bat_priv); + batadv_hardif_put(primary_if); - seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %u.%u/%u.%u MBit\n", - (curr_gw == gw_node ? "=>" : " "), - gw_node->orig_node->orig, - router_ifinfo->bat_iv.tq_avg, router->addr, - router->if_incoming->net_dev->name, - gw_node->bandwidth_down / 10, - gw_node->bandwidth_down % 10, - gw_node->bandwidth_up / 10, - gw_node->bandwidth_up % 10); - ret = seq_has_overflowed(seq) ? -1 : 0; + if (!bat_priv->algo_ops->gw.print) { + seq_puts(seq, + "No printing function for this routing protocol\n"); + return 0; + } - if (curr_gw) - batadv_gw_node_put(curr_gw); -out: - if (router_ifinfo) - batadv_neigh_ifinfo_put(router_ifinfo); - if (router) - batadv_neigh_node_put(router); - return ret; + bat_priv->algo_ops->gw.print(bat_priv, seq); + + return 0; } +#endif -int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset) +/** + * batadv_gw_dump - Dump gateways into a message + * @msg: Netlink message to dump into + * @cb: Control block containing additional options + * + * Return: Error code, or length of message + */ +int batadv_gw_dump(struct sk_buff *msg, struct netlink_callback *cb) { - struct net_device *net_dev = (struct net_device *)seq->private; - struct batadv_priv *bat_priv = netdev_priv(net_dev); - struct batadv_hard_iface *primary_if; - struct batadv_gw_node *gw_node; - int gw_count = 0; - - primary_if = batadv_seq_print_text_primary_if_get(seq); - if (!primary_if) + struct batadv_hard_iface *primary_if = NULL; + struct net *net = sock_net(cb->skb->sk); + struct net_device *soft_iface; + struct batadv_priv *bat_priv; + int ifindex; + int ret; + + ifindex = batadv_netlink_get_ifindex(cb->nlh, + BATADV_ATTR_MESH_IFINDEX); + if (!ifindex) + return -EINVAL; + + soft_iface = dev_get_by_index(net, ifindex); + if (!soft_iface || !batadv_softif_is_valid(soft_iface)) { + ret = -ENODEV; goto out; + } - seq_printf(seq, - " Gateway (#/255) Nexthop [outgoingIF]: advertised uplink bandwidth ... [B.A.T.M.A.N. 
adv %s, MainIF/MAC: %s/%pM (%s)]\n", - BATADV_SOURCE_VERSION, primary_if->net_dev->name, - primary_if->net_dev->dev_addr, net_dev->name); + bat_priv = netdev_priv(soft_iface); - rcu_read_lock(); - hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) { - /* fails if orig_node has no router */ - if (batadv_write_buffer_text(bat_priv, seq, gw_node) < 0) - continue; + primary_if = batadv_primary_if_get_selected(bat_priv); + if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) { + ret = -ENOENT; + goto out; + } - gw_count++; + if (!bat_priv->algo_ops->gw.dump) { + ret = -EOPNOTSUPP; + goto out; } - rcu_read_unlock(); - if (gw_count == 0) - seq_puts(seq, "No gateways in range ...\n"); + bat_priv->algo_ops->gw.dump(msg, cb, bat_priv); + + ret = msg->len; out: if (primary_if) batadv_hardif_put(primary_if); - return 0; + if (soft_iface) + dev_put(soft_iface); + + return ret; } /** diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h index 582dd8c413c8..859166d03561 100644 --- a/net/batman-adv/gateway_client.h +++ b/net/batman-adv/gateway_client.h @@ -23,6 +23,7 @@ #include <linux/types.h> struct batadv_tvlv_gateway_data; +struct netlink_callback; struct seq_file; struct sk_buff; @@ -39,10 +40,16 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv, void batadv_gw_node_delete(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node); void batadv_gw_node_free(struct batadv_priv *bat_priv); +void batadv_gw_node_put(struct batadv_gw_node *gw_node); +struct batadv_gw_node * +batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv); int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset); +int batadv_gw_dump(struct sk_buff *msg, struct netlink_callback *cb); bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb); enum batadv_dhcp_recipient batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len, u8 *chaddr); +struct batadv_gw_node *batadv_gw_node_get(struct batadv_priv *bat_priv, + struct batadv_orig_node *orig_node); #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */ diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c index d7bc6a87bcc9..21184810d89f 100644 --- a/net/batman-adv/gateway_common.c +++ b/net/batman-adv/gateway_common.c @@ -241,10 +241,9 @@ static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv, batadv_gw_node_update(bat_priv, orig, &gateway); - /* restart gateway selection if fast or late switching was enabled */ + /* restart gateway selection */ if ((gateway.bandwidth_down != 0) && - (atomic_read(&bat_priv->gw.mode) == BATADV_GW_MODE_CLIENT) && - (atomic_read(&bat_priv->gw.sel_class) > 2)) + (atomic_read(&bat_priv->gw.mode) == BATADV_GW_MODE_CLIENT)) batadv_gw_check_election(bat_priv, orig); } diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 1f9080840566..08ce36147c4c 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -35,7 +35,8 @@ #include <linux/rtnetlink.h> #include <linux/slab.h> #include <linux/spinlock.h> -#include <linux/workqueue.h> +#include <net/net_namespace.h> +#include <net/rtnetlink.h> #include "bat_v.h" #include "bridge_loop_avoidance.h" @@ -85,25 +86,55 @@ out: } /** + * batadv_getlink_net - return link net namespace (of use fallback) + * @netdev: net_device to check + * @fallback_net: return in case get_link_net is not available for @netdev + * + * Return: result of rtnl_link_ops->get_link_net or @fallback_net + */ +static const struct net 
*batadv_getlink_net(const struct net_device *netdev, + const struct net *fallback_net) +{ + if (!netdev->rtnl_link_ops) + return fallback_net; + + if (!netdev->rtnl_link_ops->get_link_net) + return fallback_net; + + return netdev->rtnl_link_ops->get_link_net(netdev); +} + +/** * batadv_mutual_parents - check if two devices are each others parent - * @dev1: 1st net_device - * @dev2: 2nd net_device + * @dev1: 1st net dev + * @net1: 1st devices netns + * @dev2: 2nd net dev + * @net2: 2nd devices netns * * veth devices come in pairs and each is the parent of the other! * * Return: true if the devices are each others parent, otherwise false */ static bool batadv_mutual_parents(const struct net_device *dev1, - const struct net_device *dev2) + const struct net *net1, + const struct net_device *dev2, + const struct net *net2) { int dev1_parent_iflink = dev_get_iflink(dev1); int dev2_parent_iflink = dev_get_iflink(dev2); + const struct net *dev1_parent_net; + const struct net *dev2_parent_net; + + dev1_parent_net = batadv_getlink_net(dev1, net1); + dev2_parent_net = batadv_getlink_net(dev2, net2); if (!dev1_parent_iflink || !dev2_parent_iflink) return false; return (dev1_parent_iflink == dev2->ifindex) && - (dev2_parent_iflink == dev1->ifindex); + (dev2_parent_iflink == dev1->ifindex) && + net_eq(dev1_parent_net, net2) && + net_eq(dev2_parent_net, net1); } /** @@ -121,8 +152,9 @@ static bool batadv_mutual_parents(const struct net_device *dev1, */ static bool batadv_is_on_batman_iface(const struct net_device *net_dev) { - struct net_device *parent_dev; struct net *net = dev_net(net_dev); + struct net_device *parent_dev; + const struct net *parent_net; bool ret; /* check if this is a batman-adv mesh interface */ @@ -134,13 +166,16 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev) dev_get_iflink(net_dev) == net_dev->ifindex) return false; + parent_net = batadv_getlink_net(net_dev, net); + /* recurse over the parent device */ - parent_dev = __dev_get_by_index(net, dev_get_iflink(net_dev)); + parent_dev = __dev_get_by_index((struct net *)parent_net, + dev_get_iflink(net_dev)); /* if we got a NULL parent_dev there is something broken.. */ if (WARN(!parent_dev, "Cannot find parent device")) return false; - if (batadv_mutual_parents(net_dev, parent_dev)) + if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net)) return false; ret = batadv_is_on_batman_iface(parent_dev); @@ -625,25 +660,6 @@ out: batadv_hardif_put(primary_if); } -/** - * batadv_hardif_remove_interface_finish - cleans up the remains of a hardif - * @work: work queue item - * - * Free the parts of the hard interface which can not be removed under - * rtnl lock (to prevent deadlock situations). 
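The namespace-aware checks above matter for veth-style pairs whose peer lives in another namespace: dev_get_iflink() yields an ifindex that is only valid in the namespace reported by rtnl_link_ops->get_link_net(), so the parent has to be looked up there rather than in dev_net(). A condensed sketch of that lookup, for illustration only (my_get_parent() is hypothetical):

#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/rtnetlink.h>

static struct net_device *my_get_parent(struct net_device *dev)
{
	const struct net *link_net = dev_net(dev);

	/* fall back to dev's own netns when get_link_net is not provided */
	if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net)
		link_net = dev->rtnl_link_ops->get_link_net(dev);

	/* caller must hold RTNL for __dev_get_by_index() */
	return __dev_get_by_index((struct net *)link_net,
				  dev_get_iflink(dev));
}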
- */ -static void batadv_hardif_remove_interface_finish(struct work_struct *work) -{ - struct batadv_hard_iface *hard_iface; - - hard_iface = container_of(work, struct batadv_hard_iface, - cleanup_work); - - batadv_debugfs_del_hardif(hard_iface); - batadv_sysfs_del_hardif(&hard_iface->hardif_obj); - batadv_hardif_put(hard_iface); -} - static struct batadv_hard_iface * batadv_hardif_add_interface(struct net_device *net_dev) { @@ -676,10 +692,9 @@ batadv_hardif_add_interface(struct net_device *net_dev) INIT_LIST_HEAD(&hard_iface->list); INIT_HLIST_HEAD(&hard_iface->neigh_list); - INIT_WORK(&hard_iface->cleanup_work, - batadv_hardif_remove_interface_finish); spin_lock_init(&hard_iface->neigh_list_lock); + kref_init(&hard_iface->refcount); hard_iface->num_bcasts = BATADV_NUM_BCASTS_DEFAULT; if (batadv_is_wifi_netdev(net_dev)) @@ -687,11 +702,8 @@ batadv_hardif_add_interface(struct net_device *net_dev) batadv_v_hardif_init(hard_iface); - /* extra reference for return */ - kref_init(&hard_iface->refcount); - kref_get(&hard_iface->refcount); - batadv_check_known_mac_addr(hard_iface->net_dev); + kref_get(&hard_iface->refcount); list_add_tail_rcu(&hard_iface->list, &batadv_hardif_list); return hard_iface; @@ -713,13 +725,15 @@ static void batadv_hardif_remove_interface(struct batadv_hard_iface *hard_iface) /* first deactivate interface */ if (hard_iface->if_status != BATADV_IF_NOT_IN_USE) batadv_hardif_disable_interface(hard_iface, - BATADV_IF_CLEANUP_AUTO); + BATADV_IF_CLEANUP_KEEP); if (hard_iface->if_status != BATADV_IF_NOT_IN_USE) return; hard_iface->if_status = BATADV_IF_TO_BE_REMOVED; - queue_work(batadv_event_workqueue, &hard_iface->cleanup_work); + batadv_debugfs_del_hardif(hard_iface); + batadv_sysfs_del_hardif(&hard_iface->hardif_obj); + batadv_hardif_put(hard_iface); } void batadv_hardif_remove_interfaces(void) diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h index 618d5de06f20..e44a7da51431 100644 --- a/net/batman-adv/icmp_socket.h +++ b/net/batman-adv/icmp_socket.h @@ -26,9 +26,25 @@ struct batadv_icmp_header; #define BATADV_ICMP_SOCKET "socket" -void batadv_socket_init(void); int batadv_socket_setup(struct batadv_priv *bat_priv); + +#ifdef CONFIG_BATMAN_ADV_DEBUGFS + +void batadv_socket_init(void); void batadv_socket_receive_packet(struct batadv_icmp_header *icmph, size_t icmp_len); +#else + +static inline void batadv_socket_init(void) +{ +} + +static inline void +batadv_socket_receive_packet(struct batadv_icmp_header *icmph, size_t icmp_len) +{ +} + +#endif + #endif /* _NET_BATMAN_ADV_ICMP_SOCKET_H_ */ diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index fe4c5e29f96b..2c017ab47557 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -82,6 +82,12 @@ static void batadv_recv_handler_init(void); static int __init batadv_init(void) { + int ret; + + ret = batadv_tt_cache_init(); + if (ret < 0) + return ret; + INIT_LIST_HEAD(&batadv_hardif_list); batadv_algo_init(); @@ -93,9 +99,8 @@ static int __init batadv_init(void) batadv_tp_meter_init(); batadv_event_workqueue = create_singlethread_workqueue("bat_events"); - if (!batadv_event_workqueue) - return -ENOMEM; + goto err_create_wq; batadv_socket_init(); batadv_debugfs_init(); @@ -108,6 +113,11 @@ static int __init batadv_init(void) BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION); return 0; + +err_create_wq: + batadv_tt_cache_destroy(); + + return -ENOMEM; } static void __exit batadv_exit(void) @@ -123,6 +133,8 @@ static void __exit batadv_exit(void) batadv_event_workqueue = NULL; rcu_barrier(); 
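batadv_init() now sets up the translation-table kmem caches before anything else and unwinds them if the event workqueue cannot be created, while batadv_exit() destroys them only after rcu_barrier() has flushed outstanding call_rcu() frees. The generic shape of that init/exit pattern, sketched with hypothetical my_* names rather than the batman-adv ones:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct my_entry {
	struct rcu_head rcu;
	u32 value;
};

static struct kmem_cache *my_cache;
static struct workqueue_struct *my_wq;

static int __init my_init(void)
{
	my_cache = kmem_cache_create("my_cache", sizeof(struct my_entry), 0,
				     SLAB_HWCACHE_ALIGN, NULL);
	if (!my_cache)
		return -ENOMEM;

	my_wq = create_singlethread_workqueue("my_events");
	if (!my_wq)
		goto err_create_wq;

	return 0;

err_create_wq:
	kmem_cache_destroy(my_cache);
	return -ENOMEM;
}

static void __exit my_exit(void)
{
	destroy_workqueue(my_wq);
	my_wq = NULL;

	rcu_barrier();		/* flush pending call_rcu() frees first */
	kmem_cache_destroy(my_cache);
}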
+ + batadv_tt_cache_destroy(); } int batadv_mesh_init(struct net_device *soft_iface) @@ -270,6 +282,7 @@ bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr) return is_my_mac; } +#ifdef CONFIG_BATMAN_ADV_DEBUGFS /** * batadv_seq_print_text_primary_if_get - called from debugfs table printing * function that requires the primary interface @@ -305,6 +318,7 @@ batadv_seq_print_text_primary_if_get(struct seq_file *seq) out: return primary_if; } +#endif /** * batadv_max_header_len - calculate maximum encapsulation overhead for a @@ -638,3 +652,4 @@ MODULE_AUTHOR(BATADV_DRIVER_AUTHOR); MODULE_DESCRIPTION(BATADV_DRIVER_DESC); MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE); MODULE_VERSION(BATADV_SOURCE_VERSION); +MODULE_ALIAS_RTNL_LINK("batadv"); diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 06a860845434..09af21e27639 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -24,7 +24,7 @@ #define BATADV_DRIVER_DEVICE "batman-adv" #ifndef BATADV_SOURCE_VERSION -#define BATADV_SOURCE_VERSION "2016.3" +#define BATADV_SOURCE_VERSION "2016.4" #endif /* B.A.T.M.A.N. parameters */ diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c index cc915073a753..13661f43386f 100644 --- a/net/batman-adv/multicast.c +++ b/net/batman-adv/multicast.c @@ -528,7 +528,7 @@ update: } return !(mcast_data.flags & - (BATADV_MCAST_WANT_ALL_IPV4 + BATADV_MCAST_WANT_ALL_IPV6)); + (BATADV_MCAST_WANT_ALL_IPV4 | BATADV_MCAST_WANT_ALL_IPV6)); } /** @@ -1134,6 +1134,7 @@ void batadv_mcast_init(struct batadv_priv *bat_priv) BATADV_TVLV_HANDLER_OGM_CIFNOTFND); } +#ifdef CONFIG_BATMAN_ADV_DEBUGFS /** * batadv_mcast_flags_print_header - print own mcast flags to debugfs table * @bat_priv: the bat priv with all the soft interface information @@ -1234,6 +1235,7 @@ int batadv_mcast_flags_seq_print_text(struct seq_file *seq, void *offset) return 0; } +#endif /** * batadv_mcast_free - free the multicast optimizations structures diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c index 231f8eaf075b..18831e72b0fb 100644 --- a/net/batman-adv/netlink.c +++ b/net/batman-adv/netlink.c @@ -18,6 +18,8 @@ #include "netlink.h" #include "main.h" +#include <linux/atomic.h> +#include <linux/byteorder/generic.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/genetlink.h> @@ -26,24 +28,33 @@ #include <linux/netdevice.h> #include <linux/netlink.h> #include <linux/printk.h> +#include <linux/rculist.h> +#include <linux/rcupdate.h> +#include <linux/skbuff.h> #include <linux/stddef.h> #include <linux/types.h> #include <net/genetlink.h> #include <net/netlink.h> +#include <net/sock.h> #include <uapi/linux/batman_adv.h> +#include "bat_algo.h" +#include "bridge_loop_avoidance.h" +#include "gateway_client.h" #include "hard-interface.h" +#include "originator.h" +#include "packet.h" #include "soft-interface.h" #include "tp_meter.h" +#include "translation-table.h" -struct sk_buff; - -static struct genl_family batadv_netlink_family = { +struct genl_family batadv_netlink_family = { .id = GENL_ID_GENERATE, .hdrsize = 0, .name = BATADV_NL_NAME, .version = 1, .maxattr = BATADV_ATTR_MAX, + .netnsok = true, }; /* multicast groups */ @@ -69,9 +80,44 @@ static struct nla_policy batadv_netlink_policy[NUM_BATADV_ATTR] = { [BATADV_ATTR_TPMETER_TEST_TIME] = { .type = NLA_U32 }, [BATADV_ATTR_TPMETER_BYTES] = { .type = NLA_U64 }, [BATADV_ATTR_TPMETER_COOKIE] = { .type = NLA_U32 }, + [BATADV_ATTR_ACTIVE] = { .type = NLA_FLAG }, + [BATADV_ATTR_TT_ADDRESS] = { .len = ETH_ALEN }, + [BATADV_ATTR_TT_TTVN] 
= { .type = NLA_U8 }, + [BATADV_ATTR_TT_LAST_TTVN] = { .type = NLA_U8 }, + [BATADV_ATTR_TT_CRC32] = { .type = NLA_U32 }, + [BATADV_ATTR_TT_VID] = { .type = NLA_U16 }, + [BATADV_ATTR_TT_FLAGS] = { .type = NLA_U32 }, + [BATADV_ATTR_FLAG_BEST] = { .type = NLA_FLAG }, + [BATADV_ATTR_LAST_SEEN_MSECS] = { .type = NLA_U32 }, + [BATADV_ATTR_NEIGH_ADDRESS] = { .len = ETH_ALEN }, + [BATADV_ATTR_TQ] = { .type = NLA_U8 }, + [BATADV_ATTR_THROUGHPUT] = { .type = NLA_U32 }, + [BATADV_ATTR_BANDWIDTH_UP] = { .type = NLA_U32 }, + [BATADV_ATTR_BANDWIDTH_DOWN] = { .type = NLA_U32 }, + [BATADV_ATTR_ROUTER] = { .len = ETH_ALEN }, + [BATADV_ATTR_BLA_OWN] = { .type = NLA_FLAG }, + [BATADV_ATTR_BLA_ADDRESS] = { .len = ETH_ALEN }, + [BATADV_ATTR_BLA_VID] = { .type = NLA_U16 }, + [BATADV_ATTR_BLA_BACKBONE] = { .len = ETH_ALEN }, + [BATADV_ATTR_BLA_CRC] = { .type = NLA_U16 }, }; /** + * batadv_netlink_get_ifindex - Extract an interface index from a message + * @nlh: Message header + * @attrtype: Attribute which holds an interface index + * + * Return: interface index, or 0. + */ +int +batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype) +{ + struct nlattr *attr = nlmsg_find_attr(nlh, GENL_HDRLEN, attrtype); + + return attr ? nla_get_u32(attr) : 0; +} + +/** * batadv_netlink_mesh_info_put - fill in generic information about mesh * interface * @msg: netlink message to be sent back @@ -93,8 +139,16 @@ batadv_netlink_mesh_info_put(struct sk_buff *msg, struct net_device *soft_iface) nla_put_u32(msg, BATADV_ATTR_MESH_IFINDEX, soft_iface->ifindex) || nla_put_string(msg, BATADV_ATTR_MESH_IFNAME, soft_iface->name) || nla_put(msg, BATADV_ATTR_MESH_ADDRESS, ETH_ALEN, - soft_iface->dev_addr)) + soft_iface->dev_addr) || + nla_put_u8(msg, BATADV_ATTR_TT_TTVN, + (u8)atomic_read(&bat_priv->tt.vn))) + goto out; + +#ifdef CONFIG_BATMAN_ADV_BLA + if (nla_put_u16(msg, BATADV_ATTR_BLA_CRC, + ntohs(bat_priv->bla.claim_dest.group))) goto out; +#endif primary_if = batadv_primary_if_get_selected(bat_priv); if (primary_if && primary_if->if_status == BATADV_IF_ACTIVE) { @@ -380,6 +434,106 @@ out: return ret; } +/** + * batadv_netlink_dump_hardif_entry - Dump one hard interface into a message + * @msg: Netlink message to dump into + * @portid: Port making netlink request + * @seq: Sequence number of netlink message + * @hard_iface: Hard interface to dump + * + * Return: error code, or 0 on success + */ +static int +batadv_netlink_dump_hardif_entry(struct sk_buff *msg, u32 portid, u32 seq, + struct batadv_hard_iface *hard_iface) +{ + struct net_device *net_dev = hard_iface->net_dev; + void *hdr; + + hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, NLM_F_MULTI, + BATADV_CMD_GET_HARDIFS); + if (!hdr) + return -EMSGSIZE; + + if (nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX, + net_dev->ifindex) || + nla_put_string(msg, BATADV_ATTR_HARD_IFNAME, + net_dev->name) || + nla_put(msg, BATADV_ATTR_HARD_ADDRESS, ETH_ALEN, + net_dev->dev_addr)) + goto nla_put_failure; + + if (hard_iface->if_status == BATADV_IF_ACTIVE) { + if (nla_put_flag(msg, BATADV_ATTR_ACTIVE)) + goto nla_put_failure; + } + + genlmsg_end(msg, hdr); + return 0; + + nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +/** + * batadv_netlink_dump_hardifs - Dump all hard interface into a messages + * @msg: Netlink message to dump into + * @cb: Parameters from query + * + * Return: error code, or length of reply message on success + */ +static int +batadv_netlink_dump_hardifs(struct sk_buff *msg, struct netlink_callback *cb) +{ + struct net *net = 
sock_net(cb->skb->sk); + struct net_device *soft_iface; + struct batadv_hard_iface *hard_iface; + int ifindex; + int portid = NETLINK_CB(cb->skb).portid; + int seq = cb->nlh->nlmsg_seq; + int skip = cb->args[0]; + int i = 0; + + ifindex = batadv_netlink_get_ifindex(cb->nlh, + BATADV_ATTR_MESH_IFINDEX); + if (!ifindex) + return -EINVAL; + + soft_iface = dev_get_by_index(net, ifindex); + if (!soft_iface) + return -ENODEV; + + if (!batadv_softif_is_valid(soft_iface)) { + dev_put(soft_iface); + return -ENODEV; + } + + rcu_read_lock(); + + list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { + if (hard_iface->soft_iface != soft_iface) + continue; + + if (i++ < skip) + continue; + + if (batadv_netlink_dump_hardif_entry(msg, portid, seq, + hard_iface)) { + i--; + break; + } + } + + rcu_read_unlock(); + + dev_put(soft_iface); + + cb->args[0] = i; + + return msg->len; +} + static struct genl_ops batadv_netlink_ops[] = { { .cmd = BATADV_CMD_GET_MESH_INFO, @@ -399,6 +553,61 @@ static struct genl_ops batadv_netlink_ops[] = { .policy = batadv_netlink_policy, .doit = batadv_netlink_tp_meter_cancel, }, + { + .cmd = BATADV_CMD_GET_ROUTING_ALGOS, + .flags = GENL_ADMIN_PERM, + .policy = batadv_netlink_policy, + .dumpit = batadv_algo_dump, + }, + { + .cmd = BATADV_CMD_GET_HARDIFS, + .flags = GENL_ADMIN_PERM, + .policy = batadv_netlink_policy, + .dumpit = batadv_netlink_dump_hardifs, + }, + { + .cmd = BATADV_CMD_GET_TRANSTABLE_LOCAL, + .flags = GENL_ADMIN_PERM, + .policy = batadv_netlink_policy, + .dumpit = batadv_tt_local_dump, + }, + { + .cmd = BATADV_CMD_GET_TRANSTABLE_GLOBAL, + .flags = GENL_ADMIN_PERM, + .policy = batadv_netlink_policy, + .dumpit = batadv_tt_global_dump, + }, + { + .cmd = BATADV_CMD_GET_ORIGINATORS, + .flags = GENL_ADMIN_PERM, + .policy = batadv_netlink_policy, + .dumpit = batadv_orig_dump, + }, + { + .cmd = BATADV_CMD_GET_NEIGHBORS, + .flags = GENL_ADMIN_PERM, + .policy = batadv_netlink_policy, + .dumpit = batadv_hardif_neigh_dump, + }, + { + .cmd = BATADV_CMD_GET_GATEWAYS, + .flags = GENL_ADMIN_PERM, + .policy = batadv_netlink_policy, + .dumpit = batadv_gw_dump, + }, + { + .cmd = BATADV_CMD_GET_BLA_CLAIM, + .flags = GENL_ADMIN_PERM, + .policy = batadv_netlink_policy, + .dumpit = batadv_bla_claim_dump, + }, + { + .cmd = BATADV_CMD_GET_BLA_BACKBONE, + .flags = GENL_ADMIN_PERM, + .policy = batadv_netlink_policy, + .dumpit = batadv_bla_backbone_dump, + }, + }; /** diff --git a/net/batman-adv/netlink.h b/net/batman-adv/netlink.h index 945653ab58c6..52eb16281aba 100644 --- a/net/batman-adv/netlink.h +++ b/net/batman-adv/netlink.h @@ -21,12 +21,18 @@ #include "main.h" #include <linux/types.h> +#include <net/genetlink.h> + +struct nlmsghdr; void batadv_netlink_register(void); void batadv_netlink_unregister(void); +int batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype); int batadv_netlink_tpmeter_notify(struct batadv_priv *bat_priv, const u8 *dst, u8 result, u32 test_time, u64 total_bytes, u32 cookie); +extern struct genl_family batadv_netlink_family; + #endif /* _NET_BATMAN_ADV_NETLINK_H_ */ diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index 293ef4ffd4e1..e3baf697a35c 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c @@ -856,14 +856,12 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv, if (!nc_node) return NULL; - kref_get(&orig_neigh_node->refcount); - /* Initialize nc_node */ INIT_LIST_HEAD(&nc_node->list); + kref_init(&nc_node->refcount); ether_addr_copy(nc_node->addr, orig_node->orig); + 
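Several hunks in this series (DAT entries, gateway nodes, nc_nodes, nc_paths, orig/neigh structures, softif VLANs) reorder the reference counting so that kref_init() only accounts for the caller's reference and an explicit kref_get() is taken at the exact point where the object is published on a list or hash. The pattern they converge on, reduced to a hypothetical my_obj example that is not part of the patch:

#include <linux/kref.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_obj {
	struct hlist_node node;
	struct kref refcount;
};

struct my_ctx {
	spinlock_t list_lock;
	struct hlist_head list;
};

static struct my_obj *my_obj_create(struct my_ctx *ctx)
{
	struct my_obj *obj;

	obj = kzalloc(sizeof(*obj), GFP_ATOMIC);
	if (!obj)
		return NULL;

	/* implicit reference from kref_init() belongs to the caller */
	kref_init(&obj->refcount);

	spin_lock_bh(&ctx->list_lock);
	/* explicit reference for the list the object is published on */
	kref_get(&obj->refcount);
	hlist_add_head_rcu(&obj->node, &ctx->list);
	spin_unlock_bh(&ctx->list_lock);

	return obj;	/* caller drops its reference with kref_put() */
}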
kref_get(&orig_neigh_node->refcount); nc_node->orig_node = orig_neigh_node; - kref_init(&nc_node->refcount); - kref_get(&nc_node->refcount); /* Select ingoing or outgoing coding node */ if (in_coding) { @@ -879,6 +877,7 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv, /* Add nc_node to orig_node */ spin_lock_bh(lock); + kref_get(&nc_node->refcount); list_add_tail_rcu(&nc_node->list, list); spin_unlock_bh(lock); @@ -979,7 +978,6 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv, INIT_LIST_HEAD(&nc_path->packet_list); spin_lock_init(&nc_path->packet_list_lock); kref_init(&nc_path->refcount); - kref_get(&nc_path->refcount); nc_path->last_valid = jiffies; ether_addr_copy(nc_path->next_hop, dst); ether_addr_copy(nc_path->prev_hop, src); @@ -989,6 +987,7 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv, nc_path->next_hop); /* Add nc_path to hash table */ + kref_get(&nc_path->refcount); hash_added = batadv_hash_add(hash, batadv_nc_hash_compare, batadv_nc_hash_choose, &nc_path_key, &nc_path->hash_entry); @@ -1882,6 +1881,7 @@ void batadv_nc_mesh_free(struct batadv_priv *bat_priv) batadv_hash_destroy(bat_priv->nc.decoding_hash); } +#ifdef CONFIG_BATMAN_ADV_DEBUGFS /** * batadv_nc_nodes_seq_print_text - print the nc node information * @seq: seq file to print on @@ -1981,3 +1981,4 @@ int batadv_nc_init_debugfs(struct batadv_priv *bat_priv) out: return -ENOMEM; } +#endif diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 3940b5d24421..5f3bfc41aeb1 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -28,11 +28,15 @@ #include <linux/list.h> #include <linux/lockdep.h> #include <linux/netdevice.h> +#include <linux/netlink.h> #include <linux/rculist.h> #include <linux/seq_file.h> +#include <linux/skbuff.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/workqueue.h> +#include <net/sock.h> +#include <uapi/linux/batman_adv.h> #include "bat_algo.h" #include "distributed-arp-table.h" @@ -42,8 +46,10 @@ #include "hash.h" #include "log.h" #include "multicast.h" +#include "netlink.h" #include "network-coding.h" #include "routing.h" +#include "soft-interface.h" #include "translation-table.h" /* hash class keys */ @@ -127,9 +133,9 @@ batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node, goto out; kref_init(&vlan->refcount); - kref_get(&vlan->refcount); vlan->vid = vid; + kref_get(&vlan->refcount); hlist_add_head_rcu(&vlan->list, &orig_node->vlan_list); out: @@ -380,6 +386,7 @@ batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node, orig_ifinfo->if_outgoing = if_outgoing; INIT_HLIST_NODE(&orig_ifinfo->list); kref_init(&orig_ifinfo->refcount); + kref_get(&orig_ifinfo->refcount); hlist_add_head_rcu(&orig_ifinfo->list, &orig_node->ifinfo_list); @@ -453,9 +460,9 @@ batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh, INIT_HLIST_NODE(&neigh_ifinfo->list); kref_init(&neigh_ifinfo->refcount); - kref_get(&neigh_ifinfo->refcount); neigh_ifinfo->if_outgoing = if_outgoing; + kref_get(&neigh_ifinfo->refcount); hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list); out: @@ -647,8 +654,8 @@ batadv_neigh_node_create(struct batadv_orig_node *orig_node, /* extra reference for return */ kref_init(&neigh_node->refcount); - kref_get(&neigh_node->refcount); + kref_get(&neigh_node->refcount); hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list); batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv, @@ -686,6 +693,7 @@ batadv_neigh_node_get_or_create(struct 
batadv_orig_node *orig_node, return batadv_neigh_node_create(orig_node, hard_iface, neigh_addr); } +#ifdef CONFIG_BATMAN_ADV_DEBUGFS /** * batadv_hardif_neigh_seq_print_text - print the single hop neighbour list * @seq: neighbour table seq_file struct @@ -719,6 +727,84 @@ int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset) bat_priv->algo_ops->neigh.print(bat_priv, seq); return 0; } +#endif + +/** + * batadv_hardif_neigh_dump - Dump to netlink the neighbor infos for a specific + * outgoing interface + * @msg: message to dump into + * @cb: parameters for the dump + * + * Return: 0 or error value + */ +int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb) +{ + struct net *net = sock_net(cb->skb->sk); + struct net_device *soft_iface; + struct net_device *hard_iface = NULL; + struct batadv_hard_iface *hardif = BATADV_IF_DEFAULT; + struct batadv_priv *bat_priv; + struct batadv_hard_iface *primary_if = NULL; + int ret; + int ifindex, hard_ifindex; + + ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX); + if (!ifindex) + return -EINVAL; + + soft_iface = dev_get_by_index(net, ifindex); + if (!soft_iface || !batadv_softif_is_valid(soft_iface)) { + ret = -ENODEV; + goto out; + } + + bat_priv = netdev_priv(soft_iface); + + primary_if = batadv_primary_if_get_selected(bat_priv); + if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) { + ret = -ENOENT; + goto out; + } + + hard_ifindex = batadv_netlink_get_ifindex(cb->nlh, + BATADV_ATTR_HARD_IFINDEX); + if (hard_ifindex) { + hard_iface = dev_get_by_index(net, hard_ifindex); + if (hard_iface) + hardif = batadv_hardif_get_by_netdev(hard_iface); + + if (!hardif) { + ret = -ENODEV; + goto out; + } + + if (hardif->soft_iface != soft_iface) { + ret = -ENOENT; + goto out; + } + } + + if (!bat_priv->algo_ops->neigh.dump) { + ret = -EOPNOTSUPP; + goto out; + } + + bat_priv->algo_ops->neigh.dump(msg, cb, bat_priv, hardif); + + ret = msg->len; + + out: + if (hardif) + batadv_hardif_put(hardif); + if (hard_iface) + dev_put(hard_iface); + if (primary_if) + batadv_hardif_put(primary_if); + if (soft_iface) + dev_put(soft_iface); + + return ret; +} /** * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for @@ -905,7 +991,6 @@ struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv, /* extra reference for return */ kref_init(&orig_node->refcount); - kref_get(&orig_node->refcount); orig_node->bat_priv = bat_priv; ether_addr_copy(orig_node->orig, addr); @@ -1256,6 +1341,7 @@ void batadv_purge_orig_ref(struct batadv_priv *bat_priv) _batadv_purge_orig(bat_priv); } +#ifdef CONFIG_BATMAN_ADV_DEBUGFS int batadv_orig_seq_print_text(struct seq_file *seq, void *offset) { struct net_device *net_dev = (struct net_device *)seq->private; @@ -1329,6 +1415,84 @@ out: batadv_hardif_put(hard_iface); return 0; } +#endif + +/** + * batadv_orig_dump - Dump to netlink the originator infos for a specific + * outgoing interface + * @msg: message to dump into + * @cb: parameters for the dump + * + * Return: 0 or error value + */ +int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb) +{ + struct net *net = sock_net(cb->skb->sk); + struct net_device *soft_iface; + struct net_device *hard_iface = NULL; + struct batadv_hard_iface *hardif = BATADV_IF_DEFAULT; + struct batadv_priv *bat_priv; + struct batadv_hard_iface *primary_if = NULL; + int ret; + int ifindex, hard_ifindex; + + ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX); + if (!ifindex) + 
return -EINVAL; + + soft_iface = dev_get_by_index(net, ifindex); + if (!soft_iface || !batadv_softif_is_valid(soft_iface)) { + ret = -ENODEV; + goto out; + } + + bat_priv = netdev_priv(soft_iface); + + primary_if = batadv_primary_if_get_selected(bat_priv); + if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) { + ret = -ENOENT; + goto out; + } + + hard_ifindex = batadv_netlink_get_ifindex(cb->nlh, + BATADV_ATTR_HARD_IFINDEX); + if (hard_ifindex) { + hard_iface = dev_get_by_index(net, hard_ifindex); + if (hard_iface) + hardif = batadv_hardif_get_by_netdev(hard_iface); + + if (!hardif) { + ret = -ENODEV; + goto out; + } + + if (hardif->soft_iface != soft_iface) { + ret = -ENOENT; + goto out; + } + } + + if (!bat_priv->algo_ops->orig.dump) { + ret = -EOPNOTSUPP; + goto out; + } + + bat_priv->algo_ops->orig.dump(msg, cb, bat_priv, hardif); + + ret = msg->len; + + out: + if (hardif) + batadv_hardif_put(hardif); + if (hard_iface) + dev_put(hard_iface); + if (primary_if) + batadv_hardif_put(primary_if); + if (soft_iface) + dev_put(soft_iface); + + return ret; +} int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface, int max_if_num) diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h index 566306bf05dc..ebc56183f358 100644 --- a/net/batman-adv/originator.h +++ b/net/batman-adv/originator.h @@ -31,7 +31,9 @@ #include "hash.h" +struct netlink_callback; struct seq_file; +struct sk_buff; bool batadv_compare_orig(const struct hlist_node *node, const void *data2); int batadv_originator_init(struct batadv_priv *bat_priv); @@ -61,6 +63,7 @@ batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh, struct batadv_hard_iface *if_outgoing); void batadv_neigh_ifinfo_put(struct batadv_neigh_ifinfo *neigh_ifinfo); +int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb); int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset); struct batadv_orig_ifinfo * @@ -72,6 +75,7 @@ batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node, void batadv_orig_ifinfo_put(struct batadv_orig_ifinfo *orig_ifinfo); int batadv_orig_seq_print_text(struct seq_file *seq, void *offset); +int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb); int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset); int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface, int max_if_num); diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h index 6b011ff64dd8..6afc0b86950e 100644 --- a/net/batman-adv/packet.h +++ b/net/batman-adv/packet.h @@ -129,42 +129,6 @@ enum batadv_tt_data_flags { }; /** - * enum batadv_tt_client_flags - TT client specific flags - * @BATADV_TT_CLIENT_DEL: the client has to be deleted from the table - * @BATADV_TT_CLIENT_ROAM: the client roamed to/from another node and the new - * update telling its new real location has not been received/sent yet - * @BATADV_TT_CLIENT_WIFI: this client is connected through a wifi interface. - * This information is used by the "AP Isolation" feature - * @BATADV_TT_CLIENT_ISOLA: this client is considered "isolated". 
This - * information is used by the Extended Isolation feature - * @BATADV_TT_CLIENT_NOPURGE: this client should never be removed from the table - * @BATADV_TT_CLIENT_NEW: this client has been added to the local table but has - * not been announced yet - * @BATADV_TT_CLIENT_PENDING: this client is marked for removal but it is kept - * in the table for one more originator interval for consistency purposes - * @BATADV_TT_CLIENT_TEMP: this global client has been detected to be part of - * the network but no nnode has already announced it - * - * Bits from 0 to 7 are called _remote flags_ because they are sent on the wire. - * Bits from 8 to 15 are called _local flags_ because they are used for local - * computations only. - * - * Bits from 4 to 7 - a subset of remote flags - are ensured to be in sync with - * the other nodes in the network. To achieve this goal these flags are included - * in the TT CRC computation. - */ -enum batadv_tt_client_flags { - BATADV_TT_CLIENT_DEL = BIT(0), - BATADV_TT_CLIENT_ROAM = BIT(1), - BATADV_TT_CLIENT_WIFI = BIT(4), - BATADV_TT_CLIENT_ISOLA = BIT(5), - BATADV_TT_CLIENT_NOPURGE = BIT(8), - BATADV_TT_CLIENT_NEW = BIT(9), - BATADV_TT_CLIENT_PENDING = BIT(10), - BATADV_TT_CLIENT_TEMP = BIT(11), -}; - -/** * enum batadv_vlan_flags - flags for the four MSB of any vlan ID field * @BATADV_VLAN_HAS_TAG: whether the field contains a valid vlan tag or not */ diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 7602c001e92b..610f2c45edcd 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -74,11 +74,23 @@ static void _batadv_update_route(struct batadv_priv *bat_priv, if (!orig_ifinfo) return; - rcu_read_lock(); - curr_router = rcu_dereference(orig_ifinfo->router); - if (curr_router && !kref_get_unless_zero(&curr_router->refcount)) - curr_router = NULL; - rcu_read_unlock(); + spin_lock_bh(&orig_node->neigh_list_lock); + /* curr_router used earlier may not be the current orig_ifinfo->router + * anymore because it was dereferenced outside of the neigh_list_lock + * protected region. After the new best neighbor has replace the current + * best neighbor the reference counter needs to decrease. Consequently, + * the code needs to ensure the curr_router variable contains a pointer + * to the replaced best neighbor. + */ + curr_router = rcu_dereference_protected(orig_ifinfo->router, true); + + /* increase refcount of new best neighbor */ + if (neigh_node) + kref_get(&neigh_node->refcount); + + rcu_assign_pointer(orig_ifinfo->router, neigh_node); + spin_unlock_bh(&orig_node->neigh_list_lock); + batadv_orig_ifinfo_put(orig_ifinfo); /* route deleted */ if ((curr_router) && (!neigh_node)) { @@ -100,27 +112,6 @@ static void _batadv_update_route(struct batadv_priv *bat_priv, curr_router->addr); } - if (curr_router) - batadv_neigh_node_put(curr_router); - - spin_lock_bh(&orig_node->neigh_list_lock); - /* curr_router used earlier may not be the current orig_ifinfo->router - * anymore because it was dereferenced outside of the neigh_list_lock - * protected region. After the new best neighbor has replace the current - * best neighbor the reference counter needs to decrease. Consequently, - * the code needs to ensure the curr_router variable contains a pointer - * to the replaced best neighbor. 
- */ - curr_router = rcu_dereference_protected(orig_ifinfo->router, true); - - /* increase refcount of new best neighbor */ - if (neigh_node) - kref_get(&neigh_node->refcount); - - rcu_assign_pointer(orig_ifinfo->router, neigh_node); - spin_unlock_bh(&orig_node->neigh_list_lock); - batadv_orig_ifinfo_put(orig_ifinfo); - /* decrease refcount of previous best neighbor */ if (curr_router) batadv_neigh_node_put(curr_router); diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index 6191159484df..8d4e1f578574 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c @@ -315,8 +315,7 @@ out: * * Wrap the given skb into a batman-adv unicast or unicast-4addr header * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied - * as packet_type. Then send this frame to the given orig_node and release a - * reference to this orig_node. + * as packet_type. Then send this frame to the given orig_node. * * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise. */ @@ -370,8 +369,6 @@ int batadv_send_skb_unicast(struct batadv_priv *bat_priv, ret = NET_XMIT_SUCCESS; out: - if (orig_node) - batadv_orig_node_put(orig_node); if (ret == NET_XMIT_DROP) kfree_skb(skb); return ret; @@ -403,6 +400,7 @@ int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv, struct ethhdr *ethhdr = (struct ethhdr *)skb->data; struct batadv_orig_node *orig_node; u8 *src, *dst; + int ret; src = ethhdr->h_source; dst = ethhdr->h_dest; @@ -414,8 +412,13 @@ int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv, } orig_node = batadv_transtable_search(bat_priv, src, dst, vid); - return batadv_send_skb_unicast(bat_priv, skb, packet_type, - packet_subtype, orig_node, vid); + ret = batadv_send_skb_unicast(bat_priv, skb, packet_type, + packet_subtype, orig_node, vid); + + if (orig_node) + batadv_orig_node_put(orig_node); + + return ret; } /** @@ -433,12 +436,25 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned short vid) { struct batadv_orig_node *orig_node; + int ret; orig_node = batadv_gw_get_selected_orig(bat_priv); - return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR, - BATADV_P_DATA, orig_node, vid); + ret = batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR, + BATADV_P_DATA, orig_node, vid); + + if (orig_node) + batadv_orig_node_put(orig_node); + + return ret; } +/** + * batadv_forw_packet_free - free a forwarding packet + * @forw_packet: The packet to free + * + * This frees a forwarding packet and releases any resources it might + * have claimed. + */ void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet) { kfree_skb(forw_packet->skb); @@ -446,9 +462,73 @@ void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet) batadv_hardif_put(forw_packet->if_incoming); if (forw_packet->if_outgoing) batadv_hardif_put(forw_packet->if_outgoing); + if (forw_packet->queue_left) + atomic_inc(forw_packet->queue_left); kfree(forw_packet); } +/** + * batadv_forw_packet_alloc - allocate a forwarding packet + * @if_incoming: The (optional) if_incoming to be grabbed + * @if_outgoing: The (optional) if_outgoing to be grabbed + * @queue_left: The (optional) queue counter to decrease + * @bat_priv: The bat_priv for the mesh of this forw_packet + * + * Allocates a forwarding packet and tries to get a reference to the + * (optional) if_incoming, if_outgoing and queue_left. If queue_left + * is NULL then bat_priv is optional, too. + * + * Return: An allocated forwarding packet on success, NULL otherwise. 
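Callers of the new helper pair no longer juggle the queue counters themselves: batadv_forw_packet_alloc() decrements queue_left and grabs the interface references, and batadv_forw_packet_free() gives both back, so every error path collapses into a single free call. A caller-side sketch under those assumptions (my_queue_bcast() is hypothetical; skb setup and scheduling are elided):

#include "send.h"
#include "types.h"

static int my_queue_bcast(struct batadv_priv *bat_priv,
			  struct batadv_hard_iface *primary_if,
			  struct sk_buff *skb)
{
	struct batadv_forw_packet *forw_packet;

	forw_packet = batadv_forw_packet_alloc(primary_if, NULL,
					       &bat_priv->bcast_queue_left,
					       bat_priv);
	if (!forw_packet)
		return -ENOMEM;	/* queue full or out of memory */

	forw_packet->skb = skb;

	/* ... fill in the packet and schedule forw_packet->delayed_work;
	 * any error path from here on is just batadv_forw_packet_free()
	 */

	return 0;
}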
+ */ +struct batadv_forw_packet * +batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming, + struct batadv_hard_iface *if_outgoing, + atomic_t *queue_left, + struct batadv_priv *bat_priv) +{ + struct batadv_forw_packet *forw_packet; + const char *qname; + + if (queue_left && !batadv_atomic_dec_not_zero(queue_left)) { + qname = "unknown"; + + if (queue_left == &bat_priv->bcast_queue_left) + qname = "bcast"; + + if (queue_left == &bat_priv->batman_queue_left) + qname = "batman"; + + batadv_dbg(BATADV_DBG_BATMAN, bat_priv, + "%s queue is full\n", qname); + + return NULL; + } + + forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC); + if (!forw_packet) + goto err; + + if (if_incoming) + kref_get(&if_incoming->refcount); + + if (if_outgoing) + kref_get(&if_outgoing->refcount); + + forw_packet->skb = NULL; + forw_packet->queue_left = queue_left; + forw_packet->if_incoming = if_incoming; + forw_packet->if_outgoing = if_outgoing; + forw_packet->num_packets = 0; + + return forw_packet; + +err: + if (queue_left) + atomic_inc(queue_left); + + return NULL; +} + static void _batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv, struct batadv_forw_packet *forw_packet, @@ -487,24 +567,20 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv, struct batadv_bcast_packet *bcast_packet; struct sk_buff *newskb; - if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) { - batadv_dbg(BATADV_DBG_BATMAN, bat_priv, - "bcast packet queue full\n"); - goto out; - } - primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if) - goto out_and_inc; - - forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC); + goto err; + forw_packet = batadv_forw_packet_alloc(primary_if, NULL, + &bat_priv->bcast_queue_left, + bat_priv); + batadv_hardif_put(primary_if); if (!forw_packet) - goto out_and_inc; + goto err; newskb = skb_copy(skb, GFP_ATOMIC); if (!newskb) - goto packet_free; + goto err_packet_free; /* as we have a copy now, it is safe to decrease the TTL */ bcast_packet = (struct batadv_bcast_packet *)newskb->data; @@ -513,11 +589,6 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv, skb_reset_mac_header(newskb); forw_packet->skb = newskb; - forw_packet->if_incoming = primary_if; - forw_packet->if_outgoing = NULL; - - /* how often did we send the bcast packet ? 
*/ - forw_packet->num_packets = 0; INIT_DELAYED_WORK(&forw_packet->delayed_work, batadv_send_outstanding_bcast_packet); @@ -525,13 +596,9 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv, _batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay); return NETDEV_TX_OK; -packet_free: - kfree(forw_packet); -out_and_inc: - atomic_inc(&bat_priv->bcast_queue_left); -out: - if (primary_if) - batadv_hardif_put(primary_if); +err_packet_free: + batadv_forw_packet_free(forw_packet); +err: return NETDEV_TX_BUSY; } @@ -592,7 +659,6 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work) out: batadv_forw_packet_free(forw_packet); - atomic_inc(&bat_priv->bcast_queue_left); } void @@ -633,9 +699,6 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv, if (pending) { hlist_del(&forw_packet->list); - if (!forw_packet->own) - atomic_inc(&bat_priv->bcast_queue_left); - batadv_forw_packet_free(forw_packet); } } @@ -663,9 +726,6 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv, if (pending) { hlist_del(&forw_packet->list); - if (!forw_packet->own) - atomic_inc(&bat_priv->batman_queue_left); - batadv_forw_packet_free(forw_packet); } } diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h index 7cecb7563b45..999f78683d9e 100644 --- a/net/batman-adv/send.h +++ b/net/batman-adv/send.h @@ -28,6 +28,12 @@ struct sk_buff; void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet); +struct batadv_forw_packet * +batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming, + struct batadv_hard_iface *if_outgoing, + atomic_t *queue_left, + struct batadv_priv *bat_priv); + int batadv_send_skb_to_orig(struct sk_buff *skb, struct batadv_orig_node *orig_node, struct batadv_hard_iface *recv_if); diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 7527c0652dd5..49e16b6e0ba3 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -39,6 +39,7 @@ #include <linux/random.h> #include <linux/rculist.h> #include <linux/rcupdate.h> +#include <linux/rtnetlink.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/socket.h> @@ -46,7 +47,6 @@ #include <linux/stddef.h> #include <linux/string.h> #include <linux/types.h> -#include <linux/workqueue.h> #include "bat_algo.h" #include "bridge_loop_avoidance.h" @@ -57,6 +57,7 @@ #include "hard-interface.h" #include "multicast.h" #include "network-coding.h" +#include "originator.h" #include "packet.h" #include "send.h" #include "sysfs.h" @@ -377,6 +378,8 @@ dropped: dropped_freed: batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED); end: + if (mcast_single_orig) + batadv_orig_node_put(mcast_single_orig); if (primary_if) batadv_hardif_put(primary_if); return NETDEV_TX_OK; @@ -591,6 +594,7 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid) } spin_lock_bh(&bat_priv->softif_vlan_list_lock); + kref_get(&vlan->refcount); hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list); spin_unlock_bh(&bat_priv->softif_vlan_list_lock); @@ -601,6 +605,9 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid) bat_priv->soft_iface->dev_addr, vid, BATADV_NULL_IFINDEX, BATADV_NO_MARK); + /* don't return reference to new softif_vlan */ + batadv_softif_vlan_put(vlan); + return 0; } @@ -747,34 +754,6 @@ static void batadv_set_lockdep_class(struct net_device *dev) } /** - * batadv_softif_destroy_finish - cleans up the remains of a softif - * @work: work queue item - * - * Free the parts of the 
soft interface which can not be removed under - * rtnl lock (to prevent deadlock situations). - */ -static void batadv_softif_destroy_finish(struct work_struct *work) -{ - struct batadv_softif_vlan *vlan; - struct batadv_priv *bat_priv; - struct net_device *soft_iface; - - bat_priv = container_of(work, struct batadv_priv, - cleanup_work); - soft_iface = bat_priv->soft_iface; - - /* destroy the "untagged" VLAN */ - vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS); - if (vlan) { - batadv_softif_destroy_vlan(bat_priv, vlan); - batadv_softif_vlan_put(vlan); - } - - batadv_sysfs_del_meshif(soft_iface); - unregister_netdev(soft_iface); -} - -/** * batadv_softif_init_late - late stage initialization of soft interface * @dev: registered network device to modify * @@ -791,7 +770,6 @@ static int batadv_softif_init_late(struct net_device *dev) bat_priv = netdev_priv(dev); bat_priv->soft_iface = dev; - INIT_WORK(&bat_priv->cleanup_work, batadv_softif_destroy_finish); /* batadv_interface_stats() needs to be available as soon as * register_netdevice() has been called @@ -1028,8 +1006,19 @@ struct net_device *batadv_softif_create(struct net *net, const char *name) void batadv_softif_destroy_sysfs(struct net_device *soft_iface) { struct batadv_priv *bat_priv = netdev_priv(soft_iface); + struct batadv_softif_vlan *vlan; - queue_work(batadv_event_workqueue, &bat_priv->cleanup_work); + ASSERT_RTNL(); + + /* destroy the "untagged" VLAN */ + vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS); + if (vlan) { + batadv_softif_destroy_vlan(bat_priv, vlan); + batadv_softif_vlan_put(vlan); + } + + batadv_sysfs_del_meshif(soft_iface); + unregister_netdevice(soft_iface); } /** diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c index fe9ca94ddee2..02d96f224c60 100644 --- a/net/batman-adv/sysfs.c +++ b/net/batman-adv/sysfs.c @@ -37,6 +37,7 @@ #include <linux/stddef.h> #include <linux/string.h> #include <linux/stringify.h> +#include <linux/workqueue.h> #include "bridge_loop_avoidance.h" #include "distributed-arp-table.h" @@ -428,6 +429,13 @@ static ssize_t batadv_show_gw_mode(struct kobject *kobj, struct attribute *attr, struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj); int bytes_written; + /* GW mode is not available if the routing algorithm in use does not + * implement the GW API + */ + if (!bat_priv->algo_ops->gw.get_best_gw_node || + !bat_priv->algo_ops->gw.is_eligible) + return -ENOENT; + switch (atomic_read(&bat_priv->gw.mode)) { case BATADV_GW_MODE_CLIENT: bytes_written = sprintf(buff, "%s\n", @@ -455,6 +463,13 @@ static ssize_t batadv_store_gw_mode(struct kobject *kobj, char *curr_gw_mode_str; int gw_mode_tmp = -1; + /* toggling GW mode is allowed only if the routing algorithm in use + * provides the GW API + */ + if (!bat_priv->algo_ops->gw.get_best_gw_node || + !bat_priv->algo_ops->gw.is_eligible) + return -EINVAL; + if (buff[count - 1] == '\n') buff[count - 1] = '\0'; @@ -514,6 +529,50 @@ static ssize_t batadv_store_gw_mode(struct kobject *kobj, return count; } +static ssize_t batadv_show_gw_sel_class(struct kobject *kobj, + struct attribute *attr, char *buff) +{ + struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj); + + /* GW selection class is not available if the routing algorithm in use + * does not implement the GW API + */ + if (!bat_priv->algo_ops->gw.get_best_gw_node || + !bat_priv->algo_ops->gw.is_eligible) + return -ENOENT; + + if (bat_priv->algo_ops->gw.show_sel_class) + return bat_priv->algo_ops->gw.show_sel_class(bat_priv, buff); + + return sprintf(buff, 
"%i\n", atomic_read(&bat_priv->gw.sel_class)); +} + +static ssize_t batadv_store_gw_sel_class(struct kobject *kobj, + struct attribute *attr, char *buff, + size_t count) +{ + struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj); + + /* setting the GW selection class is allowed only if the routing + * algorithm in use implements the GW API + */ + if (!bat_priv->algo_ops->gw.get_best_gw_node || + !bat_priv->algo_ops->gw.is_eligible) + return -EINVAL; + + if (buff[count - 1] == '\n') + buff[count - 1] = '\0'; + + if (bat_priv->algo_ops->gw.store_sel_class) + return bat_priv->algo_ops->gw.store_sel_class(bat_priv, buff, + count); + + return __batadv_store_uint_attr(buff, count, 1, BATADV_TQ_MAX_VALUE, + batadv_post_gw_reselect, attr, + &bat_priv->gw.sel_class, + bat_priv->soft_iface); +} + static ssize_t batadv_show_gw_bwidth(struct kobject *kobj, struct attribute *attr, char *buff) { @@ -625,8 +684,8 @@ BATADV_ATTR_SIF_UINT(orig_interval, orig_interval, S_IRUGO | S_IWUSR, 2 * BATADV_JITTER, INT_MAX, NULL); BATADV_ATTR_SIF_UINT(hop_penalty, hop_penalty, S_IRUGO | S_IWUSR, 0, BATADV_TQ_MAX_VALUE, NULL); -BATADV_ATTR_SIF_UINT(gw_sel_class, gw.sel_class, S_IRUGO | S_IWUSR, 1, - BATADV_TQ_MAX_VALUE, batadv_post_gw_reselect); +static BATADV_ATTR(gw_sel_class, S_IRUGO | S_IWUSR, batadv_show_gw_sel_class, + batadv_store_gw_sel_class); static BATADV_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, batadv_show_gw_bwidth, batadv_store_gw_bwidth); #ifdef CONFIG_BATMAN_ADV_MCAST @@ -712,6 +771,8 @@ rem_attr: for (bat_attr = batadv_mesh_attrs; *bat_attr; ++bat_attr) sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr)); + kobject_uevent(bat_priv->mesh_obj, KOBJ_REMOVE); + kobject_del(bat_priv->mesh_obj); kobject_put(bat_priv->mesh_obj); bat_priv->mesh_obj = NULL; out: @@ -726,6 +787,8 @@ void batadv_sysfs_del_meshif(struct net_device *dev) for (bat_attr = batadv_mesh_attrs; *bat_attr; ++bat_attr) sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr)); + kobject_uevent(bat_priv->mesh_obj, KOBJ_REMOVE); + kobject_del(bat_priv->mesh_obj); kobject_put(bat_priv->mesh_obj); bat_priv->mesh_obj = NULL; } @@ -781,6 +844,10 @@ rem_attr: for (bat_attr = batadv_vlan_attrs; *bat_attr; ++bat_attr) sysfs_remove_file(vlan->kobj, &((*bat_attr)->attr)); + if (vlan->kobj != bat_priv->mesh_obj) { + kobject_uevent(vlan->kobj, KOBJ_REMOVE); + kobject_del(vlan->kobj); + } kobject_put(vlan->kobj); vlan->kobj = NULL; out: @@ -800,6 +867,10 @@ void batadv_sysfs_del_vlan(struct batadv_priv *bat_priv, for (bat_attr = batadv_vlan_attrs; *bat_attr; ++bat_attr) sysfs_remove_file(vlan->kobj, &((*bat_attr)->attr)); + if (vlan->kobj != bat_priv->mesh_obj) { + kobject_uevent(vlan->kobj, KOBJ_REMOVE); + kobject_del(vlan->kobj); + } kobject_put(vlan->kobj); vlan->kobj = NULL; } @@ -828,31 +899,31 @@ static ssize_t batadv_show_mesh_iface(struct kobject *kobj, return length; } -static ssize_t batadv_store_mesh_iface(struct kobject *kobj, - struct attribute *attr, char *buff, - size_t count) +/** + * batadv_store_mesh_iface_finish - store new hardif mesh_iface state + * @net_dev: netdevice to add/remove to/from batman-adv soft-interface + * @ifname: name of soft-interface to modify + * + * Changes the parts of the hard+soft interface which can not be modified under + * sysfs lock (to prevent deadlock situations). 
+ * + * Return: 0 on success, 0 < on failure + */ +static int batadv_store_mesh_iface_finish(struct net_device *net_dev, + char ifname[IFNAMSIZ]) { - struct net_device *net_dev = batadv_kobj_to_netdev(kobj); struct net *net = dev_net(net_dev); struct batadv_hard_iface *hard_iface; - int status_tmp = -1; - int ret = count; + int status_tmp; + int ret = 0; + + ASSERT_RTNL(); hard_iface = batadv_hardif_get_by_netdev(net_dev); if (!hard_iface) - return count; - - if (buff[count - 1] == '\n') - buff[count - 1] = '\0'; - - if (strlen(buff) >= IFNAMSIZ) { - pr_err("Invalid parameter for 'mesh_iface' setting received: interface name too long '%s'\n", - buff); - batadv_hardif_put(hard_iface); - return -EINVAL; - } + return 0; - if (strncmp(buff, "none", 4) == 0) + if (strncmp(ifname, "none", 4) == 0) status_tmp = BATADV_IF_NOT_IN_USE; else status_tmp = BATADV_IF_I_WANT_YOU; @@ -861,15 +932,13 @@ static ssize_t batadv_store_mesh_iface(struct kobject *kobj, goto out; if ((hard_iface->soft_iface) && - (strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0)) + (strncmp(hard_iface->soft_iface->name, ifname, IFNAMSIZ) == 0)) goto out; - rtnl_lock(); - if (status_tmp == BATADV_IF_NOT_IN_USE) { batadv_hardif_disable_interface(hard_iface, BATADV_IF_CLEANUP_AUTO); - goto unlock; + goto out; } /* if the interface already is in use */ @@ -877,15 +946,71 @@ static ssize_t batadv_store_mesh_iface(struct kobject *kobj, batadv_hardif_disable_interface(hard_iface, BATADV_IF_CLEANUP_AUTO); - ret = batadv_hardif_enable_interface(hard_iface, net, buff); - -unlock: - rtnl_unlock(); + ret = batadv_hardif_enable_interface(hard_iface, net, ifname); out: batadv_hardif_put(hard_iface); return ret; } +/** + * batadv_store_mesh_iface_work - store new hardif mesh_iface state + * @work: work queue item + * + * Changes the parts of the hard+soft interface which can not be modified under + * sysfs lock (to prevent deadlock situations). 
+ */ +static void batadv_store_mesh_iface_work(struct work_struct *work) +{ + struct batadv_store_mesh_work *store_work; + int ret; + + store_work = container_of(work, struct batadv_store_mesh_work, work); + + rtnl_lock(); + ret = batadv_store_mesh_iface_finish(store_work->net_dev, + store_work->soft_iface_name); + rtnl_unlock(); + + if (ret < 0) + pr_err("Failed to store new mesh_iface state %s for %s: %d\n", + store_work->soft_iface_name, store_work->net_dev->name, + ret); + + dev_put(store_work->net_dev); + kfree(store_work); +} + +static ssize_t batadv_store_mesh_iface(struct kobject *kobj, + struct attribute *attr, char *buff, + size_t count) +{ + struct net_device *net_dev = batadv_kobj_to_netdev(kobj); + struct batadv_store_mesh_work *store_work; + + if (buff[count - 1] == '\n') + buff[count - 1] = '\0'; + + if (strlen(buff) >= IFNAMSIZ) { + pr_err("Invalid parameter for 'mesh_iface' setting received: interface name too long '%s'\n", + buff); + return -EINVAL; + } + + store_work = kmalloc(sizeof(*store_work), GFP_KERNEL); + if (!store_work) + return -ENOMEM; + + dev_hold(net_dev); + INIT_WORK(&store_work->work, batadv_store_mesh_iface_work); + store_work->net_dev = net_dev; + strlcpy(store_work->soft_iface_name, buff, + sizeof(store_work->soft_iface_name)); + + queue_work(batadv_event_workqueue, &store_work->work); + + return count; +} + static ssize_t batadv_show_iface_status(struct kobject *kobj, struct attribute *attr, char *buff) { @@ -1048,6 +1173,8 @@ out: void batadv_sysfs_del_hardif(struct kobject **hardif_obj) { + kobject_uevent(*hardif_obj, KOBJ_REMOVE); + kobject_del(*hardif_obj); kobject_put(*hardif_obj); *hardif_obj = NULL; } diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 7e6df7a4964a..7f663092f6de 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -22,12 +22,14 @@ #include <linux/bitops.h> #include <linux/bug.h> #include <linux/byteorder/generic.h> +#include <linux/cache.h> #include <linux/compiler.h> #include <linux/crc32c.h> #include <linux/errno.h> #include <linux/etherdevice.h> #include <linux/fs.h> #include <linux/if_ether.h> +#include <linux/init.h> #include <linux/jhash.h> #include <linux/jiffies.h> #include <linux/kernel.h> @@ -35,25 +37,39 @@ #include <linux/list.h> #include <linux/lockdep.h> #include <linux/netdevice.h> +#include <linux/netlink.h> #include <linux/rculist.h> #include <linux/rcupdate.h> #include <linux/seq_file.h> +#include <linux/skbuff.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/stddef.h> #include <linux/string.h> #include <linux/workqueue.h> +#include <net/genetlink.h> +#include <net/netlink.h> +#include <net/sock.h> +#include <uapi/linux/batman_adv.h> #include "bridge_loop_avoidance.h" #include "hard-interface.h" #include "hash.h" #include "log.h" #include "multicast.h" +#include "netlink.h" #include "originator.h" #include "packet.h" #include "soft-interface.h" #include "tvlv.h" +static struct kmem_cache *batadv_tl_cache __read_mostly; +static struct kmem_cache *batadv_tg_cache __read_mostly; +static struct kmem_cache *batadv_tt_orig_cache __read_mostly; +static struct kmem_cache *batadv_tt_change_cache __read_mostly; +static struct kmem_cache *batadv_tt_req_cache __read_mostly; +static struct kmem_cache *batadv_tt_roam_cache __read_mostly; + /* hash class keys */ static struct lock_class_key batadv_tt_local_hash_lock_class_key; static struct lock_class_key batadv_tt_global_hash_lock_class_key; @@ -205,6 +221,20 @@ 
batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const u8 *addr, } /** + * batadv_tt_local_entry_free_rcu - free the tt_local_entry + * @rcu: rcu pointer of the tt_local_entry + */ +static void batadv_tt_local_entry_free_rcu(struct rcu_head *rcu) +{ + struct batadv_tt_local_entry *tt_local_entry; + + tt_local_entry = container_of(rcu, struct batadv_tt_local_entry, + common.rcu); + + kmem_cache_free(batadv_tl_cache, tt_local_entry); +} + +/** * batadv_tt_local_entry_release - release tt_local_entry from lists and queue * for free after rcu grace period * @ref: kref pointer of the nc_node @@ -218,7 +248,7 @@ static void batadv_tt_local_entry_release(struct kref *ref) batadv_softif_vlan_put(tt_local_entry->vlan); - kfree_rcu(tt_local_entry, common.rcu); + call_rcu(&tt_local_entry->common.rcu, batadv_tt_local_entry_free_rcu); } /** @@ -234,6 +264,20 @@ batadv_tt_local_entry_put(struct batadv_tt_local_entry *tt_local_entry) } /** + * batadv_tt_global_entry_free_rcu - free the tt_global_entry + * @rcu: rcu pointer of the tt_global_entry + */ +static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu) +{ + struct batadv_tt_global_entry *tt_global_entry; + + tt_global_entry = container_of(rcu, struct batadv_tt_global_entry, + common.rcu); + + kmem_cache_free(batadv_tg_cache, tt_global_entry); +} + +/** * batadv_tt_global_entry_release - release tt_global_entry from lists and queue * for free after rcu grace period * @ref: kref pointer of the nc_node @@ -246,7 +290,8 @@ static void batadv_tt_global_entry_release(struct kref *ref) common.refcount); batadv_tt_global_del_orig_list(tt_global_entry); - kfree_rcu(tt_global_entry, common.rcu); + + call_rcu(&tt_global_entry->common.rcu, batadv_tt_global_entry_free_rcu); } /** @@ -384,6 +429,19 @@ static void batadv_tt_global_size_dec(struct batadv_orig_node *orig_node, } /** + * batadv_tt_orig_list_entry_free_rcu - free the orig_entry + * @rcu: rcu pointer of the orig_entry + */ +static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu) +{ + struct batadv_tt_orig_list_entry *orig_entry; + + orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu); + + kmem_cache_free(batadv_tt_orig_cache, orig_entry); +} + +/** * batadv_tt_orig_list_entry_release - release tt orig entry from lists and * queue for free after rcu grace period * @ref: kref pointer of the tt orig entry @@ -396,7 +454,7 @@ static void batadv_tt_orig_list_entry_release(struct kref *ref) refcount); batadv_orig_node_put(orig_entry->orig_node); - kfree_rcu(orig_entry, rcu); + call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu); } /** @@ -426,7 +484,7 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv, bool event_removed = false; bool del_op_requested, del_op_entry; - tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC); + tt_change_node = kmem_cache_alloc(batadv_tt_change_cache, GFP_ATOMIC); if (!tt_change_node) return; @@ -467,8 +525,8 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv, continue; del: list_del(&entry->list); - kfree(entry); - kfree(tt_change_node); + kmem_cache_free(batadv_tt_change_cache, entry); + kmem_cache_free(batadv_tt_change_cache, tt_change_node); event_removed = true; goto unlock; } @@ -646,7 +704,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr, goto out; } - tt_local = kmalloc(sizeof(*tt_local), GFP_ATOMIC); + tt_local = kmem_cache_alloc(batadv_tl_cache, GFP_ATOMIC); if (!tt_local) goto out; @@ -656,7 +714,7 @@ bool batadv_tt_local_add(struct net_device 
*soft_iface, const u8 *addr, net_ratelimited_function(batadv_info, soft_iface, "adding TT local entry %pM to non-existent VLAN %d\n", addr, BATADV_PRINT_VID(vid)); - kfree(tt_local); + kmem_cache_free(batadv_tl_cache, tt_local); tt_local = NULL; goto out; } @@ -676,7 +734,6 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr, if (batadv_is_wifi_netdev(in_dev)) tt_local->common.flags |= BATADV_TT_CLIENT_WIFI; kref_init(&tt_local->common.refcount); - kref_get(&tt_local->common.refcount); tt_local->last_seen = jiffies; tt_local->common.added_at = tt_local->last_seen; tt_local->vlan = vlan; @@ -688,6 +745,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr, is_multicast_ether_addr(addr)) tt_local->common.flags |= BATADV_TT_CLIENT_NOPURGE; + kref_get(&tt_local->common.refcount); hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt, batadv_choose_tt, &tt_local->common, &tt_local->common.hash_entry); @@ -959,7 +1017,7 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv) tt_diff_entries_count++; } list_del(&entry->list); - kfree(entry); + kmem_cache_free(batadv_tt_change_cache, entry); } spin_unlock_bh(&bat_priv->tt.changes_list_lock); @@ -989,6 +1047,7 @@ container_register: kfree(tt_data); } +#ifdef CONFIG_BATMAN_ADV_DEBUGFS int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset) { struct net_device *net_dev = (struct net_device *)seq->private; @@ -1056,6 +1115,165 @@ out: batadv_hardif_put(primary_if); return 0; } +#endif + +/** + * batadv_tt_local_dump_entry - Dump one TT local entry into a message + * @msg :Netlink message to dump into + * @portid: Port making netlink request + * @seq: Sequence number of netlink message + * @bat_priv: The bat priv with all the soft interface information + * @common: tt local & tt global common data + * + * Return: Error code, or 0 on success + */ +static int +batadv_tt_local_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, + struct batadv_priv *bat_priv, + struct batadv_tt_common_entry *common) +{ + void *hdr; + struct batadv_softif_vlan *vlan; + struct batadv_tt_local_entry *local; + unsigned int last_seen_msecs; + u32 crc; + + local = container_of(common, struct batadv_tt_local_entry, common); + last_seen_msecs = jiffies_to_msecs(jiffies - local->last_seen); + + vlan = batadv_softif_vlan_get(bat_priv, common->vid); + if (!vlan) + return 0; + + crc = vlan->tt.crc; + + batadv_softif_vlan_put(vlan); + + hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, + NLM_F_MULTI, + BATADV_CMD_GET_TRANSTABLE_LOCAL); + if (!hdr) + return -ENOBUFS; + + if (nla_put(msg, BATADV_ATTR_TT_ADDRESS, ETH_ALEN, common->addr) || + nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) || + nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) || + nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, common->flags)) + goto nla_put_failure; + + if (!(common->flags & BATADV_TT_CLIENT_NOPURGE) && + nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, last_seen_msecs)) + goto nla_put_failure; + + genlmsg_end(msg, hdr); + return 0; + + nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +/** + * batadv_tt_local_dump_bucket - Dump one TT local bucket into a message + * @msg: Netlink message to dump into + * @portid: Port making netlink request + * @seq: Sequence number of netlink message + * @bat_priv: The bat priv with all the soft interface information + * @head: Pointer to the list containing the local tt entries + * @idx_s: Number of entries to skip + * + * Return: Error code, or 0 on 
success + */ +static int +batadv_tt_local_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq, + struct batadv_priv *bat_priv, + struct hlist_head *head, int *idx_s) +{ + struct batadv_tt_common_entry *common; + int idx = 0; + + rcu_read_lock(); + hlist_for_each_entry_rcu(common, head, hash_entry) { + if (idx++ < *idx_s) + continue; + + if (batadv_tt_local_dump_entry(msg, portid, seq, bat_priv, + common)) { + rcu_read_unlock(); + *idx_s = idx - 1; + return -EMSGSIZE; + } + } + rcu_read_unlock(); + + *idx_s = 0; + return 0; +} + +/** + * batadv_tt_local_dump - Dump TT local entries into a message + * @msg: Netlink message to dump into + * @cb: Parameters from query + * + * Return: Error code, or 0 on success + */ +int batadv_tt_local_dump(struct sk_buff *msg, struct netlink_callback *cb) +{ + struct net *net = sock_net(cb->skb->sk); + struct net_device *soft_iface; + struct batadv_priv *bat_priv; + struct batadv_hard_iface *primary_if = NULL; + struct batadv_hashtable *hash; + struct hlist_head *head; + int ret; + int ifindex; + int bucket = cb->args[0]; + int idx = cb->args[1]; + int portid = NETLINK_CB(cb->skb).portid; + + ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX); + if (!ifindex) + return -EINVAL; + + soft_iface = dev_get_by_index(net, ifindex); + if (!soft_iface || !batadv_softif_is_valid(soft_iface)) { + ret = -ENODEV; + goto out; + } + + bat_priv = netdev_priv(soft_iface); + + primary_if = batadv_primary_if_get_selected(bat_priv); + if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) { + ret = -ENOENT; + goto out; + } + + hash = bat_priv->tt.local_hash; + + while (bucket < hash->size) { + head = &hash->table[bucket]; + + if (batadv_tt_local_dump_bucket(msg, portid, cb->nlh->nlmsg_seq, + bat_priv, head, &idx)) + break; + + bucket++; + } + + ret = msg->len; + + out: + if (primary_if) + batadv_hardif_put(primary_if); + if (soft_iface) + dev_put(soft_iface); + + cb->args[0] = bucket; + cb->args[1] = idx; + + return ret; +} static void batadv_tt_local_set_pending(struct batadv_priv *bat_priv, @@ -1259,7 +1477,7 @@ static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv) list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list, list) { list_del(&entry->list); - kfree(entry); + kmem_cache_free(batadv_tt_change_cache, entry); } atomic_set(&bat_priv->tt.local_changes, 0); @@ -1341,7 +1559,7 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global, goto out; } - orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC); + orig_entry = kmem_cache_zalloc(batadv_tt_orig_cache, GFP_ATOMIC); if (!orig_entry) goto out; @@ -1351,9 +1569,9 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global, orig_entry->orig_node = orig_node; orig_entry->ttvn = ttvn; kref_init(&orig_entry->refcount); - kref_get(&orig_entry->refcount); spin_lock_bh(&tt_global->list_lock); + kref_get(&orig_entry->refcount); hlist_add_head_rcu(&orig_entry->list, &tt_global->orig_list); spin_unlock_bh(&tt_global->list_lock); @@ -1411,7 +1629,8 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv, goto out; if (!tt_global_entry) { - tt_global_entry = kzalloc(sizeof(*tt_global_entry), GFP_ATOMIC); + tt_global_entry = kmem_cache_zalloc(batadv_tg_cache, + GFP_ATOMIC); if (!tt_global_entry) goto out; @@ -1428,13 +1647,13 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv, if (flags & BATADV_TT_CLIENT_ROAM) tt_global_entry->roam_at = jiffies; kref_init(&common->refcount); - kref_get(&common->refcount); common->added_at = 
jiffies; INIT_HLIST_HEAD(&tt_global_entry->orig_list); atomic_set(&tt_global_entry->orig_list_count, 0); spin_lock_init(&tt_global_entry->list_lock); + kref_get(&common->refcount); hash_added = batadv_hash_add(bat_priv->tt.global_hash, batadv_compare_tt, batadv_choose_tt, common, @@ -1579,6 +1798,7 @@ batadv_transtable_best_orig(struct batadv_priv *bat_priv, return best_entry; } +#ifdef CONFIG_BATMAN_ADV_DEBUGFS /** * batadv_tt_global_print_entry - print all orig nodes who announce the address * for this global entry @@ -1702,6 +1922,219 @@ out: batadv_hardif_put(primary_if); return 0; } +#endif + +/** + * batadv_tt_global_dump_subentry - Dump all TT local entries into a message + * @msg: Netlink message to dump into + * @portid: Port making netlink request + * @seq: Sequence number of netlink message + * @common: tt local & tt global common data + * @orig: Originator node announcing a non-mesh client + * @best: Is the best originator for the TT entry + * + * Return: Error code, or 0 on success + */ +static int +batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq, + struct batadv_tt_common_entry *common, + struct batadv_tt_orig_list_entry *orig, + bool best) +{ + void *hdr; + struct batadv_orig_node_vlan *vlan; + u8 last_ttvn; + u32 crc; + + vlan = batadv_orig_node_vlan_get(orig->orig_node, + common->vid); + if (!vlan) + return 0; + + crc = vlan->tt.crc; + + batadv_orig_node_vlan_put(vlan); + + hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, + NLM_F_MULTI, + BATADV_CMD_GET_TRANSTABLE_GLOBAL); + if (!hdr) + return -ENOBUFS; + + last_ttvn = atomic_read(&orig->orig_node->last_ttvn); + + if (nla_put(msg, BATADV_ATTR_TT_ADDRESS, ETH_ALEN, common->addr) || + nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN, + orig->orig_node->orig) || + nla_put_u8(msg, BATADV_ATTR_TT_TTVN, orig->ttvn) || + nla_put_u8(msg, BATADV_ATTR_TT_LAST_TTVN, last_ttvn) || + nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) || + nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) || + nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, common->flags)) + goto nla_put_failure; + + if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST)) + goto nla_put_failure; + + genlmsg_end(msg, hdr); + return 0; + + nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +/** + * batadv_tt_global_dump_entry - Dump one TT global entry into a message + * @msg: Netlink message to dump into + * @portid: Port making netlink request + * @seq: Sequence number of netlink message + * @bat_priv: The bat priv with all the soft interface information + * @common: tt local & tt global common data + * @sub_s: Number of entries to skip + * + * This function assumes the caller holds rcu_read_lock(). 
+ * + * Return: Error code, or 0 on success + */ +static int +batadv_tt_global_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, + struct batadv_priv *bat_priv, + struct batadv_tt_common_entry *common, int *sub_s) +{ + struct batadv_tt_orig_list_entry *orig_entry, *best_entry; + struct batadv_tt_global_entry *global; + struct hlist_head *head; + int sub = 0; + bool best; + + global = container_of(common, struct batadv_tt_global_entry, common); + best_entry = batadv_transtable_best_orig(bat_priv, global); + head = &global->orig_list; + + hlist_for_each_entry_rcu(orig_entry, head, list) { + if (sub++ < *sub_s) + continue; + + best = (orig_entry == best_entry); + + if (batadv_tt_global_dump_subentry(msg, portid, seq, common, + orig_entry, best)) { + *sub_s = sub - 1; + return -EMSGSIZE; + } + } + + *sub_s = 0; + return 0; +} + +/** + * batadv_tt_global_dump_bucket - Dump one TT local bucket into a message + * @msg: Netlink message to dump into + * @portid: Port making netlink request + * @seq: Sequence number of netlink message + * @bat_priv: The bat priv with all the soft interface information + * @head: Pointer to the list containing the global tt entries + * @idx_s: Number of entries to skip + * @sub: Number of entries to skip + * + * Return: Error code, or 0 on success + */ +static int +batadv_tt_global_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq, + struct batadv_priv *bat_priv, + struct hlist_head *head, int *idx_s, int *sub) +{ + struct batadv_tt_common_entry *common; + int idx = 0; + + rcu_read_lock(); + hlist_for_each_entry_rcu(common, head, hash_entry) { + if (idx++ < *idx_s) + continue; + + if (batadv_tt_global_dump_entry(msg, portid, seq, bat_priv, + common, sub)) { + rcu_read_unlock(); + *idx_s = idx - 1; + return -EMSGSIZE; + } + } + rcu_read_unlock(); + + *idx_s = 0; + *sub = 0; + return 0; +} + +/** + * batadv_tt_global_dump - Dump TT global entries into a message + * @msg: Netlink message to dump into + * @cb: Parameters from query + * + * Return: Error code, or length of message on success + */ +int batadv_tt_global_dump(struct sk_buff *msg, struct netlink_callback *cb) +{ + struct net *net = sock_net(cb->skb->sk); + struct net_device *soft_iface; + struct batadv_priv *bat_priv; + struct batadv_hard_iface *primary_if = NULL; + struct batadv_hashtable *hash; + struct hlist_head *head; + int ret; + int ifindex; + int bucket = cb->args[0]; + int idx = cb->args[1]; + int sub = cb->args[2]; + int portid = NETLINK_CB(cb->skb).portid; + + ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX); + if (!ifindex) + return -EINVAL; + + soft_iface = dev_get_by_index(net, ifindex); + if (!soft_iface || !batadv_softif_is_valid(soft_iface)) { + ret = -ENODEV; + goto out; + } + + bat_priv = netdev_priv(soft_iface); + + primary_if = batadv_primary_if_get_selected(bat_priv); + if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) { + ret = -ENOENT; + goto out; + } + + hash = bat_priv->tt.global_hash; + + while (bucket < hash->size) { + head = &hash->table[bucket]; + + if (batadv_tt_global_dump_bucket(msg, portid, + cb->nlh->nlmsg_seq, bat_priv, + head, &idx, &sub)) + break; + + bucket++; + } + + ret = msg->len; + + out: + if (primary_if) + batadv_hardif_put(primary_if); + if (soft_iface) + dev_put(soft_iface); + + cb->args[0] = bucket; + cb->args[1] = idx; + cb->args[2] = sub; + + return ret; +} /** * _batadv_tt_global_del_orig_entry - remove and free an orig_entry @@ -2280,7 +2713,7 @@ static void batadv_tt_req_node_release(struct kref *ref) tt_req_node = 
container_of(ref, struct batadv_tt_req_node, refcount); - kfree(tt_req_node); + kmem_cache_free(batadv_tt_req_cache, tt_req_node); } /** @@ -2367,7 +2800,7 @@ batadv_tt_req_node_new(struct batadv_priv *bat_priv, goto unlock; } - tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC); + tt_req_node = kmem_cache_alloc(batadv_tt_req_cache, GFP_ATOMIC); if (!tt_req_node) goto unlock; @@ -3104,7 +3537,7 @@ static void batadv_tt_roam_list_free(struct batadv_priv *bat_priv) list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) { list_del(&node->list); - kfree(node); + kmem_cache_free(batadv_tt_roam_cache, node); } spin_unlock_bh(&bat_priv->tt.roam_list_lock); @@ -3121,7 +3554,7 @@ static void batadv_tt_roam_purge(struct batadv_priv *bat_priv) continue; list_del(&node->list); - kfree(node); + kmem_cache_free(batadv_tt_roam_cache, node); } spin_unlock_bh(&bat_priv->tt.roam_list_lock); } @@ -3162,7 +3595,8 @@ static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv, u8 *client) } if (!ret) { - tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC); + tt_roam_node = kmem_cache_alloc(batadv_tt_roam_cache, + GFP_ATOMIC); if (!tt_roam_node) goto unlock; @@ -3865,3 +4299,85 @@ bool batadv_tt_global_is_isolated(struct batadv_priv *bat_priv, return ret; } + +/** + * batadv_tt_cache_init - Initialize tt memory object cache + * + * Return: 0 on success or negative error number in case of failure. + */ +int __init batadv_tt_cache_init(void) +{ + size_t tl_size = sizeof(struct batadv_tt_local_entry); + size_t tg_size = sizeof(struct batadv_tt_global_entry); + size_t tt_orig_size = sizeof(struct batadv_tt_orig_list_entry); + size_t tt_change_size = sizeof(struct batadv_tt_change_node); + size_t tt_req_size = sizeof(struct batadv_tt_req_node); + size_t tt_roam_size = sizeof(struct batadv_tt_roam_node); + + batadv_tl_cache = kmem_cache_create("batadv_tl_cache", tl_size, 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!batadv_tl_cache) + return -ENOMEM; + + batadv_tg_cache = kmem_cache_create("batadv_tg_cache", tg_size, 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!batadv_tg_cache) + goto err_tt_tl_destroy; + + batadv_tt_orig_cache = kmem_cache_create("batadv_tt_orig_cache", + tt_orig_size, 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!batadv_tt_orig_cache) + goto err_tt_tg_destroy; + + batadv_tt_change_cache = kmem_cache_create("batadv_tt_change_cache", + tt_change_size, 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!batadv_tt_change_cache) + goto err_tt_orig_destroy; + + batadv_tt_req_cache = kmem_cache_create("batadv_tt_req_cache", + tt_req_size, 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!batadv_tt_req_cache) + goto err_tt_change_destroy; + + batadv_tt_roam_cache = kmem_cache_create("batadv_tt_roam_cache", + tt_roam_size, 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!batadv_tt_roam_cache) + goto err_tt_req_destroy; + + return 0; + +err_tt_req_destroy: + kmem_cache_destroy(batadv_tt_req_cache); + batadv_tt_req_cache = NULL; +err_tt_change_destroy: + kmem_cache_destroy(batadv_tt_change_cache); + batadv_tt_change_cache = NULL; +err_tt_orig_destroy: + kmem_cache_destroy(batadv_tt_orig_cache); + batadv_tt_orig_cache = NULL; +err_tt_tg_destroy: + kmem_cache_destroy(batadv_tg_cache); + batadv_tg_cache = NULL; +err_tt_tl_destroy: + kmem_cache_destroy(batadv_tl_cache); + batadv_tl_cache = NULL; + + return -ENOMEM; +} + +/** + * batadv_tt_cache_destroy - Destroy tt memory object cache + */ +void batadv_tt_cache_destroy(void) +{ + kmem_cache_destroy(batadv_tl_cache); + kmem_cache_destroy(batadv_tg_cache); + 
kmem_cache_destroy(batadv_tt_orig_cache); + kmem_cache_destroy(batadv_tt_change_cache); + kmem_cache_destroy(batadv_tt_req_cache); + kmem_cache_destroy(batadv_tt_roam_cache); +} diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h index 7c7e2c006bfe..783fdba84db2 100644 --- a/net/batman-adv/translation-table.h +++ b/net/batman-adv/translation-table.h @@ -22,8 +22,10 @@ #include <linux/types.h> +struct netlink_callback; struct net_device; struct seq_file; +struct sk_buff; int batadv_tt_init(struct batadv_priv *bat_priv); bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr, @@ -33,6 +35,8 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const char *message, bool roaming); int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset); int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset); +int batadv_tt_local_dump(struct sk_buff *msg, struct netlink_callback *cb); +int batadv_tt_global_dump(struct sk_buff *msg, struct netlink_callback *cb); void batadv_tt_global_del_orig(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node, s32 match_vid, const char *message); @@ -59,4 +63,7 @@ bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv, bool batadv_tt_global_is_isolated(struct batadv_priv *bat_priv, const u8 *addr, unsigned short vid); +int batadv_tt_cache_init(void); +void batadv_tt_cache_destroy(void); + #endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */ diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c index 3d1cf0fb112d..77654f055f24 100644 --- a/net/batman-adv/tvlv.c +++ b/net/batman-adv/tvlv.c @@ -257,8 +257,13 @@ void batadv_tvlv_container_register(struct batadv_priv *bat_priv, spin_lock_bh(&bat_priv->tvlv.container_list_lock); tvlv_old = batadv_tvlv_container_get(bat_priv, type, version); batadv_tvlv_container_remove(bat_priv, tvlv_old); + + kref_get(&tvlv_new->refcount); hlist_add_head(&tvlv_new->list, &bat_priv->tvlv.container_list); spin_unlock_bh(&bat_priv->tvlv.container_list_lock); + + /* don't return reference to new tvlv_container */ + batadv_tvlv_container_put(tvlv_new); } /** @@ -542,8 +547,12 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv, INIT_HLIST_NODE(&tvlv_handler->list); spin_lock_bh(&bat_priv->tvlv.handler_list_lock); + kref_get(&tvlv_handler->refcount); hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list); spin_unlock_bh(&bat_priv->tvlv.handler_list_lock); + + /* don't return reference to new tvlv_handler */ + batadv_tvlv_handler_put(tvlv_handler); } /** diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index a64522c3b45d..b3dd1a381aad 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -28,6 +28,7 @@ #include <linux/if_ether.h> #include <linux/kref.h> #include <linux/netdevice.h> +#include <linux/netlink.h> #include <linux/sched.h> /* for linux/wait.h */ #include <linux/spinlock.h> #include <linux/types.h> @@ -132,7 +133,6 @@ struct batadv_hard_iface_bat_v { * @rcu: struct used for freeing in an RCU-safe manner * @bat_iv: per hard-interface B.A.T.M.A.N. IV data * @bat_v: per hard-interface B.A.T.M.A.N. 
V data - * @cleanup_work: work queue callback item for hard-interface deinit * @debug_dir: dentry for nc subdir in batman-adv directory in debugfs * @neigh_list: list of unique single hop neighbors via this interface * @neigh_list_lock: lock protecting neigh_list @@ -152,7 +152,6 @@ struct batadv_hard_iface { #ifdef CONFIG_BATMAN_ADV_BATMAN_V struct batadv_hard_iface_bat_v bat_v; #endif - struct work_struct cleanup_work; struct dentry *debug_dir; struct hlist_head neigh_list; /* neigh_list_lock protects: neigh_list */ @@ -1015,7 +1014,6 @@ struct batadv_priv_bat_v { * @forw_bcast_list_lock: lock protecting forw_bcast_list * @tp_list_lock: spinlock protecting @tp_list * @orig_work: work queue callback item for orig node purging - * @cleanup_work: work queue callback item for soft-interface deinit * @primary_if: one of the hard-interfaces assigned to this mesh interface * becomes the primary interface * @algo_ops: routing algorithm used by this mesh interface @@ -1074,7 +1072,6 @@ struct batadv_priv { spinlock_t tp_list_lock; /* protects tp_list */ atomic_t tp_num; struct delayed_work orig_work; - struct work_struct cleanup_work; struct batadv_hard_iface __rcu *primary_if; /* rcu protected pointer */ struct batadv_algo_ops *algo_ops; struct hlist_head softif_vlan_list; @@ -1379,6 +1376,7 @@ struct batadv_skb_cb { * locally generated packet * @if_outgoing: packet where the packet should be sent to, or NULL if * unspecified + * @queue_left: The queue (counter) this packet was applied to */ struct batadv_forw_packet { struct hlist_node list; @@ -1391,11 +1389,13 @@ struct batadv_forw_packet { struct delayed_work delayed_work; struct batadv_hard_iface *if_incoming; struct batadv_hard_iface *if_outgoing; + atomic_t *queue_left; }; /** * struct batadv_algo_iface_ops - mesh algorithm callbacks (interface specific) * @activate: start routing mechanisms when hard-interface is brought up + * (optional) * @enable: init routing info when hard-interface is enabled * @disable: de-init routing info when hard-interface is disabled * @update_mac: (re-)init mac addresses of the protocol information @@ -1413,11 +1413,13 @@ struct batadv_algo_iface_ops { /** * struct batadv_algo_neigh_ops - mesh algorithm callbacks (neighbour specific) * @hardif_init: called on creation of single hop entry + * (optional) * @cmp: compare the metrics of two neighbors for their respective outgoing * interfaces * @is_similar_or_better: check if neigh1 is equally similar or better than * neigh2 for their respective outgoing interface from the metric prospective * @print: print the single hop neighbor list (optional) + * @dump: dump neighbors to a netlink socket (optional) */ struct batadv_algo_neigh_ops { void (*hardif_init)(struct batadv_hardif_neigh_node *neigh); @@ -1429,26 +1431,64 @@ struct batadv_algo_neigh_ops { struct batadv_hard_iface *if_outgoing1, struct batadv_neigh_node *neigh2, struct batadv_hard_iface *if_outgoing2); +#ifdef CONFIG_BATMAN_ADV_DEBUGFS void (*print)(struct batadv_priv *priv, struct seq_file *seq); +#endif + void (*dump)(struct sk_buff *msg, struct netlink_callback *cb, + struct batadv_priv *priv, + struct batadv_hard_iface *hard_iface); }; /** * struct batadv_algo_orig_ops - mesh algorithm callbacks (originator specific) * @free: free the resources allocated by the routing algorithm for an orig_node - * object + * object (optional) * @add_if: ask the routing algorithm to apply the needed changes to the - * orig_node due to a new hard-interface being added into the mesh + * orig_node due to a new 
hard-interface being added into the mesh (optional) * @del_if: ask the routing algorithm to apply the needed changes to the - * orig_node due to an hard-interface being removed from the mesh + * orig_node due to an hard-interface being removed from the mesh (optional) * @print: print the originator table (optional) + * @dump: dump originators to a netlink socket (optional) */ struct batadv_algo_orig_ops { void (*free)(struct batadv_orig_node *orig_node); int (*add_if)(struct batadv_orig_node *orig_node, int max_if_num); int (*del_if)(struct batadv_orig_node *orig_node, int max_if_num, int del_if_num); +#ifdef CONFIG_BATMAN_ADV_DEBUGFS void (*print)(struct batadv_priv *priv, struct seq_file *seq, struct batadv_hard_iface *hard_iface); +#endif + void (*dump)(struct sk_buff *msg, struct netlink_callback *cb, + struct batadv_priv *priv, + struct batadv_hard_iface *hard_iface); +}; + +/** + * struct batadv_algo_gw_ops - mesh algorithm callbacks (GW specific) + * @store_sel_class: parse and stores a new GW selection class (optional) + * @show_sel_class: prints the current GW selection class (optional) + * @get_best_gw_node: select the best GW from the list of available nodes + * (optional) + * @is_eligible: check if a newly discovered GW is a potential candidate for + * the election as best GW (optional) + * @print: print the gateway table (optional) + * @dump: dump gateways to a netlink socket (optional) + */ +struct batadv_algo_gw_ops { + ssize_t (*store_sel_class)(struct batadv_priv *bat_priv, char *buff, + size_t count); + ssize_t (*show_sel_class)(struct batadv_priv *bat_priv, char *buff); + struct batadv_gw_node *(*get_best_gw_node) + (struct batadv_priv *bat_priv); + bool (*is_eligible)(struct batadv_priv *bat_priv, + struct batadv_orig_node *curr_gw_orig, + struct batadv_orig_node *orig_node); +#ifdef CONFIG_BATMAN_ADV_DEBUGFS + void (*print)(struct batadv_priv *bat_priv, struct seq_file *seq); +#endif + void (*dump)(struct sk_buff *msg, struct netlink_callback *cb, + struct batadv_priv *priv); }; /** @@ -1458,6 +1498,7 @@ struct batadv_algo_orig_ops { * @iface: callbacks related to interface handling * @neigh: callbacks related to neighbors handling * @orig: callbacks related to originators handling + * @gw: callbacks related to GW mode */ struct batadv_algo_ops { struct hlist_node list; @@ -1465,6 +1506,7 @@ struct batadv_algo_ops { struct batadv_algo_iface_ops iface; struct batadv_algo_neigh_ops neigh; struct batadv_algo_orig_ops orig; + struct batadv_algo_gw_ops gw; }; /** @@ -1564,4 +1606,17 @@ enum batadv_tvlv_handler_flags { BATADV_TVLV_HANDLER_OGM_CALLED = BIT(2), }; +/** + * struct batadv_store_mesh_work - Work queue item to detach add/del interface + * from sysfs locks + * @net_dev: netdevice to add/remove to/from batman-adv soft-interface + * @soft_iface_name: name of soft-interface to modify + * @work: work queue item + */ +struct batadv_store_mesh_work { + struct net_device *net_dev; + char soft_iface_name[IFNAMSIZ]; + struct work_struct work; +}; + #endif /* _NET_BATMAN_ADV_TYPES_H_ */ diff --git a/net/bridge/Makefile b/net/bridge/Makefile index a1cda5d4718d..0aefc011b668 100644 --- a/net/bridge/Makefile +++ b/net/bridge/Makefile @@ -20,4 +20,6 @@ bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o br_mdb.o bridge-$(CONFIG_BRIDGE_VLAN_FILTERING) += br_vlan.o +bridge-$(CONFIG_NET_SWITCHDEV) += br_switchdev.o + obj-$(CONFIG_NETFILTER) += netfilter/ diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 63a83d8d7da3..32a02de39cd2 100644 --- 
a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -29,7 +29,8 @@ static inline int should_deliver(const struct net_bridge_port *p, vg = nbp_vlan_group_rcu(p); return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) && - br_allowed_egress(vg, skb) && p->state == BR_STATE_FORWARDING; + br_allowed_egress(vg, skb) && p->state == BR_STATE_FORWARDING && + nbp_switchdev_allowed_egress(p, skb); } int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index f2fede05d32c..1da3221845f1 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -545,6 +545,10 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) if (err) goto err5; + err = nbp_switchdev_mark_set(p); + if (err) + goto err6; + dev_disable_lro(dev); list_add_rcu(&p->list, &br->port_list); @@ -566,7 +570,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) err = nbp_vlan_init(p); if (err) { netdev_err(dev, "failed to initialize vlan filtering on this port\n"); - goto err6; + goto err7; } spin_lock_bh(&br->lock); @@ -589,12 +593,12 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) return 0; -err6: +err7: list_del_rcu(&p->list); br_fdb_delete_by_port(br, p, 0, 1); nbp_update_port_count(br); +err6: netdev_upper_dev_unlink(dev, br->dev); - err5: dev->priv_flags &= ~IFF_BRIDGE_PORT; netdev_rx_handler_unregister(dev); diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 8e486203d133..3132cfc80e9d 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -145,6 +145,8 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb if (!br_allowed_ingress(p->br, nbp_vlan_group_rcu(p), skb, &vid)) goto out; + nbp_switchdev_frame_mark(p, skb); + /* insert into forwarding database after filtering to avoid spoofing */ br = p->br; if (p->flags & BR_LEARNING) diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index f2a29e467e78..190a5bc00f4a 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -1245,14 +1245,30 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev) return 0; } -static size_t bridge_get_linkxstats_size(const struct net_device *dev) +static size_t br_get_linkxstats_size(const struct net_device *dev, int attr) { - struct net_bridge *br = netdev_priv(dev); + struct net_bridge_port *p = NULL; struct net_bridge_vlan_group *vg; struct net_bridge_vlan *v; + struct net_bridge *br; int numvls = 0; - vg = br_vlan_group(br); + switch (attr) { + case IFLA_STATS_LINK_XSTATS: + br = netdev_priv(dev); + vg = br_vlan_group(br); + break; + case IFLA_STATS_LINK_XSTATS_SLAVE: + p = br_port_get_rtnl(dev); + if (!p) + return 0; + br = p->br; + vg = nbp_vlan_group(p); + break; + default: + return 0; + } + if (vg) { /* we need to count all, even placeholder entries */ list_for_each_entry(v, &vg->vlan_list, vlist) @@ -1264,45 +1280,42 @@ static size_t bridge_get_linkxstats_size(const struct net_device *dev) nla_total_size(0); } -static size_t brport_get_linkxstats_size(const struct net_device *dev) -{ - return nla_total_size(sizeof(struct br_mcast_stats)) + - nla_total_size(0); -} - -static size_t br_get_linkxstats_size(const struct net_device *dev, int attr) +static int br_fill_linkxstats(struct sk_buff *skb, + const struct net_device *dev, + int *prividx, int attr) { - size_t retsize = 0; + struct nlattr *nla __maybe_unused; + struct net_bridge_port *p = NULL; + struct net_bridge_vlan_group *vg; + struct net_bridge_vlan *v; + struct 
net_bridge *br; + struct nlattr *nest; + int vl_idx = 0; switch (attr) { case IFLA_STATS_LINK_XSTATS: - retsize = bridge_get_linkxstats_size(dev); + br = netdev_priv(dev); + vg = br_vlan_group(br); break; case IFLA_STATS_LINK_XSTATS_SLAVE: - retsize = brport_get_linkxstats_size(dev); + p = br_port_get_rtnl(dev); + if (!p) + return 0; + br = p->br; + vg = nbp_vlan_group(p); break; + default: + return -EINVAL; } - return retsize; -} - -static int bridge_fill_linkxstats(struct sk_buff *skb, - const struct net_device *dev, - int *prividx) -{ - struct net_bridge *br = netdev_priv(dev); - struct nlattr *nla __maybe_unused; - struct net_bridge_vlan_group *vg; - struct net_bridge_vlan *v; - struct nlattr *nest; - int vl_idx = 0; - nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BRIDGE); if (!nest) return -EMSGSIZE; - vg = br_vlan_group(br); if (vg) { + u16 pvid; + + pvid = br_get_pvid(vg); list_for_each_entry(v, &vg->vlan_list, vlist) { struct bridge_vlan_xstats vxi; struct br_vlan_stats stats; @@ -1311,6 +1324,9 @@ static int bridge_fill_linkxstats(struct sk_buff *skb, continue; memset(&vxi, 0, sizeof(vxi)); vxi.vid = v->vid; + vxi.flags = v->flags; + if (v->vid == pvid) + vxi.flags |= BRIDGE_VLAN_INFO_PVID; br_vlan_get_stats(v, &stats); vxi.rx_bytes = stats.rx_bytes; vxi.rx_packets = stats.rx_packets; @@ -1329,7 +1345,7 @@ static int bridge_fill_linkxstats(struct sk_buff *skb, BRIDGE_XSTATS_PAD); if (!nla) goto nla_put_failure; - br_multicast_get_stats(br, NULL, nla_data(nla)); + br_multicast_get_stats(br, p, nla_data(nla)); } #endif nla_nest_end(skb, nest); @@ -1344,52 +1360,6 @@ nla_put_failure: return -EMSGSIZE; } -static int brport_fill_linkxstats(struct sk_buff *skb, - const struct net_device *dev, - int *prividx) -{ - struct net_bridge_port *p = br_port_get_rtnl(dev); - struct nlattr *nla __maybe_unused; - struct nlattr *nest; - - if (!p) - return 0; - - nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BRIDGE); - if (!nest) - return -EMSGSIZE; -#ifdef CONFIG_BRIDGE_IGMP_SNOOPING - nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST, - sizeof(struct br_mcast_stats), - BRIDGE_XSTATS_PAD); - if (!nla) { - nla_nest_end(skb, nest); - return -EMSGSIZE; - } - br_multicast_get_stats(p->br, p, nla_data(nla)); -#endif - nla_nest_end(skb, nest); - - return 0; -} - -static int br_fill_linkxstats(struct sk_buff *skb, const struct net_device *dev, - int *prividx, int attr) -{ - int ret = -EINVAL; - - switch (attr) { - case IFLA_STATS_LINK_XSTATS: - ret = bridge_fill_linkxstats(skb, dev, prividx); - break; - case IFLA_STATS_LINK_XSTATS_SLAVE: - ret = brport_fill_linkxstats(skb, dev, prividx); - break; - } - - return ret; -} - static struct rtnl_af_ops br_af_ops __read_mostly = { .family = AF_BRIDGE, .get_link_af_size = br_get_link_af_size_filtered, diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index aac2a6e6b008..2379b2b865c9 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -251,6 +251,9 @@ struct net_bridge_port #ifdef CONFIG_BRIDGE_VLAN_FILTERING struct net_bridge_vlan_group __rcu *vlgrp; #endif +#ifdef CONFIG_NET_SWITCHDEV + int offload_fwd_mark; +#endif }; #define br_auto_port(p) ((p)->flags & BR_AUTO_MASK) @@ -359,6 +362,11 @@ struct net_bridge struct timer_list gc_timer; struct kobject *ifobj; u32 auto_cnt; + +#ifdef CONFIG_NET_SWITCHDEV + int offload_fwd_mark; +#endif + #ifdef CONFIG_BRIDGE_VLAN_FILTERING struct net_bridge_vlan_group __rcu *vlgrp; u8 vlan_enabled; @@ -381,6 +389,10 @@ struct br_input_skb_cb { #ifdef CONFIG_BRIDGE_VLAN_FILTERING bool vlan_filtered; #endif 
+ +#ifdef CONFIG_NET_SWITCHDEV + int offload_fwd_mark; +#endif }; #define BR_INPUT_SKB_CB(__skb) ((struct br_input_skb_cb *)(__skb)->cb) @@ -1034,4 +1046,29 @@ static inline int br_sysfs_addbr(struct net_device *dev) { return 0; } static inline void br_sysfs_delbr(struct net_device *dev) { return; } #endif /* CONFIG_SYSFS */ +/* br_switchdev.c */ +#ifdef CONFIG_NET_SWITCHDEV +int nbp_switchdev_mark_set(struct net_bridge_port *p); +void nbp_switchdev_frame_mark(const struct net_bridge_port *p, + struct sk_buff *skb); +bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p, + const struct sk_buff *skb); +#else +static inline int nbp_switchdev_mark_set(struct net_bridge_port *p) +{ + return 0; +} + +static inline void nbp_switchdev_frame_mark(const struct net_bridge_port *p, + struct sk_buff *skb) +{ +} + +static inline bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p, + const struct sk_buff *skb) +{ + return true; +} +#endif /* CONFIG_NET_SWITCHDEV */ + #endif diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c new file mode 100644 index 000000000000..f4097b900de1 --- /dev/null +++ b/net/bridge/br_switchdev.c @@ -0,0 +1,57 @@ +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/netdevice.h> +#include <linux/rtnetlink.h> +#include <linux/skbuff.h> +#include <net/switchdev.h> + +#include "br_private.h" + +static int br_switchdev_mark_get(struct net_bridge *br, struct net_device *dev) +{ + struct net_bridge_port *p; + + /* dev is yet to be added to the port list. */ + list_for_each_entry(p, &br->port_list, list) { + if (switchdev_port_same_parent_id(dev, p->dev)) + return p->offload_fwd_mark; + } + + return ++br->offload_fwd_mark; +} + +int nbp_switchdev_mark_set(struct net_bridge_port *p) +{ + struct switchdev_attr attr = { + .orig_dev = p->dev, + .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID, + }; + int err; + + ASSERT_RTNL(); + + err = switchdev_port_attr_get(p->dev, &attr); + if (err) { + if (err == -EOPNOTSUPP) + return 0; + return err; + } + + p->offload_fwd_mark = br_switchdev_mark_get(p->br, p->dev); + + return 0; +} + +void nbp_switchdev_frame_mark(const struct net_bridge_port *p, + struct sk_buff *skb) +{ + if (skb->offload_fwd_mark && !WARN_ON_ONCE(!p->offload_fwd_mark)) + BR_INPUT_SKB_CB(skb)->offload_fwd_mark = p->offload_fwd_mark; +} + +bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p, + const struct sk_buff *skb) +{ + return !skb->offload_fwd_mark || + BR_INPUT_SKB_CB(skb)->offload_fwd_mark != p->offload_fwd_mark; +} diff --git a/net/core/dev.c b/net/core/dev.c index dd6ce598de89..1d5c6dda1988 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3355,16 +3355,6 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) else skb_dst_force(skb); -#ifdef CONFIG_NET_SWITCHDEV - /* Don't forward if offload device already forwarded */ - if (skb->offload_fwd_mark && - skb->offload_fwd_mark == dev->offload_fwd_mark) { - consume_skb(skb); - rc = NET_XMIT_SUCCESS; - goto out; - } -#endif - txq = netdev_pick_tx(dev, skb, accel_priv); q = rcu_dereference_bh(txq->qdisc); @@ -4292,15 +4282,25 @@ int netif_receive_skb(struct sk_buff *skb) } EXPORT_SYMBOL(netif_receive_skb); -/* Network device is going away, flush any packets still pending - * Called with irqs disabled. 
- */ -static void flush_backlog(void *arg) +struct flush_work { + struct net_device *dev; + struct work_struct work; +}; + +DEFINE_PER_CPU(struct flush_work, flush_works); + +/* Network device is going away, flush any packets still pending */ +static void flush_backlog(struct work_struct *work) { - struct net_device *dev = arg; - struct softnet_data *sd = this_cpu_ptr(&softnet_data); + struct flush_work *flush = container_of(work, typeof(*flush), work); + struct net_device *dev = flush->dev; struct sk_buff *skb, *tmp; + struct softnet_data *sd; + local_bh_disable(); + sd = this_cpu_ptr(&softnet_data); + + local_irq_disable(); rps_lock(sd); skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { if (skb->dev == dev) { @@ -4310,6 +4310,7 @@ static void flush_backlog(void *arg) } } rps_unlock(sd); + local_irq_enable(); skb_queue_walk_safe(&sd->process_queue, skb, tmp) { if (skb->dev == dev) { @@ -4318,6 +4319,27 @@ static void flush_backlog(void *arg) input_queue_head_incr(sd); } } + local_bh_enable(); +} + +static void flush_all_backlogs(struct net_device *dev) +{ + unsigned int cpu; + + get_online_cpus(); + + for_each_online_cpu(cpu) { + struct flush_work *flush = per_cpu_ptr(&flush_works, cpu); + + INIT_WORK(&flush->work, flush_backlog); + flush->dev = dev; + queue_work_on(cpu, system_highpri_wq, &flush->work); + } + + for_each_online_cpu(cpu) + flush_work(&per_cpu_ptr(&flush_works, cpu)->work); + + put_online_cpus(); } static int napi_gro_complete(struct sk_buff *skb) @@ -4805,8 +4827,9 @@ static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) static int process_backlog(struct napi_struct *napi, int quota) { - int work = 0; struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); + bool again = true; + int work = 0; /* Check if we have pending ipi, its better to send them now, * not waiting net_rx_action() end. @@ -4817,23 +4840,20 @@ static int process_backlog(struct napi_struct *napi, int quota) } napi->weight = weight_p; - local_irq_disable(); - while (1) { + while (again) { struct sk_buff *skb; while ((skb = __skb_dequeue(&sd->process_queue))) { rcu_read_lock(); - local_irq_enable(); __netif_receive_skb(skb); rcu_read_unlock(); - local_irq_disable(); input_queue_head_incr(sd); - if (++work >= quota) { - local_irq_enable(); + if (++work >= quota) return work; - } + } + local_irq_disable(); rps_lock(sd); if (skb_queue_empty(&sd->input_pkt_queue)) { /* @@ -4845,16 +4865,14 @@ static int process_backlog(struct napi_struct *napi, int quota) * and we dont need an smp_mb() memory barrier. 
*/ napi->state = 0; - rps_unlock(sd); - - break; + again = false; + } else { + skb_queue_splice_tail_init(&sd->input_pkt_queue, + &sd->process_queue); } - - skb_queue_splice_tail_init(&sd->input_pkt_queue, - &sd->process_queue); rps_unlock(sd); + local_irq_enable(); } - local_irq_enable(); return work; } @@ -6707,7 +6725,7 @@ static void rollback_registered_many(struct list_head *head) unlist_netdevice(dev); dev->reg_state = NETREG_UNREGISTERING; - on_each_cpu(flush_backlog, dev, 1); + flush_all_backlogs(dev); } synchronize_net(); @@ -7625,6 +7643,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, INIT_LIST_HEAD(&dev->all_adj_list.lower); INIT_LIST_HEAD(&dev->ptype_all); INIT_LIST_HEAD(&dev->ptype_specific); +#ifdef CONFIG_NET_SCHED + hash_init(dev->qdisc_hash); +#endif dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; setup(dev); diff --git a/net/core/filter.c b/net/core/filter.c index cb06aceb512a..a83766be1ad2 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1350,14 +1350,18 @@ struct bpf_scratchpad { static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp); +static inline int __bpf_try_make_writable(struct sk_buff *skb, + unsigned int write_len) +{ + return skb_ensure_writable(skb, write_len); +} + static inline int bpf_try_make_writable(struct sk_buff *skb, unsigned int write_len) { - int err; + int err = __bpf_try_make_writable(skb, write_len); - err = skb_ensure_writable(skb, write_len); bpf_compute_data_end(skb); - return err; } @@ -1976,8 +1980,8 @@ static u64 bpf_skb_change_type(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) u32 pkt_type = r2; /* We only allow a restricted subset to be changed for now. */ - if (unlikely(skb->pkt_type > PACKET_OTHERHOST || - pkt_type > PACKET_OTHERHOST)) + if (unlikely(!skb_pkt_type_ok(skb->pkt_type) || + !skb_pkt_type_ok(pkt_type))) return -EINVAL; skb->pkt_type = pkt_type; @@ -1992,6 +1996,92 @@ static const struct bpf_func_proto bpf_skb_change_type_proto = { .arg2_type = ARG_ANYTHING, }; +static u32 __bpf_skb_min_len(const struct sk_buff *skb) +{ + u32 min_len = skb_network_offset(skb); + + if (skb_transport_header_was_set(skb)) + min_len = skb_transport_offset(skb); + if (skb->ip_summed == CHECKSUM_PARTIAL) + min_len = skb_checksum_start_offset(skb) + + skb->csum_offset + sizeof(__sum16); + return min_len; +} + +static u32 __bpf_skb_max_len(const struct sk_buff *skb) +{ + return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len : + 65536; +} + +static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len) +{ + unsigned int old_len = skb->len; + int ret; + + ret = __skb_grow_rcsum(skb, new_len); + if (!ret) + memset(skb->data + old_len, 0, new_len - old_len); + return ret; +} + +static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len) +{ + return __skb_trim_rcsum(skb, new_len); +} + +static u64 bpf_skb_change_tail(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5) +{ + struct sk_buff *skb = (struct sk_buff *)(long) r1; + u32 max_len = __bpf_skb_max_len(skb); + u32 min_len = __bpf_skb_min_len(skb); + u32 new_len = (u32) r2; + int ret; + + if (unlikely(flags || new_len > max_len || new_len < min_len)) + return -EINVAL; + if (skb->encapsulation) + return -ENOTSUPP; + + /* The basic idea of this helper is that it's performing the + * needed work to either grow or trim an skb, and eBPF program + * rewrites the rest via helpers like bpf_skb_store_bytes(), + * bpf_lX_csum_replace() and others rather than passing a raw + * buffer here. 
This one is a slow path helper and intended + * for replies with control messages. + * + * Like in bpf_skb_change_proto(), we want to keep this rather + * minimal and without protocol specifics so that we are able + * to separate concerns as in bpf_skb_store_bytes() should only + * be the one responsible for writing buffers. + * + * It's really expected to be a slow path operation here for + * control message replies, so we're implicitly linearizing, + * uncloning and drop offloads from the skb by this. + */ + ret = __bpf_try_make_writable(skb, skb->len); + if (!ret) { + if (new_len > skb->len) + ret = bpf_skb_grow_rcsum(skb, new_len); + else if (new_len < skb->len) + ret = bpf_skb_trim_rcsum(skb, new_len); + if (!ret && skb_is_gso(skb)) + skb_gso_reset(skb); + } + + bpf_compute_data_end(skb); + return ret; +} + +static const struct bpf_func_proto bpf_skb_change_tail_proto = { + .func = bpf_skb_change_tail, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_ANYTHING, +}; + bool bpf_helper_changes_skb_data(void *func) { if (func == bpf_skb_vlan_push) @@ -2002,6 +2092,8 @@ bool bpf_helper_changes_skb_data(void *func) return true; if (func == bpf_skb_change_proto) return true; + if (func == bpf_skb_change_tail) + return true; if (func == bpf_l3_csum_replace) return true; if (func == bpf_l4_csum_replace) @@ -2282,7 +2374,6 @@ bpf_get_skb_set_tunnel_proto(enum bpf_func_id which) } } -#ifdef CONFIG_SOCK_CGROUP_DATA static u64 bpf_skb_under_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) { struct sk_buff *skb = (struct sk_buff *)(long)r1; @@ -2303,7 +2394,7 @@ static u64 bpf_skb_under_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) if (unlikely(!cgrp)) return -EAGAIN; - return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data), cgrp); + return sk_under_cgroup_hierarchy(sk, cgrp); } static const struct bpf_func_proto bpf_skb_under_cgroup_proto = { @@ -2314,7 +2405,41 @@ static const struct bpf_func_proto bpf_skb_under_cgroup_proto = { .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, }; -#endif + +static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff, + unsigned long off, unsigned long len) +{ + memcpy(dst_buff, src_buff + off, len); + return 0; +} + +static u64 bpf_xdp_event_output(u64 r1, u64 r2, u64 flags, u64 r4, + u64 meta_size) +{ + struct xdp_buff *xdp = (struct xdp_buff *)(long) r1; + struct bpf_map *map = (struct bpf_map *)(long) r2; + u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32; + void *meta = (void *)(long) r4; + + if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK))) + return -EINVAL; + if (unlikely(xdp_size > (unsigned long)(xdp->data_end - xdp->data))) + return -EFAULT; + + return bpf_event_output(map, flags, meta, meta_size, xdp, xdp_size, + bpf_xdp_copy); +} + +static const struct bpf_func_proto bpf_xdp_event_output_proto = { + .func = bpf_xdp_event_output, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_ANYTHING, + .arg4_type = ARG_PTR_TO_STACK, + .arg5_type = ARG_CONST_STACK_SIZE, +}; static const struct bpf_func_proto * sk_filter_func_proto(enum bpf_func_id func_id) @@ -2368,6 +2493,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id) return &bpf_skb_change_proto_proto; case BPF_FUNC_skb_change_type: return &bpf_skb_change_type_proto; + case BPF_FUNC_skb_change_tail: + return &bpf_skb_change_tail_proto; case BPF_FUNC_skb_get_tunnel_key: return &bpf_skb_get_tunnel_key_proto; case 
BPF_FUNC_skb_set_tunnel_key: @@ -2386,10 +2513,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id) return &bpf_skb_event_output_proto; case BPF_FUNC_get_smp_processor_id: return &bpf_get_smp_processor_id_proto; -#ifdef CONFIG_SOCK_CGROUP_DATA case BPF_FUNC_skb_under_cgroup: return &bpf_skb_under_cgroup_proto; -#endif default: return sk_filter_func_proto(func_id); } @@ -2398,7 +2523,12 @@ tc_cls_act_func_proto(enum bpf_func_id func_id) static const struct bpf_func_proto * xdp_func_proto(enum bpf_func_id func_id) { - return sk_filter_func_proto(func_id); + switch (func_id) { + case BPF_FUNC_perf_event_output: + return &bpf_xdp_event_output_proto; + default: + return sk_filter_func_proto(func_id); + } } static bool __is_valid_access(int off, int size, enum bpf_access_type type) diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 61ad43f61c5e..a2879c0f6c4c 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -6,6 +6,8 @@ #include <linux/if_vlan.h> #include <net/ip.h> #include <net/ipv6.h> +#include <net/gre.h> +#include <net/pptp.h> #include <linux/igmp.h> #include <linux/icmp.h> #include <linux/sctp.h> @@ -116,13 +118,16 @@ bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_dissector_key_addrs *key_addrs; struct flow_dissector_key_ports *key_ports; struct flow_dissector_key_tags *key_tags; + struct flow_dissector_key_vlan *key_vlan; struct flow_dissector_key_keyid *key_keyid; + bool skip_vlan = false; u8 ip_proto = 0; bool ret = false; if (!data) { data = skb->data; - proto = skb->protocol; + proto = skb_vlan_tag_present(skb) ? + skb->vlan_proto : skb->protocol; nhoff = skb_network_offset(skb); hlen = skb_headlen(skb); } @@ -241,23 +246,45 @@ ipv6: case htons(ETH_P_8021AD): case htons(ETH_P_8021Q): { const struct vlan_hdr *vlan; - struct vlan_hdr _vlan; - vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan), data, hlen, &_vlan); - if (!vlan) - goto out_bad; + if (skb_vlan_tag_present(skb)) + proto = skb->protocol; + + if (!skb_vlan_tag_present(skb) || + proto == cpu_to_be16(ETH_P_8021Q) || + proto == cpu_to_be16(ETH_P_8021AD)) { + struct vlan_hdr _vlan; + vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan), + data, hlen, &_vlan); + if (!vlan) + goto out_bad; + proto = vlan->h_vlan_encapsulated_proto; + nhoff += sizeof(*vlan); + if (skip_vlan) + goto again; + } + + skip_vlan = true; if (dissector_uses_key(flow_dissector, - FLOW_DISSECTOR_KEY_VLANID)) { - key_tags = skb_flow_dissector_target(flow_dissector, - FLOW_DISSECTOR_KEY_VLANID, + FLOW_DISSECTOR_KEY_VLAN)) { + key_vlan = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_VLAN, target_container); - key_tags->vlan_id = skb_vlan_tag_get_id(skb); + if (skb_vlan_tag_present(skb)) { + key_vlan->vlan_id = skb_vlan_tag_get_id(skb); + key_vlan->vlan_priority = + (skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT); + } else { + key_vlan->vlan_id = ntohs(vlan->h_vlan_TCI) & + VLAN_VID_MASK; + key_vlan->vlan_priority = + (ntohs(vlan->h_vlan_TCI) & + VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + } } - proto = vlan->h_vlan_encapsulated_proto; - nhoff += sizeof(*vlan); goto again; } case htons(ETH_P_PPP_SES): { @@ -338,32 +365,42 @@ mpls: ip_proto_again: switch (ip_proto) { case IPPROTO_GRE: { - struct gre_hdr { - __be16 flags; - __be16 proto; - } *hdr, _hdr; + struct gre_base_hdr *hdr, _hdr; + u16 gre_ver; + int offset = 0; hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr); if (!hdr) goto out_bad; - /* - * Only look inside GRE if version zero and no - * routing - */ - if 
(hdr->flags & (GRE_VERSION | GRE_ROUTING)) + + /* Only look inside GRE without routing */ + if (hdr->flags & GRE_ROUTING) break; - proto = hdr->proto; - nhoff += 4; + /* Only look inside GRE for version 0 and 1 */ + gre_ver = ntohs(hdr->flags & GRE_VERSION); + if (gre_ver > 1) + break; + + proto = hdr->protocol; + if (gre_ver) { + /* Version1 must be PPTP, and check the flags */ + if (!(proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY))) + break; + } + + offset += sizeof(struct gre_base_hdr); + if (hdr->flags & GRE_CSUM) - nhoff += 4; + offset += sizeof(((struct gre_full_hdr *)0)->csum) + + sizeof(((struct gre_full_hdr *)0)->reserved1); + if (hdr->flags & GRE_KEY) { const __be32 *keyid; __be32 _keyid; - keyid = __skb_header_pointer(skb, nhoff, sizeof(_keyid), + keyid = __skb_header_pointer(skb, nhoff + offset, sizeof(_keyid), data, hlen, &_keyid); - if (!keyid) goto out_bad; @@ -372,32 +409,65 @@ ip_proto_again: key_keyid = skb_flow_dissector_target(flow_dissector, FLOW_DISSECTOR_KEY_GRE_KEYID, target_container); - key_keyid->keyid = *keyid; + if (gre_ver == 0) + key_keyid->keyid = *keyid; + else + key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK; } - nhoff += 4; + offset += sizeof(((struct gre_full_hdr *)0)->key); } + if (hdr->flags & GRE_SEQ) - nhoff += 4; - if (proto == htons(ETH_P_TEB)) { - const struct ethhdr *eth; - struct ethhdr _eth; - - eth = __skb_header_pointer(skb, nhoff, - sizeof(_eth), - data, hlen, &_eth); - if (!eth) + offset += sizeof(((struct pptp_gre_header *)0)->seq); + + if (gre_ver == 0) { + if (proto == htons(ETH_P_TEB)) { + const struct ethhdr *eth; + struct ethhdr _eth; + + eth = __skb_header_pointer(skb, nhoff + offset, + sizeof(_eth), + data, hlen, &_eth); + if (!eth) + goto out_bad; + proto = eth->h_proto; + offset += sizeof(*eth); + + /* Cap headers that we access via pointers at the + * end of the Ethernet header as our maximum alignment + * at that point is only 2 bytes. + */ + if (NET_IP_ALIGN) + hlen = (nhoff + offset); + } + } else { /* version 1, must be PPTP */ + u8 _ppp_hdr[PPP_HDRLEN]; + u8 *ppp_hdr; + + if (hdr->flags & GRE_ACK) + offset += sizeof(((struct pptp_gre_header *)0)->ack); + + ppp_hdr = skb_header_pointer(skb, nhoff + offset, + sizeof(_ppp_hdr), _ppp_hdr); + if (!ppp_hdr) goto out_bad; - proto = eth->h_proto; - nhoff += sizeof(*eth); - - /* Cap headers that we access via pointers at the - * end of the Ethernet header as our maximum alignment - * at that point is only 2 bytes. 
- */ - if (NET_IP_ALIGN) - hlen = nhoff; + + switch (PPP_PROTOCOL(ppp_hdr)) { + case PPP_IP: + proto = htons(ETH_P_IP); + break; + case PPP_IPV6: + proto = htons(ETH_P_IPV6); + break; + default: + /* Could probably catch some more like MPLS */ + break; + } + + offset += PPP_HDRLEN; } + nhoff += offset; key_control->flags |= FLOW_DIS_ENCAPSULATION; if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) goto out_good; @@ -872,8 +942,8 @@ static const struct flow_dissector_key flow_keys_dissector_keys[] = { .offset = offsetof(struct flow_keys, ports), }, { - .key_id = FLOW_DISSECTOR_KEY_VLANID, - .offset = offsetof(struct flow_keys, tags), + .key_id = FLOW_DISSECTOR_KEY_VLAN, + .offset = offsetof(struct flow_keys, vlan), }, { .key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL, diff --git a/net/core/neighbour.c b/net/core/neighbour.c index cf26e04c4046..2ae929f9bd06 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1148,7 +1148,8 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, } else goto out; } else { - if (lladdr == neigh->ha && new == NUD_STALE) + if (lladdr == neigh->ha && new == NUD_STALE && + !(flags & NEIGH_UPDATE_F_ADMIN)) new = old; } } diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 2c2eb1b629b1..1fe58167d39a 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -37,6 +37,8 @@ struct net init_net = { }; EXPORT_SYMBOL(init_net); +static bool init_net_initialized; + #define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */ static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS; @@ -750,6 +752,8 @@ static int __init net_ns_init(void) if (setup_net(&init_net, &init_user_ns)) panic("Could not setup the initial network namespace"); + init_net_initialized = true; + rtnl_lock(); list_add_tail_rcu(&init_net.list, &net_namespace_list); rtnl_unlock(); @@ -811,15 +815,24 @@ static void __unregister_pernet_operations(struct pernet_operations *ops) static int __register_pernet_operations(struct list_head *list, struct pernet_operations *ops) { + if (!init_net_initialized) { + list_add_tail(&ops->list, list); + return 0; + } + return ops_init(ops, &init_net); } static void __unregister_pernet_operations(struct pernet_operations *ops) { - LIST_HEAD(net_exit_list); - list_add(&init_net.exit_list, &net_exit_list); - ops_exit_list(ops, &net_exit_list); - ops_free_list(ops, &net_exit_list); + if (!init_net_initialized) { + list_del(&ops->list); + } else { + LIST_HEAD(net_exit_list); + list_add(&init_net.exit_list, &net_exit_list); + ops_exit_list(ops, &net_exit_list); + ops_free_list(ops, &net_exit_list); + } } #endif /* CONFIG_NET_NS */ diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 189cc78c77eb..318fc5231b2b 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -704,6 +704,8 @@ int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics) } else if (i == RTAX_FEATURES - 1) { u32 user_features = metrics[i] & RTAX_FEATURE_MASK; + if (!user_features) + continue; BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK); if (nla_put_u32(skb, i + 1, user_features)) goto nla_put_failure; diff --git a/net/core/sock.c b/net/core/sock.c index 25dab8b60223..51a730485649 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1315,24 +1315,6 @@ static void sock_copy(struct sock *nsk, const struct sock *osk) #endif } -void sk_prot_clear_portaddr_nulls(struct sock *sk, int size) -{ - unsigned long nulls1, nulls2; - - nulls1 = offsetof(struct sock, __sk_common.skc_node.next); - nulls2 = offsetof(struct sock, 
__sk_common.skc_portaddr_node.next); - if (nulls1 > nulls2) - swap(nulls1, nulls2); - - if (nulls1 != 0) - memset((char *)sk, 0, nulls1); - memset((char *)sk + nulls1 + sizeof(void *), 0, - nulls2 - nulls1 - sizeof(void *)); - memset((char *)sk + nulls2 + sizeof(void *), 0, - size - nulls2 - sizeof(void *)); -} -EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls); - static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority, int family) { @@ -1344,12 +1326,8 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority, sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO); if (!sk) return sk; - if (priority & __GFP_ZERO) { - if (prot->clear_sk) - prot->clear_sk(sk, prot->obj_size); - else - sk_prot_clear_nulls(sk, prot->obj_size); - } + if (priority & __GFP_ZERO) + sk_prot_clear_nulls(sk, prot->obj_size); } else sk = kmalloc(prot->obj_size, priority); diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 7e68bc6bc853..d8d267e9a872 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -61,27 +61,27 @@ const struct dsa_device_ops *dsa_device_ops[DSA_TAG_LAST] = { static DEFINE_MUTEX(dsa_switch_drivers_mutex); static LIST_HEAD(dsa_switch_drivers); -void register_switch_driver(struct dsa_switch_driver *drv) +void register_switch_driver(struct dsa_switch_ops *ops) { mutex_lock(&dsa_switch_drivers_mutex); - list_add_tail(&drv->list, &dsa_switch_drivers); + list_add_tail(&ops->list, &dsa_switch_drivers); mutex_unlock(&dsa_switch_drivers_mutex); } EXPORT_SYMBOL_GPL(register_switch_driver); -void unregister_switch_driver(struct dsa_switch_driver *drv) +void unregister_switch_driver(struct dsa_switch_ops *ops) { mutex_lock(&dsa_switch_drivers_mutex); - list_del_init(&drv->list); + list_del_init(&ops->list); mutex_unlock(&dsa_switch_drivers_mutex); } EXPORT_SYMBOL_GPL(unregister_switch_driver); -static struct dsa_switch_driver * +static struct dsa_switch_ops * dsa_switch_probe(struct device *parent, struct device *host_dev, int sw_addr, const char **_name, void **priv) { - struct dsa_switch_driver *ret; + struct dsa_switch_ops *ret; struct list_head *list; const char *name; @@ -90,13 +90,13 @@ dsa_switch_probe(struct device *parent, struct device *host_dev, int sw_addr, mutex_lock(&dsa_switch_drivers_mutex); list_for_each(list, &dsa_switch_drivers) { - struct dsa_switch_driver *drv; + struct dsa_switch_ops *ops; - drv = list_entry(list, struct dsa_switch_driver, list); + ops = list_entry(list, struct dsa_switch_ops, list); - name = drv->probe(parent, host_dev, sw_addr, priv); + name = ops->probe(parent, host_dev, sw_addr, priv); if (name != NULL) { - ret = drv; + ret = ops; break; } } @@ -117,7 +117,7 @@ static ssize_t temp1_input_show(struct device *dev, struct dsa_switch *ds = dev_get_drvdata(dev); int temp, ret; - ret = ds->drv->get_temp(ds, &temp); + ret = ds->ops->get_temp(ds, &temp); if (ret < 0) return ret; @@ -131,7 +131,7 @@ static ssize_t temp1_max_show(struct device *dev, struct dsa_switch *ds = dev_get_drvdata(dev); int temp, ret; - ret = ds->drv->get_temp_limit(ds, &temp); + ret = ds->ops->get_temp_limit(ds, &temp); if (ret < 0) return ret; @@ -149,7 +149,7 @@ static ssize_t temp1_max_store(struct device *dev, if (ret < 0) return ret; - ret = ds->drv->set_temp_limit(ds, DIV_ROUND_CLOSEST(temp, 1000)); + ret = ds->ops->set_temp_limit(ds, DIV_ROUND_CLOSEST(temp, 1000)); if (ret < 0) return ret; @@ -164,7 +164,7 @@ static ssize_t temp1_max_alarm_show(struct device *dev, bool alarm; int ret; - ret = ds->drv->get_temp_alarm(ds, &alarm); + ret = ds->ops->get_temp_alarm(ds, &alarm); if (ret < 
0) return ret; @@ -184,15 +184,15 @@ static umode_t dsa_hwmon_attrs_visible(struct kobject *kobj, { struct device *dev = container_of(kobj, struct device, kobj); struct dsa_switch *ds = dev_get_drvdata(dev); - struct dsa_switch_driver *drv = ds->drv; + struct dsa_switch_ops *ops = ds->ops; umode_t mode = attr->mode; if (index == 1) { - if (!drv->get_temp_limit) + if (!ops->get_temp_limit) mode = 0; - else if (!drv->set_temp_limit) + else if (!ops->set_temp_limit) mode &= ~S_IWUSR; - } else if (index == 2 && !drv->get_temp_alarm) { + } else if (index == 2 && !ops->get_temp_alarm) { mode = 0; } return mode; @@ -228,8 +228,8 @@ int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct device *dev, genphy_config_init(phydev); genphy_read_status(phydev); - if (ds->drv->adjust_link) - ds->drv->adjust_link(ds, port, phydev); + if (ds->ops->adjust_link) + ds->ops->adjust_link(ds, port, phydev); } return 0; @@ -303,7 +303,7 @@ void dsa_cpu_port_ethtool_restore(struct dsa_switch *ds) static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) { - struct dsa_switch_driver *drv = ds->drv; + struct dsa_switch_ops *ops = ds->ops; struct dsa_switch_tree *dst = ds->dst; struct dsa_chip_data *cd = ds->cd; bool valid_name_found = false; @@ -354,7 +354,10 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) * switch. */ if (dst->cpu_switch == index) { - dst->tag_ops = dsa_resolve_tag_protocol(drv->tag_protocol); + enum dsa_tag_protocol tag_protocol; + + tag_protocol = ops->get_tag_protocol(ds); + dst->tag_ops = dsa_resolve_tag_protocol(tag_protocol); if (IS_ERR(dst->tag_ops)) { ret = PTR_ERR(dst->tag_ops); goto out; @@ -368,15 +371,15 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) /* * Do basic register setup. */ - ret = drv->setup(ds); + ret = ops->setup(ds); if (ret < 0) goto out; - ret = drv->set_addr(ds, dst->master_netdev->dev_addr); + ret = ops->set_addr(ds, dst->master_netdev->dev_addr); if (ret < 0) goto out; - if (!ds->slave_mii_bus && drv->phy_read) { + if (!ds->slave_mii_bus && ops->phy_read) { ds->slave_mii_bus = devm_mdiobus_alloc(parent); if (!ds->slave_mii_bus) { ret = -ENOMEM; @@ -423,7 +426,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) * register with hardware monitoring subsystem. * Treat registration error as non-fatal and ignore it. */ - if (drv->get_temp) { + if (ops->get_temp) { const char *netname = netdev_name(dst->master_netdev); char hname[IFNAMSIZ + 1]; int i, j; @@ -454,7 +457,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, struct device *parent, struct device *host_dev) { struct dsa_chip_data *cd = dst->pd->chip + index; - struct dsa_switch_driver *drv; + struct dsa_switch_ops *ops; struct dsa_switch *ds; int ret; const char *name; @@ -463,8 +466,8 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, /* * Probe for switch model. 
*/ - drv = dsa_switch_probe(parent, host_dev, cd->sw_addr, &name, &priv); - if (drv == NULL) { + ops = dsa_switch_probe(parent, host_dev, cd->sw_addr, &name, &priv); + if (!ops) { netdev_err(dst->master_netdev, "[%d]: could not detect attached switch\n", index); return ERR_PTR(-EINVAL); @@ -483,7 +486,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, ds->dst = dst; ds->index = index; ds->cd = cd; - ds->drv = drv; + ds->ops = ops; ds->priv = priv; ds->dev = parent; @@ -538,12 +541,12 @@ static void dsa_switch_destroy(struct dsa_switch *ds) ds->dsa_port_mask |= ~(1 << port); } - if (ds->slave_mii_bus && ds->drv->phy_read) + if (ds->slave_mii_bus && ds->ops->phy_read) mdiobus_unregister(ds->slave_mii_bus); } #ifdef CONFIG_PM_SLEEP -static int dsa_switch_suspend(struct dsa_switch *ds) +int dsa_switch_suspend(struct dsa_switch *ds) { int i, ret = 0; @@ -557,18 +560,19 @@ static int dsa_switch_suspend(struct dsa_switch *ds) return ret; } - if (ds->drv->suspend) - ret = ds->drv->suspend(ds); + if (ds->ops->suspend) + ret = ds->ops->suspend(ds); return ret; } +EXPORT_SYMBOL_GPL(dsa_switch_suspend); -static int dsa_switch_resume(struct dsa_switch *ds) +int dsa_switch_resume(struct dsa_switch *ds) { int i, ret = 0; - if (ds->drv->resume) - ret = ds->drv->resume(ds); + if (ds->ops->resume) + ret = ds->ops->resume(ds); if (ret) return ret; @@ -585,6 +589,7 @@ static int dsa_switch_resume(struct dsa_switch *ds) return 0; } +EXPORT_SYMBOL_GPL(dsa_switch_resume); #endif /* platform driver init and cleanup *****************************************/ @@ -1086,7 +1091,6 @@ static int dsa_resume(struct device *d) static SIMPLE_DEV_PM_OPS(dsa_pm_ops, dsa_suspend, dsa_resume); static const struct of_device_id dsa_of_match_table[] = { - { .compatible = "brcm,bcm7445-switch-v4.0" }, { .compatible = "marvell,dsa", }, {} }; diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index f30bad9678f0..8278385dcd21 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -294,25 +294,25 @@ static int dsa_ds_apply(struct dsa_switch_tree *dst, struct dsa_switch *ds) int err; /* Initialize ds->phys_mii_mask before registering the slave MDIO bus - * driver and before drv->setup() has run, since the switch drivers and + * driver and before ops->setup() has run, since the switch drivers and * the slave MDIO bus driver rely on these values for probing PHY * devices or not */ ds->phys_mii_mask = ds->enabled_port_mask; - err = ds->drv->setup(ds); + err = ds->ops->setup(ds); if (err < 0) return err; - err = ds->drv->set_addr(ds, dst->master_netdev->dev_addr); + err = ds->ops->set_addr(ds, dst->master_netdev->dev_addr); if (err < 0) return err; - err = ds->drv->set_addr(ds, dst->master_netdev->dev_addr); + err = ds->ops->set_addr(ds, dst->master_netdev->dev_addr); if (err < 0) return err; - if (!ds->slave_mii_bus && ds->drv->phy_read) { + if (!ds->slave_mii_bus && ds->ops->phy_read) { ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev); if (!ds->slave_mii_bus) return -ENOMEM; @@ -374,7 +374,7 @@ static void dsa_ds_unapply(struct dsa_switch_tree *dst, struct dsa_switch *ds) dsa_user_port_unapply(port, index, ds); } - if (ds->slave_mii_bus && ds->drv->phy_read) + if (ds->slave_mii_bus && ds->ops->phy_read) mdiobus_unregister(ds->slave_mii_bus); } @@ -443,6 +443,7 @@ static int dsa_cpu_parse(struct device_node *port, u32 index, struct dsa_switch_tree *dst, struct dsa_switch *ds) { + enum dsa_tag_protocol tag_protocol; struct net_device *ethernet_dev; struct device_node *ethernet; @@ -465,7 +466,8 @@ static int dsa_cpu_parse(struct 
device_node *port, u32 index, dst->cpu_port = index; } - dst->tag_ops = dsa_resolve_tag_protocol(ds->drv->tag_protocol); + tag_protocol = ds->ops->get_tag_protocol(ds); + dst->tag_ops = dsa_resolve_tag_protocol(tag_protocol); if (IS_ERR(dst->tag_ops)) { dev_warn(ds->dev, "No tagger for this switch\n"); return PTR_ERR(dst->tag_ops); @@ -541,7 +543,7 @@ static int dsa_parse_ports_dn(struct device_node *ports, struct dsa_switch *ds) ds->ports[reg].dn = port; - /* Initialize enabled_port_mask now for drv->setup() + /* Initialize enabled_port_mask now for ops->setup() * to have access to a correct value, just like what * net/dsa/dsa.c::dsa_switch_setup_one does. */ diff --git a/net/dsa/slave.c b/net/dsa/slave.c index fc9196745225..9f6c2a20f6ff 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -28,7 +28,7 @@ static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg) struct dsa_switch *ds = bus->priv; if (ds->phys_mii_mask & (1 << addr)) - return ds->drv->phy_read(ds, addr, reg); + return ds->ops->phy_read(ds, addr, reg); return 0xffff; } @@ -38,7 +38,7 @@ static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val) struct dsa_switch *ds = bus->priv; if (ds->phys_mii_mask & (1 << addr)) - return ds->drv->phy_write(ds, addr, reg, val); + return ds->ops->phy_write(ds, addr, reg, val); return 0; } @@ -98,14 +98,14 @@ static int dsa_slave_open(struct net_device *dev) goto clear_allmulti; } - if (ds->drv->port_enable) { - err = ds->drv->port_enable(ds, p->port, p->phy); + if (ds->ops->port_enable) { + err = ds->ops->port_enable(ds, p->port, p->phy); if (err) goto clear_promisc; } - if (ds->drv->port_stp_state_set) - ds->drv->port_stp_state_set(ds, p->port, stp_state); + if (ds->ops->port_stp_state_set) + ds->ops->port_stp_state_set(ds, p->port, stp_state); if (p->phy) phy_start(p->phy); @@ -144,11 +144,11 @@ static int dsa_slave_close(struct net_device *dev) if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) dev_uc_del(master, dev->dev_addr); - if (ds->drv->port_disable) - ds->drv->port_disable(ds, p->port, p->phy); + if (ds->ops->port_disable) + ds->ops->port_disable(ds, p->port, p->phy); - if (ds->drv->port_stp_state_set) - ds->drv->port_stp_state_set(ds, p->port, BR_STATE_DISABLED); + if (ds->ops->port_stp_state_set) + ds->ops->port_stp_state_set(ds, p->port, BR_STATE_DISABLED); return 0; } @@ -209,13 +209,13 @@ static int dsa_slave_port_vlan_add(struct net_device *dev, struct dsa_switch *ds = p->parent; if (switchdev_trans_ph_prepare(trans)) { - if (!ds->drv->port_vlan_prepare || !ds->drv->port_vlan_add) + if (!ds->ops->port_vlan_prepare || !ds->ops->port_vlan_add) return -EOPNOTSUPP; - return ds->drv->port_vlan_prepare(ds, p->port, vlan, trans); + return ds->ops->port_vlan_prepare(ds, p->port, vlan, trans); } - ds->drv->port_vlan_add(ds, p->port, vlan, trans); + ds->ops->port_vlan_add(ds, p->port, vlan, trans); return 0; } @@ -226,10 +226,10 @@ static int dsa_slave_port_vlan_del(struct net_device *dev, struct dsa_slave_priv *p = netdev_priv(dev); struct dsa_switch *ds = p->parent; - if (!ds->drv->port_vlan_del) + if (!ds->ops->port_vlan_del) return -EOPNOTSUPP; - return ds->drv->port_vlan_del(ds, p->port, vlan); + return ds->ops->port_vlan_del(ds, p->port, vlan); } static int dsa_slave_port_vlan_dump(struct net_device *dev, @@ -239,8 +239,8 @@ static int dsa_slave_port_vlan_dump(struct net_device *dev, struct dsa_slave_priv *p = netdev_priv(dev); struct dsa_switch *ds = p->parent; - if (ds->drv->port_vlan_dump) - return ds->drv->port_vlan_dump(ds, p->port, 
vlan, cb); + if (ds->ops->port_vlan_dump) + return ds->ops->port_vlan_dump(ds, p->port, vlan, cb); return -EOPNOTSUPP; } @@ -253,13 +253,13 @@ static int dsa_slave_port_fdb_add(struct net_device *dev, struct dsa_switch *ds = p->parent; if (switchdev_trans_ph_prepare(trans)) { - if (!ds->drv->port_fdb_prepare || !ds->drv->port_fdb_add) + if (!ds->ops->port_fdb_prepare || !ds->ops->port_fdb_add) return -EOPNOTSUPP; - return ds->drv->port_fdb_prepare(ds, p->port, fdb, trans); + return ds->ops->port_fdb_prepare(ds, p->port, fdb, trans); } - ds->drv->port_fdb_add(ds, p->port, fdb, trans); + ds->ops->port_fdb_add(ds, p->port, fdb, trans); return 0; } @@ -271,8 +271,8 @@ static int dsa_slave_port_fdb_del(struct net_device *dev, struct dsa_switch *ds = p->parent; int ret = -EOPNOTSUPP; - if (ds->drv->port_fdb_del) - ret = ds->drv->port_fdb_del(ds, p->port, fdb); + if (ds->ops->port_fdb_del) + ret = ds->ops->port_fdb_del(ds, p->port, fdb); return ret; } @@ -284,8 +284,8 @@ static int dsa_slave_port_fdb_dump(struct net_device *dev, struct dsa_slave_priv *p = netdev_priv(dev); struct dsa_switch *ds = p->parent; - if (ds->drv->port_fdb_dump) - return ds->drv->port_fdb_dump(ds, p->port, fdb, cb); + if (ds->ops->port_fdb_dump) + return ds->ops->port_fdb_dump(ds, p->port, fdb, cb); return -EOPNOTSUPP; } @@ -308,9 +308,9 @@ static int dsa_slave_stp_state_set(struct net_device *dev, struct dsa_switch *ds = p->parent; if (switchdev_trans_ph_prepare(trans)) - return ds->drv->port_stp_state_set ? 0 : -EOPNOTSUPP; + return ds->ops->port_stp_state_set ? 0 : -EOPNOTSUPP; - ds->drv->port_stp_state_set(ds, p->port, attr->u.stp_state); + ds->ops->port_stp_state_set(ds, p->port, attr->u.stp_state); return 0; } @@ -326,8 +326,8 @@ static int dsa_slave_vlan_filtering(struct net_device *dev, if (switchdev_trans_ph_prepare(trans)) return 0; - if (ds->drv->port_vlan_filtering) - return ds->drv->port_vlan_filtering(ds, p->port, + if (ds->ops->port_vlan_filtering) + return ds->ops->port_vlan_filtering(ds, p->port, attr->u.vlan_filtering); return 0; @@ -365,8 +365,8 @@ static int dsa_slave_ageing_time(struct net_device *dev, ds->ports[p->port].ageing_time = ageing_time; ageing_time = dsa_fastest_ageing_time(ds, ageing_time); - if (ds->drv->set_ageing_time) - return ds->drv->set_ageing_time(ds, ageing_time); + if (ds->ops->set_ageing_time) + return ds->ops->set_ageing_time(ds, ageing_time); return 0; } @@ -481,8 +481,8 @@ static int dsa_slave_bridge_port_join(struct net_device *dev, p->bridge_dev = br; - if (ds->drv->port_bridge_join) - ret = ds->drv->port_bridge_join(ds, p->port, br); + if (ds->ops->port_bridge_join) + ret = ds->ops->port_bridge_join(ds, p->port, br); return ret == -EOPNOTSUPP ? 
0 : ret; } @@ -493,16 +493,16 @@ static void dsa_slave_bridge_port_leave(struct net_device *dev) struct dsa_switch *ds = p->parent; - if (ds->drv->port_bridge_leave) - ds->drv->port_bridge_leave(ds, p->port); + if (ds->ops->port_bridge_leave) + ds->ops->port_bridge_leave(ds, p->port); p->bridge_dev = NULL; /* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer, * so allow it to be in BR_STATE_FORWARDING to be kept functional */ - if (ds->drv->port_stp_state_set) - ds->drv->port_stp_state_set(ds, p->port, BR_STATE_FORWARDING); + if (ds->ops->port_stp_state_set) + ds->ops->port_stp_state_set(ds, p->port, BR_STATE_FORWARDING); } static int dsa_slave_port_attr_get(struct net_device *dev, @@ -605,8 +605,8 @@ static int dsa_slave_get_regs_len(struct net_device *dev) struct dsa_slave_priv *p = netdev_priv(dev); struct dsa_switch *ds = p->parent; - if (ds->drv->get_regs_len) - return ds->drv->get_regs_len(ds, p->port); + if (ds->ops->get_regs_len) + return ds->ops->get_regs_len(ds, p->port); return -EOPNOTSUPP; } @@ -617,8 +617,8 @@ dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p) struct dsa_slave_priv *p = netdev_priv(dev); struct dsa_switch *ds = p->parent; - if (ds->drv->get_regs) - ds->drv->get_regs(ds, p->port, regs, _p); + if (ds->ops->get_regs) + ds->ops->get_regs(ds, p->port, regs, _p); } static int dsa_slave_nway_reset(struct net_device *dev) @@ -651,8 +651,8 @@ static int dsa_slave_get_eeprom_len(struct net_device *dev) if (ds->cd && ds->cd->eeprom_len) return ds->cd->eeprom_len; - if (ds->drv->get_eeprom_len) - return ds->drv->get_eeprom_len(ds); + if (ds->ops->get_eeprom_len) + return ds->ops->get_eeprom_len(ds); return 0; } @@ -663,8 +663,8 @@ static int dsa_slave_get_eeprom(struct net_device *dev, struct dsa_slave_priv *p = netdev_priv(dev); struct dsa_switch *ds = p->parent; - if (ds->drv->get_eeprom) - return ds->drv->get_eeprom(ds, eeprom, data); + if (ds->ops->get_eeprom) + return ds->ops->get_eeprom(ds, eeprom, data); return -EOPNOTSUPP; } @@ -675,8 +675,8 @@ static int dsa_slave_set_eeprom(struct net_device *dev, struct dsa_slave_priv *p = netdev_priv(dev); struct dsa_switch *ds = p->parent; - if (ds->drv->set_eeprom) - return ds->drv->set_eeprom(ds, eeprom, data); + if (ds->ops->set_eeprom) + return ds->ops->set_eeprom(ds, eeprom, data); return -EOPNOTSUPP; } @@ -694,8 +694,8 @@ static void dsa_slave_get_strings(struct net_device *dev, strncpy(data + len, "tx_bytes", len); strncpy(data + 2 * len, "rx_packets", len); strncpy(data + 3 * len, "rx_bytes", len); - if (ds->drv->get_strings != NULL) - ds->drv->get_strings(ds, p->port, data + 4 * len); + if (ds->ops->get_strings) + ds->ops->get_strings(ds, p->port, data + 4 * len); } } @@ -714,8 +714,8 @@ static void dsa_cpu_port_get_ethtool_stats(struct net_device *dev, dst->master_ethtool_ops.get_ethtool_stats(dev, stats, data); } - if (ds->drv->get_ethtool_stats) - ds->drv->get_ethtool_stats(ds, cpu_port, data + count); + if (ds->ops->get_ethtool_stats) + ds->ops->get_ethtool_stats(ds, cpu_port, data + count); } static int dsa_cpu_port_get_sset_count(struct net_device *dev, int sset) @@ -727,8 +727,8 @@ static int dsa_cpu_port_get_sset_count(struct net_device *dev, int sset) if (dst->master_ethtool_ops.get_sset_count) count += dst->master_ethtool_ops.get_sset_count(dev, sset); - if (sset == ETH_SS_STATS && ds->drv->get_sset_count) - count += ds->drv->get_sset_count(ds); + if (sset == ETH_SS_STATS && ds->ops->get_sset_count) + count += ds->ops->get_sset_count(ds); return count; } @@ 
-755,14 +755,14 @@ static void dsa_cpu_port_get_strings(struct net_device *dev, dst->master_ethtool_ops.get_strings(dev, stringset, data); } - if (stringset == ETH_SS_STATS && ds->drv->get_strings) { + if (stringset == ETH_SS_STATS && ds->ops->get_strings) { ndata = data + mcount * len; /* This function copies ETH_GSTRINGS_LEN bytes, we will mangle * the output after to prepend our CPU port prefix we * constructed earlier */ - ds->drv->get_strings(ds, cpu_port, ndata); - count = ds->drv->get_sset_count(ds); + ds->ops->get_strings(ds, cpu_port, ndata); + count = ds->ops->get_sset_count(ds); for (i = 0; i < count; i++) { memmove(ndata + (i * len + sizeof(pfx)), ndata + i * len, len - sizeof(pfx)); @@ -782,8 +782,8 @@ static void dsa_slave_get_ethtool_stats(struct net_device *dev, data[1] = dev->stats.tx_bytes; data[2] = dev->stats.rx_packets; data[3] = dev->stats.rx_bytes; - if (ds->drv->get_ethtool_stats != NULL) - ds->drv->get_ethtool_stats(ds, p->port, data + 4); + if (ds->ops->get_ethtool_stats) + ds->ops->get_ethtool_stats(ds, p->port, data + 4); } static int dsa_slave_get_sset_count(struct net_device *dev, int sset) @@ -795,8 +795,8 @@ static int dsa_slave_get_sset_count(struct net_device *dev, int sset) int count; count = 4; - if (ds->drv->get_sset_count != NULL) - count += ds->drv->get_sset_count(ds); + if (ds->ops->get_sset_count) + count += ds->ops->get_sset_count(ds); return count; } @@ -809,8 +809,8 @@ static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w) struct dsa_slave_priv *p = netdev_priv(dev); struct dsa_switch *ds = p->parent; - if (ds->drv->get_wol) - ds->drv->get_wol(ds, p->port, w); + if (ds->ops->get_wol) + ds->ops->get_wol(ds, p->port, w); } static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w) @@ -819,8 +819,8 @@ static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w) struct dsa_switch *ds = p->parent; int ret = -EOPNOTSUPP; - if (ds->drv->set_wol) - ret = ds->drv->set_wol(ds, p->port, w); + if (ds->ops->set_wol) + ret = ds->ops->set_wol(ds, p->port, w); return ret; } @@ -831,10 +831,10 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e) struct dsa_switch *ds = p->parent; int ret; - if (!ds->drv->set_eee) + if (!ds->ops->set_eee) return -EOPNOTSUPP; - ret = ds->drv->set_eee(ds, p->port, p->phy, e); + ret = ds->ops->set_eee(ds, p->port, p->phy, e); if (ret) return ret; @@ -850,10 +850,10 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e) struct dsa_switch *ds = p->parent; int ret; - if (!ds->drv->get_eee) + if (!ds->ops->get_eee) return -EOPNOTSUPP; - ret = ds->drv->get_eee(ds, p->port, e); + ret = ds->ops->get_eee(ds, p->port, e); if (ret) return ret; @@ -988,8 +988,8 @@ static void dsa_slave_adjust_link(struct net_device *dev) p->old_pause = p->phy->pause; } - if (ds->drv->adjust_link && status_changed) - ds->drv->adjust_link(ds, p->port, p->phy); + if (ds->ops->adjust_link && status_changed) + ds->ops->adjust_link(ds, p->port, p->phy); if (status_changed) phy_print_status(p->phy); @@ -1004,8 +1004,8 @@ static int dsa_slave_fixed_link_update(struct net_device *dev, if (dev) { p = netdev_priv(dev); ds = p->parent; - if (ds->drv->fixed_link_update) - ds->drv->fixed_link_update(ds, p->port, status); + if (ds->ops->fixed_link_update) + ds->ops->fixed_link_update(ds, p->port, status); } return 0; @@ -1062,8 +1062,8 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p, phy_dn = port_dn; } - if (ds->drv->get_phy_flags) - phy_flags = 
ds->drv->get_phy_flags(ds, p->port); + if (ds->ops->get_phy_flags) + phy_flags = ds->ops->get_phy_flags(ds, p->port); if (phy_dn) { int phy_id = of_mdio_parse_addr(&slave_dev->dev, phy_dn); diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 55513e654d79..e94b47be0019 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -211,24 +211,19 @@ int inet_listen(struct socket *sock, int backlog) * we can only allow the backlog to be adjusted. */ if (old_state != TCP_LISTEN) { - /* Check special setups for testing purpose to enable TFO w/o - * requiring TCP_FASTOPEN sockopt. + /* Enable TFO w/o requiring TCP_FASTOPEN socket option. * Note that only TCP sockets (SOCK_STREAM) will reach here. - * Also fastopenq may already been allocated because this - * socket was in TCP_LISTEN state previously but was - * shutdown() (rather than close()). + * Also fastopen backlog may already been set via the option + * because the socket was in TCP_LISTEN state previously but + * was shutdown() rather than close(). */ - if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) != 0 && + if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) && + (sysctl_tcp_fastopen & TFO_SERVER_ENABLE) && !inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) { - if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) != 0) - fastopen_queue_tune(sk, backlog); - else if ((sysctl_tcp_fastopen & - TFO_SERVER_WO_SOCKOPT2) != 0) - fastopen_queue_tune(sk, - ((uint)sysctl_tcp_fastopen) >> 16); - + fastopen_queue_tune(sk, backlog); tcp_fastopen_init_key_once(true); } + err = inet_csk_listen_start(sk, backlog); if (err) goto out; @@ -921,6 +916,8 @@ const struct proto_ops inet_stream_ops = { .mmap = sock_no_mmap, .sendpage = inet_sendpage, .splice_read = tcp_splice_read, + .read_sock = tcp_read_sock, + .peek_len = tcp_peek_len, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_sock_common_setsockopt, .compat_getsockopt = compat_sock_common_getsockopt, diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index ef2ebeb89d0f..317c31939732 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -93,9 +93,6 @@ struct fib_table *fib_new_table(struct net *net, u32 id) return NULL; switch (id) { - case RT_TABLE_LOCAL: - rcu_assign_pointer(net->ipv4.fib_local, tb); - break; case RT_TABLE_MAIN: rcu_assign_pointer(net->ipv4.fib_main, tb); break; @@ -137,9 +134,6 @@ static void fib_replace_table(struct net *net, struct fib_table *old, { #ifdef CONFIG_IP_MULTIPLE_TABLES switch (new->tb_id) { - case RT_TABLE_LOCAL: - rcu_assign_pointer(net->ipv4.fib_local, new); - break; case RT_TABLE_MAIN: rcu_assign_pointer(net->ipv4.fib_main, new); break; @@ -1249,7 +1243,6 @@ static void ip_fib_net_exit(struct net *net) rtnl_lock(); #ifdef CONFIG_IP_MULTIPLE_TABLES - RCU_INIT_POINTER(net->ipv4.fib_local, NULL); RCU_INIT_POINTER(net->ipv4.fib_main, NULL); RCU_INIT_POINTER(net->ipv4.fib_default, NULL); #endif diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 539fa264e67d..8066ccc48a17 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -1576,7 +1576,8 @@ static bool fib_good_nh(const struct fib_nh *nh) rcu_read_lock_bh(); - n = __ipv4_neigh_lookup_noref(nh->nh_dev, nh->nh_gw); + n = __ipv4_neigh_lookup_noref(nh->nh_dev, + (__force u32)nh->nh_gw); if (n) state = n->nud_state; diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 9b4ca87f70ba..606cc3e85d2b 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -472,6 +472,15 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc, continue; } + 
/* Based on RFC3376 5.1. Should not send source-list change + * records when there is a filter mode change. + */ + if (((gdeleted && pmc->sfmode == MCAST_EXCLUDE) || + (!gdeleted && pmc->crcount)) && + (type == IGMPV3_ALLOW_NEW_SOURCES || + type == IGMPV3_BLOCK_OLD_SOURCES) && psf->sf_crcount) + goto decrease_sf_crcount; + /* clear marks on query responses */ if (isquery) psf->sf_gsresp = 0; @@ -499,6 +508,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc, scount++; stotal++; if ((type == IGMPV3_ALLOW_NEW_SOURCES || type == IGMPV3_BLOCK_OLD_SOURCES) && psf->sf_crcount) { +decrease_sf_crcount: psf->sf_crcount--; if ((sdeleted || gdeleted) && psf->sf_crcount == 0) { if (psf_prev) diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 38c2c47fe0e8..abfbe492ebfe 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -45,6 +45,7 @@ struct inet_diag_entry { u16 family; u16 userlocks; u32 ifindex; + u32 mark; }; static DEFINE_MUTEX(inet_diag_table_mutex); @@ -580,6 +581,14 @@ static int inet_diag_bc_run(const struct nlattr *_bc, yes = 0; break; } + case INET_DIAG_BC_MARK_COND: { + struct inet_diag_markcond *cond; + + cond = (struct inet_diag_markcond *)(op + 1); + if ((entry->mark & cond->mask) != cond->mark) + yes = 0; + break; + } } if (yes) { @@ -624,6 +633,12 @@ int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk) entry.dport = ntohs(inet->inet_dport); entry.ifindex = sk->sk_bound_dev_if; entry.userlocks = sk_fullsock(sk) ? sk->sk_userlocks : 0; + if (sk_fullsock(sk)) + entry.mark = sk->sk_mark; + else if (sk->sk_state == TCP_NEW_SYN_RECV) + entry.mark = inet_rsk(inet_reqsk(sk))->ir_mark; + else + entry.mark = 0; return inet_diag_bc_run(bc, &entry); } @@ -706,10 +721,25 @@ static bool valid_port_comparison(const struct inet_diag_bc_op *op, return true; } -static int inet_diag_bc_audit(const void *bytecode, int bytecode_len) +static bool valid_markcond(const struct inet_diag_bc_op *op, int len, + int *min_len) +{ + *min_len += sizeof(struct inet_diag_markcond); + return len >= *min_len; +} + +static int inet_diag_bc_audit(const struct nlattr *attr, + const struct sk_buff *skb) { - const void *bc = bytecode; - int len = bytecode_len; + bool net_admin = netlink_net_capable(skb, CAP_NET_ADMIN); + const void *bytecode, *bc; + int bytecode_len, len; + + if (!attr || nla_len(attr) < sizeof(struct inet_diag_bc_op)) + return -EINVAL; + + bytecode = bc = nla_data(attr); + len = bytecode_len = nla_len(attr); while (len > 0) { int min_len = sizeof(struct inet_diag_bc_op); @@ -732,6 +762,12 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len) if (!valid_port_comparison(bc, len, &min_len)) return -EINVAL; break; + case INET_DIAG_BC_MARK_COND: + if (!net_admin) + return -EPERM; + if (!valid_markcond(bc, len, &min_len)) + return -EINVAL; + break; case INET_DIAG_BC_AUTO: case INET_DIAG_BC_JMP: case INET_DIAG_BC_NOP: @@ -1020,13 +1056,13 @@ static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh) if (nlh->nlmsg_flags & NLM_F_DUMP) { if (nlmsg_attrlen(nlh, hdrlen)) { struct nlattr *attr; + int err; attr = nlmsg_find_attr(nlh, hdrlen, INET_DIAG_REQ_BYTECODE); - if (!attr || - nla_len(attr) < sizeof(struct inet_diag_bc_op) || - inet_diag_bc_audit(nla_data(attr), nla_len(attr))) - return -EINVAL; + err = inet_diag_bc_audit(attr, skb); + if (err) + return err; } { struct netlink_dump_control c = { @@ -1051,13 +1087,13 @@ static int inet_diag_handler_cmd(struct sk_buff *skb, struct nlmsghdr *h) h->nlmsg_flags & 
NLM_F_DUMP) { if (nlmsg_attrlen(h, hdrlen)) { struct nlattr *attr; + int err; attr = nlmsg_find_attr(h, hdrlen, INET_DIAG_REQ_BYTECODE); - if (!attr || - nla_len(attr) < sizeof(struct inet_diag_bc_op) || - inet_diag_bc_audit(nla_data(attr), nla_len(attr))) - return -EINVAL; + err = inet_diag_bc_audit(attr, skb); + if (err) + return err; } { struct netlink_dump_control c = { diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 1d71c40eaaf3..071a785c65eb 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -85,7 +85,6 @@ /* Define the timeout for waiting for a DHCP/BOOTP/RARP reply */ #define CONF_OPEN_RETRIES 2 /* (Re)open devices twice */ #define CONF_SEND_RETRIES 6 /* Send six requests per open */ -#define CONF_INTER_TIMEOUT (HZ) /* Inter-device timeout: 1 second */ #define CONF_BASE_TIMEOUT (HZ*2) /* Initial timeout: 2 seconds */ #define CONF_TIMEOUT_RANDOM (HZ) /* Maximum amount of randomization */ #define CONF_TIMEOUT_MULT *7/4 /* Rate of timeout growth */ @@ -188,7 +187,7 @@ struct ic_device { }; static struct ic_device *ic_first_dev __initdata; /* List of open device */ -static struct net_device *ic_dev __initdata; /* Selected device */ +static struct ic_device *ic_dev __initdata; /* Selected device */ static bool __init ic_is_init_dev(struct net_device *dev) { @@ -307,7 +306,7 @@ static void __init ic_close_devs(void) while ((d = next)) { next = d->next; dev = d->dev; - if (dev != ic_dev && !netdev_uses_dsa(dev)) { + if ((!ic_dev || dev != ic_dev->dev) && !netdev_uses_dsa(dev)) { pr_debug("IP-Config: Downing %s\n", dev->name); dev_change_flags(dev, d->flags); } @@ -372,7 +371,7 @@ static int __init ic_setup_if(void) int err; memset(&ir, 0, sizeof(ir)); - strcpy(ir.ifr_ifrn.ifrn_name, ic_dev->name); + strcpy(ir.ifr_ifrn.ifrn_name, ic_dev->dev->name); set_sockaddr(sin, ic_myaddr, 0); if ((err = ic_devinet_ioctl(SIOCSIFADDR, &ir)) < 0) { pr_err("IP-Config: Unable to set interface address (%d)\n", @@ -396,7 +395,7 @@ static int __init ic_setup_if(void) * out, we'll try to muddle along. */ if (ic_dev_mtu != 0) { - strcpy(ir.ifr_name, ic_dev->name); + strcpy(ir.ifr_name, ic_dev->dev->name); ir.ifr_mtu = ic_dev_mtu; if ((err = ic_dev_ioctl(SIOCSIFMTU, &ir)) < 0) pr_err("IP-Config: Unable to set interface mtu to %d (%d)\n", @@ -568,7 +567,7 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt goto drop_unlock; /* We have a winner! */ - ic_dev = dev; + ic_dev = d; if (ic_myaddr == NONE) ic_myaddr = tip; ic_servaddr = sip; @@ -655,8 +654,6 @@ static struct packet_type bootp_packet_type __initdata = { .func = ic_bootp_recv, }; -static __be32 ic_dev_xid; /* Device under configuration */ - /* * Initialize DHCP/BOOTP extension fields in the request. */ @@ -666,14 +663,14 @@ static const u8 ic_bootp_cookie[4] = { 99, 130, 83, 99 }; #ifdef IPCONFIG_DHCP static void __init -ic_dhcp_init_options(u8 *options) +ic_dhcp_init_options(u8 *options, struct ic_device *d) { u8 mt = ((ic_servaddr == NONE) ? 
DHCPDISCOVER : DHCPREQUEST); u8 *e = options; int len; - pr_debug("DHCP: Sending message type %d\n", mt); + pr_debug("DHCP: Sending message type %d (%s)\n", mt, d->dev->name); memcpy(e, ic_bootp_cookie, 4); /* RFC1048 Magic Cookie */ e += 4; @@ -857,7 +854,7 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d /* add DHCP options or BOOTP extensions */ #ifdef IPCONFIG_DHCP if (ic_proto_enabled & IC_USE_DHCP) - ic_dhcp_init_options(b->exten); + ic_dhcp_init_options(b->exten, d); else #endif ic_bootp_init_ext(b->exten); @@ -1033,14 +1030,8 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str /* Is it a reply to our BOOTP request? */ if (b->op != BOOTP_REPLY || b->xid != d->xid) { - net_err_ratelimited("DHCP/BOOTP: Reply not for us, op[%x] xid[%x]\n", - b->op, b->xid); - goto drop_unlock; - } - - /* Is it a reply for the device we are configuring? */ - if (b->xid != ic_dev_xid) { - net_err_ratelimited("DHCP/BOOTP: Ignoring delayed packet\n"); + net_err_ratelimited("DHCP/BOOTP: Reply not for us on %s, op[%x] xid[%x]\n", + d->dev->name, b->op, b->xid); goto drop_unlock; } @@ -1075,7 +1066,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str } } - pr_debug("DHCP: Got message type %d\n", mt); + pr_debug("DHCP: Got message type %d (%s)\n", mt, d->dev->name); switch (mt) { case DHCPOFFER: @@ -1130,7 +1121,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str } /* We have a winner! */ - ic_dev = dev; + ic_dev = d; ic_myaddr = b->your_ip; ic_servaddr = b->server_ip; ic_addrservaddr = b->iph.saddr; @@ -1225,9 +1216,6 @@ static int __init ic_dynamic(void) timeout = CONF_BASE_TIMEOUT + (timeout % (unsigned int) CONF_TIMEOUT_RANDOM); for (;;) { #ifdef IPCONFIG_BOOTP - /* Track the device we are configuring */ - ic_dev_xid = d->xid; - if (do_bootp && (d->able & IC_BOOTP)) ic_bootp_send_if(d, jiffies - start_jiffies); #endif @@ -1236,15 +1224,19 @@ static int __init ic_dynamic(void) ic_rarp_send_if(d); #endif - jiff = jiffies + (d->next ? CONF_INTER_TIMEOUT : timeout); - while (time_before(jiffies, jiff) && !ic_got_reply) - schedule_timeout_uninterruptible(1); + if (!d->next) { + jiff = jiffies + timeout; + while (time_before(jiffies, jiff) && !ic_got_reply) + schedule_timeout_uninterruptible(1); + } #ifdef IPCONFIG_DHCP /* DHCP isn't done until we get a DHCPACK. */ if ((ic_got_reply & IC_BOOTP) && (ic_proto_enabled & IC_USE_DHCP) && ic_dhcp_msgtype != DHCPACK) { ic_got_reply = 0; + /* continue on device that got the reply */ + d = ic_dev; pr_cont(","); continue; } @@ -1487,7 +1479,7 @@ static int __init ip_auto_config(void) #endif /* IPCONFIG_DYNAMIC */ } else { /* Device selected manually or only one device -> use it */ - ic_dev = ic_first_dev->dev; + ic_dev = ic_first_dev; } addr = root_nfs_parse_addr(root_server_path); @@ -1501,14 +1493,6 @@ static int __init ip_auto_config(void) return -1; /* - * Close all network devices except the device we've - * autoconfigured and set up routes. - */ - ic_close_devs(); - if (ic_setup_if() < 0 || ic_setup_routes() < 0) - return -1; - - /* * Record which protocol was actually used. 
*/ #ifdef IPCONFIG_DYNAMIC @@ -1522,7 +1506,7 @@ static int __init ip_auto_config(void) pr_info("IP-Config: Complete:\n"); pr_info(" device=%s, hwaddr=%*phC, ipaddr=%pI4, mask=%pI4, gw=%pI4\n", - ic_dev->name, ic_dev->addr_len, ic_dev->dev_addr, + ic_dev->dev->name, ic_dev->dev->addr_len, ic_dev->dev->dev_addr, &ic_myaddr, &ic_netmask, &ic_gateway); pr_info(" host=%s, domain=%s, nis-domain=%s\n", utsname()->nodename, ic_domain, utsname()->domainname); @@ -1542,7 +1526,18 @@ static int __init ip_auto_config(void) pr_cont("\n"); #endif /* !SILENT */ - return 0; + /* + * Close all network devices except the device we've + * autoconfigured and set up routes. + */ + if (ic_setup_if() < 0 || ic_setup_routes() < 0) + err = -1; + else + err = 0; + + ic_close_devs(); + + return err; } late_initcall(ip_auto_config); diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 9f665b63a927..1ed015e4bc79 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -257,6 +257,7 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPSpuriousRTOs", LINUX_MIB_TCPSPURIOUSRTOS), SNMP_MIB_ITEM("TCPMD5NotFound", LINUX_MIB_TCPMD5NOTFOUND), SNMP_MIB_ITEM("TCPMD5Unexpected", LINUX_MIB_TCPMD5UNEXPECTED), + SNMP_MIB_ITEM("TCPMD5Failure", LINUX_MIB_TCPMD5FAILURE), SNMP_MIB_ITEM("TCPSackShifted", LINUX_MIB_SACKSHIFTED), SNMP_MIB_ITEM("TCPSackMerged", LINUX_MIB_SACKMERGED), SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK), diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index ffbb218de520..77311a92275c 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1570,6 +1570,12 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, } EXPORT_SYMBOL(tcp_read_sock); +int tcp_peek_len(struct socket *sock) +{ + return tcp_inq(sock->sk); +} +EXPORT_SYMBOL(tcp_peek_len); + /* * This routine copies from a sock struct into the user buffer. * @@ -3092,23 +3098,6 @@ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) } EXPORT_SYMBOL(tcp_get_md5sig_pool); -int tcp_md5_hash_header(struct tcp_md5sig_pool *hp, - const struct tcphdr *th) -{ - struct scatterlist sg; - struct tcphdr hdr; - - /* We are not allowed to change tcphdr, make a local copy */ - memcpy(&hdr, th, sizeof(hdr)); - hdr.check = 0; - - /* options aren't included in the hash */ - sg_init_one(&sg, &hdr, sizeof(hdr)); - ahash_request_set_crypt(hp->md5_req, &sg, NULL, sizeof(hdr)); - return crypto_ahash_update(hp->md5_req); -} -EXPORT_SYMBOL(tcp_md5_hash_header); - int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp, const struct sk_buff *skb, unsigned int header_len) { diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 3ebf45b38bc3..8cd02c0b056c 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4392,12 +4392,9 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb, if (tcp_prune_queue(sk) < 0) return -1; - if (!sk_rmem_schedule(sk, skb, size)) { + while (!sk_rmem_schedule(sk, skb, size)) { if (!tcp_prune_ofo_queue(sk)) return -1; - - if (!sk_rmem_schedule(sk, skb, size)) - return -1; } } return 0; @@ -4874,29 +4871,41 @@ static void tcp_collapse_ofo_queue(struct sock *sk) } /* - * Purge the out-of-order queue. - * Return true if queue was pruned. + * Clean the out-of-order queue to make room. + * We drop high sequences packets to : + * 1) Let a chance for holes to be filled. + * 2) not add too big latencies if thousands of packets sit there. + * (But if application shrinks SO_RCVBUF, we could still end up + * freeing whole queue here) + * + * Return true if queue has shrunk. 
 */
 static bool tcp_prune_ofo_queue(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	bool res = false;
+	struct sk_buff *skb;
-	if (!skb_queue_empty(&tp->out_of_order_queue)) {
-		NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
-		__skb_queue_purge(&tp->out_of_order_queue);
+	if (skb_queue_empty(&tp->out_of_order_queue))
+		return false;
-		/* Reset SACK state. A conforming SACK implementation will
-		 * do the same at a timeout based retransmit. When a connection
-		 * is in a sad state like this, we care only about integrity
-		 * of the connection not performance.
-		 */
-		if (tp->rx_opt.sack_ok)
-			tcp_sack_reset(&tp->rx_opt);
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
+
+	while ((skb = __skb_dequeue_tail(&tp->out_of_order_queue)) != NULL) {
+		tcp_drop(sk, skb);
 		sk_mem_reclaim(sk);
-		res = true;
+		if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
+		    !tcp_under_memory_pressure(sk))
+			break;
 	}
-	return res;
+
+	/* Reset SACK state. A conforming SACK implementation will
+	 * do the same at a timeout based retransmit. When a connection
+	 * is in a sad state like this, we care only about integrity
+	 * of the connection not performance.
+	 */
+	if (tp->rx_opt.sack_ok)
+		tcp_sack_reset(&tp->rx_opt);
+	return true;
 }
 /* Reduce allocated memory if we can, trying to get
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 7158d4f8dae4..a75bf48d7950 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1175,6 +1175,7 @@ static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
 				      NULL, skb);
 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
 		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
 				     &iph->saddr, ntohs(th->source),
 				     &iph->daddr, ntohs(th->dest),
@@ -1537,6 +1538,34 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(tcp_prequeue);
+bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
+{
+	u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
+
+	/* Only socket owner can try to collapse/prune rx queues
+	 * to reduce memory overhead, so add a little headroom here.
+	 * Few sockets backlog are possibly concurrently non empty.
+	 */
+	limit += 64*1024;
+
+	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
+	 * we can fix skb->truesize to its real value to avoid future drops.
+	 * This is valid because skb is not yet charged to the socket.
+	 * It has been noticed pure SACK packets were sometimes dropped
+	 * (if cooked by drivers without copybreak feature).
+ */ + if (!skb->data_len) + skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); + + if (unlikely(sk_add_backlog(sk, skb, limit))) { + bh_unlock_sock(sk); + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP); + return true; + } + return false; +} +EXPORT_SYMBOL(tcp_add_backlog); + /* * From tcp_input.c */ @@ -1608,6 +1637,7 @@ process: sk = req->rsk_listener; if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) { + sk_drops_add(sk, skb); reqsk_put(req); goto discard_it; } @@ -1666,10 +1696,7 @@ process: if (!sock_owned_by_user(sk)) { if (!tcp_prequeue(sk, skb)) ret = tcp_v4_do_rcv(sk, skb); - } else if (unlikely(sk_add_backlog(sk, skb, - sk->sk_rcvbuf + sk->sk_sndbuf))) { - bh_unlock_sock(sk); - __NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP); + } else if (tcp_add_backlog(sk, skb)) { goto discard_and_relse; } bh_unlock_sock(sk); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index bdaef7fd6e47..8b45794eb6b2 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2776,7 +2776,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk) max_segs = tcp_tso_autosize(sk, tcp_current_mss(sk)); tcp_for_write_queue_from(skb, sk) { - __u8 sacked = TCP_SKB_CB(skb)->sacked; + __u8 sacked; int segs; if (skb == tcp_send_head(sk)) @@ -2788,6 +2788,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk) segs = tp->snd_cwnd - tcp_packets_in_flight(tp); if (segs <= 0) return; + sacked = TCP_SKB_CB(skb)->sacked; /* In case tcp_shift_skb_data() have aggregated large skbs, * we need to make sure not sending too bigs TSO packets */ diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 5fdcb8d108d4..058c31286ce1 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -114,6 +114,7 @@ #include <net/busy_poll.h> #include "udp_impl.h" #include <net/sock_reuseport.h> +#include <net/addrconf.h> struct udp_table udp_table __read_mostly; EXPORT_SYMBOL(udp_table); @@ -2192,6 +2193,20 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait) } EXPORT_SYMBOL(udp_poll); +int udp_abort(struct sock *sk, int err) +{ + lock_sock(sk); + + sk->sk_err = err; + sk->sk_error_report(sk); + udp_disconnect(sk, 0); + + release_sock(sk); + + return 0; +} +EXPORT_SYMBOL_GPL(udp_abort); + struct proto udp_prot = { .name = "UDP", .owner = THIS_MODULE, @@ -2221,7 +2236,7 @@ struct proto udp_prot = { .compat_setsockopt = compat_udp_setsockopt, .compat_getsockopt = compat_udp_getsockopt, #endif - .clear_sk = sk_prot_clear_portaddr_nulls, + .diag_destroy = udp_abort, }; EXPORT_SYMBOL(udp_prot); diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c index 3d5ccf4b1412..8a9f6e535caa 100644 --- a/net/ipv4/udp_diag.c +++ b/net/ipv4/udp_diag.c @@ -165,12 +165,88 @@ static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, r->idiag_wqueue = sk_wmem_alloc_get(sk); } +#ifdef CONFIG_INET_DIAG_DESTROY +static int __udp_diag_destroy(struct sk_buff *in_skb, + const struct inet_diag_req_v2 *req, + struct udp_table *tbl) +{ + struct net *net = sock_net(in_skb->sk); + struct sock *sk; + int err; + + rcu_read_lock(); + + if (req->sdiag_family == AF_INET) + sk = __udp4_lib_lookup(net, + req->id.idiag_dst[0], req->id.idiag_dport, + req->id.idiag_src[0], req->id.idiag_sport, + req->id.idiag_if, tbl, NULL); +#if IS_ENABLED(CONFIG_IPV6) + else if (req->sdiag_family == AF_INET6) { + if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) && + ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src)) + sk = __udp4_lib_lookup(net, + req->id.idiag_dst[0], req->id.idiag_dport, + req->id.idiag_src[0], req->id.idiag_sport, 
+ req->id.idiag_if, tbl, NULL); + + else + sk = __udp6_lib_lookup(net, + (struct in6_addr *)req->id.idiag_dst, + req->id.idiag_dport, + (struct in6_addr *)req->id.idiag_src, + req->id.idiag_sport, + req->id.idiag_if, tbl, NULL); + } +#endif + else { + rcu_read_unlock(); + return -EINVAL; + } + + if (sk && !atomic_inc_not_zero(&sk->sk_refcnt)) + sk = NULL; + + rcu_read_unlock(); + + if (!sk) + return -ENOENT; + + if (sock_diag_check_cookie(sk, req->id.idiag_cookie)) { + sock_put(sk); + return -ENOENT; + } + + err = sock_diag_destroy(sk, ECONNABORTED); + + sock_put(sk); + + return err; +} + +static int udp_diag_destroy(struct sk_buff *in_skb, + const struct inet_diag_req_v2 *req) +{ + return __udp_diag_destroy(in_skb, req, &udp_table); +} + +static int udplite_diag_destroy(struct sk_buff *in_skb, + const struct inet_diag_req_v2 *req) +{ + return __udp_diag_destroy(in_skb, req, &udplite_table); +} + +#endif + static const struct inet_diag_handler udp_diag_handler = { .dump = udp_diag_dump, .dump_one = udp_diag_dump_one, .idiag_get_info = udp_diag_get_info, .idiag_type = IPPROTO_UDP, .idiag_info_size = 0, +#ifdef CONFIG_INET_DIAG_DESTROY + .destroy = udp_diag_destroy, +#endif }; static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, @@ -192,6 +268,9 @@ static const struct inet_diag_handler udplite_diag_handler = { .idiag_get_info = udp_diag_get_info, .idiag_type = IPPROTO_UDPLITE, .idiag_info_size = 0, +#ifdef CONFIG_INET_DIAG_DESTROY + .destroy = udplite_diag_destroy, +#endif }; static int __init udp_diag_init(void) diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c index 2eea073e27ef..af817158d830 100644 --- a/net/ipv4/udplite.c +++ b/net/ipv4/udplite.c @@ -60,7 +60,6 @@ struct proto udplite_prot = { .compat_setsockopt = compat_udp_setsockopt, .compat_getsockopt = compat_udp_getsockopt, #endif - .clear_sk = sk_prot_clear_portaddr_nulls, }; EXPORT_SYMBOL(udplite_prot); diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index b454055ba625..46ad699937fd 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -545,6 +545,8 @@ const struct proto_ops inet6_stream_ops = { .mmap = sock_no_mmap, .sendpage = inet_sendpage, .splice_read = tcp_splice_read, + .read_sock = tcp_read_sock, + .peek_len = tcp_peek_len, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_sock_common_setsockopt, .compat_getsockopt = compat_sock_common_getsockopt, diff --git a/net/ipv6/ila/ila_common.c b/net/ipv6/ila/ila_common.c index ec9efbcdad35..aba0998ddbfb 100644 --- a/net/ipv6/ila/ila_common.c +++ b/net/ipv6/ila/ila_common.c @@ -172,6 +172,5 @@ static void __exit ila_fini(void) module_init(ila_init); module_exit(ila_fini); -MODULE_ALIAS_RTNL_LWT(ILA); MODULE_AUTHOR("Tom Herbert <tom@herbertland.com>"); MODULE_LICENSE("GPL"); diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 704274cbd495..397e1ed3daa3 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -61,12 +61,12 @@ static bool log_ecn_error = true; module_param(log_ecn_error, bool, 0644); MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); -#define HASH_SIZE_SHIFT 5 -#define HASH_SIZE (1 << HASH_SIZE_SHIFT) +#define IP6_GRE_HASH_SIZE_SHIFT 5 +#define IP6_GRE_HASH_SIZE (1 << IP6_GRE_HASH_SIZE_SHIFT) static int ip6gre_net_id __read_mostly; struct ip6gre_net { - struct ip6_tnl __rcu *tunnels[4][HASH_SIZE]; + struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE]; struct net_device *fb_tunnel_dev; }; @@ -96,12 +96,12 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu); will match 
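The udp_diag_destroy()/udplite_diag_destroy() handlers above, together with .diag_destroy = udp_abort in the proto ops, extend the SOCK_DESTROY facility (as used by "ss -K") to UDP sockets when CONFIG_INET_DIAG_DESTROY is enabled. A minimal sketch of the generic hand-off they rely on follows; the helper's shape is assumed here, it is not part of this diff:

/* Assumed shape of the generic sock_diag hand-off (illustrative):
 *   SOCK_DESTROY netlink request
 *     -> inet_diag_handler.destroy   == udp_diag_destroy()
 *     -> __udp_diag_destroy()           socket lookup + cookie check
 *     -> sock_diag_destroy(sk, ECONNABORTED)
 *     -> sk->sk_prot->diag_destroy   == udp_abort()
 */
static int sock_diag_destroy_sketch(struct sock *sk, int err)
{
	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	if (!sk->sk_prot->diag_destroy)
		return -EOPNOTSUPP;

	return sk->sk_prot->diag_destroy(sk, err);
}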
fallback tunnel. */ -#define HASH_KEY(key) (((__force u32)key^((__force u32)key>>4))&(HASH_SIZE - 1)) +#define HASH_KEY(key) (((__force u32)key^((__force u32)key>>4))&(IP6_GRE_HASH_SIZE - 1)) static u32 HASH_ADDR(const struct in6_addr *addr) { u32 hash = ipv6_addr_hash(addr); - return hash_32(hash, HASH_SIZE_SHIFT); + return hash_32(hash, IP6_GRE_HASH_SIZE_SHIFT); } #define tunnels_r_l tunnels[3] @@ -1087,7 +1087,7 @@ static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head) for (prio = 0; prio < 4; prio++) { int h; - for (h = 0; h < HASH_SIZE; h++) { + for (h = 0; h < IP6_GRE_HASH_SIZE; h++) { struct ip6_tnl *t; t = rtnl_dereference(ign->tunnels[prio][h]); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 7b0481e3738f..2050217df565 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -64,8 +64,8 @@ MODULE_LICENSE("GPL"); MODULE_ALIAS_RTNL_LINK("ip6tnl"); MODULE_ALIAS_NETDEV("ip6tnl0"); -#define HASH_SIZE_SHIFT 5 -#define HASH_SIZE (1 << HASH_SIZE_SHIFT) +#define IP6_TUNNEL_HASH_SIZE_SHIFT 5 +#define IP6_TUNNEL_HASH_SIZE (1 << IP6_TUNNEL_HASH_SIZE_SHIFT) static bool log_ecn_error = true; module_param(log_ecn_error, bool, 0644); @@ -75,7 +75,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2) { u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2); - return hash_32(hash, HASH_SIZE_SHIFT); + return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT); } static int ip6_tnl_dev_init(struct net_device *dev); @@ -87,7 +87,7 @@ struct ip6_tnl_net { /* the IPv6 tunnel fallback device */ struct net_device *fb_tnl_dev; /* lists for storing tunnels in use */ - struct ip6_tnl __rcu *tnls_r_l[HASH_SIZE]; + struct ip6_tnl __rcu *tnls_r_l[IP6_TUNNEL_HASH_SIZE]; struct ip6_tnl __rcu *tnls_wc[1]; struct ip6_tnl __rcu **tnls[2]; }; @@ -2031,7 +2031,7 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct net *net) if (dev->rtnl_link_ops == &ip6_link_ops) unregister_netdevice_queue(dev, &list); - for (h = 0; h < HASH_SIZE; h++) { + for (h = 0; h < IP6_TUNNEL_HASH_SIZE; h++) { t = rtnl_dereference(ip6n->tnls_r_l[h]); while (t) { /* If dev is in the same netns, it has already diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index d90a11f14040..cc7e05898307 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -50,14 +50,14 @@ #include <net/net_namespace.h> #include <net/netns/generic.h> -#define HASH_SIZE_SHIFT 5 -#define HASH_SIZE (1 << HASH_SIZE_SHIFT) +#define IP6_VTI_HASH_SIZE_SHIFT 5 +#define IP6_VTI_HASH_SIZE (1 << IP6_VTI_HASH_SIZE_SHIFT) static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2) { u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2); - return hash_32(hash, HASH_SIZE_SHIFT); + return hash_32(hash, IP6_VTI_HASH_SIZE_SHIFT); } static int vti6_dev_init(struct net_device *dev); @@ -69,7 +69,7 @@ struct vti6_net { /* the vti6 tunnel fallback device */ struct net_device *fb_tnl_dev; /* lists for storing tunnels in use */ - struct ip6_tnl __rcu *tnls_r_l[HASH_SIZE]; + struct ip6_tnl __rcu *tnls_r_l[IP6_VTI_HASH_SIZE]; struct ip6_tnl __rcu *tnls_wc[1]; struct ip6_tnl __rcu **tnls[2]; }; @@ -1040,7 +1040,7 @@ static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n) struct ip6_tnl *t; LIST_HEAD(list); - for (h = 0; h < HASH_SIZE; h++) { + for (h = 0; h < IP6_VTI_HASH_SIZE; h++) { t = rtnl_dereference(ip6n->tnls_r_l[h]); while (t) { unregister_netdevice_queue(t->dev, &list); diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index d64ee7e83664..75c1fc54f188 100644 --- a/net/ipv6/mcast.c +++ 
b/net/ipv6/mcast.c @@ -1739,6 +1739,15 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, continue; } + /* Based on RFC3810 6.1. Should not send source-list change + * records when there is a filter mode change. + */ + if (((gdeleted && pmc->mca_sfmode == MCAST_EXCLUDE) || + (!gdeleted && pmc->mca_crcount)) && + (type == MLD2_ALLOW_NEW_SOURCES || + type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount) + goto decrease_sf_crcount; + /* clear marks on query responses */ if (isquery) psf->sf_gsresp = 0; @@ -1766,6 +1775,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, scount++; stotal++; if ((type == MLD2_ALLOW_NEW_SOURCES || type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount) { +decrease_sf_crcount: psf->sf_crcount--; if ((sdeleted || gdeleted) && psf->sf_crcount == 0) { if (psf_prev) diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 182b6a9be29d..b1cdf8009d29 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -62,7 +62,7 @@ For comments look at net/ipv4/ip_gre.c --ANK */ -#define HASH_SIZE 16 +#define IP6_SIT_HASH_SIZE 16 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF) static bool log_ecn_error = true; @@ -78,9 +78,9 @@ static struct rtnl_link_ops sit_link_ops __read_mostly; static int sit_net_id __read_mostly; struct sit_net { - struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE]; - struct ip_tunnel __rcu *tunnels_r[HASH_SIZE]; - struct ip_tunnel __rcu *tunnels_l[HASH_SIZE]; + struct ip_tunnel __rcu *tunnels_r_l[IP6_SIT_HASH_SIZE]; + struct ip_tunnel __rcu *tunnels_r[IP6_SIT_HASH_SIZE]; + struct ip_tunnel __rcu *tunnels_l[IP6_SIT_HASH_SIZE]; struct ip_tunnel __rcu *tunnels_wc[1]; struct ip_tunnel __rcu **tunnels[4]; @@ -1126,7 +1126,7 @@ static int ipip6_tunnel_update_6rd(struct ip_tunnel *t, } #endif -bool ipip6_valid_ip_proto(u8 ipproto) +static bool ipip6_valid_ip_proto(u8 ipproto) { return ipproto == IPPROTO_IPV6 || ipproto == IPPROTO_IPIP || @@ -1783,7 +1783,7 @@ static void __net_exit sit_destroy_tunnels(struct net *net, for (prio = 1; prio < 4; prio++) { int h; - for (h = 0; h < HASH_SIZE; h++) { + for (h = 0; h < IP6_SIT_HASH_SIZE; h++) { struct ip_tunnel *t; t = rtnl_dereference(sitn->tunnels[prio][h]); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 94f4f89d73e7..04529a3d42cb 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -671,6 +671,7 @@ static bool tcp_v6_inbound_md5_hash(const struct sock *sk, NULL, skb); if (genhash || memcmp(hash_location, newhash, 16) != 0) { + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE); net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n", genhash ? 
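The IP6_GRE/IP6_TUNNEL/IP6_VTI/IP6_SIT prefixes added to the hash-size macros above are a plain rename, presumably so the per-module HASH_SIZE no longer clashes with the generic HASH_SIZE() helper in <linux/hashtable.h>; the bucket selection itself is unchanged. A minimal sketch of that selection (macro and function names below are illustrative):

/* Illustrative only: with a shift of 5 there are 1 << 5 == 32 buckets. */
#include <linux/hash.h>
#include <net/ipv6.h>

#define EXAMPLE_HASH_SIZE_SHIFT	5
#define EXAMPLE_HASH_SIZE	(1 << EXAMPLE_HASH_SIZE_SHIFT)

static u32 example_tunnel_bucket(const struct in6_addr *remote,
				 const struct in6_addr *local)
{
	u32 h = ipv6_addr_hash(remote) ^ ipv6_addr_hash(local);

	return hash_32(h, EXAMPLE_HASH_SIZE_SHIFT);	/* 0 .. 31 */
}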
"failed" : "mismatch", &ip6h->saddr, ntohs(th->source), @@ -1415,6 +1416,7 @@ process: sk = req->rsk_listener; tcp_v6_fill_cb(skb, hdr, th); if (tcp_v6_inbound_md5_hash(sk, skb)) { + sk_drops_add(sk, skb); reqsk_put(req); goto discard_it; } @@ -1471,10 +1473,7 @@ process: if (!sock_owned_by_user(sk)) { if (!tcp_prequeue(sk, skb)) ret = tcp_v6_do_rcv(sk, skb); - } else if (unlikely(sk_add_backlog(sk, skb, - sk->sk_rcvbuf + sk->sk_sndbuf))) { - bh_unlock_sock(sk); - __NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP); + } else if (tcp_add_backlog(sk, skb)) { goto discard_and_relse; } bh_unlock_sock(sk); @@ -1868,17 +1867,6 @@ void tcp6_proc_exit(struct net *net) } #endif -static void tcp_v6_clear_sk(struct sock *sk, int size) -{ - struct inet_sock *inet = inet_sk(sk); - - /* we do not want to clear pinet6 field, because of RCU lookups */ - sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6)); - - size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6); - memset(&inet->pinet6 + 1, 0, size); -} - struct proto tcpv6_prot = { .name = "TCPv6", .owner = THIS_MODULE, @@ -1920,7 +1908,6 @@ struct proto tcpv6_prot = { .compat_setsockopt = compat_tcp_setsockopt, .compat_getsockopt = compat_tcp_getsockopt, #endif - .clear_sk = tcp_v6_clear_sk, .diag_destroy = tcp_abort, }; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 19ac3a1c308d..9aa7c1c7a9ce 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1424,17 +1424,6 @@ void udp6_proc_exit(struct net *net) } #endif /* CONFIG_PROC_FS */ -void udp_v6_clear_sk(struct sock *sk, int size) -{ - struct inet_sock *inet = inet_sk(sk); - - /* we do not want to clear pinet6 field, because of RCU lookups */ - sk_prot_clear_portaddr_nulls(sk, offsetof(struct inet_sock, pinet6)); - - size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6); - memset(&inet->pinet6 + 1, 0, size); -} - /* ------------------------------------------------------------------------ */ struct proto udpv6_prot = { @@ -1465,7 +1454,7 @@ struct proto udpv6_prot = { .compat_setsockopt = compat_udpv6_setsockopt, .compat_getsockopt = compat_udpv6_getsockopt, #endif - .clear_sk = udp_v6_clear_sk, + .diag_destroy = udp_abort, }; static struct inet_protosw udpv6_protosw = { diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h index 0682c031ccdc..f6eb1ab34f4b 100644 --- a/net/ipv6/udp_impl.h +++ b/net/ipv6/udp_impl.h @@ -29,8 +29,6 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); void udpv6_destroy_sock(struct sock *sk); -void udp_v6_clear_sk(struct sock *sk, int size); - #ifdef CONFIG_PROC_FS int udp6_seq_show(struct seq_file *seq, void *v); #endif diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c index fd6ef414899b..47d0d2b87106 100644 --- a/net/ipv6/udplite.c +++ b/net/ipv6/udplite.c @@ -55,7 +55,6 @@ struct proto udplitev6_prot = { .compat_setsockopt = compat_udpv6_setsockopt, .compat_getsockopt = compat_udpv6_getsockopt, #endif - .clear_sk = udp_v6_clear_sk, }; static struct inet_protosw udplite6_protosw = { diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 8d2f7c9b491d..db639690c205 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@ -845,9 +845,6 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags) if (sock->state != SS_UNCONNECTED) goto out; - if ((sk = sock->sk) == NULL) - goto out; - err = -EOPNOTSUPP; if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) && (sk->sk_type != SOCK_DGRAM)) diff --git 
a/net/kcm/Kconfig b/net/kcm/Kconfig index 5db94d940ecc..87fca36e6c47 100644 --- a/net/kcm/Kconfig +++ b/net/kcm/Kconfig @@ -3,6 +3,7 @@ config AF_KCM tristate "KCM sockets" depends on INET select BPF_SYSCALL + select STREAM_PARSER ---help--- KCM (Kernel Connection Multiplexor) sockets provide a method for multiplexing messages of a message based application diff --git a/net/kcm/kcmproc.c b/net/kcm/kcmproc.c index 16c2e03bd388..bf75c9231cca 100644 --- a/net/kcm/kcmproc.c +++ b/net/kcm/kcmproc.c @@ -155,8 +155,8 @@ static void kcm_format_psock(struct kcm_psock *psock, struct seq_file *seq, seq_printf(seq, " psock-%-5u %-10llu %-16llu %-10llu %-16llu %-8d %-8d %-8d %-8d ", psock->index, - psock->stats.rx_msgs, - psock->stats.rx_bytes, + psock->strp.stats.rx_msgs, + psock->strp.stats.rx_bytes, psock->stats.tx_msgs, psock->stats.tx_bytes, psock->sk->sk_receive_queue.qlen, @@ -170,14 +170,27 @@ static void kcm_format_psock(struct kcm_psock *psock, struct seq_file *seq, if (psock->tx_stopped) seq_puts(seq, "TxStop "); - if (psock->rx_stopped) + if (psock->strp.rx_stopped) seq_puts(seq, "RxStop "); if (psock->tx_kcm) seq_printf(seq, "Rsvd-%d ", psock->tx_kcm->index); - if (psock->ready_rx_msg) - seq_puts(seq, "RdyRx "); + if (!psock->strp.rx_paused && !psock->ready_rx_msg) { + if (psock->sk->sk_receive_queue.qlen) { + if (psock->strp.rx_need_bytes) + seq_printf(seq, "RxWait=%u ", + psock->strp.rx_need_bytes); + else + seq_printf(seq, "RxWait "); + } + } else { + if (psock->strp.rx_paused) + seq_puts(seq, "RxPause "); + + if (psock->ready_rx_msg) + seq_puts(seq, "RdyRx "); + } seq_puts(seq, "\n"); } @@ -275,6 +288,7 @@ static int kcm_stats_seq_show(struct seq_file *seq, void *v) { struct kcm_psock_stats psock_stats; struct kcm_mux_stats mux_stats; + struct strp_aggr_stats strp_stats; struct kcm_mux *mux; struct kcm_psock *psock; struct net *net = seq->private; @@ -282,20 +296,28 @@ static int kcm_stats_seq_show(struct seq_file *seq, void *v) memset(&mux_stats, 0, sizeof(mux_stats)); memset(&psock_stats, 0, sizeof(psock_stats)); + memset(&strp_stats, 0, sizeof(strp_stats)); mutex_lock(&knet->mutex); aggregate_mux_stats(&knet->aggregate_mux_stats, &mux_stats); aggregate_psock_stats(&knet->aggregate_psock_stats, &psock_stats); + aggregate_strp_stats(&knet->aggregate_strp_stats, + &strp_stats); list_for_each_entry_rcu(mux, &knet->mux_list, kcm_mux_list) { spin_lock_bh(&mux->lock); aggregate_mux_stats(&mux->stats, &mux_stats); aggregate_psock_stats(&mux->aggregate_psock_stats, &psock_stats); - list_for_each_entry(psock, &mux->psocks, psock_list) + aggregate_strp_stats(&mux->aggregate_strp_stats, + &strp_stats); + list_for_each_entry(psock, &mux->psocks, psock_list) { aggregate_psock_stats(&psock->stats, &psock_stats); + save_strp_stats(&psock->strp, &strp_stats); + } + spin_unlock_bh(&mux->lock); } @@ -328,7 +350,7 @@ static int kcm_stats_seq_show(struct seq_file *seq, void *v) mux_stats.rx_ready_drops); seq_printf(seq, - "%-8s %-10s %-16s %-10s %-16s %-10s %-10s %-10s %-10s %-10s %-10s %-10s %-10s %-10s\n", + "%-8s %-10s %-16s %-10s %-16s %-10s %-10s %-10s %-10s %-10s %-10s %-10s %-10s %-10s %-10s %-10s\n", "Psock", "RX-Msgs", "RX-Bytes", @@ -337,6 +359,8 @@ static int kcm_stats_seq_show(struct seq_file *seq, void *v) "Reserved", "Unreserved", "RX-Aborts", + "RX-Intr", + "RX-Unrecov", "RX-MemFail", "RX-NeedMor", "RX-BadLen", @@ -345,20 +369,22 @@ static int kcm_stats_seq_show(struct seq_file *seq, void *v) "TX-Aborts"); seq_printf(seq, - "%-8s %-10llu %-16llu %-10llu %-16llu %-10llu %-10llu %-10u %-10u 
%-10u %-10u %-10u %-10u %-10u\n", + "%-8s %-10llu %-16llu %-10llu %-16llu %-10llu %-10llu %-10u %-10u %-10u %-10u %-10u %-10u %-10u %-10u %-10u\n", "", - psock_stats.rx_msgs, - psock_stats.rx_bytes, + strp_stats.rx_msgs, + strp_stats.rx_bytes, psock_stats.tx_msgs, psock_stats.tx_bytes, psock_stats.reserved, psock_stats.unreserved, - psock_stats.rx_aborts, - psock_stats.rx_mem_fail, - psock_stats.rx_need_more_hdr, - psock_stats.rx_bad_hdr_len, - psock_stats.rx_msg_too_big, - psock_stats.rx_msg_timeouts, + strp_stats.rx_aborts, + strp_stats.rx_interrupted, + strp_stats.rx_unrecov_intr, + strp_stats.rx_mem_fail, + strp_stats.rx_need_more_hdr, + strp_stats.rx_bad_hdr_len, + strp_stats.rx_msg_too_big, + strp_stats.rx_msg_timeouts, psock_stats.tx_aborts); return 0; diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index cb39e05b166c..2632ac748371 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -1,3 +1,13 @@ +/* + * Kernel Connection Multiplexor + * + * Copyright (c) 2016 Tom Herbert <tom@herbertland.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + */ + #include <linux/bpf.h> #include <linux/errno.h> #include <linux/errqueue.h> @@ -16,7 +26,6 @@ #include <net/kcm.h> #include <net/netns/generic.h> #include <net/sock.h> -#include <net/tcp.h> #include <uapi/linux/kcm.h> unsigned int kcm_net_id; @@ -35,38 +44,12 @@ static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb) return (struct kcm_tx_msg *)skb->cb; } -static inline struct kcm_rx_msg *kcm_rx_msg(struct sk_buff *skb) -{ - return (struct kcm_rx_msg *)((void *)skb->cb + - offsetof(struct qdisc_skb_cb, data)); -} - static void report_csk_error(struct sock *csk, int err) { csk->sk_err = EPIPE; csk->sk_error_report(csk); } -/* Callback lock held */ -static void kcm_abort_rx_psock(struct kcm_psock *psock, int err, - struct sk_buff *skb) -{ - struct sock *csk = psock->sk; - - /* Unrecoverable error in receive */ - - del_timer(&psock->rx_msg_timer); - - if (psock->rx_stopped) - return; - - psock->rx_stopped = 1; - KCM_STATS_INCR(psock->stats.rx_aborts); - - /* Report an error on the lower socket */ - report_csk_error(csk, err); -} - static void kcm_abort_tx_psock(struct kcm_psock *psock, int err, bool wakeup_kcm) { @@ -109,12 +92,13 @@ static void kcm_abort_tx_psock(struct kcm_psock *psock, int err, static void kcm_update_rx_mux_stats(struct kcm_mux *mux, struct kcm_psock *psock) { - KCM_STATS_ADD(mux->stats.rx_bytes, - psock->stats.rx_bytes - psock->saved_rx_bytes); + STRP_STATS_ADD(mux->stats.rx_bytes, + psock->strp.stats.rx_bytes - + psock->saved_rx_bytes); mux->stats.rx_msgs += - psock->stats.rx_msgs - psock->saved_rx_msgs; - psock->saved_rx_msgs = psock->stats.rx_msgs; - psock->saved_rx_bytes = psock->stats.rx_bytes; + psock->strp.stats.rx_msgs - psock->saved_rx_msgs; + psock->saved_rx_msgs = psock->strp.stats.rx_msgs; + psock->saved_rx_bytes = psock->strp.stats.rx_bytes; } static void kcm_update_tx_mux_stats(struct kcm_mux *mux, @@ -167,11 +151,11 @@ static void kcm_rcv_ready(struct kcm_sock *kcm) */ list_del(&psock->psock_ready_list); psock->ready_rx_msg = NULL; - /* Commit clearing of ready_rx_msg for queuing work */ smp_mb(); - queue_work(kcm_wq, &psock->rx_work); + strp_unpause(&psock->strp); + strp_check_rcv(&psock->strp); } /* Buffer limit is okay now, add to ready list */ @@ -285,6 +269,7 @@ static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock, if 
(list_empty(&mux->kcm_rx_waiters)) { psock->ready_rx_msg = head; + strp_pause(&psock->strp); list_add_tail(&psock->psock_ready_list, &mux->psocks_ready); spin_unlock_bh(&mux->rx_lock); @@ -353,346 +338,60 @@ static void unreserve_rx_kcm(struct kcm_psock *psock, spin_unlock_bh(&mux->rx_lock); } -static void kcm_start_rx_timer(struct kcm_psock *psock) -{ - if (psock->sk->sk_rcvtimeo) - mod_timer(&psock->rx_msg_timer, psock->sk->sk_rcvtimeo); -} - -/* Macro to invoke filter function. */ -#define KCM_RUN_FILTER(prog, ctx) \ - (*prog->bpf_func)(ctx, prog->insnsi) - -/* Lower socket lock held */ -static int kcm_tcp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, - unsigned int orig_offset, size_t orig_len) -{ - struct kcm_psock *psock = (struct kcm_psock *)desc->arg.data; - struct kcm_rx_msg *rxm; - struct kcm_sock *kcm; - struct sk_buff *head, *skb; - size_t eaten = 0, cand_len; - ssize_t extra; - int err; - bool cloned_orig = false; - - if (psock->ready_rx_msg) - return 0; - - head = psock->rx_skb_head; - if (head) { - /* Message already in progress */ - - rxm = kcm_rx_msg(head); - if (unlikely(rxm->early_eaten)) { - /* Already some number of bytes on the receive sock - * data saved in rx_skb_head, just indicate they - * are consumed. - */ - eaten = orig_len <= rxm->early_eaten ? - orig_len : rxm->early_eaten; - rxm->early_eaten -= eaten; - - return eaten; - } - - if (unlikely(orig_offset)) { - /* Getting data with a non-zero offset when a message is - * in progress is not expected. If it does happen, we - * need to clone and pull since we can't deal with - * offsets in the skbs for a message expect in the head. - */ - orig_skb = skb_clone(orig_skb, GFP_ATOMIC); - if (!orig_skb) { - KCM_STATS_INCR(psock->stats.rx_mem_fail); - desc->error = -ENOMEM; - return 0; - } - if (!pskb_pull(orig_skb, orig_offset)) { - KCM_STATS_INCR(psock->stats.rx_mem_fail); - kfree_skb(orig_skb); - desc->error = -ENOMEM; - return 0; - } - cloned_orig = true; - orig_offset = 0; - } - - if (!psock->rx_skb_nextp) { - /* We are going to append to the frags_list of head. - * Need to unshare the frag_list. - */ - err = skb_unclone(head, GFP_ATOMIC); - if (err) { - KCM_STATS_INCR(psock->stats.rx_mem_fail); - desc->error = err; - return 0; - } - - if (unlikely(skb_shinfo(head)->frag_list)) { - /* We can't append to an sk_buff that already - * has a frag_list. We create a new head, point - * the frag_list of that to the old head, and - * then are able to use the old head->next for - * appending to the message. 
- */ - if (WARN_ON(head->next)) { - desc->error = -EINVAL; - return 0; - } - - skb = alloc_skb(0, GFP_ATOMIC); - if (!skb) { - KCM_STATS_INCR(psock->stats.rx_mem_fail); - desc->error = -ENOMEM; - return 0; - } - skb->len = head->len; - skb->data_len = head->len; - skb->truesize = head->truesize; - *kcm_rx_msg(skb) = *kcm_rx_msg(head); - psock->rx_skb_nextp = &head->next; - skb_shinfo(skb)->frag_list = head; - psock->rx_skb_head = skb; - head = skb; - } else { - psock->rx_skb_nextp = - &skb_shinfo(head)->frag_list; - } - } - } - - while (eaten < orig_len) { - /* Always clone since we will consume something */ - skb = skb_clone(orig_skb, GFP_ATOMIC); - if (!skb) { - KCM_STATS_INCR(psock->stats.rx_mem_fail); - desc->error = -ENOMEM; - break; - } - - cand_len = orig_len - eaten; - - head = psock->rx_skb_head; - if (!head) { - head = skb; - psock->rx_skb_head = head; - /* Will set rx_skb_nextp on next packet if needed */ - psock->rx_skb_nextp = NULL; - rxm = kcm_rx_msg(head); - memset(rxm, 0, sizeof(*rxm)); - rxm->offset = orig_offset + eaten; - } else { - /* Unclone since we may be appending to an skb that we - * already share a frag_list with. - */ - err = skb_unclone(skb, GFP_ATOMIC); - if (err) { - KCM_STATS_INCR(psock->stats.rx_mem_fail); - desc->error = err; - break; - } - - rxm = kcm_rx_msg(head); - *psock->rx_skb_nextp = skb; - psock->rx_skb_nextp = &skb->next; - head->data_len += skb->len; - head->len += skb->len; - head->truesize += skb->truesize; - } - - if (!rxm->full_len) { - ssize_t len; - - len = KCM_RUN_FILTER(psock->bpf_prog, head); - - if (!len) { - /* Need more header to determine length */ - if (!rxm->accum_len) { - /* Start RX timer for new message */ - kcm_start_rx_timer(psock); - } - rxm->accum_len += cand_len; - eaten += cand_len; - KCM_STATS_INCR(psock->stats.rx_need_more_hdr); - WARN_ON(eaten != orig_len); - break; - } else if (len > psock->sk->sk_rcvbuf) { - /* Message length exceeds maximum allowed */ - KCM_STATS_INCR(psock->stats.rx_msg_too_big); - desc->error = -EMSGSIZE; - psock->rx_skb_head = NULL; - kcm_abort_rx_psock(psock, EMSGSIZE, head); - break; - } else if (len <= (ssize_t)head->len - - skb->len - rxm->offset) { - /* Length must be into new skb (and also - * greater than zero) - */ - KCM_STATS_INCR(psock->stats.rx_bad_hdr_len); - desc->error = -EPROTO; - psock->rx_skb_head = NULL; - kcm_abort_rx_psock(psock, EPROTO, head); - break; - } - - rxm->full_len = len; - } - - extra = (ssize_t)(rxm->accum_len + cand_len) - rxm->full_len; - - if (extra < 0) { - /* Message not complete yet. */ - if (rxm->full_len - rxm->accum_len > - tcp_inq(psock->sk)) { - /* Don't have the whole messages in the socket - * buffer. Set psock->rx_need_bytes to wait for - * the rest of the message. Also, set "early - * eaten" since we've already buffered the skb - * but don't consume yet per tcp_read_sock. - */ - - if (!rxm->accum_len) { - /* Start RX timer for new message */ - kcm_start_rx_timer(psock); - } - - psock->rx_need_bytes = rxm->full_len - - rxm->accum_len; - rxm->accum_len += cand_len; - rxm->early_eaten = cand_len; - KCM_STATS_ADD(psock->stats.rx_bytes, cand_len); - desc->count = 0; /* Stop reading socket */ - break; - } - rxm->accum_len += cand_len; - eaten += cand_len; - WARN_ON(eaten != orig_len); - break; - } - - /* Positive extra indicates ore bytes than needed for the - * message - */ - - WARN_ON(extra > cand_len); - - eaten += (cand_len - extra); - - /* Hurray, we have a new message! 
*/ - del_timer(&psock->rx_msg_timer); - psock->rx_skb_head = NULL; - KCM_STATS_INCR(psock->stats.rx_msgs); - -try_queue: - kcm = reserve_rx_kcm(psock, head); - if (!kcm) { - /* Unable to reserve a KCM, message is held in psock. */ - break; - } - - if (kcm_queue_rcv_skb(&kcm->sk, head)) { - /* Should mean socket buffer full */ - unreserve_rx_kcm(psock, false); - goto try_queue; - } - } - - if (cloned_orig) - kfree_skb(orig_skb); - - KCM_STATS_ADD(psock->stats.rx_bytes, eaten); - - return eaten; -} - -/* Called with lock held on lower socket */ -static int psock_tcp_read_sock(struct kcm_psock *psock) -{ - read_descriptor_t desc; - - desc.arg.data = psock; - desc.error = 0; - desc.count = 1; /* give more than one skb per call */ - - /* sk should be locked here, so okay to do tcp_read_sock */ - tcp_read_sock(psock->sk, &desc, kcm_tcp_recv); - - unreserve_rx_kcm(psock, true); - - return desc.error; -} - /* Lower sock lock held */ -static void psock_tcp_data_ready(struct sock *sk) +static void psock_data_ready(struct sock *sk) { struct kcm_psock *psock; read_lock_bh(&sk->sk_callback_lock); psock = (struct kcm_psock *)sk->sk_user_data; - if (unlikely(!psock || psock->rx_stopped)) - goto out; + if (likely(psock)) + strp_data_ready(&psock->strp); - if (psock->ready_rx_msg) - goto out; - - if (psock->rx_need_bytes) { - if (tcp_inq(sk) >= psock->rx_need_bytes) - psock->rx_need_bytes = 0; - else - goto out; - } - - if (psock_tcp_read_sock(psock) == -ENOMEM) - queue_delayed_work(kcm_wq, &psock->rx_delayed_work, 0); - -out: read_unlock_bh(&sk->sk_callback_lock); } -static void do_psock_rx_work(struct kcm_psock *psock) +/* Called with lower sock held */ +static void kcm_rcv_strparser(struct strparser *strp, struct sk_buff *skb) { - read_descriptor_t rd_desc; - struct sock *csk = psock->sk; - - /* We need the read lock to synchronize with psock_tcp_data_ready. We - * need the socket lock for calling tcp_read_sock. - */ - lock_sock(csk); - read_lock_bh(&csk->sk_callback_lock); - - if (unlikely(csk->sk_user_data != psock)) - goto out; - - if (unlikely(psock->rx_stopped)) - goto out; - - if (psock->ready_rx_msg) - goto out; - - rd_desc.arg.data = psock; + struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp); + struct kcm_sock *kcm; - if (psock_tcp_read_sock(psock) == -ENOMEM) - queue_delayed_work(kcm_wq, &psock->rx_delayed_work, 0); +try_queue: + kcm = reserve_rx_kcm(psock, skb); + if (!kcm) { + /* Unable to reserve a KCM, message is held in psock and strp + * is paused. 
+ */ + return; + } -out: - read_unlock_bh(&csk->sk_callback_lock); - release_sock(csk); + if (kcm_queue_rcv_skb(&kcm->sk, skb)) { + /* Should mean socket buffer full */ + unreserve_rx_kcm(psock, false); + goto try_queue; + } } -static void psock_rx_work(struct work_struct *w) +static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb) { - do_psock_rx_work(container_of(w, struct kcm_psock, rx_work)); + struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp); + struct bpf_prog *prog = psock->bpf_prog; + + return (*prog->bpf_func)(skb, prog->insnsi); } -static void psock_rx_delayed_work(struct work_struct *w) +static int kcm_read_sock_done(struct strparser *strp, int err) { - do_psock_rx_work(container_of(w, struct kcm_psock, - rx_delayed_work.work)); + struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp); + + unreserve_rx_kcm(psock, true); + + return err; } -static void psock_tcp_state_change(struct sock *sk) +static void psock_state_change(struct sock *sk) { /* TCP only does a POLLIN for a half close. Do a POLLHUP here * since application will normally not poll with POLLIN @@ -702,7 +401,7 @@ static void psock_tcp_state_change(struct sock *sk) report_csk_error(sk, EPIPE); } -static void psock_tcp_write_space(struct sock *sk) +static void psock_write_space(struct sock *sk) { struct kcm_psock *psock; struct kcm_mux *mux; @@ -713,14 +412,13 @@ static void psock_tcp_write_space(struct sock *sk) psock = (struct kcm_psock *)sk->sk_user_data; if (unlikely(!psock)) goto out; - mux = psock->mux; spin_lock_bh(&mux->lock); /* Check if the socket is reserved so someone is waiting for sending. */ kcm = psock->tx_kcm; - if (kcm) + if (kcm && !unlikely(kcm->tx_stopped)) queue_work(kcm_wq, &kcm->tx_work); spin_unlock_bh(&mux->lock); @@ -1411,7 +1109,7 @@ static int kcm_recvmsg(struct socket *sock, struct msghdr *msg, struct kcm_sock *kcm = kcm_sk(sk); int err = 0; long timeo; - struct kcm_rx_msg *rxm; + struct strp_rx_msg *rxm; int copied = 0; struct sk_buff *skb; @@ -1425,7 +1123,7 @@ static int kcm_recvmsg(struct socket *sock, struct msghdr *msg, /* Okay, have a message on the receive queue */ - rxm = kcm_rx_msg(skb); + rxm = strp_rx_msg(skb); if (len > rxm->full_len) len = rxm->full_len; @@ -1481,7 +1179,7 @@ static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos, struct sock *sk = sock->sk; struct kcm_sock *kcm = kcm_sk(sk); long timeo; - struct kcm_rx_msg *rxm; + struct strp_rx_msg *rxm; int err = 0; ssize_t copied; struct sk_buff *skb; @@ -1498,7 +1196,7 @@ static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos, /* Okay, have a message on the receive queue */ - rxm = kcm_rx_msg(skb); + rxm = strp_rx_msg(skb); if (len > rxm->full_len) len = rxm->full_len; @@ -1674,15 +1372,6 @@ static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux) spin_unlock_bh(&mux->rx_lock); } -static void kcm_rx_msg_timeout(unsigned long arg) -{ - struct kcm_psock *psock = (struct kcm_psock *)arg; - - /* Message assembly timed out */ - KCM_STATS_INCR(psock->stats.rx_msg_timeouts); - kcm_abort_rx_psock(psock, ETIMEDOUT, NULL); -} - static int kcm_attach(struct socket *sock, struct socket *csock, struct bpf_prog *prog) { @@ -1692,19 +1381,13 @@ static int kcm_attach(struct socket *sock, struct socket *csock, struct kcm_psock *psock = NULL, *tpsock; struct list_head *head; int index = 0; - - if (csock->ops->family != PF_INET && - csock->ops->family != PF_INET6) - return -EINVAL; + struct strp_callbacks cb; + int err; csk = csock->sk; if (!csk) 
return -EINVAL; - /* Only support TCP for now */ - if (csk->sk_protocol != IPPROTO_TCP) - return -EINVAL; - psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL); if (!psock) return -ENOMEM; @@ -1713,11 +1396,16 @@ static int kcm_attach(struct socket *sock, struct socket *csock, psock->sk = csk; psock->bpf_prog = prog; - setup_timer(&psock->rx_msg_timer, kcm_rx_msg_timeout, - (unsigned long)psock); + cb.rcv_msg = kcm_rcv_strparser; + cb.abort_parser = NULL; + cb.parse_msg = kcm_parse_func_strparser; + cb.read_sock_done = kcm_read_sock_done; - INIT_WORK(&psock->rx_work, psock_rx_work); - INIT_DELAYED_WORK(&psock->rx_delayed_work, psock_rx_delayed_work); + err = strp_init(&psock->strp, csk, &cb); + if (err) { + kmem_cache_free(kcm_psockp, psock); + return err; + } sock_hold(csk); @@ -1726,9 +1414,9 @@ static int kcm_attach(struct socket *sock, struct socket *csock, psock->save_write_space = csk->sk_write_space; psock->save_state_change = csk->sk_state_change; csk->sk_user_data = psock; - csk->sk_data_ready = psock_tcp_data_ready; - csk->sk_write_space = psock_tcp_write_space; - csk->sk_state_change = psock_tcp_state_change; + csk->sk_data_ready = psock_data_ready; + csk->sk_write_space = psock_write_space; + csk->sk_state_change = psock_state_change; write_unlock_bh(&csk->sk_callback_lock); /* Finished initialization, now add the psock to the MUX. */ @@ -1750,7 +1438,7 @@ static int kcm_attach(struct socket *sock, struct socket *csock, spin_unlock_bh(&mux->lock); /* Schedule RX work in case there are already bytes queued */ - queue_work(kcm_wq, &psock->rx_work); + strp_check_rcv(&psock->strp); return 0; } @@ -1790,6 +1478,8 @@ static void kcm_unattach(struct kcm_psock *psock) struct sock *csk = psock->sk; struct kcm_mux *mux = psock->mux; + lock_sock(csk); + /* Stop getting callbacks from TCP socket. After this there should * be no way to reserve a kcm for this psock. 
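With the kcm_attach() wiring above, KCM no longer rolls its own tcp_read_sock() loop: it fills a strp_callbacks structure, calls strp_init(), and lets the stream parser drive parse_msg/rcv_msg while the lower socket's data-ready callback simply forwards to strp_data_ready(). A minimal consumer of the same API is sketched below, assuming a hypothetical protocol with a 2-byte big-endian length header; all names are illustrative, not part of this patch:

/* Hypothetical strparser consumer -- sketch only. */
#include <net/strparser.h>

struct my_psock {
	struct strparser strp;
	struct sock *sk;
};

/* Return the total message length, 0 if more header bytes are needed,
 * or a negative error.
 */
static int my_parse_msg(struct strparser *strp, struct sk_buff *skb)
{
	struct strp_rx_msg *rxm = strp_rx_msg(skb);
	__be16 len;

	if (skb_copy_bits(skb, rxm->offset, &len, sizeof(len)))
		return 0;			/* header not complete yet */

	return sizeof(len) + ntohs(len);
}

/* Called with one complete message per skb. */
static void my_rcv_msg(struct strparser *strp, struct sk_buff *skb)
{
	kfree_skb(skb);				/* a real user would queue it */
}

static int my_attach(struct my_psock *psock, struct sock *csk)
{
	struct strp_callbacks cb = {
		.rcv_msg	= my_rcv_msg,
		.parse_msg	= my_parse_msg,
		/* abort_parser / read_sock_done left NULL in this sketch */
	};

	psock->sk = csk;
	return strp_init(&psock->strp, csk, &cb);
}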
*/ @@ -1798,7 +1488,7 @@ static void kcm_unattach(struct kcm_psock *psock) csk->sk_data_ready = psock->save_data_ready; csk->sk_write_space = psock->save_write_space; csk->sk_state_change = psock->save_state_change; - psock->rx_stopped = 1; + strp_stop(&psock->strp); if (WARN_ON(psock->rx_kcm)) { write_unlock_bh(&csk->sk_callback_lock); @@ -1821,18 +1511,17 @@ static void kcm_unattach(struct kcm_psock *psock) write_unlock_bh(&csk->sk_callback_lock); - del_timer_sync(&psock->rx_msg_timer); - cancel_work_sync(&psock->rx_work); - cancel_delayed_work_sync(&psock->rx_delayed_work); + /* Call strp_done without sock lock */ + release_sock(csk); + strp_done(&psock->strp); + lock_sock(csk); bpf_prog_put(psock->bpf_prog); - kfree_skb(psock->rx_skb_head); - psock->rx_skb_head = NULL; - spin_lock_bh(&mux->lock); aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats); + save_strp_stats(&psock->strp, &mux->aggregate_strp_stats); KCM_STATS_INCR(mux->stats.psock_unattach); @@ -1875,6 +1564,8 @@ no_reserved: fput(csk->sk_socket->file); kmem_cache_free(kcm_psockp, psock); } + + release_sock(csk); } static int kcm_unattach_ioctl(struct socket *sock, struct kcm_unattach *info) @@ -1915,6 +1606,7 @@ static int kcm_unattach_ioctl(struct socket *sock, struct kcm_unattach *info) spin_unlock_bh(&mux->lock); + /* Lower socket lock should already be held */ kcm_unattach(psock); err = 0; @@ -2072,6 +1764,8 @@ static void release_mux(struct kcm_mux *mux) aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats); aggregate_psock_stats(&mux->aggregate_psock_stats, &knet->aggregate_psock_stats); + aggregate_strp_stats(&mux->aggregate_strp_stats, + &knet->aggregate_strp_stats); list_del_rcu(&mux->kcm_mux_list); knet->count--; mutex_unlock(&knet->mutex); @@ -2151,6 +1845,13 @@ static int kcm_release(struct socket *sock) * it will just return. */ __skb_queue_purge(&sk->sk_write_queue); + + /* Set tx_stopped. This is checked when psock is bound to a kcm and we + * get a writespace callback. This prevents further work being queued + * from the callback (unbinding the psock occurs after canceling work. 
+ */ + kcm->tx_stopped = 1; + release_sock(sk); spin_lock_bh(&mux->lock); diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 232cb92033e8..34eff77982cf 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -177,7 +177,7 @@ static int pppol2tp_recv_payload_hook(struct sk_buff *skb) if (!pskb_may_pull(skb, 2)) return 1; - if ((skb->data[0] == 0xff) && (skb->data[1] == 0x03)) + if ((skb->data[0] == PPP_ALLSTATIONS) && (skb->data[1] == PPP_UI)) skb_pull(skb, 2); return 0; @@ -282,7 +282,6 @@ static void pppol2tp_session_sock_put(struct l2tp_session *session) static int pppol2tp_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) { - static const unsigned char ppph[2] = { 0xff, 0x03 }; struct sock *sk = sock->sk; struct sk_buff *skb; int error; @@ -312,7 +311,7 @@ static int pppol2tp_sendmsg(struct socket *sock, struct msghdr *m, error = -ENOMEM; skb = sock_wmalloc(sk, NET_SKB_PAD + sizeof(struct iphdr) + uhlen + session->hdr_len + - sizeof(ppph) + total_len, + 2 + total_len, /* 2 bytes for PPP_ALLSTATIONS & PPP_UI */ 0, GFP_KERNEL); if (!skb) goto error_put_sess_tun; @@ -325,8 +324,8 @@ static int pppol2tp_sendmsg(struct socket *sock, struct msghdr *m, skb_reserve(skb, uhlen); /* Add PPP header */ - skb->data[0] = ppph[0]; - skb->data[1] = ppph[1]; + skb->data[0] = PPP_ALLSTATIONS; + skb->data[1] = PPP_UI; skb_put(skb, 2); /* Copy user data into skb */ @@ -369,7 +368,6 @@ error: */ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) { - static const u8 ppph[2] = { 0xff, 0x03 }; struct sock *sk = (struct sock *) chan->private; struct sock *sk_tun; struct l2tp_session *session; @@ -398,14 +396,14 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) sizeof(struct iphdr) + /* IP header */ uhlen + /* UDP header (if L2TP_ENCAPTYPE_UDP) */ session->hdr_len + /* L2TP header */ - sizeof(ppph); /* PPP header */ + 2; /* 2 bytes for PPP_ALLSTATIONS & PPP_UI */ if (skb_cow_head(skb, headroom)) goto abort_put_sess_tun; /* Setup PPP header */ - __skb_push(skb, sizeof(ppph)); - skb->data[0] = ppph[0]; - skb->data[1] = ppph[1]; + __skb_push(skb, 2); + skb->data[0] = PPP_ALLSTATIONS; + skb->data[1] = PPP_UI; local_bh_disable(); l2tp_xmit_skb(session, skb, session->hdr_len); @@ -440,7 +438,7 @@ static void pppol2tp_session_close(struct l2tp_session *session) BUG_ON(session->magic != L2TP_SESSION_MAGIC); if (sock) { - inet_shutdown(sock, 2); + inet_shutdown(sock, SEND_SHUTDOWN); /* Don't let the session go away before our socket does */ l2tp_session_inc_refcount(session); } diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index ba5fc1f01e53..42a41ae405ba 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -1088,13 +1088,13 @@ static inline void drv_leave_ibss(struct ieee80211_local *local, } static inline u32 drv_get_expected_throughput(struct ieee80211_local *local, - struct ieee80211_sta *sta) + struct sta_info *sta) { u32 ret = 0; - trace_drv_get_expected_throughput(sta); - if (local->ops->get_expected_throughput) - ret = local->ops->get_expected_throughput(&local->hw, sta); + trace_drv_get_expected_throughput(&sta->sta); + if (local->ops->get_expected_throughput && sta->uploaded) + ret = local->ops->get_expected_throughput(&local->hw, &sta->sta); trace_drv_return_u32(local, ret); return ret; diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 8f9c3bde835f..fa7d37cf0351 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -326,22 +326,33 @@ static u32 
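In the l2tp_ppp.c hunks above, the magic 0xff/0x03 bytes give way to PPP_ALLSTATIONS and PPP_UI from <linux/ppp_defs.h>: the two octets are the HDLC-style Address ("all stations") and Control (Unnumbered Information) fields that prefix a PPP frame. A small illustration of the same header push; the function name is illustrative:

#include <linux/ppp_defs.h>
#include <linux/skbuff.h>

static void example_push_ppp_header(struct sk_buff *skb)
{
	u8 *p = __skb_push(skb, 2);

	p[0] = PPP_ALLSTATIONS;		/* 0xff: Address field */
	p[1] = PPP_UI;			/* 0x03: Control field */
}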
airtime_link_metric_get(struct ieee80211_local *local, u32 tx_time, estimated_retx; u64 result; - if (sta->mesh->fail_avg >= 100) - return MAX_METRIC; + /* Try to get rate based on HW/SW RC algorithm. + * Rate is returned in units of Kbps, correct this + * to comply with airtime calculation units + * Round up in case we get rate < 100Kbps + */ + rate = DIV_ROUND_UP(sta_get_expected_throughput(sta), 100); - sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate, &rinfo); - rate = cfg80211_calculate_bitrate(&rinfo); - if (WARN_ON(!rate)) - return MAX_METRIC; + if (rate) { + err = 0; + } else { + if (sta->mesh->fail_avg >= 100) + return MAX_METRIC; - err = (sta->mesh->fail_avg << ARITH_SHIFT) / 100; + sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate, &rinfo); + rate = cfg80211_calculate_bitrate(&rinfo); + if (WARN_ON(!rate)) + return MAX_METRIC; + + err = (sta->mesh->fail_avg << ARITH_SHIFT) / 100; + } /* bitrate is in units of 100 Kbps, while we need rate in units of * 1Mbps. This will be corrected on tx_time computation. */ tx_time = (device_constant + 10 * test_frame_len / rate); estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err)); - result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT) ; + result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT); return (u32)result; } diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 76b737dcc36f..19f14c907d74 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -2279,11 +2279,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER); - /* check if the driver has a SW RC implementation */ - if (ref && ref->ops->get_expected_throughput) - thr = ref->ops->get_expected_throughput(sta->rate_ctrl_priv); - else - thr = drv_get_expected_throughput(local, &sta->sta); + thr = sta_get_expected_throughput(sta); if (thr != 0) { sinfo->filled |= BIT(NL80211_STA_INFO_EXPECTED_THROUGHPUT); @@ -2291,6 +2287,25 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) } } +u32 sta_get_expected_throughput(struct sta_info *sta) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_local *local = sdata->local; + struct rate_control_ref *ref = NULL; + u32 thr = 0; + + if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) + ref = local->rate_ctrl; + + /* check if the driver has a SW RC implementation */ + if (ref && ref->ops->get_expected_throughput) + thr = ref->ops->get_expected_throughput(sta->rate_ctrl_priv); + else + thr = drv_get_expected_throughput(local, sta); + + return thr; +} + unsigned long ieee80211_sta_last_active(struct sta_info *sta) { struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta); diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 78b0ef32dddd..0556be3e3628 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -712,6 +712,8 @@ void sta_set_rate_info_tx(struct sta_info *sta, struct rate_info *rinfo); void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo); +u32 sta_get_expected_throughput(struct sta_info *sta); + void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, unsigned long exp_time); u8 sta_info_tx_streams(struct sta_info *sta); diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 502396694f47..1d0746dfea57 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -2334,7 +2334,6 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata, struct mesh_path 
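For the airtime metric change above: sta_get_expected_throughput() reports in Kbps, while the metric formula works in units of 100 Kbps, so the value is converted with DIV_ROUND_UP() to keep a very slow but non-zero link from collapsing to a rate of 0 and dividing by zero later. Worked numbers (the values and helper name are made up):

/* Illustration only:
 *   54000 Kbps -> DIV_ROUND_UP(54000, 100) == 540  (54 Mb/s)
 *      50 Kbps -> DIV_ROUND_UP(50, 100)    == 1    (never 0)
 * The old fail_avg / last-TX-rate path is used only when the expected
 * throughput is unavailable (rate == 0).
 */
#include <linux/kernel.h>

static u32 example_rate_in_100kbps_units(u32 thr_kbps)
{
	return DIV_ROUND_UP(thr_kbps, 100);
}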
__maybe_unused *mppath = NULL, *mpath = NULL; const u8 *encaps_data; int encaps_len, skip_header_bytes; - int nh_pos, h_pos; bool wme_sta = false, authorized = false; bool tdls_peer; bool multicast; @@ -2640,13 +2639,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata, encaps_len = 0; } - nh_pos = skb_network_header(skb) - skb->data; - h_pos = skb_transport_header(skb) - skb->data; - skb_pull(skb, skip_header_bytes); - nh_pos -= skip_header_bytes; - h_pos -= skip_header_bytes; - head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb); /* @@ -2672,18 +2665,12 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata, } } - if (encaps_data) { + if (encaps_data) memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len); - nh_pos += encaps_len; - h_pos += encaps_len; - } #ifdef CONFIG_MAC80211_MESH - if (meshhdrlen > 0) { + if (meshhdrlen > 0) memcpy(skb_push(skb, meshhdrlen), &mesh_hdr, meshhdrlen); - nh_pos += meshhdrlen; - h_pos += meshhdrlen; - } #endif if (ieee80211_is_data_qos(fc)) { @@ -2699,15 +2686,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata, } else memcpy(skb_push(skb, hdrlen), &hdr, hdrlen); - nh_pos += hdrlen; - h_pos += hdrlen; - - /* Update skb pointers to various headers since this modified frame - * is going to go through Linux networking code that may potentially - * need things like pointer to IP header. */ skb_reset_mac_header(skb); - skb_set_network_header(skb, nh_pos); - skb_set_transport_header(skb, h_pos); info = IEEE80211_SKB_CB(skb); memset(info, 0, sizeof(*info)); @@ -4390,9 +4369,6 @@ void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, int ac = ieee802_1d_to_ac[tid & 7]; skb_reset_mac_header(skb); - skb_reset_network_header(skb); - skb_reset_transport_header(skb); - skb_set_queue_mapping(skb, ac); skb->priority = tid; diff --git a/net/netlink/diag.c b/net/netlink/diag.c index 8dd836a8dd60..3e3e2534478a 100644 --- a/net/netlink/diag.c +++ b/net/netlink/diag.c @@ -63,43 +63,75 @@ out_nlmsg_trim: static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, int protocol, int s_num) { + struct rhashtable_iter *hti = (void *)cb->args[2]; struct netlink_table *tbl = &nl_table[protocol]; - struct rhashtable *ht = &tbl->hash; - const struct bucket_table *htbl = rht_dereference_rcu(ht->tbl, ht); struct net *net = sock_net(skb->sk); struct netlink_diag_req *req; struct netlink_sock *nlsk; struct sock *sk; - int ret = 0, num = 0, i; + int num = 2; + int ret = 0; req = nlmsg_data(cb->nlh); - for (i = 0; i < htbl->size; i++) { - struct rhash_head *pos; + if (s_num > 1) + goto mc_list; - rht_for_each_entry_rcu(nlsk, pos, htbl, i, node) { - sk = (struct sock *)nlsk; + num--; - if (!net_eq(sock_net(sk), net)) - continue; - if (num < s_num) { - num++; + if (!hti) { + hti = kmalloc(sizeof(*hti), GFP_KERNEL); + if (!hti) + return -ENOMEM; + + cb->args[2] = (long)hti; + } + + if (!s_num) + rhashtable_walk_enter(&tbl->hash, hti); + + ret = rhashtable_walk_start(hti); + if (ret == -EAGAIN) + ret = 0; + if (ret) + goto stop; + + while ((nlsk = rhashtable_walk_next(hti))) { + if (IS_ERR(nlsk)) { + ret = PTR_ERR(nlsk); + if (ret == -EAGAIN) { + ret = 0; continue; } + break; + } - if (sk_diag_fill(sk, skb, req, - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - NLM_F_MULTI, - sock_i_ino(sk)) < 0) { - ret = 1; - goto done; - } + sk = (struct sock *)nlsk; - num++; + if (!net_eq(sock_net(sk), net)) + continue; + + if (sk_diag_fill(sk, skb, req, + 
NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + NLM_F_MULTI, + sock_i_ino(sk)) < 0) { + ret = 1; + break; } } +stop: + rhashtable_walk_stop(hti); + if (ret) + goto done; + + rhashtable_walk_exit(hti); + cb->args[2] = 0; + num++; + +mc_list: + read_lock(&nl_table_lock); sk_for_each_bound(sk, &tbl->mc_list) { if (sk_hashed(sk)) continue; @@ -116,13 +148,14 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, NLM_F_MULTI, sock_i_ino(sk)) < 0) { ret = 1; - goto done; + break; } num++; } + read_unlock(&nl_table_lock); + done: cb->args[0] = num; - cb->args[1] = protocol; return ret; } @@ -131,20 +164,20 @@ static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct netlink_diag_req *req; int s_num = cb->args[0]; + int err = 0; req = nlmsg_data(cb->nlh); - rcu_read_lock(); - read_lock(&nl_table_lock); - if (req->sdiag_protocol == NDIAG_PROTO_ALL) { int i; for (i = cb->args[1]; i < MAX_LINKS; i++) { - if (__netlink_diag_dump(skb, cb, i, s_num)) + err = __netlink_diag_dump(skb, cb, i, s_num); + if (err) break; s_num = 0; } + cb->args[1] = i; } else { if (req->sdiag_protocol >= MAX_LINKS) { read_unlock(&nl_table_lock); @@ -152,13 +185,22 @@ static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) return -ENOENT; } - __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num); + err = __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num); } - read_unlock(&nl_table_lock); - rcu_read_unlock(); + return err < 0 ? err : skb->len; +} + +static int netlink_diag_dump_done(struct netlink_callback *cb) +{ + struct rhashtable_iter *hti = (void *)cb->args[2]; + + if (cb->args[0] == 1) + rhashtable_walk_exit(hti); - return skb->len; + kfree(hti); + + return 0; } static int netlink_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h) @@ -172,6 +214,7 @@ static int netlink_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h) if (h->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { .dump = netlink_diag_dump, + .done = netlink_diag_dump_done, }; return netlink_dump_start(net->diag_nlsk, skb, h, &c); } else diff --git a/net/rds/ib.h b/net/rds/ib.h index 046f7508c06b..45ac8e8e58f4 100644 --- a/net/rds/ib.h +++ b/net/rds/ib.h @@ -333,6 +333,7 @@ void rds_ib_conn_path_shutdown(struct rds_conn_path *cp); void rds_ib_state_change(struct sock *sk); int rds_ib_listen_init(void); void rds_ib_listen_stop(void); +__printf(2, 3) void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...); int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id, struct rdma_cm_event *event); diff --git a/net/rds/rds.h b/net/rds/rds.h index b2d17f0fafa8..fd0bccb2f9f9 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -688,6 +688,7 @@ void __rds_conn_error(struct rds_connection *conn, const char *, ...); #define rds_conn_error(conn, fmt...) \ __rds_conn_error(conn, KERN_WARNING "RDS: " fmt) +__printf(2, 3) void __rds_conn_path_error(struct rds_conn_path *cp, const char *, ...); #define rds_conn_path_error(cp, fmt...) 
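The netlink_diag rework above replaces the one-shot bucket scan, done entirely under nl_table_lock/RCU, with the resumable rhashtable walker, parking the iterator in cb->args[2] between dump invocations so a large table can be dumped across several netlink messages. The general walking pattern it follows, sketched with illustrative names:

/* Sketch of the resumable rhashtable walk pattern -- not from the patch. */
#include <linux/rhashtable.h>

static int example_walk_all(struct rhashtable *ht,
			    struct rhashtable_iter *iter)
{
	void *obj;
	int err;

	rhashtable_walk_enter(ht, iter);	/* pairs with _walk_exit() */

	err = rhashtable_walk_start(iter);	/* -EAGAIN is not fatal */
	if (err && err != -EAGAIN)
		goto out;

	while ((obj = rhashtable_walk_next(iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)	/* table resized */
				continue;
			err = PTR_ERR(obj);
			break;
		}
		/* process obj */
	}

	rhashtable_walk_stop(iter);		/* drops the RCU read lock */
out:
	rhashtable_walk_exit(iter);
	return err == -EAGAIN ? 0 : err;
}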
\ __rds_conn_path_error(cp, KERN_WARNING "RDS: " fmt) diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 88effadd4b16..c7cf356b42b8 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -22,6 +22,7 @@ #include <net/net_namespace.h> #include <net/sock.h> #include <net/af_rxrpc.h> +#define CREATE_TRACE_POINTS #include "ar-internal.h" MODULE_DESCRIPTION("RxRPC network protocol"); diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index ff83fb1ddd47..c761124961cc 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -255,6 +255,9 @@ enum rxrpc_conn_flag { RXRPC_CONN_HAS_IDR, /* Has a client conn ID assigned */ RXRPC_CONN_IN_SERVICE_CONNS, /* Conn is in peer->service_conns */ RXRPC_CONN_IN_CLIENT_CONNS, /* Conn is in local->client_conns */ + RXRPC_CONN_EXPOSED, /* Conn has extra ref for exposure */ + RXRPC_CONN_DONT_REUSE, /* Don't reuse this connection */ + RXRPC_CONN_COUNTED, /* Counted by rxrpc_nr_client_conns */ }; /* @@ -265,6 +268,17 @@ enum rxrpc_conn_event { }; /* + * The connection cache state. + */ +enum rxrpc_conn_cache_state { + RXRPC_CONN_CLIENT_INACTIVE, /* Conn is not yet listed */ + RXRPC_CONN_CLIENT_WAITING, /* Conn is on wait list, waiting for capacity */ + RXRPC_CONN_CLIENT_ACTIVE, /* Conn is on active list, doing calls */ + RXRPC_CONN_CLIENT_CULLED, /* Conn is culled and delisted, doing calls */ + RXRPC_CONN_CLIENT_IDLE, /* Conn is on idle list, doing mostly nothing */ +}; + +/* * The connection protocol state. */ enum rxrpc_conn_proto_state { @@ -276,6 +290,7 @@ enum rxrpc_conn_proto_state { RXRPC_CONN_REMOTELY_ABORTED, /* Conn aborted by peer */ RXRPC_CONN_LOCALLY_ABORTED, /* Conn aborted locally */ RXRPC_CONN_NETWORK_ERROR, /* Conn terminated by network error */ + RXRPC_CONN_LOCAL_ERROR, /* Conn terminated by local error */ RXRPC_CONN__NR_STATES }; @@ -288,23 +303,33 @@ struct rxrpc_connection { struct rxrpc_conn_proto proto; struct rxrpc_conn_parameters params; - spinlock_t channel_lock; + atomic_t usage; + struct rcu_head rcu; + struct list_head cache_link; + spinlock_t channel_lock; + unsigned char active_chans; /* Mask of active channels */ +#define RXRPC_ACTIVE_CHANS_MASK ((1 << RXRPC_MAXCALLS) - 1) + struct list_head waiting_calls; /* Calls waiting for channels */ struct rxrpc_channel { struct rxrpc_call __rcu *call; /* Active call */ u32 call_id; /* ID of current call */ u32 call_counter; /* Call ID counter */ u32 last_call; /* ID of last call */ - u32 last_result; /* Result of last call (0/abort) */ + u8 last_type; /* Type of last packet */ + u16 last_service_id; + union { + u32 last_seq; + u32 last_abort; + }; } channels[RXRPC_MAXCALLS]; - wait_queue_head_t channel_wq; /* queue to wait for channel to become available */ - struct rcu_head rcu; struct work_struct processor; /* connection event processor */ union { struct rb_node client_node; /* Node in local->client_conns */ struct rb_node service_node; /* Node in peer->service_conns */ }; + struct list_head proc_link; /* link in procfs list */ struct list_head link; /* link in master connection list */ struct sk_buff_head rx_queue; /* received conn-level packets */ const struct rxrpc_security *security; /* applied security module */ @@ -313,17 +338,16 @@ struct rxrpc_connection { struct rxrpc_crypt csum_iv; /* packet checksum base */ unsigned long flags; unsigned long events; - unsigned long put_time; /* Time at which last put */ + unsigned long idle_timestamp; /* Time at which last became idle */ spinlock_t state_lock; /* state-change lock */ - atomic_t usage; + 
enum rxrpc_conn_cache_state cache_state : 8; enum rxrpc_conn_proto_state state : 8; /* current state of connection */ u32 local_abort; /* local abort code */ u32 remote_abort; /* remote abort code */ int error; /* local error incurred */ int debug_id; /* debug ID for printks */ atomic_t serial; /* packet serial number counter */ - atomic_t hi_serial; /* highest serial number received */ - atomic_t avail_chans; /* number of channels available */ + unsigned int hi_serial; /* highest serial number received */ u8 size_align; /* data size alignment (for security) */ u8 header_size; /* rxrpc + security header size */ u8 security_size; /* security header size */ @@ -341,10 +365,11 @@ enum rxrpc_call_flag { RXRPC_CALL_RCVD_LAST, /* all packets received */ RXRPC_CALL_RUN_RTIMER, /* Tx resend timer started */ RXRPC_CALL_TX_SOFT_ACK, /* sent some soft ACKs */ - RXRPC_CALL_PROC_BUSY, /* the processor is busy */ RXRPC_CALL_INIT_ACCEPT, /* acceptance was initiated */ RXRPC_CALL_HAS_USERID, /* has a user ID attached */ RXRPC_CALL_EXPECT_OOS, /* expect out of sequence packets */ + RXRPC_CALL_IS_SERVICE, /* Call is service call */ + RXRPC_CALL_EXPOSED, /* The call was exposed to the world */ }; /* @@ -402,6 +427,7 @@ enum rxrpc_call_state { struct rxrpc_call { struct rcu_head rcu; struct rxrpc_connection *conn; /* connection carrying call */ + struct rxrpc_peer *peer; /* Peer record for remote address */ struct rxrpc_sock *socket; /* socket responsible */ struct timer_list lifetimer; /* lifetime remaining on call */ struct timer_list deadspan; /* reap timer for re-ACK'ing, etc */ @@ -410,13 +436,14 @@ struct rxrpc_call { struct work_struct destroyer; /* call destroyer */ struct work_struct processor; /* packet processor and ACK generator */ struct list_head link; /* link in master call list */ + struct list_head chan_wait_link; /* Link in conn->waiting_calls */ struct hlist_node error_link; /* link in error distribution list */ struct list_head accept_link; /* calls awaiting acceptance */ struct rb_node sock_node; /* node in socket call tree */ struct sk_buff_head rx_queue; /* received packets */ struct sk_buff_head rx_oos_queue; /* packets received out of sequence */ struct sk_buff *tx_pending; /* Tx socket buffer being filled */ - wait_queue_head_t tx_waitq; /* wait for Tx window space to become available */ + wait_queue_head_t waitq; /* Wait queue for channel or Tx */ __be32 crypto_buf[2]; /* Temporary packet crypto buffer */ unsigned long user_call_ID; /* user-defined call ID */ unsigned long creation_jif; /* time of call creation */ @@ -432,8 +459,10 @@ struct rxrpc_call { int error_report; /* Network error (ICMP/local transport) */ int error; /* Local error incurred */ enum rxrpc_call_state state : 8; /* current state of call */ + u16 service_id; /* service ID */ + u32 call_id; /* call ID on connection */ + u32 cid; /* connection ID plus channel index */ int debug_id; /* debug ID for printks */ - u8 channel; /* connection channel occupied by this call */ /* transmission-phase ACK management */ u8 acks_head; /* offset into window of first entry */ @@ -455,19 +484,13 @@ struct rxrpc_call { rxrpc_seq_t ackr_win_top; /* top of ACK window (rx_data_eaten is bottom) */ rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */ u8 ackr_reason; /* reason to ACK */ + u16 ackr_skew; /* skew on packet being ACK'd */ rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */ atomic_t ackr_not_idle; /* number of packets in Rx queue */ /* received packet records, 1 bit per record */ #define 
RXRPC_ACKR_WINDOW_ASZ DIV_ROUND_UP(RXRPC_MAXACKS, BITS_PER_LONG) unsigned long ackr_window[RXRPC_ACKR_WINDOW_ASZ + 1]; - - u8 in_clientflag; /* Copy of conn->in_clientflag */ - struct rxrpc_local *local; /* Local endpoint. */ - u32 call_id; /* call ID on connection */ - u32 cid; /* connection ID plus channel index */ - u32 epoch; /* epoch of this connection */ - u16 service_id; /* service ID */ }; /* @@ -484,6 +507,8 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code) write_unlock_bh(&call->state_lock); } +#include <trace/events/rxrpc.h> + /* * af_rxrpc.c */ @@ -502,8 +527,8 @@ int rxrpc_reject_call(struct rxrpc_sock *); /* * call_event.c */ -void __rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool); -void rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool); +void __rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool); +void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool); void rxrpc_process_call(struct work_struct *); /* @@ -528,15 +553,32 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *); void __rxrpc_put_call(struct rxrpc_call *); void __exit rxrpc_destroy_all_calls(void); +static inline bool rxrpc_is_service_call(const struct rxrpc_call *call) +{ + return test_bit(RXRPC_CALL_IS_SERVICE, &call->flags); +} + +static inline bool rxrpc_is_client_call(const struct rxrpc_call *call) +{ + return !rxrpc_is_service_call(call); +} + /* * conn_client.c */ +extern unsigned int rxrpc_max_client_connections; +extern unsigned int rxrpc_reap_client_connections; +extern unsigned int rxrpc_conn_idle_client_expiry; +extern unsigned int rxrpc_conn_idle_client_fast_expiry; extern struct idr rxrpc_client_conn_ids; void rxrpc_destroy_client_conn_ids(void); int rxrpc_connect_call(struct rxrpc_call *, struct rxrpc_conn_parameters *, struct sockaddr_rxrpc *, gfp_t); -void rxrpc_unpublish_client_conn(struct rxrpc_connection *); +void rxrpc_expose_client_call(struct rxrpc_call *); +void rxrpc_disconnect_client_call(struct rxrpc_call *); +void rxrpc_put_client_conn(struct rxrpc_connection *); +void __exit rxrpc_destroy_all_client_connections(void); /* * conn_event.c @@ -550,15 +592,17 @@ void rxrpc_reject_packets(struct rxrpc_local *); */ extern unsigned int rxrpc_connection_expiry; extern struct list_head rxrpc_connections; +extern struct list_head rxrpc_connection_proc_list; extern rwlock_t rxrpc_connection_lock; int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *); struct rxrpc_connection *rxrpc_alloc_connection(gfp_t); struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *, struct sk_buff *); -void __rxrpc_disconnect_call(struct rxrpc_call *); +void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *); void rxrpc_disconnect_call(struct rxrpc_call *); -void rxrpc_put_connection(struct rxrpc_connection *); +void rxrpc_kill_connection(struct rxrpc_connection *); +void __rxrpc_put_connection(struct rxrpc_connection *); void __exit rxrpc_destroy_all_connections(void); static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn) @@ -582,6 +626,21 @@ struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *con return atomic_inc_not_zero(&conn->usage) ? 
conn : NULL; } +static inline void rxrpc_put_connection(struct rxrpc_connection *conn) +{ + if (!conn) + return; + + if (rxrpc_conn_is_client(conn)) { + if (atomic_dec_and_test(&conn->usage)) + rxrpc_put_client_conn(conn); + } else { + if (atomic_dec_return(&conn->usage) == 1) + __rxrpc_put_connection(conn); + } +} + + static inline bool rxrpc_queue_conn(struct rxrpc_connection *conn) { if (!rxrpc_get_connection_maybe(conn)) @@ -697,9 +756,10 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *, struct sockaddr_rxrpc *, gfp_t); struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t); -static inline void rxrpc_get_peer(struct rxrpc_peer *peer) +static inline struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer) { atomic_inc(&peer->usage); + return peer; } static inline @@ -747,6 +807,11 @@ int rxrpc_init_server_conn_security(struct rxrpc_connection *); * skbuff.c */ void rxrpc_packet_destructor(struct sk_buff *); +void rxrpc_new_skb(struct sk_buff *); +void rxrpc_see_skb(struct sk_buff *); +void rxrpc_get_skb(struct sk_buff *); +void rxrpc_free_skb(struct sk_buff *); +void rxrpc_purge_queue(struct sk_buff_head *); /* * sysctl.c @@ -894,44 +959,6 @@ do { \ #endif /* __KDEBUGALL */ -/* - * socket buffer accounting / leak finding - */ -static inline void __rxrpc_new_skb(struct sk_buff *skb, const char *fn) -{ - //_net("new skb %p %s [%d]", skb, fn, atomic_read(&rxrpc_n_skbs)); - //atomic_inc(&rxrpc_n_skbs); -} - -#define rxrpc_new_skb(skb) __rxrpc_new_skb((skb), __func__) - -static inline void __rxrpc_kill_skb(struct sk_buff *skb, const char *fn) -{ - //_net("kill skb %p %s [%d]", skb, fn, atomic_read(&rxrpc_n_skbs)); - //atomic_dec(&rxrpc_n_skbs); -} - -#define rxrpc_kill_skb(skb) __rxrpc_kill_skb((skb), __func__) - -static inline void __rxrpc_free_skb(struct sk_buff *skb, const char *fn) -{ - if (skb) { - CHECK_SLAB_OKAY(&skb->users); - //_net("free skb %p %s [%d]", - // skb, fn, atomic_read(&rxrpc_n_skbs)); - //atomic_dec(&rxrpc_n_skbs); - kfree_skb(skb); - } -} - -#define rxrpc_free_skb(skb) __rxrpc_free_skb((skb), __func__) - -static inline void rxrpc_purge_queue(struct sk_buff_head *list) -{ - struct sk_buff *skb; - while ((skb = skb_dequeue((list))) != NULL) - rxrpc_free_skb(skb); -} #define rxrpc_get_call(CALL) \ do { \ diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index 9bae21e66d65..669ac79d3b44 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -203,6 +203,7 @@ void rxrpc_accept_incoming_calls(struct rxrpc_local *local) _net("incoming call skb %p", skb); + rxrpc_see_skb(skb); sp = rxrpc_skb(skb); /* Set up a response packet header in case we need it */ diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c index e60cf65c2232..5292bcfd8816 100644 --- a/net/rxrpc/call_event.c +++ b/net/rxrpc/call_event.c @@ -25,7 +25,7 @@ * propose an ACK be sent */ void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason, - u32 serial, bool immediate) + u16 skew, u32 serial, bool immediate) { unsigned long expiry; s8 prior = rxrpc_ack_priority[ack_reason]; @@ -44,8 +44,10 @@ void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason, /* update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial * numbers */ if (prior == rxrpc_ack_priority[call->ackr_reason]) { - if (prior <= 4) + if (prior <= 4) { + call->ackr_skew = skew; call->ackr_serial = serial; + } if (immediate) goto cancel_timer; return; @@ -103,13 +105,13 @@ cancel_timer: * propose an ACK be sent, locking the call structure */ void rxrpc_propose_ACK(struct rxrpc_call 
*call, u8 ack_reason, - u32 serial, bool immediate) + u16 skew, u32 serial, bool immediate) { s8 prior = rxrpc_ack_priority[ack_reason]; if (prior > rxrpc_ack_priority[call->ackr_reason]) { spin_lock_bh(&call->lock); - __rxrpc_propose_ACK(call, ack_reason, serial, immediate); + __rxrpc_propose_ACK(call, ack_reason, skew, serial, immediate); spin_unlock_bh(&call->lock); } } @@ -191,6 +193,8 @@ static void rxrpc_resend(struct rxrpc_call *call) stop = true; sp->resend_at = jiffies + 3; } else { + if (rxrpc_is_client_call(call)) + rxrpc_expose_client_call(call); sp->resend_at = jiffies + rxrpc_resend_timeout; } @@ -376,7 +380,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard) call->acks_hard++; } - wake_up(&call->tx_waitq); + wake_up(&call->waitq); } /* @@ -407,6 +411,7 @@ static int rxrpc_drain_rx_oos_queue(struct rxrpc_call *call) skb = skb_dequeue(&call->rx_oos_queue); if (skb) { + rxrpc_see_skb(skb); sp = rxrpc_skb(skb); _debug("drain OOS packet %d [%d]", @@ -427,6 +432,7 @@ static int rxrpc_drain_rx_oos_queue(struct rxrpc_call *call) /* find out what the next packet is */ skb = skb_peek(&call->rx_oos_queue); + rxrpc_see_skb(skb); if (skb) call->rx_first_oos = rxrpc_skb(skb)->hdr.seq; else @@ -576,6 +582,7 @@ process_further: if (!skb) return -EAGAIN; + rxrpc_see_skb(skb); _net("deferred skb %p", skb); sp = rxrpc_skb(skb); @@ -625,7 +632,7 @@ process_further: if (ack.reason == RXRPC_ACK_PING) { _proto("Rx ACK %%%u PING Request", latest); rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE, - sp->hdr.serial, true); + skb->priority, sp->hdr.serial, true); } /* discard any out-of-order or duplicate ACKs */ @@ -832,11 +839,6 @@ void rxrpc_process_call(struct work_struct *work) call->debug_id, rxrpc_call_states[call->state], call->events, (jiffies - call->creation_jif) / (HZ / 10)); - if (test_and_set_bit(RXRPC_CALL_PROC_BUSY, &call->flags)) { - _debug("XXXXXXXXXXXXX RUNNING ON MULTIPLE CPUS XXXXXXXXXXXXX"); - return; - } - if (!call->conn) goto skip_msg_init; @@ -1155,8 +1157,7 @@ skip_msg_init: goto maybe_reschedule; send_ACK_with_skew: - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) - - ntohl(ack.serial)); + ack.maxSkew = htons(call->ackr_skew); send_ACK: mtu = call->conn->params.peer->if_mtu; mtu -= call->conn->params.peer->hdrsize; @@ -1246,7 +1247,8 @@ send_message_2: case RXRPC_CALL_SERVER_ACK_REQUEST: _debug("start ACK timer"); rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, - call->ackr_serial, false); + call->ackr_skew, call->ackr_serial, + false); default: break; } @@ -1281,7 +1283,6 @@ maybe_reschedule: } error: - clear_bit(RXRPC_CALL_PROC_BUSY, &call->flags); kfree(acks); /* because we don't want two CPUs both processing the work item for one diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index ae057e0740f3..e7cbcc4a87cf 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -127,10 +127,11 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp) INIT_WORK(&call->destroyer, &rxrpc_destroy_call); INIT_WORK(&call->processor, &rxrpc_process_call); INIT_LIST_HEAD(&call->link); + INIT_LIST_HEAD(&call->chan_wait_link); INIT_LIST_HEAD(&call->accept_link); skb_queue_head_init(&call->rx_queue); skb_queue_head_init(&call->rx_oos_queue); - init_waitqueue_head(&call->tx_waitq); + init_waitqueue_head(&call->waitq); spin_lock_init(&call->lock); rwlock_init(&call->state_lock); atomic_set(&call->usage, 1); @@ -167,10 +168,7 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx, sock_hold(&rx->sk); call->socket = rx; 
call->rx_data_post = 1; - - call->local = rx->local; call->service_id = srx->srx_service; - call->in_clientflag = 0; _leave(" = %p", call); return call; @@ -318,11 +316,12 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx, chan = sp->hdr.cid & RXRPC_CHANNELMASK; candidate->socket = rx; candidate->conn = conn; + candidate->peer = conn->params.peer; candidate->cid = sp->hdr.cid; candidate->call_id = sp->hdr.callNumber; - candidate->channel = chan; candidate->rx_data_post = 0; candidate->state = RXRPC_CALL_SERVER_ACCEPTING; + candidate->flags |= (1 << RXRPC_CALL_IS_SERVICE); if (conn->security_ix > 0) candidate->state = RXRPC_CALL_SERVER_SECURING; @@ -332,7 +331,7 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx, call = rcu_dereference_protected(conn->channels[chan].call, lockdep_is_held(&conn->channel_lock)); - _debug("channel[%u] is %p", candidate->channel, call); + _debug("channel[%u] is %p", candidate->cid & RXRPC_CHANNELMASK, call); if (call && call->call_id == sp->hdr.callNumber) { /* already set; must've been a duplicate packet */ _debug("extant call [%d]", call->state); @@ -360,7 +359,7 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx, call->debug_id, rxrpc_call_states[call->state]); if (call->state >= RXRPC_CALL_COMPLETE) { - __rxrpc_disconnect_call(call); + __rxrpc_disconnect_call(conn, call); } else { spin_unlock(&conn->channel_lock); kmem_cache_free(rxrpc_call_jar, candidate); @@ -387,6 +386,7 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx, rcu_assign_pointer(conn->channels[chan].call, call); sock_hold(&rx->sk); rxrpc_get_connection(conn); + rxrpc_get_peer(call->peer); spin_unlock(&conn->channel_lock); spin_lock(&conn->params.peer->lock); @@ -397,10 +397,7 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx, list_add_tail(&call->link, &rxrpc_calls); write_unlock_bh(&rxrpc_call_lock); - call->local = conn->params.local; - call->epoch = conn->proto.epoch; call->service_id = conn->params.service_id; - call->in_clientflag = RXRPC_CLIENT_INITIATED; _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id); @@ -569,12 +566,6 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx) read_lock_bh(&rx->call_lock); - /* mark all the calls as no longer wanting incoming packets */ - for (p = rb_first(&rx->calls); p; p = rb_next(p)) { - call = rb_entry(p, struct rxrpc_call, sock_node); - rxrpc_mark_call_released(call); - } - /* kill the not-yet-accepted incoming calls */ list_for_each_entry(call, &rx->secureq, accept_link) { rxrpc_mark_call_released(call); @@ -584,6 +575,12 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx) rxrpc_mark_call_released(call); } + /* mark all the calls as no longer wanting incoming packets */ + for (p = rb_first(&rx->calls); p; p = rb_next(p)) { + call = rb_entry(p, struct rxrpc_call, sock_node); + rxrpc_mark_call_released(call); + } + read_unlock_bh(&rx->call_lock); _leave(""); } @@ -616,6 +613,7 @@ static void rxrpc_rcu_destroy_call(struct rcu_head *rcu) struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu); rxrpc_purge_queue(&call->rx_queue); + rxrpc_put_peer(call->peer); kmem_cache_free(rxrpc_call_jar, call); } @@ -682,8 +680,8 @@ static void rxrpc_destroy_call(struct work_struct *work) struct rxrpc_call *call = container_of(work, struct rxrpc_call, destroyer); - _enter("%p{%d,%d,%p}", - call, atomic_read(&call->usage), call->channel, call->conn); + _enter("%p{%d,%x,%p}", + call, atomic_read(&call->usage), call->cid, call->conn); 
ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD); diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c index 9e91f27b0d0f..349402b08e5a 100644 --- a/net/rxrpc/conn_client.c +++ b/net/rxrpc/conn_client.c @@ -7,6 +7,68 @@ * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. + * + * + * Client connections need to be cached for a little while after they've made a + * call so as to handle retransmitted DATA packets in case the server didn't + * receive the final ACK or terminating ABORT we sent it. + * + * Client connections can be in one of a number of cache states: + * + * (1) INACTIVE - The connection is not held in any list and may not have been + * exposed to the world. If it has been previously exposed, it was + * discarded from the idle list after expiring. + * + * (2) WAITING - The connection is waiting for the number of client conns to + * drop below the maximum capacity. Calls may be in progress upon it from + * when it was active and got culled. + * + * The connection is on the rxrpc_waiting_client_conns list which is kept + * in to-be-granted order. Culled conns with waiters go to the back of + * the queue just like new conns. + * + * (3) ACTIVE - The connection has at least one call in progress upon it, it + * may freely grant available channels to new calls and calls may be + * waiting on it for channels to become available. + * + * The connection is on the rxrpc_active_client_conns list which is kept + * in activation order for culling purposes. + * + * rxrpc_nr_active_client_conns is held incremented also. + * + * (4) CULLED - The connection got summarily culled to try and free up + * capacity. Calls currently in progress on the connection are allowed to + * continue, but new calls will have to wait. There can be no waiters in + * this state - the conn would have to go to the WAITING state instead. + * + * (5) IDLE - The connection has no calls in progress upon it and must have + * been exposed to the world (ie. the EXPOSED flag must be set). When it + * expires, the EXPOSED flag is cleared and the connection transitions to + * the INACTIVE state. + * + * The connection is on the rxrpc_idle_client_conns list which is kept in + * order of how soon they'll expire. + * + * There are flags of relevance to the cache: + * + * (1) EXPOSED - The connection ID got exposed to the world. If this flag is + * set, an extra ref is added to the connection preventing it from being + * reaped when it has no calls outstanding. This flag is cleared and the + * ref dropped when a conn is discarded from the idle list. + * + * This allows us to move terminal call state retransmission to the + * connection and to discard the call immediately we think it is done + * with. It also give us a chance to reuse the connection. + * + * (2) DONT_REUSE - The connection should be discarded as soon as possible and + * should not be reused. This is set when an exclusive connection is used + * or a call ID counter overflows. + * + * The caching state may only be changed if the cache lock is held. + * + * There are two idle client connection expiry durations. If the total number + * of connections is below the reap threshold, we use the normal duration; if + * it's above, we use the fast duration. 
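The cache states and flags documented above drive two simple decisions: how long an idle, exposed connection may linger, and where a connection goes once its last call completes. The stand-alone C sketch below restates just those two decisions; the helper names are invented, and the constants mirror the defaults declared a little further down in this file (1000-connection cap, 900-connection reap threshold, 2-minute normal and 2-second fast idle expiry).

#include <stdbool.h>
#include <stdio.h>

enum cache_state { INACTIVE, WAITING, ACTIVE, CULLED, IDLE };

/* Defaults taken from the patch: cap of 1000 client conns, reap threshold
 * of 900, 2 min normal idle expiry, 2 s fast idle expiry. */
#define MAX_CLIENT_CONNS	1000
#define REAP_THRESHOLD		900
#define IDLE_EXPIRY_SECS	120
#define IDLE_FAST_EXPIRY_SECS	2

/* Which expiry applies to the conn at the head of the idle list? */
static int idle_expiry_secs(unsigned int nr_client_conns)
{
	return nr_client_conns > REAP_THRESHOLD ?
		IDLE_FAST_EXPIRY_SECS : IDLE_EXPIRY_SECS;
}

/* Where does a connection go when its last call completes?  Exposed conns
 * idle for a while (reuse, terminal retransmission); unexposed ones drop
 * straight back to INACTIVE. */
static enum cache_state state_after_last_call(bool exposed)
{
	return exposed ? IDLE : INACTIVE;
}

int main(void)
{
	printf("950 conns -> idle expiry %ds\n", idle_expiry_secs(950));
	printf("exposed conn parks in state %d (IDLE)\n",
	       state_after_last_call(true));
	return 0;
}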
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -16,22 +78,37 @@ #include <linux/timer.h> #include "ar-internal.h" +__read_mostly unsigned int rxrpc_max_client_connections = 1000; +__read_mostly unsigned int rxrpc_reap_client_connections = 900; +__read_mostly unsigned int rxrpc_conn_idle_client_expiry = 2 * 60 * HZ; +__read_mostly unsigned int rxrpc_conn_idle_client_fast_expiry = 2 * HZ; + +static unsigned int rxrpc_nr_client_conns; +static unsigned int rxrpc_nr_active_client_conns; +static __read_mostly bool rxrpc_kill_all_client_conns; + +static DEFINE_SPINLOCK(rxrpc_client_conn_cache_lock); +static DEFINE_SPINLOCK(rxrpc_client_conn_discard_mutex); +static LIST_HEAD(rxrpc_waiting_client_conns); +static LIST_HEAD(rxrpc_active_client_conns); +static LIST_HEAD(rxrpc_idle_client_conns); + /* * We use machine-unique IDs for our client connections. */ DEFINE_IDR(rxrpc_client_conn_ids); static DEFINE_SPINLOCK(rxrpc_conn_id_lock); +static void rxrpc_cull_active_client_conns(void); +static void rxrpc_discard_expired_client_conns(struct work_struct *); + +static DECLARE_DELAYED_WORK(rxrpc_client_conn_reap, + rxrpc_discard_expired_client_conns); + /* * Get a connection ID and epoch for a client connection from the global pool. * The connection struct pointer is then recorded in the idr radix tree. The * epoch is changed if this wraps. - * - * TODO: The IDR tree gets very expensive on memory if the connection IDs are - * widely scattered throughout the number space, so we shall need to retire - * connections that have, say, an ID more than four times the maximum number of - * client conns away from the current allocation point to try and keep the IDs - * concentrated. We will also need to retire connections from an old epoch. */ static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn, gfp_t gfp) @@ -114,8 +191,7 @@ void rxrpc_destroy_client_conn_ids(void) } /* - * Allocate a client connection. The caller must take care to clear any - * padding bytes in *cp. + * Allocate a client connection. */ static struct rxrpc_connection * rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp) @@ -131,6 +207,10 @@ rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp) return ERR_PTR(-ENOMEM); } + atomic_set(&conn->usage, 1); + if (conn->params.exclusive) + __set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags); + conn->params = *cp; conn->out_clientflag = RXRPC_CLIENT_INITIATED; conn->state = RXRPC_CONN_CLIENT; @@ -148,7 +228,7 @@ rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp) goto error_2; write_lock(&rxrpc_connection_lock); - list_add_tail(&conn->link, &rxrpc_connections); + list_add_tail(&conn->proc_link, &rxrpc_connection_proc_list); write_unlock(&rxrpc_connection_lock); /* We steal the caller's peer ref. */ @@ -170,32 +250,68 @@ error_0: } /* - * find a connection for a call - * - called in process context with IRQs enabled + * Determine if a connection may be reused. 
*/ -int rxrpc_connect_call(struct rxrpc_call *call, - struct rxrpc_conn_parameters *cp, - struct sockaddr_rxrpc *srx, - gfp_t gfp) +static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn) +{ + int id_cursor, id, distance, limit; + + if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags)) + goto dont_reuse; + + if (conn->proto.epoch != rxrpc_epoch) + goto mark_dont_reuse; + + /* The IDR tree gets very expensive on memory if the connection IDs are + * widely scattered throughout the number space, so we shall want to + * kill off connections that, say, have an ID more than about four + * times the maximum number of client conns away from the current + * allocation point to try and keep the IDs concentrated. + */ + id_cursor = READ_ONCE(rxrpc_client_conn_ids.cur); + id = conn->proto.cid >> RXRPC_CIDSHIFT; + distance = id - id_cursor; + if (distance < 0) + distance = -distance; + limit = round_up(rxrpc_max_client_connections, IDR_SIZE) * 4; + if (distance > limit) + goto mark_dont_reuse; + + return true; + +mark_dont_reuse: + set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags); +dont_reuse: + return false; +} + +/* + * Create or find a client connection to use for a call. + * + * If we return with a connection, the call will be on its waiting list. It's + * left to the caller to assign a channel and wake up the call. + */ +static int rxrpc_get_client_conn(struct rxrpc_call *call, + struct rxrpc_conn_parameters *cp, + struct sockaddr_rxrpc *srx, + gfp_t gfp) { struct rxrpc_connection *conn, *candidate = NULL; struct rxrpc_local *local = cp->local; struct rb_node *p, **pp, *parent; long diff; - int chan; - - DECLARE_WAITQUEUE(myself, current); + int ret = -ENOMEM; _enter("{%d,%lx},", call->debug_id, call->user_call_ID); cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp); if (!cp->peer) - return -ENOMEM; + goto error; + /* If the connection is not meant to be exclusive, search the available + * connections to see if the connection we want to use already exists. + */ if (!cp->exclusive) { - /* Search for a existing client connection unless this is going - * to be a connection that's used exclusively for a single call. - */ _debug("search 1"); spin_lock(&local->client_conns_lock); p = local->client_conns.rb_node; @@ -206,39 +322,55 @@ int rxrpc_connect_call(struct rxrpc_call *call, diff = (cmp(peer) ?: cmp(key) ?: cmp(security_level)); - if (diff < 0) +#undef cmp + if (diff < 0) { p = p->rb_left; - else if (diff > 0) + } else if (diff > 0) { p = p->rb_right; - else - goto found_extant_conn; + } else { + if (rxrpc_may_reuse_conn(conn) && + rxrpc_get_connection_maybe(conn)) + goto found_extant_conn; + /* The connection needs replacing. It's better + * to effect that when we have something to + * replace it with so that we don't have to + * rebalance the tree twice. + */ + break; + } } spin_unlock(&local->client_conns_lock); } - /* We didn't find a connection or we want an exclusive one. */ - _debug("get new conn"); + /* There wasn't a connection yet or we need an exclusive connection. + * We need to create a candidate and then potentially redo the search + * in case we're racing with another thread also trying to connect on a + * shareable connection. + */ + _debug("new conn"); candidate = rxrpc_alloc_client_connection(cp, gfp); - if (!candidate) { - _leave(" = -ENOMEM"); - return -ENOMEM; + if (IS_ERR(candidate)) { + ret = PTR_ERR(candidate); + goto error_peer; } + /* Add the call to the new connection's waiting list in case we're + * going to have to wait for the connection to come live. 
It's our + * connection, so we want first dibs on the channel slots. We would + * normally have to take channel_lock but we do this before anyone else + * can see the connection. + */ + list_add_tail(&call->chan_wait_link, &candidate->waiting_calls); + if (cp->exclusive) { - /* Assign the call on an exclusive connection to channel 0 and - * don't add the connection to the endpoint's shareable conn - * lookup tree. - */ - _debug("exclusive chan 0"); - conn = candidate; - atomic_set(&conn->avail_chans, RXRPC_MAXCALLS - 1); - spin_lock(&conn->channel_lock); - chan = 0; - goto found_channel; + call->conn = candidate; + _leave(" = 0 [exclusive %d]", candidate->debug_id); + return 0; } - /* We need to redo the search before attempting to add a new connection - * lest we race with someone else adding a conflicting instance. + /* Publish the new connection for userspace to find. We need to redo + * the search before doing this lest we race with someone else adding a + * conflicting instance. */ _debug("search 2"); spin_lock(&local->client_conns_lock); @@ -249,124 +381,672 @@ int rxrpc_connect_call(struct rxrpc_call *call, parent = *pp; conn = rb_entry(parent, struct rxrpc_connection, client_node); +#define cmp(X) ((long)conn->params.X - (long)candidate->params.X) diff = (cmp(peer) ?: cmp(key) ?: cmp(security_level)); - if (diff < 0) +#undef cmp + if (diff < 0) { pp = &(*pp)->rb_left; - else if (diff > 0) + } else if (diff > 0) { pp = &(*pp)->rb_right; - else - goto found_extant_conn; + } else { + if (rxrpc_may_reuse_conn(conn) && + rxrpc_get_connection_maybe(conn)) + goto found_extant_conn; + /* The old connection is from an outdated epoch. */ + _debug("replace conn"); + clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags); + rb_replace_node(&conn->client_node, + &candidate->client_node, + &local->client_conns); + goto candidate_published; + } } - /* The second search also failed; simply add the new connection with - * the new call in channel 0. Note that we need to take the channel - * lock before dropping the client conn lock. - */ _debug("new conn"); - set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags); rb_link_node(&candidate->client_node, parent, pp); rb_insert_color(&candidate->client_node, &local->client_conns); -attached: - conn = candidate; - candidate = NULL; - atomic_set(&conn->avail_chans, RXRPC_MAXCALLS - 1); - spin_lock(&conn->channel_lock); +candidate_published: + set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags); + call->conn = candidate; spin_unlock(&local->client_conns_lock); - chan = 0; + _leave(" = 0 [new %d]", candidate->debug_id); + return 0; -found_channel: - _debug("found chan"); - call->conn = conn; - call->channel = chan; - call->epoch = conn->proto.epoch; - call->cid = conn->proto.cid | chan; - call->call_id = ++conn->channels[chan].call_counter; - conn->channels[chan].call_id = call->call_id; - rcu_assign_pointer(conn->channels[chan].call, call); + /* We come here if we found a suitable connection already in existence. + * Discard any candidate we may have allocated, and try to get a + * channel on this one. 
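The reuse test in rxrpc_may_reuse_conn() above keeps the client-connection IDR compact by refusing to reuse a connection whose ID has drifted too far from the current allocation cursor. Below is a stand-alone restatement of that arithmetic; IDR_SIZE is assumed to be 256 purely for the worked example, and the helper names are invented.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel values used by
 * rxrpc_may_reuse_conn(); IDR_SIZE is assumed to be 256 here. */
#define MAX_CLIENT_CONNS	1000
#define IDR_SIZE		256

static unsigned int round_up_to(unsigned int n, unsigned int step)
{
	return (n + step - 1) / step * step;
}

/* Reuse only if the conn's ID is still near the allocation cursor, so the
 * IDR tree stays concentrated around the point where IDs are handed out. */
static bool id_close_enough(int id, int id_cursor)
{
	int distance = id - id_cursor;
	int limit = round_up_to(MAX_CLIENT_CONNS, IDR_SIZE) * 4;	/* 4096 */

	if (distance < 0)
		distance = -distance;
	return distance <= limit;
}

int main(void)
{
	printf("id 3000 vs cursor 10000: reuse=%d\n", id_close_enough(3000, 10000));
	printf("id 9000 vs cursor 10000: reuse=%d\n", id_close_enough(9000, 10000));
	return 0;
}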
+ */ +found_extant_conn: + _debug("found conn"); + spin_unlock(&local->client_conns_lock); - _net("CONNECT call %d on conn %d", call->debug_id, conn->debug_id); + rxrpc_put_connection(candidate); + candidate = NULL; + spin_lock(&conn->channel_lock); + call->conn = conn; + list_add(&call->chan_wait_link, &conn->waiting_calls); spin_unlock(&conn->channel_lock); + _leave(" = 0 [extant %d]", conn->debug_id); + return 0; + +error_peer: rxrpc_put_peer(cp->peer); cp->peer = NULL; - _leave(" = %p {u=%d}", conn, atomic_read(&conn->usage)); - return 0; +error: + _leave(" = %d", ret); + return ret; +} - /* We found a potentially suitable connection already in existence. If - * we can reuse it (ie. its usage count hasn't been reduced to 0 by the - * reaper), discard any candidate we may have allocated, and try to get - * a channel on this one, otherwise we have to replace it. - */ -found_extant_conn: - _debug("found conn"); - if (!rxrpc_get_connection_maybe(conn)) { - set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags); - rb_replace_node(&conn->client_node, - &candidate->client_node, - &local->client_conns); - clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags); - goto attached; +/* + * Activate a connection. + */ +static void rxrpc_activate_conn(struct rxrpc_connection *conn) +{ + conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE; + rxrpc_nr_active_client_conns++; + list_move_tail(&conn->cache_link, &rxrpc_active_client_conns); +} + +/* + * Attempt to animate a connection for a new call. + * + * If it's not exclusive, the connection is in the endpoint tree, and we're in + * the conn's list of those waiting to grab a channel. There is, however, a + * limit on the number of live connections allowed at any one time, so we may + * have to wait for capacity to become available. + * + * Note that a connection on the waiting queue might *also* have active + * channels if it has been culled to make space and then re-requested by a new + * call. + */ +static void rxrpc_animate_client_conn(struct rxrpc_connection *conn) +{ + unsigned int nr_conns; + + _enter("%d,%d", conn->debug_id, conn->cache_state); + + if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE) + goto out; + + spin_lock(&rxrpc_client_conn_cache_lock); + + nr_conns = rxrpc_nr_client_conns; + if (!test_and_set_bit(RXRPC_CONN_COUNTED, &conn->flags)) + rxrpc_nr_client_conns = nr_conns + 1; + + switch (conn->cache_state) { + case RXRPC_CONN_CLIENT_ACTIVE: + case RXRPC_CONN_CLIENT_WAITING: + break; + + case RXRPC_CONN_CLIENT_INACTIVE: + case RXRPC_CONN_CLIENT_CULLED: + case RXRPC_CONN_CLIENT_IDLE: + if (nr_conns >= rxrpc_max_client_connections) + goto wait_for_capacity; + goto activate_conn; + + default: + BUG(); } - spin_unlock(&local->client_conns_lock); +out_unlock: + spin_unlock(&rxrpc_client_conn_cache_lock); +out: + _leave(" [%d]", conn->cache_state); + return; - rxrpc_put_connection(candidate); +activate_conn: + _debug("activate"); + rxrpc_activate_conn(conn); + goto out_unlock; + +wait_for_capacity: + _debug("wait"); + conn->cache_state = RXRPC_CONN_CLIENT_WAITING; + list_move_tail(&conn->cache_link, &rxrpc_waiting_client_conns); + goto out_unlock; +} + +/* + * Deactivate a channel. + */ +static void rxrpc_deactivate_one_channel(struct rxrpc_connection *conn, + unsigned int channel) +{ + struct rxrpc_channel *chan = &conn->channels[channel]; + + rcu_assign_pointer(chan->call, NULL); + conn->active_chans &= ~(1 << channel); +} + +/* + * Assign a channel to the call at the front of the queue and wake the call up. 
+ * We don't increment the callNumber counter until this number has been exposed + * to the world. + */ +static void rxrpc_activate_one_channel(struct rxrpc_connection *conn, + unsigned int channel) +{ + struct rxrpc_channel *chan = &conn->channels[channel]; + struct rxrpc_call *call = list_entry(conn->waiting_calls.next, + struct rxrpc_call, chan_wait_link); + u32 call_id = chan->call_counter + 1; + + list_del_init(&call->chan_wait_link); + conn->active_chans |= 1 << channel; + call->peer = rxrpc_get_peer(conn->params.peer); + call->cid = conn->proto.cid | channel; + call->call_id = call_id; + + _net("CONNECT call %08x:%08x as call %d on conn %d", + call->cid, call->call_id, call->debug_id, conn->debug_id); + + /* Paired with the read barrier in rxrpc_wait_for_channel(). This + * orders cid and epoch in the connection wrt to call_id without the + * need to take the channel_lock. + * + * We provisionally assign a callNumber at this point, but we don't + * confirm it until the call is about to be exposed. + * + * TODO: Pair with a barrier in the data_ready handler when that looks + * at the call ID through a connection channel. + */ + smp_wmb(); + chan->call_id = call_id; + rcu_assign_pointer(chan->call, call); + wake_up(&call->waitq); +} + +/* + * Assign channels and callNumbers to waiting calls. + */ +static void rxrpc_activate_channels(struct rxrpc_connection *conn) +{ + unsigned char mask; + + _enter("%d", conn->debug_id); + + if (conn->cache_state != RXRPC_CONN_CLIENT_ACTIVE || + conn->active_chans == RXRPC_ACTIVE_CHANS_MASK) + return; + + spin_lock(&conn->channel_lock); + + while (!list_empty(&conn->waiting_calls) && + (mask = ~conn->active_chans, + mask &= RXRPC_ACTIVE_CHANS_MASK, + mask != 0)) + rxrpc_activate_one_channel(conn, __ffs(mask)); + + spin_unlock(&conn->channel_lock); + _leave(""); +} + +/* + * Wait for a callNumber and a channel to be granted to a call. + */ +static int rxrpc_wait_for_channel(struct rxrpc_call *call, gfp_t gfp) +{ + int ret = 0; + + _enter("%d", call->debug_id); + + if (!call->call_id) { + DECLARE_WAITQUEUE(myself, current); - if (!atomic_add_unless(&conn->avail_chans, -1, 0)) { if (!gfpflags_allow_blocking(gfp)) { - rxrpc_put_connection(conn); - _leave(" = -EAGAIN"); - return -EAGAIN; + ret = -EAGAIN; + goto out; } - add_wait_queue(&conn->channel_wq, &myself); + add_wait_queue_exclusive(&call->waitq, &myself); for (;;) { set_current_state(TASK_INTERRUPTIBLE); - if (atomic_add_unless(&conn->avail_chans, -1, 0)) + if (call->call_id) break; - if (signal_pending(current)) - goto interrupted; + if (signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } schedule(); } - remove_wait_queue(&conn->channel_wq, &myself); + remove_wait_queue(&call->waitq, &myself); __set_current_state(TASK_RUNNING); } - /* The connection allegedly now has a free channel and we can now - * attach the call to it. - */ + /* Paired with the write barrier in rxrpc_activate_one_channel(). 
*/ + smp_rmb(); + +out: + _leave(" = %d", ret); + return ret; +} + +/* + * find a connection for a call + * - called in process context with IRQs enabled + */ +int rxrpc_connect_call(struct rxrpc_call *call, + struct rxrpc_conn_parameters *cp, + struct sockaddr_rxrpc *srx, + gfp_t gfp) +{ + int ret; + + _enter("{%d,%lx},", call->debug_id, call->user_call_ID); + + rxrpc_discard_expired_client_conns(NULL); + rxrpc_cull_active_client_conns(); + + ret = rxrpc_get_client_conn(call, cp, srx, gfp); + if (ret < 0) + return ret; + + rxrpc_animate_client_conn(call->conn); + rxrpc_activate_channels(call->conn); + + ret = rxrpc_wait_for_channel(call, gfp); + if (ret < 0) + rxrpc_disconnect_client_call(call); + + _leave(" = %d", ret); + return ret; +} + +/* + * Note that a connection is about to be exposed to the world. Once it is + * exposed, we maintain an extra ref on it that stops it from being summarily + * discarded before it's (a) had a chance to deal with retransmission and (b) + * had a chance at re-use (the per-connection security negotiation is + * expensive). + */ +static void rxrpc_expose_client_conn(struct rxrpc_connection *conn) +{ + if (!test_and_set_bit(RXRPC_CONN_EXPOSED, &conn->flags)) + rxrpc_get_connection(conn); +} + +/* + * Note that a call, and thus a connection, is about to be exposed to the + * world. + */ +void rxrpc_expose_client_call(struct rxrpc_call *call) +{ + struct rxrpc_connection *conn = call->conn; + struct rxrpc_channel *chan = + &conn->channels[call->cid & RXRPC_CHANNELMASK]; + + if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) { + /* Mark the call ID as being used. If the callNumber counter + * exceeds ~2 billion, we kill the connection after its + * outstanding calls have finished so that the counter doesn't + * wrap. + */ + chan->call_counter++; + if (chan->call_counter >= INT_MAX) + set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags); + rxrpc_expose_client_conn(conn); + } +} + +/* + * Disconnect a client call. + */ +void rxrpc_disconnect_client_call(struct rxrpc_call *call) +{ + unsigned int channel = call->cid & RXRPC_CHANNELMASK; + struct rxrpc_connection *conn = call->conn; + struct rxrpc_channel *chan = &conn->channels[channel]; + + call->conn = NULL; + spin_lock(&conn->channel_lock); - for (chan = 0; chan < RXRPC_MAXCALLS; chan++) - if (!conn->channels[chan].call) - goto found_channel; - BUG(); + /* Calls that have never actually been assigned a channel can simply be + * discarded. If the conn didn't get used either, it will follow + * immediately unless someone else grabs it in the meantime. + */ + if (!list_empty(&call->chan_wait_link)) { + _debug("call is waiting"); + ASSERTCMP(call->call_id, ==, 0); + ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags)); + list_del_init(&call->chan_wait_link); + + /* We must deactivate or idle the connection if it's now + * waiting for nothing. + */ + spin_lock(&rxrpc_client_conn_cache_lock); + if (conn->cache_state == RXRPC_CONN_CLIENT_WAITING && + list_empty(&conn->waiting_calls) && + !conn->active_chans) + goto idle_connection; + goto out; + } + + ASSERTCMP(rcu_access_pointer(chan->call), ==, call); + ASSERTCMP(atomic_read(&conn->usage), >=, 2); + + /* If a client call was exposed to the world, we save the result for + * retransmission. + * + * We use a barrier here so that the call number and abort code can be + * read without needing to take a lock. + * + * TODO: Make the incoming packet handler check this and handle + * terminal retransmission without requiring access to the call. 
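rxrpc_activate_channels() above picks channels by taking the complement of the active-channel bitmap, masking it down to the four channel bits and using find-first-set. The sketch below repeats that arithmetic in plain user-space C (ffs() here is 1-based where the kernel's __ffs() is 0-based); the function name is invented.

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define MAXCALLS		4
#define ACTIVE_CHANS_MASK	((1 << MAXCALLS) - 1)	/* 0x0f */

/* Return the lowest free channel (0..3), or -1 if all four are busy. */
static int lowest_free_channel(unsigned char active_chans)
{
	unsigned int mask = ~active_chans & ACTIVE_CHANS_MASK;

	return mask ? ffs(mask) - 1 : -1;
}

int main(void)
{
	printf("%d\n", lowest_free_channel(0x05));	/* channels 0,2 busy -> 1 */
	printf("%d\n", lowest_free_channel(0x0f));	/* all busy -> -1 */
	return 0;
}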
+ */ + if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) { + _debug("exposed %u,%u", call->call_id, call->local_abort); + __rxrpc_disconnect_call(conn, call); + } + + /* See if we can pass the channel directly to another call. */ + if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE && + !list_empty(&conn->waiting_calls)) { + _debug("pass chan"); + rxrpc_activate_one_channel(conn, channel); + goto out_2; + } + + /* Things are more complex and we need the cache lock. We might be + * able to simply idle the conn or it might now be lurking on the wait + * list. It might even get moved back to the active list whilst we're + * waiting for the lock. + */ + spin_lock(&rxrpc_client_conn_cache_lock); + + switch (conn->cache_state) { + case RXRPC_CONN_CLIENT_ACTIVE: + if (list_empty(&conn->waiting_calls)) { + rxrpc_deactivate_one_channel(conn, channel); + if (!conn->active_chans) { + rxrpc_nr_active_client_conns--; + goto idle_connection; + } + goto out; + } + + _debug("pass chan 2"); + rxrpc_activate_one_channel(conn, channel); + goto out; + + case RXRPC_CONN_CLIENT_CULLED: + rxrpc_deactivate_one_channel(conn, channel); + ASSERT(list_empty(&conn->waiting_calls)); + if (!conn->active_chans) + goto idle_connection; + goto out; + + case RXRPC_CONN_CLIENT_WAITING: + rxrpc_deactivate_one_channel(conn, channel); + goto out; + + default: + BUG(); + } -interrupted: - remove_wait_queue(&conn->channel_wq, &myself); - __set_current_state(TASK_RUNNING); +out: + spin_unlock(&rxrpc_client_conn_cache_lock); +out_2: + spin_unlock(&conn->channel_lock); rxrpc_put_connection(conn); - rxrpc_put_peer(cp->peer); - cp->peer = NULL; - _leave(" = -ERESTARTSYS"); - return -ERESTARTSYS; + _leave(""); + return; + +idle_connection: + /* As no channels remain active, the connection gets deactivated + * immediately or moved to the idle list for a short while. + */ + if (test_bit(RXRPC_CONN_EXPOSED, &conn->flags)) { + _debug("make idle"); + conn->idle_timestamp = jiffies; + conn->cache_state = RXRPC_CONN_CLIENT_IDLE; + list_move_tail(&conn->cache_link, &rxrpc_idle_client_conns); + if (rxrpc_idle_client_conns.next == &conn->cache_link && + !rxrpc_kill_all_client_conns) + queue_delayed_work(rxrpc_workqueue, + &rxrpc_client_conn_reap, + rxrpc_conn_idle_client_expiry); + } else { + _debug("make inactive"); + conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE; + list_del_init(&conn->cache_link); + } + goto out; } /* - * Remove a client connection from the local endpoint's tree, thereby removing - * it as a target for reuse for new client calls. + * Clean up a dead client connection. 
*/ -void rxrpc_unpublish_client_conn(struct rxrpc_connection *conn) +static struct rxrpc_connection * +rxrpc_put_one_client_conn(struct rxrpc_connection *conn) { + struct rxrpc_connection *next; struct rxrpc_local *local = conn->params.local; + unsigned int nr_conns; - spin_lock(&local->client_conns_lock); - if (test_and_clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags)) - rb_erase(&conn->client_node, &local->client_conns); - spin_unlock(&local->client_conns_lock); + if (test_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags)) { + spin_lock(&local->client_conns_lock); + if (test_and_clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, + &conn->flags)) + rb_erase(&conn->client_node, &local->client_conns); + spin_unlock(&local->client_conns_lock); + } rxrpc_put_client_connection_id(conn); + + ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_INACTIVE); + + if (!test_bit(RXRPC_CONN_COUNTED, &conn->flags)) + return NULL; + + spin_lock(&rxrpc_client_conn_cache_lock); + nr_conns = --rxrpc_nr_client_conns; + + next = NULL; + if (nr_conns < rxrpc_max_client_connections && + !list_empty(&rxrpc_waiting_client_conns)) { + next = list_entry(rxrpc_waiting_client_conns.next, + struct rxrpc_connection, cache_link); + rxrpc_get_connection(next); + rxrpc_activate_conn(next); + } + + spin_unlock(&rxrpc_client_conn_cache_lock); + rxrpc_kill_connection(conn); + + if (next) + rxrpc_activate_channels(next); + + /* We need to get rid of the temporary ref we took upon next, but we + * can't call rxrpc_put_connection() recursively. + */ + return next; +} + +/* + * Clean up a dead client connections. + */ +void rxrpc_put_client_conn(struct rxrpc_connection *conn) +{ + struct rxrpc_connection *next; + + do { + _enter("%p{u=%d,d=%d}", + conn, atomic_read(&conn->usage), conn->debug_id); + + next = rxrpc_put_one_client_conn(conn); + + if (!next) + break; + conn = next; + } while (atomic_dec_and_test(&conn->usage)); + + _leave(""); +} + +/* + * Kill the longest-active client connections to make room for new ones. + */ +static void rxrpc_cull_active_client_conns(void) +{ + struct rxrpc_connection *conn; + unsigned int nr_conns = rxrpc_nr_client_conns; + unsigned int nr_active, limit; + + _enter(""); + + ASSERTCMP(nr_conns, >=, 0); + if (nr_conns < rxrpc_max_client_connections) { + _leave(" [ok]"); + return; + } + limit = rxrpc_reap_client_connections; + + spin_lock(&rxrpc_client_conn_cache_lock); + nr_active = rxrpc_nr_active_client_conns; + + while (nr_active > limit) { + ASSERT(!list_empty(&rxrpc_active_client_conns)); + conn = list_entry(rxrpc_active_client_conns.next, + struct rxrpc_connection, cache_link); + ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_ACTIVE); + + if (list_empty(&conn->waiting_calls)) { + conn->cache_state = RXRPC_CONN_CLIENT_CULLED; + list_del_init(&conn->cache_link); + } else { + conn->cache_state = RXRPC_CONN_CLIENT_WAITING; + list_move_tail(&conn->cache_link, + &rxrpc_waiting_client_conns); + } + + nr_active--; + } + + rxrpc_nr_active_client_conns = nr_active; + spin_unlock(&rxrpc_client_conn_cache_lock); + ASSERTCMP(nr_active, >=, 0); + _leave(" [culled]"); +} + +/* + * Discard expired client connections from the idle list. Each conn in the + * idle list has been exposed and holds an extra ref because of that. + * + * This may be called from conn setup or from a work item so cannot be + * considered non-reentrant. 
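rxrpc_put_client_conn() above deliberately loops rather than recursing: tearing down one connection may activate a waiting one and hand back a temporary reference on it, which must in turn be dropped. The toy C below shows only that hand-off-and-loop shape, with invented types and no error handling; it is not the kernel code.

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int usage;
	struct obj *activates_next;	/* successor we are handed a ref on */
};

/* Tear down @o and return the successor (if any) whose temporary
 * reference we now own. */
static struct obj *clean_up_one(struct obj *o)
{
	struct obj *next = o->activates_next;

	printf("cleaning up %p\n", (void *)o);
	free(o);
	return next;
}

/* Called once @o's refcount has already hit zero. */
static void put_obj(struct obj *o)
{
	struct obj *next;

	do {
		next = clean_up_one(o);
		if (!next)
			break;
		o = next;
	} while (--o->usage == 0);	/* drop the temporary ref, loop if last */
}

int main(void)
{
	struct obj *b = calloc(1, sizeof(*b));
	struct obj *a = calloc(1, sizeof(*a));

	b->usage = 1;			/* only the temporary hand-off ref */
	a->usage = 0;			/* caller just dropped the last ref */
	a->activates_next = b;
	put_obj(a);			/* frees a, then b via the hand-off */
	return 0;
}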
+ */ +static void rxrpc_discard_expired_client_conns(struct work_struct *work) +{ + struct rxrpc_connection *conn; + unsigned long expiry, conn_expires_at, now; + unsigned int nr_conns; + bool did_discard = false; + + _enter("%c", work ? 'w' : 'n'); + + if (list_empty(&rxrpc_idle_client_conns)) { + _leave(" [empty]"); + return; + } + + /* Don't double up on the discarding */ + if (!spin_trylock(&rxrpc_client_conn_discard_mutex)) { + _leave(" [already]"); + return; + } + + /* We keep an estimate of what the number of conns ought to be after + * we've discarded some so that we don't overdo the discarding. + */ + nr_conns = rxrpc_nr_client_conns; + +next: + spin_lock(&rxrpc_client_conn_cache_lock); + + if (list_empty(&rxrpc_idle_client_conns)) + goto out; + + conn = list_entry(rxrpc_idle_client_conns.next, + struct rxrpc_connection, cache_link); + ASSERT(test_bit(RXRPC_CONN_EXPOSED, &conn->flags)); + + if (!rxrpc_kill_all_client_conns) { + /* If the number of connections is over the reap limit, we + * expedite discard by reducing the expiry timeout. We must, + * however, have at least a short grace period to be able to do + * final-ACK or ABORT retransmission. + */ + expiry = rxrpc_conn_idle_client_expiry; + if (nr_conns > rxrpc_reap_client_connections) + expiry = rxrpc_conn_idle_client_fast_expiry; + + conn_expires_at = conn->idle_timestamp + expiry; + + now = READ_ONCE(jiffies); + if (time_after(conn_expires_at, now)) + goto not_yet_expired; + } + + _debug("discard conn %d", conn->debug_id); + if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags)) + BUG(); + conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE; + list_del_init(&conn->cache_link); + + spin_unlock(&rxrpc_client_conn_cache_lock); + + /* When we cleared the EXPOSED flag, we took on responsibility for the + * reference that that had on the usage count. We deal with that here. + * If someone re-sets the flag and re-gets the ref, that's fine. + */ + rxrpc_put_connection(conn); + did_discard = true; + nr_conns--; + goto next; + +not_yet_expired: + /* The connection at the front of the queue hasn't yet expired, so + * schedule the work item for that point if we discarded something. + * + * We don't worry if the work item is already scheduled - it can look + * after rescheduling itself at a later time. We could cancel it, but + * then things get messier. + */ + _debug("not yet"); + if (!rxrpc_kill_all_client_conns) + queue_delayed_work(rxrpc_workqueue, + &rxrpc_client_conn_reap, + conn_expires_at - now); + +out: + spin_unlock(&rxrpc_client_conn_cache_lock); + spin_unlock(&rxrpc_client_conn_discard_mutex); + _leave(""); +} + +/* + * Preemptively destroy all the client connection records rather than waiting + * for them to time out + */ +void __exit rxrpc_destroy_all_client_connections(void) +{ + _enter(""); + + spin_lock(&rxrpc_client_conn_cache_lock); + rxrpc_kill_all_client_conns = true; + spin_unlock(&rxrpc_client_conn_cache_lock); + + cancel_delayed_work(&rxrpc_client_conn_reap); + + if (!queue_delayed_work(rxrpc_workqueue, &rxrpc_client_conn_reap, 0)) + _debug("destroy: queue failed"); + + _leave(""); } diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index cee0f35bc1cf..6296374df840 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -25,6 +25,114 @@ #include "ar-internal.h" /* + * Retransmit terminal ACK or ABORT of the previous call. 
+ */ +static void rxrpc_conn_retransmit(struct rxrpc_connection *conn, + struct sk_buff *skb) +{ + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); + struct rxrpc_channel *chan; + struct msghdr msg; + struct kvec iov; + struct { + struct rxrpc_wire_header whdr; + union { + struct { + __be32 code; + } abort; + struct { + struct rxrpc_ackpacket ack; + u8 padding[3]; + struct rxrpc_ackinfo info; + }; + }; + } __attribute__((packed)) pkt; + size_t len; + u32 serial, mtu, call_id; + + _enter("%d", conn->debug_id); + + chan = &conn->channels[sp->hdr.cid & RXRPC_CHANNELMASK]; + + /* If the last call got moved on whilst we were waiting to run, just + * ignore this packet. + */ + call_id = READ_ONCE(chan->last_call); + /* Sync with __rxrpc_disconnect_call() */ + smp_rmb(); + if (call_id != sp->hdr.callNumber) + return; + + msg.msg_name = &conn->params.peer->srx.transport; + msg.msg_namelen = conn->params.peer->srx.transport_len; + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = 0; + + pkt.whdr.epoch = htonl(sp->hdr.epoch); + pkt.whdr.cid = htonl(sp->hdr.cid); + pkt.whdr.callNumber = htonl(sp->hdr.callNumber); + pkt.whdr.seq = 0; + pkt.whdr.type = chan->last_type; + pkt.whdr.flags = conn->out_clientflag; + pkt.whdr.userStatus = 0; + pkt.whdr.securityIndex = conn->security_ix; + pkt.whdr._rsvd = 0; + pkt.whdr.serviceId = htons(chan->last_service_id); + + len = sizeof(pkt.whdr); + switch (chan->last_type) { + case RXRPC_PACKET_TYPE_ABORT: + pkt.abort.code = htonl(chan->last_abort); + len += sizeof(pkt.abort); + break; + + case RXRPC_PACKET_TYPE_ACK: + mtu = conn->params.peer->if_mtu; + mtu -= conn->params.peer->hdrsize; + pkt.ack.bufferSpace = 0; + pkt.ack.maxSkew = htons(skb->priority); + pkt.ack.firstPacket = htonl(chan->last_seq); + pkt.ack.previousPacket = htonl(chan->last_seq - 1); + pkt.ack.serial = htonl(sp->hdr.serial); + pkt.ack.reason = RXRPC_ACK_DUPLICATE; + pkt.ack.nAcks = 0; + pkt.info.rxMTU = htonl(rxrpc_rx_mtu); + pkt.info.maxMTU = htonl(mtu); + pkt.info.rwind = htonl(rxrpc_rx_window_size); + pkt.info.jumbo_max = htonl(rxrpc_rx_jumbo_max); + len += sizeof(pkt.ack) + sizeof(pkt.info); + break; + } + + /* Resync with __rxrpc_disconnect_call() and check that the last call + * didn't get advanced whilst we were filling out the packets. 
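The write barrier in __rxrpc_disconnect_call() (later in this series of hunks) pairs with the read barriers here: the terminal ACK/ABORT state is stored before last_call is advanced, and the reader checks last_call both before and after consuming that state. The user-space analogue below expresses the same publish-then-check ordering with C11 release/acquire; it is only an analogue of the smp_wmb()/smp_rmb() pairing, not the kernel code, and it omits the final recheck for brevity.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Invented types: last_abort stands in for the cached terminal state,
 * last_call for the publication marker. */
struct chan_cache {
	unsigned int last_abort;
	_Atomic unsigned int last_call;
};

static void publish_terminal_state(struct chan_cache *c,
				   unsigned int call_id, unsigned int abort_code)
{
	c->last_abort = abort_code;
	/* release store plays the role of the kernel's smp_wmb() */
	atomic_store_explicit(&c->last_call, call_id, memory_order_release);
}

static bool lookup_terminal_state(struct chan_cache *c,
				  unsigned int call_id, unsigned int *abort_code)
{
	/* acquire load plays the role of the kernel's smp_rmb() */
	if (atomic_load_explicit(&c->last_call, memory_order_acquire) != call_id)
		return false;		/* channel has moved on; ignore packet */
	*abort_code = c->last_abort;
	return true;
}

int main(void)
{
	struct chan_cache c = { .last_abort = 0 };
	unsigned int code;

	publish_terminal_state(&c, 7, 17);
	if (lookup_terminal_state(&c, 7, &code))
		printf("retransmit abort %u for call 7\n", code);
	return 0;
}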
+ */ + smp_rmb(); + if (READ_ONCE(chan->last_call) != call_id) + return; + + iov.iov_base = &pkt; + iov.iov_len = len; + + serial = atomic_inc_return(&conn->serial); + pkt.whdr.serial = htonl(serial); + + switch (chan->last_type) { + case RXRPC_PACKET_TYPE_ABORT: + _proto("Tx ABORT %%%u { %d } [re]", serial, conn->local_abort); + break; + case RXRPC_PACKET_TYPE_ACK: + _proto("Tx ACK %%%u [re]", serial); + break; + } + + kernel_sendmsg(conn->params.local->socket, &msg, &iov, 1, len); + _leave(""); + return; +} + +/* * pass a connection-level abort onto all calls on that connection */ static void rxrpc_abort_calls(struct rxrpc_connection *conn, int state, @@ -166,6 +274,12 @@ static int rxrpc_process_event(struct rxrpc_connection *conn, _enter("{%d},{%u,%%%u},", conn->debug_id, sp->hdr.type, sp->hdr.serial); switch (sp->hdr.type) { + case RXRPC_PACKET_TYPE_DATA: + case RXRPC_PACKET_TYPE_ACK: + rxrpc_conn_retransmit(conn, skb); + rxrpc_free_skb(skb); + return 0; + case RXRPC_PACKET_TYPE_ABORT: if (skb_copy_bits(skb, 0, &wtmp, sizeof(wtmp)) < 0) return -EPROTO; @@ -277,6 +391,7 @@ void rxrpc_process_connection(struct work_struct *work) /* go through the conn-level event packets, releasing the ref on this * connection that each one has when we've finished with it */ while ((skb = skb_dequeue(&conn->rx_queue))) { + rxrpc_see_skb(skb); ret = rxrpc_process_event(conn, skb, &abort_code); switch (ret) { case -EPROTO: @@ -365,6 +480,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local) whdr.type = RXRPC_PACKET_TYPE_ABORT; while ((skb = skb_dequeue(&local->reject_queue))) { + rxrpc_see_skb(skb); sp = rxrpc_skb(skb); switch (sa.sa.sa_family) { case AF_INET: diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c index 896d84493a05..5b45b6c367e7 100644 --- a/net/rxrpc/conn_object.c +++ b/net/rxrpc/conn_object.c @@ -1,6 +1,6 @@ -/* RxRPC virtual connection handler +/* RxRPC virtual connection handler, common bits. * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or @@ -15,8 +15,6 @@ #include <linux/slab.h> #include <linux/net.h> #include <linux/skbuff.h> -#include <net/sock.h> -#include <net/af_rxrpc.h> #include "ar-internal.h" /* @@ -27,9 +25,12 @@ unsigned int rxrpc_connection_expiry = 10 * 60; static void rxrpc_connection_reaper(struct work_struct *work); LIST_HEAD(rxrpc_connections); +LIST_HEAD(rxrpc_connection_proc_list); DEFINE_RWLOCK(rxrpc_connection_lock); static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper); +static void rxrpc_destroy_connection(struct rcu_head *); + /* * allocate a new connection */ @@ -41,21 +42,19 @@ struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp) conn = kzalloc(sizeof(struct rxrpc_connection), gfp); if (conn) { + INIT_LIST_HEAD(&conn->cache_link); spin_lock_init(&conn->channel_lock); - init_waitqueue_head(&conn->channel_wq); + INIT_LIST_HEAD(&conn->waiting_calls); INIT_WORK(&conn->processor, &rxrpc_process_connection); + INIT_LIST_HEAD(&conn->proc_link); INIT_LIST_HEAD(&conn->link); skb_queue_head_init(&conn->rx_queue); conn->security = &rxrpc_no_security; spin_lock_init(&conn->state_lock); - /* We maintain an extra ref on the connection whilst it is - * on the rxrpc_connections list. 
- */ - atomic_set(&conn->usage, 2); conn->debug_id = atomic_inc_return(&rxrpc_debug_id); - atomic_set(&conn->avail_chans, RXRPC_MAXCALLS); conn->size_align = 4; conn->header_size = sizeof(struct rxrpc_wire_header); + conn->idle_timestamp = jiffies; } _leave(" = %p{%d}", conn, conn ? conn->debug_id : 0); @@ -153,25 +152,32 @@ not_found: * terminates. The caller must hold the channel_lock and must release the * call's ref on the connection. */ -void __rxrpc_disconnect_call(struct rxrpc_call *call) +void __rxrpc_disconnect_call(struct rxrpc_connection *conn, + struct rxrpc_call *call) { - struct rxrpc_connection *conn = call->conn; - struct rxrpc_channel *chan = &conn->channels[call->channel]; + struct rxrpc_channel *chan = + &conn->channels[call->cid & RXRPC_CHANNELMASK]; - _enter("%d,%d", conn->debug_id, call->channel); + _enter("%d,%x", conn->debug_id, call->cid); if (rcu_access_pointer(chan->call) == call) { /* Save the result of the call so that we can repeat it if necessary * through the channel, whilst disposing of the actual call record. */ - chan->last_result = call->local_abort; + chan->last_service_id = call->service_id; + if (call->local_abort) { + chan->last_abort = call->local_abort; + chan->last_type = RXRPC_PACKET_TYPE_ABORT; + } else { + chan->last_seq = call->rx_data_eaten; + chan->last_type = RXRPC_PACKET_TYPE_ACK; + } + /* Sync with rxrpc_conn_retransmit(). */ smp_wmb(); chan->last_call = chan->call_id; chan->call_id = chan->call_counter; rcu_assign_pointer(chan->call, NULL); - atomic_inc(&conn->avail_chans); - wake_up(&conn->channel_wq); } _leave(""); @@ -185,34 +191,52 @@ void rxrpc_disconnect_call(struct rxrpc_call *call) { struct rxrpc_connection *conn = call->conn; + if (rxrpc_is_client_call(call)) + return rxrpc_disconnect_client_call(call); + spin_lock(&conn->channel_lock); - __rxrpc_disconnect_call(call); + __rxrpc_disconnect_call(conn, call); spin_unlock(&conn->channel_lock); call->conn = NULL; + conn->idle_timestamp = jiffies; rxrpc_put_connection(conn); } /* - * release a virtual connection + * Kill off a connection. */ -void rxrpc_put_connection(struct rxrpc_connection *conn) +void rxrpc_kill_connection(struct rxrpc_connection *conn) { - if (!conn) - return; + ASSERT(!rcu_access_pointer(conn->channels[0].call) && + !rcu_access_pointer(conn->channels[1].call) && + !rcu_access_pointer(conn->channels[2].call) && + !rcu_access_pointer(conn->channels[3].call)); + ASSERT(list_empty(&conn->cache_link)); - _enter("%p{u=%d,d=%d}", - conn, atomic_read(&conn->usage), conn->debug_id); + write_lock(&rxrpc_connection_lock); + list_del_init(&conn->proc_link); + write_unlock(&rxrpc_connection_lock); - ASSERTCMP(atomic_read(&conn->usage), >, 1); + /* Drain the Rx queue. Note that even though we've unpublished, an + * incoming packet could still be being added to our Rx queue, so we + * will need to drain it again in the RCU cleanup handler. + */ + rxrpc_purge_queue(&conn->rx_queue); - conn->put_time = ktime_get_seconds(); - if (atomic_dec_return(&conn->usage) == 1) { - _debug("zombie"); - rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0); - } + /* Leave final destruction to RCU. The connection processor work item + * must carry a ref on the connection to prevent us getting here whilst + * it is queued or running. 
+ */ + call_rcu(&conn->rcu, rxrpc_destroy_connection); +} - _leave(""); +/* + * release a virtual connection + */ +void __rxrpc_put_connection(struct rxrpc_connection *conn) +{ + rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0); } /* @@ -242,19 +266,19 @@ static void rxrpc_destroy_connection(struct rcu_head *rcu) } /* - * reap dead connections + * reap dead service connections */ static void rxrpc_connection_reaper(struct work_struct *work) { struct rxrpc_connection *conn, *_p; - unsigned long reap_older_than, earliest, put_time, now; + unsigned long reap_older_than, earliest, idle_timestamp, now; LIST_HEAD(graveyard); _enter(""); - now = ktime_get_seconds(); - reap_older_than = now - rxrpc_connection_expiry; + now = jiffies; + reap_older_than = now - rxrpc_connection_expiry * HZ; earliest = ULONG_MAX; write_lock(&rxrpc_connection_lock); @@ -263,10 +287,14 @@ static void rxrpc_connection_reaper(struct work_struct *work) if (likely(atomic_read(&conn->usage) > 1)) continue; - put_time = READ_ONCE(conn->put_time); - if (time_after(put_time, reap_older_than)) { - if (time_before(put_time, earliest)) - earliest = put_time; + idle_timestamp = READ_ONCE(conn->idle_timestamp); + _debug("reap CONN %d { u=%d,t=%ld }", + conn->debug_id, atomic_read(&conn->usage), + (long)reap_older_than - (long)idle_timestamp); + + if (time_after(idle_timestamp, reap_older_than)) { + if (time_before(idle_timestamp, earliest)) + earliest = idle_timestamp; continue; } @@ -277,7 +305,7 @@ static void rxrpc_connection_reaper(struct work_struct *work) continue; if (rxrpc_conn_is_client(conn)) - rxrpc_unpublish_client_conn(conn); + BUG(); else rxrpc_unpublish_service_conn(conn); @@ -287,9 +315,9 @@ static void rxrpc_connection_reaper(struct work_struct *work) if (earliest != ULONG_MAX) { _debug("reschedule reaper %ld", (long) earliest - now); - ASSERTCMP(earliest, >, now); + ASSERT(time_after(earliest, now)); rxrpc_queue_delayed_work(&rxrpc_connection_reap, - (earliest - now) * HZ); + earliest - now); } while (!list_empty(&graveyard)) { @@ -298,16 +326,15 @@ static void rxrpc_connection_reaper(struct work_struct *work) list_del_init(&conn->link); ASSERTCMP(atomic_read(&conn->usage), ==, 0); - skb_queue_purge(&conn->rx_queue); - call_rcu(&conn->rcu, rxrpc_destroy_connection); + rxrpc_kill_connection(conn); } _leave(""); } /* - * preemptively destroy all the connection records rather than waiting for them - * to time out + * preemptively destroy all the service connection records rather than + * waiting for them to time out */ void __exit rxrpc_destroy_all_connections(void) { @@ -316,6 +343,8 @@ void __exit rxrpc_destroy_all_connections(void) _enter(""); + rxrpc_destroy_all_client_connections(); + rxrpc_connection_expiry = 0; cancel_delayed_work(&rxrpc_connection_reap); rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0); @@ -330,6 +359,8 @@ void __exit rxrpc_destroy_all_connections(void) write_unlock(&rxrpc_connection_lock); BUG_ON(leak); + ASSERT(list_empty(&rxrpc_connection_proc_list)); + /* Make sure the local and peer records pinned by any dying connections * are released. */ diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c index fd9027ccba8f..316a92107fee 100644 --- a/net/rxrpc/conn_service.c +++ b/net/rxrpc/conn_service.c @@ -185,8 +185,14 @@ struct rxrpc_connection *rxrpc_incoming_connection(struct rxrpc_local *local, rxrpc_get_local(local); + /* We maintain an extra ref on the connection whilst it is on + * the rxrpc_connections list. 
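The reworked service-connection reaper above compares jiffies values with time_after() instead of ktime seconds. The small program below illustrates why that comparison is safe across counter wrap: the subtraction is done in unsigned arithmetic and the result reinterpreted as signed. HZ is taken as 1 purely for the illustration.

#include <stdio.h>

/* Wrap-safe timestamp comparison in the style of the kernel's
 * time_after(): true if a is later than b. */
static int time_after_ul(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	unsigned long now = 1000;
	unsigned long idle_timestamp = 900;
	unsigned long expiry = 50;
	unsigned long reap_older_than = now - expiry * 1;	/* HZ assumed 1 */

	/* idle_timestamp (900) is not after 950, so this conn gets reaped */
	printf("keep=%d\n", time_after_ul(idle_timestamp, reap_older_than));

	/* near the wrap point the comparison still behaves */
	printf("%d\n", time_after_ul(5UL, (unsigned long)-10L));
	return 0;
}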
+ */ + atomic_set(&conn->usage, 2); + write_lock(&rxrpc_connection_lock); list_add_tail(&conn->link, &rxrpc_connections); + list_add_tail(&conn->proc_link, &rxrpc_connection_proc_list); write_unlock(&rxrpc_connection_lock); /* Make the connection a target for incoming packets. */ diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 70bb77818dea..5e683dd21ab9 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -125,6 +125,7 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call, bool terminal; int ret, ackbit, ack; u32 serial; + u16 skew; u8 flags; _enter("{%u,%u},,{%u}", call->rx_data_post, call->rx_first_oos, seq); @@ -133,6 +134,7 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call, ASSERTCMP(sp->call, ==, NULL); flags = sp->hdr.flags; serial = sp->hdr.serial; + skew = skb->priority; spin_lock(&call->lock); @@ -231,7 +233,7 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call, spin_unlock(&call->lock); atomic_inc(&call->ackr_not_idle); - rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, false); + rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, skew, serial, false); _leave(" = 0 [posted]"); return 0; @@ -244,7 +246,7 @@ out: discard_and_ack: _debug("discard and ACK packet %p", skb); - __rxrpc_propose_ACK(call, ack, serial, true); + __rxrpc_propose_ACK(call, ack, skew, serial, true); discard: spin_unlock(&call->lock); rxrpc_free_skb(skb); @@ -252,7 +254,7 @@ discard: return 0; enqueue_and_ack: - __rxrpc_propose_ACK(call, ack, serial, true); + __rxrpc_propose_ACK(call, ack, skew, serial, true); enqueue_packet: _net("defer skb %p", skb); spin_unlock(&call->lock); @@ -304,7 +306,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); __be32 wtmp; - u32 hi_serial, abort_code; + u32 abort_code; _enter("%p,%p", call, skb); @@ -321,18 +323,12 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb) } #endif - /* track the latest serial number on this connection for ACK packet - * information */ - hi_serial = atomic_read(&call->conn->hi_serial); - while (sp->hdr.serial > hi_serial) - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial, - sp->hdr.serial); - /* request ACK generation for any ACK or DATA packet that requests * it */ if (sp->hdr.flags & RXRPC_REQUEST_ACK) { _proto("ACK Requested on %%%u", sp->hdr.serial); - rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED, sp->hdr.serial, false); + rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED, + skb->priority, sp->hdr.serial, false); } switch (sp->hdr.type) { @@ -570,7 +566,8 @@ done: /* * post connection-level events to the connection - * - this includes challenges, responses and some aborts + * - this includes challenges, responses, some aborts and call terminal packet + * retransmission. 
*/ static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn, struct sk_buff *skb) @@ -637,7 +634,7 @@ void rxrpc_data_ready(struct sock *sk) struct rxrpc_skb_priv *sp; struct rxrpc_local *local = sk->sk_user_data; struct sk_buff *skb; - int ret; + int ret, skew; _enter("%p", sk); @@ -700,25 +697,64 @@ void rxrpc_data_ready(struct sock *sk) rcu_read_lock(); conn = rxrpc_find_connection_rcu(local, skb); - if (!conn) + if (!conn) { + skb->priority = 0; goto cant_route_call; + } + + /* Note the serial number skew here */ + skew = (int)sp->hdr.serial - (int)conn->hi_serial; + if (skew >= 0) { + if (skew > 0) + conn->hi_serial = sp->hdr.serial; + skb->priority = 0; + } else { + skew = -skew; + skb->priority = min(skew, 65535); + } if (sp->hdr.callNumber == 0) { /* Connection-level packet */ _debug("CONN %p {%d}", conn, conn->debug_id); rxrpc_post_packet_to_conn(conn, skb); + goto out_unlock; } else { /* Call-bound packets are routed by connection channel. */ unsigned int channel = sp->hdr.cid & RXRPC_CHANNELMASK; struct rxrpc_channel *chan = &conn->channels[channel]; - struct rxrpc_call *call = rcu_dereference(chan->call); + struct rxrpc_call *call; + + /* Ignore really old calls */ + if (sp->hdr.callNumber < chan->last_call) + goto discard_unlock; + + if (sp->hdr.callNumber == chan->last_call) { + /* For the previous service call, if completed + * successfully, we discard all further packets. + */ + if (rxrpc_conn_is_service(conn) && + (chan->last_type == RXRPC_PACKET_TYPE_ACK || + sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)) + goto discard_unlock; + + /* But otherwise we need to retransmit the final packet + * from data cached in the connection record. + */ + rxrpc_post_packet_to_conn(conn, skb); + goto out_unlock; + } + call = rcu_dereference(chan->call); if (!call || atomic_read(&call->usage) == 0) goto cant_route_call; rxrpc_post_packet_to_call(call, skb); + goto out_unlock; } +discard_unlock: + rxrpc_free_skb(skb); +out_unlock: rcu_read_unlock(); out: return; diff --git a/net/rxrpc/local_event.c b/net/rxrpc/local_event.c index 31a3f86ef2f6..bcc6593b4cdb 100644 --- a/net/rxrpc/local_event.c +++ b/net/rxrpc/local_event.c @@ -93,6 +93,7 @@ void rxrpc_process_local_events(struct rxrpc_local *local) if (skb) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); + rxrpc_see_skb(skb); _debug("{%d},{%u}", local->debug_id, sp->hdr.type); switch (sp->hdr.type) { diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index f4bda06b7d2d..8a9917cba6fe 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c @@ -218,11 +218,11 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) ret = 0; } else if (cmd != RXRPC_CMD_SEND_DATA) { ret = -EINVAL; - } else if (!call->in_clientflag && + } else if (rxrpc_is_client_call(call) && call->state != RXRPC_CALL_CLIENT_SEND_REQUEST) { /* request phase complete for this client call */ ret = -EPROTO; - } else if (call->in_clientflag && + } else if (rxrpc_is_service_call(call) && call->state != RXRPC_CALL_SERVER_ACK_REQUEST && call->state != RXRPC_CALL_SERVER_SEND_REPLY) { /* Reply phase not begun or not complete for service call. 
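A standalone sketch (userspace, assumed simplified) of the serial-number skew computed in the rxrpc_data_ready() hunk above: the difference of two 32-bit serials is evaluated as a signed int so wraparound orders correctly, zero is recorded for in-order packets, and the skew of late packets is clamped to 16 bits before being stashed in skb->priority.

#include <stdio.h>
#include <stdint.h>

static unsigned int serial_skew(uint32_t hi_serial, uint32_t serial)
{
	int skew = (int)serial - (int)hi_serial;

	if (skew >= 0)
		return 0;		/* packet is current or newer; no skew */
	skew = -skew;
	return skew < 65535 ? skew : 65535;	/* clamp to fit 16 bits */
}

int main(void)
{
	printf("%u\n", serial_skew(100, 105));		/* 0: newer packet */
	printf("%u\n", serial_skew(100, 90));		/* 10: ten serials late */
	printf("%u\n", serial_skew(5, 0xfffffffbu));	/* 10: late across wrap */
	return 0;
}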
*/ @@ -390,7 +390,7 @@ static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx, call->acks_winsz), *timeo); - add_wait_queue(&call->tx_waitq, &myself); + add_wait_queue(&call->waitq, &myself); for (;;) { set_current_state(TASK_INTERRUPTIBLE); @@ -408,7 +408,7 @@ static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx, lock_sock(&rx->sk); } - remove_wait_queue(&call->tx_waitq, &myself); + remove_wait_queue(&call->waitq, &myself); set_current_state(TASK_RUNNING); _leave(" = %d", ret); return ret; @@ -482,6 +482,8 @@ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb, if (try_to_del_timer_sync(&call->ack_timer) >= 0) { /* the packet may be freed by rxrpc_process_call() before this * returns */ + if (rxrpc_is_client_call(call)) + rxrpc_expose_client_call(call); ret = rxrpc_send_data_packet(call->conn, skb); _net("sent skb %p", skb); } else { @@ -548,6 +550,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, skb = call->tx_pending; call->tx_pending = NULL; + rxrpc_see_skb(skb); copied = 0; do { diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c index ced5f07444e5..060fb4892c39 100644 --- a/net/rxrpc/proc.c +++ b/net/rxrpc/proc.c @@ -46,7 +46,9 @@ static void rxrpc_call_seq_stop(struct seq_file *seq, void *v) static int rxrpc_call_seq_show(struct seq_file *seq, void *v) { - struct rxrpc_connection *conn; + struct rxrpc_local *local; + struct rxrpc_sock *rx; + struct rxrpc_peer *peer; struct rxrpc_call *call; char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1]; @@ -60,15 +62,24 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v) call = list_entry(v, struct rxrpc_call, link); - sprintf(lbuff, "%pI4:%u", - &call->local->srx.transport.sin.sin_addr, - ntohs(call->local->srx.transport.sin.sin_port)); + rx = READ_ONCE(call->socket); + if (rx) { + local = READ_ONCE(rx->local); + if (local) + sprintf(lbuff, "%pI4:%u", + &local->srx.transport.sin.sin_addr, + ntohs(local->srx.transport.sin.sin_port)); + else + strcpy(lbuff, "no_local"); + } else { + strcpy(lbuff, "no_socket"); + } - conn = call->conn; - if (conn) + peer = call->peer; + if (peer) sprintf(rbuff, "%pI4:%u", - &conn->params.peer->srx.transport.sin.sin_addr, - ntohs(conn->params.peer->srx.transport.sin.sin_port)); + &peer->srx.transport.sin.sin_addr, + ntohs(peer->srx.transport.sin.sin_port)); else strcpy(rbuff, "no_connection"); @@ -80,7 +91,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v) call->service_id, call->cid, call->call_id, - call->in_clientflag ? "Svc" : "Clt", + rxrpc_is_service_call(call) ? 
"Svc" : "Clt", atomic_read(&call->usage), rxrpc_call_states[call->state], call->remote_abort ?: call->local_abort, @@ -115,13 +126,13 @@ const struct file_operations rxrpc_call_seq_fops = { static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos) { read_lock(&rxrpc_connection_lock); - return seq_list_start_head(&rxrpc_connections, *_pos); + return seq_list_start_head(&rxrpc_connection_proc_list, *_pos); } static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v, loff_t *pos) { - return seq_list_next(v, &rxrpc_connections, pos); + return seq_list_next(v, &rxrpc_connection_proc_list, pos); } static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v) @@ -134,7 +145,7 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v) struct rxrpc_connection *conn; char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1]; - if (v == &rxrpc_connections) { + if (v == &rxrpc_connection_proc_list) { seq_puts(seq, "Proto Local Remote " " SvID ConnID End Use State Key " @@ -143,7 +154,7 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v) return 0; } - conn = list_entry(v, struct rxrpc_connection, link); + conn = list_entry(v, struct rxrpc_connection, proc_link); sprintf(lbuff, "%pI4:%u", &conn->params.local->srx.transport.sin.sin_addr, @@ -165,7 +176,7 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v) rxrpc_conn_states[conn->state], key_serial(conn->params.key), atomic_read(&conn->serial), - atomic_read(&conn->hi_serial)); + conn->hi_serial); return 0; } diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index 9ed66d533002..b964c2d49a88 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -111,6 +111,7 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, } peek_next_packet: + rxrpc_see_skb(skb); sp = rxrpc_skb(skb); call = sp->call; ASSERT(call != NULL); diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index 63afa9e9cc08..89f475febfd7 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c @@ -275,7 +275,7 @@ static int rxkad_secure_packet(struct rxrpc_call *call, memcpy(&iv, call->conn->csum_iv.x, sizeof(iv)); /* calculate the security checksum */ - x = call->channel << (32 - RXRPC_CIDSHIFT); + x = (call->cid & RXRPC_CHANNELMASK) << (32 - RXRPC_CIDSHIFT); x |= sp->hdr.seq & 0x3fffffff; call->crypto_buf[0] = htonl(sp->hdr.callNumber); call->crypto_buf[1] = htonl(x); @@ -507,7 +507,7 @@ static int rxkad_verify_packet(struct rxrpc_call *call, memcpy(&iv, call->conn->csum_iv.x, sizeof(iv)); /* validate the security checksum */ - x = call->channel << (32 - RXRPC_CIDSHIFT); + x = (call->cid & RXRPC_CHANNELMASK) << (32 - RXRPC_CIDSHIFT); x |= sp->hdr.seq & 0x3fffffff; call->crypto_buf[0] = htonl(call->call_id); call->crypto_buf[1] = htonl(x); diff --git a/net/rxrpc/skbuff.c b/net/rxrpc/skbuff.c index 06c51d4b622d..fbd8c74d9505 100644 --- a/net/rxrpc/skbuff.c +++ b/net/rxrpc/skbuff.c @@ -53,9 +53,9 @@ static void rxrpc_request_final_ACK(struct rxrpc_call *call) /* * drop the bottom ACK off of the call ACK window and advance the window */ -static void rxrpc_hard_ACK_data(struct rxrpc_call *call, - struct rxrpc_skb_priv *sp) +static void rxrpc_hard_ACK_data(struct rxrpc_call *call, struct sk_buff *skb) { + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); int loop; u32 seq; @@ -91,8 +91,8 @@ static void rxrpc_hard_ACK_data(struct rxrpc_call *call, * its Tx bufferage. 
*/ _debug("send Rx idle ACK"); - __rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, sp->hdr.serial, - false); + __rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, + skb->priority, sp->hdr.serial, false); } spin_unlock_bh(&call->lock); @@ -125,7 +125,7 @@ void rxrpc_kernel_data_consumed(struct rxrpc_call *call, struct sk_buff *skb) ASSERTCMP(sp->hdr.seq, >, call->rx_data_eaten); call->rx_data_recv = sp->hdr.seq; - rxrpc_hard_ACK_data(call, sp); + rxrpc_hard_ACK_data(call, skb); } EXPORT_SYMBOL(rxrpc_kernel_data_consumed); @@ -163,3 +163,65 @@ void rxrpc_kernel_free_skb(struct sk_buff *skb) rxrpc_free_skb(skb); } EXPORT_SYMBOL(rxrpc_kernel_free_skb); + +/* + * Note the existence of a new-to-us socket buffer (allocated or dequeued). + */ +void rxrpc_new_skb(struct sk_buff *skb) +{ + const void *here = __builtin_return_address(0); + int n = atomic_inc_return(&rxrpc_n_skbs); + trace_rxrpc_skb(skb, 0, atomic_read(&skb->users), n, here); +} + +/* + * Note the re-emergence of a socket buffer from a queue or buffer. + */ +void rxrpc_see_skb(struct sk_buff *skb) +{ + const void *here = __builtin_return_address(0); + if (skb) { + int n = atomic_read(&rxrpc_n_skbs); + trace_rxrpc_skb(skb, 1, atomic_read(&skb->users), n, here); + } +} + +/* + * Note the addition of a ref on a socket buffer. + */ +void rxrpc_get_skb(struct sk_buff *skb) +{ + const void *here = __builtin_return_address(0); + int n = atomic_inc_return(&rxrpc_n_skbs); + trace_rxrpc_skb(skb, 2, atomic_read(&skb->users), n, here); + skb_get(skb); +} + +/* + * Note the destruction of a socket buffer. + */ +void rxrpc_free_skb(struct sk_buff *skb) +{ + const void *here = __builtin_return_address(0); + if (skb) { + int n; + CHECK_SLAB_OKAY(&skb->users); + n = atomic_dec_return(&rxrpc_n_skbs); + trace_rxrpc_skb(skb, 3, atomic_read(&skb->users), n, here); + kfree_skb(skb); + } +} + +/* + * Clear a queue of socket buffers. 
+ */ +void rxrpc_purge_queue(struct sk_buff_head *list) +{ + const void *here = __builtin_return_address(0); + struct sk_buff *skb; + while ((skb = skb_dequeue((list))) != NULL) { + int n = atomic_dec_return(&rxrpc_n_skbs); + trace_rxrpc_skb(skb, 4, atomic_read(&skb->users), n, here); + kfree_skb(skb); + } +} diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c index 03ad08774d4e..dc380af8a81e 100644 --- a/net/rxrpc/sysctl.c +++ b/net/rxrpc/sysctl.c @@ -62,6 +62,22 @@ static struct ctl_table rxrpc_sysctl_table[] = { .proc_handler = proc_dointvec_ms_jiffies, .extra1 = (void *)&one, }, + { + .procname = "idle_conn_expiry", + .data = &rxrpc_conn_idle_client_expiry, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec_ms_jiffies, + .extra1 = (void *)&one, + }, + { + .procname = "idle_conn_fast_expiry", + .data = &rxrpc_conn_idle_client_fast_expiry, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec_ms_jiffies, + .extra1 = (void *)&one, + }, /* Values measured in seconds but used in jiffies */ { @@ -81,17 +97,24 @@ static struct ctl_table rxrpc_sysctl_table[] = { .extra1 = (void *)&one, }, - /* Values measured in seconds */ + /* Non-time values */ + { + .procname = "max_client_conns", + .data = &rxrpc_max_client_connections, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = (void *)&rxrpc_reap_client_connections, + }, { - .procname = "connection_expiry", - .data = &rxrpc_connection_expiry, + .procname = "reap_client_conns", + .data = &rxrpc_reap_client_connections, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = (void *)&one, + .extra2 = (void *)&rxrpc_max_client_connections, }, - - /* Non-time values */ { .procname = "max_backlog", .data = &rxrpc_max_backlog, diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index 691409de3e1a..59a8d3150ae2 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c @@ -43,7 +43,8 @@ static int tcf_vlan(struct sk_buff *skb, const struct tc_action *a, goto drop; break; case TCA_VLAN_ACT_PUSH: - err = skb_vlan_push(skb, v->tcfv_push_proto, v->tcfv_push_vid); + err = skb_vlan_push(skb, v->tcfv_push_proto, v->tcfv_push_vid | + (v->tcfv_push_prio << VLAN_PRIO_SHIFT)); if (err) goto drop; break; @@ -65,6 +66,7 @@ static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = { [TCA_VLAN_PARMS] = { .len = sizeof(struct tc_vlan) }, [TCA_VLAN_PUSH_VLAN_ID] = { .type = NLA_U16 }, [TCA_VLAN_PUSH_VLAN_PROTOCOL] = { .type = NLA_U16 }, + [TCA_VLAN_PUSH_VLAN_PRIORITY] = { .type = NLA_U8 }, }; static int tcf_vlan_init(struct net *net, struct nlattr *nla, @@ -78,6 +80,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla, int action; __be16 push_vid = 0; __be16 push_proto = 0; + u8 push_prio = 0; bool exists = false; int ret = 0, err; @@ -123,6 +126,9 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla, } else { push_proto = htons(ETH_P_8021Q); } + + if (tb[TCA_VLAN_PUSH_VLAN_PRIORITY]) + push_prio = nla_get_u8(tb[TCA_VLAN_PUSH_VLAN_PRIORITY]); break; default: if (exists) @@ -150,6 +156,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla, v->tcfv_action = action; v->tcfv_push_vid = push_vid; + v->tcfv_push_prio = push_prio; v->tcfv_push_proto = push_proto; v->tcf_action = parm->action; @@ -181,7 +188,9 @@ static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a, if (v->tcfv_action == TCA_VLAN_ACT_PUSH && (nla_put_u16(skb, TCA_VLAN_PUSH_VLAN_ID, v->tcfv_push_vid) || 
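The act_vlan change above folds the configured priority into the value passed to skb_vlan_push() by shifting it above the 12-bit VLAN ID. A small userspace sketch of how such a tag control word is composed (example values assumed):

#include <stdio.h>
#include <stdint.h>

#define VLAN_VID_MASK	0x0fff
#define VLAN_PRIO_SHIFT	13	/* 3-bit PCP sits above the 12-bit VID + CFI */

int main(void)
{
	uint16_t vid = 100;	/* 12-bit VLAN ID */
	uint8_t prio = 5;	/* 3-bit priority code point */
	uint16_t tci = (vid & VLAN_VID_MASK) |
		       ((uint16_t)prio << VLAN_PRIO_SHIFT);

	printf("TCI = 0x%04x (vid %u, prio %u)\n", (unsigned)tci,
	       (unsigned)(tci & VLAN_VID_MASK),
	       (unsigned)(tci >> VLAN_PRIO_SHIFT));
	return 0;
}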
nla_put_be16(skb, TCA_VLAN_PUSH_VLAN_PROTOCOL, - v->tcfv_push_proto))) + v->tcfv_push_proto) || + (nla_put_u8(skb, TCA_VLAN_PUSH_VLAN_PRIORITY, + v->tcfv_push_prio)))) goto nla_put_failure; tcf_tm_dump(&t, &v->tcf_tm); diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c index 0b8c3ace671f..eb219b78cd49 100644 --- a/net/sched/cls_basic.c +++ b/net/sched/cls_basic.c @@ -138,10 +138,12 @@ static int basic_set_parms(struct net *net, struct tcf_proto *tp, struct tcf_exts e; struct tcf_ematch_tree t; - tcf_exts_init(&e, TCA_BASIC_ACT, TCA_BASIC_POLICE); - err = tcf_exts_validate(net, tp, tb, est, &e, ovr); + err = tcf_exts_init(&e, TCA_BASIC_ACT, TCA_BASIC_POLICE); if (err < 0) return err; + err = tcf_exts_validate(net, tp, tb, est, &e, ovr); + if (err < 0) + goto errout; err = tcf_em_tree_validate(tp, tb[TCA_BASIC_EMATCHES], &t); if (err < 0) @@ -189,7 +191,10 @@ static int basic_change(struct net *net, struct sk_buff *in_skb, if (!fnew) return -ENOBUFS; - tcf_exts_init(&fnew->exts, TCA_BASIC_ACT, TCA_BASIC_POLICE); + err = tcf_exts_init(&fnew->exts, TCA_BASIC_ACT, TCA_BASIC_POLICE); + if (err < 0) + goto errout; + err = -EINVAL; if (handle) { fnew->handle = handle; @@ -226,6 +231,7 @@ static int basic_change(struct net *net, struct sk_buff *in_skb, return 0; errout: + tcf_exts_destroy(&fnew->exts); kfree(fnew); return err; } diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index c3002c2c68bb..4742f415ee5b 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -311,17 +311,19 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp, if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) return -EINVAL; - tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE); - ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr); + ret = tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE); if (ret < 0) return ret; + ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr); + if (ret < 0) + goto errout; if (tb[TCA_BPF_FLAGS]) { u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]); if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) { - tcf_exts_destroy(&exts); - return -EINVAL; + ret = -EINVAL; + goto errout; } have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT; @@ -331,10 +333,8 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp, ret = is_bpf ? 
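The classifier updates above all follow the same shape once tcf_exts_init() can fail: check its return value, and make the errout label release whatever was initialised before freeing the filter itself. A compact userspace sketch of that unwind pattern (the resource names are invented for the example):

#include <stdio.h>
#include <stdlib.h>

struct exts { int *actions; };
struct filter { struct exts exts; };

static int exts_init(struct exts *e)
{
	e->actions = calloc(4, sizeof(int));
	return e->actions ? 0 : -1;	/* stand-in for -ENOMEM */
}

static void exts_destroy(struct exts *e)
{
	free(e->actions);
	e->actions = NULL;
}

static int filter_change(void)
{
	struct filter *f = calloc(1, sizeof(*f));
	int err;

	if (!f)
		return -1;
	err = exts_init(&f->exts);
	if (err < 0)
		goto errout;
	/* ...further validation that can fail would also jump to errout... */
	exts_destroy(&f->exts);		/* normal teardown for the demo */
	free(f);
	return 0;

errout:
	exts_destroy(&f->exts);		/* safe even after a failed init */
	free(f);
	return err;
}

int main(void)
{
	printf("filter_change() = %d\n", filter_change());
	return 0;
}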
cls_bpf_prog_from_ops(tb, prog) : cls_bpf_prog_from_efd(tb, prog, tp); - if (ret < 0) { - tcf_exts_destroy(&exts); - return ret; - } + if (ret < 0) + goto errout; if (tb[TCA_BPF_CLASSID]) { prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]); @@ -343,6 +343,10 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp, tcf_exts_change(tp, &prog->exts, &exts); return 0; + +errout: + tcf_exts_destroy(&exts); + return ret; } static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp, @@ -388,7 +392,9 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, if (!prog) return -ENOBUFS; - tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE); + ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE); + if (ret < 0) + goto errout; if (oldprog) { if (handle && oldprog->handle != handle) { @@ -420,9 +426,10 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, *arg = (unsigned long) prog; return 0; + errout: + tcf_exts_destroy(&prog->exts); kfree(prog); - return ret; } diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index 4c85bd3a750c..85233c470035 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c @@ -93,7 +93,9 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb, if (!new) return -ENOBUFS; - tcf_exts_init(&new->exts, TCA_CGROUP_ACT, TCA_CGROUP_POLICE); + err = tcf_exts_init(&new->exts, TCA_CGROUP_ACT, TCA_CGROUP_POLICE); + if (err < 0) + goto errout; new->handle = handle; new->tp = tp; err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS], @@ -101,10 +103,14 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb, if (err < 0) goto errout; - tcf_exts_init(&e, TCA_CGROUP_ACT, TCA_CGROUP_POLICE); - err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr); + err = tcf_exts_init(&e, TCA_CGROUP_ACT, TCA_CGROUP_POLICE); if (err < 0) goto errout; + err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr); + if (err < 0) { + tcf_exts_destroy(&e); + goto errout; + } err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t); if (err < 0) { @@ -120,6 +126,7 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb, call_rcu(&head->rcu, cls_cgroup_destroy_rcu); return 0; errout: + tcf_exts_destroy(&new->exts); kfree(new); return err; } diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index fbfec6a18839..2c1ae549edbf 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c @@ -418,10 +418,12 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, return -EOPNOTSUPP; } - tcf_exts_init(&e, TCA_FLOW_ACT, TCA_FLOW_POLICE); + err = tcf_exts_init(&e, TCA_FLOW_ACT, TCA_FLOW_POLICE); + if (err < 0) + goto err1; err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr); if (err < 0) - return err; + goto err1; err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &t); if (err < 0) @@ -432,13 +434,15 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, if (!fnew) goto err2; - tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE); + err = tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE); + if (err < 0) + goto err3; fold = (struct flow_filter *)*arg; if (fold) { err = -EINVAL; if (fold->handle != handle && handle) - goto err2; + goto err3; /* Copy fold into fnew */ fnew->tp = fold->tp; @@ -458,31 +462,31 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, if (tb[TCA_FLOW_MODE]) mode = nla_get_u32(tb[TCA_FLOW_MODE]); if (mode != FLOW_MODE_HASH && nkeys > 1) - goto err2; + goto err3; if (mode == FLOW_MODE_HASH) 
perturb_period = fold->perturb_period; if (tb[TCA_FLOW_PERTURB]) { if (mode != FLOW_MODE_HASH) - goto err2; + goto err3; perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ; } } else { err = -EINVAL; if (!handle) - goto err2; + goto err3; if (!tb[TCA_FLOW_KEYS]) - goto err2; + goto err3; mode = FLOW_MODE_MAP; if (tb[TCA_FLOW_MODE]) mode = nla_get_u32(tb[TCA_FLOW_MODE]); if (mode != FLOW_MODE_HASH && nkeys > 1) - goto err2; + goto err3; if (tb[TCA_FLOW_PERTURB]) { if (mode != FLOW_MODE_HASH) - goto err2; + goto err3; perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ; } @@ -542,6 +546,8 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, call_rcu(&fold->rcu, flow_destroy_filter); return 0; +err3: + tcf_exts_destroy(&fnew->exts); err2: tcf_em_tree_destroy(&t); kfree(fnew); diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 5060801a2f6d..cf9ad5b50889 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -28,6 +28,7 @@ struct fl_flow_key { struct flow_dissector_key_control control; struct flow_dissector_key_basic basic; struct flow_dissector_key_eth_addrs eth; + struct flow_dissector_key_vlan vlan; struct flow_dissector_key_addrs ipaddrs; union { struct flow_dissector_key_ipv4_addrs ipv4; @@ -293,6 +294,10 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { [TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_UDP_SRC] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_UDP_DST] = { .type = NLA_U16 }, + [TCA_FLOWER_KEY_VLAN_ID] = { .type = NLA_U16 }, + [TCA_FLOWER_KEY_VLAN_PRIO] = { .type = NLA_U8 }, + [TCA_FLOWER_KEY_VLAN_ETH_TYPE] = { .type = NLA_U16 }, + }; static void fl_set_key_val(struct nlattr **tb, @@ -308,9 +313,29 @@ static void fl_set_key_val(struct nlattr **tb, memcpy(mask, nla_data(tb[mask_type]), len); } +static void fl_set_key_vlan(struct nlattr **tb, + struct flow_dissector_key_vlan *key_val, + struct flow_dissector_key_vlan *key_mask) +{ +#define VLAN_PRIORITY_MASK 0x7 + + if (tb[TCA_FLOWER_KEY_VLAN_ID]) { + key_val->vlan_id = + nla_get_u16(tb[TCA_FLOWER_KEY_VLAN_ID]) & VLAN_VID_MASK; + key_mask->vlan_id = VLAN_VID_MASK; + } + if (tb[TCA_FLOWER_KEY_VLAN_PRIO]) { + key_val->vlan_priority = + nla_get_u8(tb[TCA_FLOWER_KEY_VLAN_PRIO]) & + VLAN_PRIORITY_MASK; + key_mask->vlan_priority = VLAN_PRIORITY_MASK; + } +} + static int fl_set_key(struct net *net, struct nlattr **tb, struct fl_flow_key *key, struct fl_flow_key *mask) { + __be16 ethertype; #ifdef CONFIG_NET_CLS_IND if (tb[TCA_FLOWER_INDEV]) { int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]); @@ -328,9 +353,20 @@ static int fl_set_key(struct net *net, struct nlattr **tb, mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK, sizeof(key->eth.src)); - fl_set_key_val(tb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE, - &mask->basic.n_proto, TCA_FLOWER_UNSPEC, - sizeof(key->basic.n_proto)); + if (tb[TCA_FLOWER_KEY_ETH_TYPE]) { + ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]); + + if (ethertype == htons(ETH_P_8021Q)) { + fl_set_key_vlan(tb, &key->vlan, &mask->vlan); + fl_set_key_val(tb, &key->basic.n_proto, + TCA_FLOWER_KEY_VLAN_ETH_TYPE, + &mask->basic.n_proto, TCA_FLOWER_UNSPEC, + sizeof(key->basic.n_proto)); + } else { + key->basic.n_proto = ethertype; + mask->basic.n_proto = cpu_to_be16(~0); + } + } if (key->basic.n_proto == htons(ETH_P_IP) || key->basic.n_proto == htons(ETH_P_IPV6)) { @@ -404,12 +440,10 @@ static int fl_init_hashtable(struct cls_fl_head *head, #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member) #define 
FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member)) -#define FL_KEY_MEMBER_END_OFFSET(member) \ - (FL_KEY_MEMBER_OFFSET(member) + FL_KEY_MEMBER_SIZE(member)) -#define FL_KEY_IN_RANGE(mask, member) \ - (FL_KEY_MEMBER_OFFSET(member) <= (mask)->range.end && \ - FL_KEY_MEMBER_END_OFFSET(member) >= (mask)->range.start) +#define FL_KEY_IS_MASKED(mask, member) \ + memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \ + 0, FL_KEY_MEMBER_SIZE(member)) \ #define FL_KEY_SET(keys, cnt, id, member) \ do { \ @@ -418,9 +452,9 @@ static int fl_init_hashtable(struct cls_fl_head *head, cnt++; \ } while(0); -#define FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt, id, member) \ +#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member) \ do { \ - if (FL_KEY_IN_RANGE(mask, member)) \ + if (FL_KEY_IS_MASKED(mask, member)) \ FL_KEY_SET(keys, cnt, id, member); \ } while(0); @@ -432,14 +466,16 @@ static void fl_init_dissector(struct cls_fl_head *head, FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control); FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic); - FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt, - FLOW_DISSECTOR_KEY_ETH_ADDRS, eth); - FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4); - FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6); - FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt, - FLOW_DISSECTOR_KEY_PORTS, tp); + FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FLOW_DISSECTOR_KEY_ETH_ADDRS, eth); + FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4); + FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6); + FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FLOW_DISSECTOR_KEY_PORTS, tp); + FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FLOW_DISSECTOR_KEY_VLAN, vlan); skb_flow_dissector_init(&head->dissector, keys, cnt); } @@ -478,10 +514,12 @@ static int fl_set_parms(struct net *net, struct tcf_proto *tp, struct tcf_exts e; int err; - tcf_exts_init(&e, TCA_FLOWER_ACT, 0); - err = tcf_exts_validate(net, tp, tb, est, &e, ovr); + err = tcf_exts_init(&e, TCA_FLOWER_ACT, 0); if (err < 0) return err; + err = tcf_exts_validate(net, tp, tb, est, &e, ovr); + if (err < 0) + goto errout; if (tb[TCA_FLOWER_CLASSID]) { f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]); @@ -550,7 +588,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, if (!fnew) return -ENOBUFS; - tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0); + err = tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0); + if (err < 0) + goto errout; if (!handle) { handle = fl_grab_new_handle(tp, head); @@ -614,6 +654,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, return 0; errout: + tcf_exts_destroy(&fnew->exts); kfree(fnew); return err; } @@ -668,6 +709,29 @@ static int fl_dump_key_val(struct sk_buff *skb, return 0; } +static int fl_dump_key_vlan(struct sk_buff *skb, + struct flow_dissector_key_vlan *vlan_key, + struct flow_dissector_key_vlan *vlan_mask) +{ + int err; + + if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask))) + return 0; + if (vlan_mask->vlan_id) { + err = nla_put_u16(skb, TCA_FLOWER_KEY_VLAN_ID, + vlan_key->vlan_id); + if (err) + return err; + } + if (vlan_mask->vlan_priority) { + err = nla_put_u8(skb, TCA_FLOWER_KEY_VLAN_PRIO, + vlan_key->vlan_priority); + if (err) + return err; + } + return 0; +} + static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, struct sk_buff *skb, struct tcmsg *t) { @@ -712,6 +776,10 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, 
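FL_KEY_IS_MASKED() replaces the old range test with a byte scan of the mask member. A self-contained userspace sketch of the same idea (a toy key layout is assumed, and memchr_inv() is open-coded as is_masked() here):

#include <stdio.h>
#include <stddef.h>

struct flow_key {
	unsigned char eth_dst[6];
	unsigned short vlan_id;
};

/* Returns non-zero if any byte in [p, p + len) differs from zero. */
static int is_masked(const void *p, size_t len)
{
	const unsigned char *c = p;
	size_t i;

	for (i = 0; i < len; i++)
		if (c[i])
			return 1;
	return 0;
}

#define KEY_IS_MASKED(mask, member) \
	is_masked((const char *)(mask) + offsetof(struct flow_key, member), \
		  sizeof(((struct flow_key *)0)->member))

int main(void)
{
	struct flow_key mask = { .vlan_id = 0x0fff };

	printf("eth_dst masked: %d\n", KEY_IS_MASKED(&mask, eth_dst));	/* 0 */
	printf("vlan_id masked: %d\n", KEY_IS_MASKED(&mask, vlan_id));	/* 1 */
	return 0;
}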
unsigned long fh, &mask->basic.n_proto, TCA_FLOWER_UNSPEC, sizeof(key->basic.n_proto))) goto nla_put_failure; + + if (fl_dump_key_vlan(skb, &key->vlan, &mask->vlan)) + goto nla_put_failure; + if ((key->basic.n_proto == htons(ETH_P_IP) || key->basic.n_proto == htons(ETH_P_IPV6)) && fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO, diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index f23a3b68bba6..cc0bda945800 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c @@ -195,10 +195,12 @@ fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f, u32 mask; int err; - tcf_exts_init(&e, TCA_FW_ACT, TCA_FW_POLICE); - err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr); + err = tcf_exts_init(&e, TCA_FW_ACT, TCA_FW_POLICE); if (err < 0) return err; + err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr); + if (err < 0) + goto errout; if (tb[TCA_FW_CLASSID]) { f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]); @@ -270,10 +272,15 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, #endif /* CONFIG_NET_CLS_IND */ fnew->tp = f->tp; - tcf_exts_init(&fnew->exts, TCA_FW_ACT, TCA_FW_POLICE); + err = tcf_exts_init(&fnew->exts, TCA_FW_ACT, TCA_FW_POLICE); + if (err < 0) { + kfree(fnew); + return err; + } err = fw_change_attrs(net, tp, fnew, tb, tca, base, ovr); if (err < 0) { + tcf_exts_destroy(&fnew->exts); kfree(fnew); return err; } @@ -313,7 +320,9 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, if (f == NULL) return -ENOBUFS; - tcf_exts_init(&f->exts, TCA_FW_ACT, TCA_FW_POLICE); + err = tcf_exts_init(&f->exts, TCA_FW_ACT, TCA_FW_POLICE); + if (err < 0) + goto errout; f->id = handle; f->tp = tp; @@ -328,6 +337,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, return 0; errout: + tcf_exts_destroy(&f->exts); kfree(f); return err; } diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index 08a3b0a6f5ab..c91e65d81a48 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c @@ -383,17 +383,19 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp, struct nlattr **tb, struct nlattr *est, int new, bool ovr) { - int err; u32 id = 0, to = 0, nhandle = 0x8000; struct route4_filter *fp; unsigned int h1; struct route4_bucket *b; struct tcf_exts e; + int err; - tcf_exts_init(&e, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE); - err = tcf_exts_validate(net, tp, tb, est, &e, ovr); + err = tcf_exts_init(&e, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE); if (err < 0) return err; + err = tcf_exts_validate(net, tp, tb, est, &e, ovr); + if (err < 0) + goto errout; err = -EINVAL; if (tb[TCA_ROUTE4_TO]) { @@ -503,7 +505,10 @@ static int route4_change(struct net *net, struct sk_buff *in_skb, if (!f) goto errout; - tcf_exts_init(&f->exts, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE); + err = tcf_exts_init(&f->exts, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE); + if (err < 0) + goto errout; + if (fold) { f->id = fold->id; f->iif = fold->iif; @@ -557,6 +562,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb, return 0; errout: + tcf_exts_destroy(&f->exts); kfree(f); return err; } diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h index f9c9fc075fe6..4f05a19fb073 100644 --- a/net/sched/cls_rsvp.h +++ b/net/sched/cls_rsvp.h @@ -487,10 +487,12 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb, if (err < 0) return err; - tcf_exts_init(&e, TCA_RSVP_ACT, TCA_RSVP_POLICE); - err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr); + err = tcf_exts_init(&e, TCA_RSVP_ACT, TCA_RSVP_POLICE); if (err < 0) return 
err; + err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr); + if (err < 0) + goto errout2; f = (struct rsvp_filter *)*arg; if (f) { @@ -506,7 +508,11 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb, goto errout2; } - tcf_exts_init(&n->exts, TCA_RSVP_ACT, TCA_RSVP_POLICE); + err = tcf_exts_init(&n->exts, TCA_RSVP_ACT, TCA_RSVP_POLICE); + if (err < 0) { + kfree(n); + goto errout2; + } if (tb[TCA_RSVP_CLASSID]) { n->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]); @@ -530,7 +536,9 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb, if (f == NULL) goto errout2; - tcf_exts_init(&f->exts, TCA_RSVP_ACT, TCA_RSVP_POLICE); + err = tcf_exts_init(&f->exts, TCA_RSVP_ACT, TCA_RSVP_POLICE); + if (err < 0) + goto errout; h2 = 16; if (tb[TCA_RSVP_SRC]) { memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src)); @@ -627,6 +635,7 @@ insert: goto insert; errout: + tcf_exts_destroy(&f->exts); kfree(f); errout2: tcf_exts_destroy(&e); diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index 944c8ff45055..d9500709831f 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -219,10 +219,10 @@ static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = { [TCA_TCINDEX_CLASSID] = { .type = NLA_U32 }, }; -static void tcindex_filter_result_init(struct tcindex_filter_result *r) +static int tcindex_filter_result_init(struct tcindex_filter_result *r) { memset(r, 0, sizeof(*r)); - tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); + return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); } static void __tcindex_partial_destroy(struct rcu_head *head) @@ -233,23 +233,57 @@ static void __tcindex_partial_destroy(struct rcu_head *head) kfree(p); } +static void tcindex_free_perfect_hash(struct tcindex_data *cp) +{ + int i; + + for (i = 0; i < cp->hash; i++) + tcf_exts_destroy(&cp->perfect[i].exts); + kfree(cp->perfect); +} + +static int tcindex_alloc_perfect_hash(struct tcindex_data *cp) +{ + int i, err = 0; + + cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result), + GFP_KERNEL); + if (!cp->perfect) + return -ENOMEM; + + for (i = 0; i < cp->hash; i++) { + err = tcf_exts_init(&cp->perfect[i].exts, + TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); + if (err < 0) + goto errout; + } + + return 0; + +errout: + tcindex_free_perfect_hash(cp); + return err; +} + static int tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, u32 handle, struct tcindex_data *p, struct tcindex_filter_result *r, struct nlattr **tb, struct nlattr *est, bool ovr) { - int err, balloc = 0; struct tcindex_filter_result new_filter_result, *old_r = r; struct tcindex_filter_result cr; - struct tcindex_data *cp, *oldp; + struct tcindex_data *cp = NULL, *oldp; struct tcindex_filter *f = NULL; /* make gcc behave */ + int err, balloc = 0; struct tcf_exts e; - tcf_exts_init(&e, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); - err = tcf_exts_validate(net, tp, tb, est, &e, ovr); + err = tcf_exts_init(&e, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); if (err < 0) return err; + err = tcf_exts_validate(net, tp, tb, est, &e, ovr); + if (err < 0) + goto errout; err = -ENOMEM; /* tcindex_data attributes must look atomic to classifier/lookup so @@ -270,19 +304,20 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, if (p->perfect) { int i; - cp->perfect = kmemdup(p->perfect, - sizeof(*r) * cp->hash, GFP_KERNEL); - if (!cp->perfect) + if (tcindex_alloc_perfect_hash(cp) < 0) goto errout; for (i = 0; i < cp->hash; i++) - 
tcf_exts_init(&cp->perfect[i].exts, - TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); + cp->perfect[i].res = p->perfect[i].res; balloc = 1; } cp->h = p->h; - tcindex_filter_result_init(&new_filter_result); - tcindex_filter_result_init(&cr); + err = tcindex_filter_result_init(&new_filter_result); + if (err < 0) + goto errout1; + err = tcindex_filter_result_init(&cr); + if (err < 0) + goto errout1; if (old_r) cr.res = r->res; @@ -338,15 +373,8 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, err = -ENOMEM; if (!cp->perfect && !cp->h) { if (valid_perfect_hash(cp)) { - int i; - - cp->perfect = kcalloc(cp->hash, sizeof(*r), GFP_KERNEL); - if (!cp->perfect) + if (tcindex_alloc_perfect_hash(cp) < 0) goto errout_alloc; - for (i = 0; i < cp->hash; i++) - tcf_exts_init(&cp->perfect[i].exts, - TCA_TCINDEX_ACT, - TCA_TCINDEX_POLICE); balloc = 1; } else { struct tcindex_filter __rcu **hash; @@ -373,8 +401,12 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, if (!f) goto errout_alloc; f->key = handle; - tcindex_filter_result_init(&f->result); f->next = NULL; + err = tcindex_filter_result_init(&f->result); + if (err < 0) { + kfree(f); + goto errout_alloc; + } } if (tb[TCA_TCINDEX_CLASSID]) { @@ -387,8 +419,13 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, else tcf_exts_change(tp, &cr.exts, &e); - if (old_r && old_r != r) - tcindex_filter_result_init(old_r); + if (old_r && old_r != r) { + err = tcindex_filter_result_init(old_r); + if (err < 0) { + kfree(f); + goto errout_alloc; + } + } oldp = p; r->res = cr.res; @@ -415,9 +452,12 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, errout_alloc: if (balloc == 1) - kfree(cp->perfect); + tcindex_free_perfect_hash(cp); else if (balloc == 2) kfree(cp->h); +errout1: + tcf_exts_destroy(&cr.exts); + tcf_exts_destroy(&new_filter_result.exts); errout: kfree(cp); tcf_exts_destroy(&e); diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index ffe593efe930..a29263a9d8c1 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -709,13 +709,15 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp, struct tc_u_knode *n, struct nlattr **tb, struct nlattr *est, bool ovr) { - int err; struct tcf_exts e; + int err; - tcf_exts_init(&e, TCA_U32_ACT, TCA_U32_POLICE); - err = tcf_exts_validate(net, tp, tb, est, &e, ovr); + err = tcf_exts_init(&e, TCA_U32_ACT, TCA_U32_POLICE); if (err < 0) return err; + err = tcf_exts_validate(net, tp, tb, est, &e, ovr); + if (err < 0) + goto errout; err = -EINVAL; if (tb[TCA_U32_LINK]) { @@ -833,7 +835,10 @@ static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp, new->tp = tp; memcpy(&new->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key)); - tcf_exts_init(&new->exts, TCA_U32_ACT, TCA_U32_POLICE); + if (tcf_exts_init(&new->exts, TCA_U32_ACT, TCA_U32_POLICE)) { + kfree(new); + return NULL; + } return new; } @@ -985,9 +990,12 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, n->handle = handle; n->fshift = s->hmask ? 
ffs(ntohl(s->hmask)) - 1 : 0; n->flags = flags; - tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE); n->tp = tp; + err = tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE); + if (err < 0) + goto errout; + #ifdef CONFIG_CLS_U32_MARK n->pcpu_success = alloc_percpu(u32); if (!n->pcpu_success) { @@ -1028,9 +1036,10 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, errhw: #ifdef CONFIG_CLS_U32_MARK free_percpu(n->pcpu_success); -errout: #endif +errout: + tcf_exts_destroy(&n->exts); #ifdef CONFIG_CLS_U32_PERF free_percpu(n->pf); #endif diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 12ebde845523..d677b3484d81 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -29,6 +29,7 @@ #include <linux/hrtimer.h> #include <linux/lockdep.h> #include <linux/slab.h> +#include <linux/hashtable.h> #include <net/net_namespace.h> #include <net/sock.h> @@ -259,37 +260,40 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle) { struct Qdisc *q; + if (!qdisc_dev(root)) + return (root->handle == handle ? root : NULL); + if (!(root->flags & TCQ_F_BUILTIN) && root->handle == handle) return root; - list_for_each_entry_rcu(q, &root->list, list) { + hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle) { if (q->handle == handle) return q; } return NULL; } -void qdisc_list_add(struct Qdisc *q) +void qdisc_hash_add(struct Qdisc *q) { if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) { struct Qdisc *root = qdisc_dev(q)->qdisc; WARN_ON_ONCE(root == &noop_qdisc); ASSERT_RTNL(); - list_add_tail_rcu(&q->list, &root->list); + hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle); } } -EXPORT_SYMBOL(qdisc_list_add); +EXPORT_SYMBOL(qdisc_hash_add); -void qdisc_list_del(struct Qdisc *q) +void qdisc_hash_del(struct Qdisc *q) { if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) { ASSERT_RTNL(); - list_del_rcu(&q->list); + hash_del_rcu(&q->hash); } } -EXPORT_SYMBOL(qdisc_list_del); +EXPORT_SYMBOL(qdisc_hash_del); struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) { @@ -998,7 +1002,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, goto err_out4; } - qdisc_list_add(sch); + qdisc_hash_add(sch); return sch; } @@ -1431,10 +1435,11 @@ err_out: static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb, struct netlink_callback *cb, - int *q_idx_p, int s_q_idx) + int *q_idx_p, int s_q_idx, bool recur) { int ret = 0, q_idx = *q_idx_p; struct Qdisc *q; + int b; if (!root) return 0; @@ -1449,7 +1454,17 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb, goto done; q_idx++; } - list_for_each_entry(q, &root->list, list) { + + /* If dumping singletons, there is no qdisc_dev(root) and the singleton + * itself has already been dumped. 
+ * + * If we've already dumped the top-level (ingress) qdisc above and the global + * qdisc hashtable, we don't want to hit it again + */ + if (!qdisc_dev(root) || !recur) + goto out; + + hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) { if (q_idx < s_q_idx) { q_idx++; continue; @@ -1490,13 +1505,13 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) s_q_idx = 0; q_idx = 0; - if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0) + if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx, true) < 0) goto done; dev_queue = dev_ingress_queue(dev); if (dev_queue && tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, - &q_idx, s_q_idx) < 0) + &q_idx, s_q_idx, false) < 0) goto done; cont: @@ -1765,6 +1780,7 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb, int *t_p, int s_t) { struct Qdisc *q; + int b; if (!root) return 0; @@ -1772,7 +1788,10 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb, if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0) return -1; - list_for_each_entry(q, &root->list, list) { + if (!qdisc_dev(root)) + return 0; + + hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) { if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0) return -1; } diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 657c13362b19..0d21b567ff27 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -423,7 +423,6 @@ struct Qdisc noop_qdisc = { .dequeue = noop_dequeue, .flags = TCQ_F_BUILTIN, .ops = &noop_qdisc_ops, - .list = LIST_HEAD_INIT(noop_qdisc.list), .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock), .dev_queue = &noop_netdev_queue, .running = SEQCNT_ZERO(noop_qdisc.running), @@ -613,7 +612,6 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p); sch->padded = (char *) sch - (char *) p; } - INIT_LIST_HEAD(&sch->list); skb_queue_head_init(&sch->q); spin_lock_init(&sch->busylock); @@ -701,7 +699,7 @@ void qdisc_destroy(struct Qdisc *qdisc) return; #ifdef CONFIG_NET_SCHED - qdisc_list_del(qdisc); + qdisc_hash_del(qdisc); qdisc_put_stab(rtnl_dereference(qdisc->stab)); #endif @@ -789,6 +787,10 @@ static void attach_default_qdiscs(struct net_device *dev) qdisc->ops->attach(qdisc); } } +#ifdef CONFIG_NET_SCHED + if (dev->qdisc) + qdisc_hash_add(dev->qdisc); +#endif } static void transition_one_qdisc(struct net_device *dev, diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 3ddc7bd74ecb..000f1d36128e 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -142,8 +142,6 @@ struct hfsc_class { link-sharing, max(myf, cfmin) */ u64 cl_myf; /* my fit-time (calculated from this class's own upperlimit curve) */ - u64 cl_myfadj; /* my fit-time adjustment (to cancel - history dependence) */ u64 cl_cfmin; /* earliest children's fit-time (used with cl_myf to obtain cl_f) */ u64 cl_cvtmin; /* minimal virtual time among the @@ -151,11 +149,8 @@ struct hfsc_class { (monotonic within a period) */ u64 cl_vtadj; /* intra-period cumulative vt adjustment */ - u64 cl_vtoff; /* inter-period cumulative vt offset */ - u64 cl_cvtmax; /* max child's vt in the last period */ - u64 cl_cvtoff; /* cumulative cvtmax of all periods */ - u64 cl_pcvtoff; /* parent's cvtoff at initialization - time */ + u64 cl_cvtoff; /* largest virtual time seen among + the children */ struct internal_sc cl_rsc; /* internal real-time service curve */ struct internal_sc cl_fsc; /* internal fair service curve */ @@ -701,28 +696,16 @@ 
init_vf(struct hfsc_class *cl, unsigned int len) } else { /* * first child for a new parent backlog period. - * add parent's cvtmax to cvtoff to make a new - * vt (vtoff + vt) larger than the vt in the - * last period for all children. + * initialize cl_vt to the highest value seen + * among the siblings. this is analogous to + * what cur_time would provide in realtime case. */ - vt = cl->cl_parent->cl_cvtmax; - cl->cl_parent->cl_cvtoff += vt; - cl->cl_parent->cl_cvtmax = 0; + cl->cl_vt = cl->cl_parent->cl_cvtoff; cl->cl_parent->cl_cvtmin = 0; - cl->cl_vt = 0; } - cl->cl_vtoff = cl->cl_parent->cl_cvtoff - - cl->cl_pcvtoff; - /* update the virtual curve */ - vt = cl->cl_vt + cl->cl_vtoff; - rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt, - cl->cl_total); - if (cl->cl_virtual.x == vt) { - cl->cl_virtual.x -= cl->cl_vtoff; - cl->cl_vtoff = 0; - } + rtsc_min(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total); cl->cl_vtadj = 0; cl->cl_vtperiod++; /* increment vt period */ @@ -745,7 +728,6 @@ init_vf(struct hfsc_class *cl, unsigned int len) /* compute myf */ cl->cl_myf = rtsc_y2x(&cl->cl_ulimit, cl->cl_total); - cl->cl_myfadj = 0; } } @@ -779,8 +761,7 @@ update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time) go_passive = 0; /* update vt */ - cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total) - - cl->cl_vtoff + cl->cl_vtadj; + cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total) + cl->cl_vtadj; /* * if vt of the class is smaller than cvtmin, @@ -795,9 +776,9 @@ update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time) if (go_passive) { /* no more active child, going passive */ - /* update cvtmax of the parent class */ - if (cl->cl_vt > cl->cl_parent->cl_cvtmax) - cl->cl_parent->cl_cvtmax = cl->cl_vt; + /* update cvtoff of the parent class */ + if (cl->cl_vt > cl->cl_parent->cl_cvtoff) + cl->cl_parent->cl_cvtoff = cl->cl_vt; /* remove this class from the vt tree */ vttree_remove(cl); @@ -813,9 +794,10 @@ update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time) /* update f */ if (cl->cl_flags & HFSC_USC) { + cl->cl_myf = rtsc_y2x(&cl->cl_ulimit, cl->cl_total); +#if 0 cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit, cl->cl_total); -#if 0 /* * This code causes classes to stay way under their * limit when multiple classes are used at gigabit @@ -940,7 +922,7 @@ static void hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc) { sc2isc(fsc, &cl->cl_fsc); - rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vtoff + cl->cl_vt, cl->cl_total); + rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total); cl->cl_flags |= HFSC_FSC; } @@ -1094,7 +1076,6 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, if (parent->level == 0) hfsc_purge_queue(sch, parent); hfsc_adjust_levels(parent); - cl->cl_pcvtoff = parent->cl_cvtoff; sch_tree_unlock(sch); qdisc_class_hash_grow(sch, &q->clhash); @@ -1482,16 +1463,12 @@ hfsc_reset_class(struct hfsc_class *cl) cl->cl_e = 0; cl->cl_vt = 0; cl->cl_vtadj = 0; - cl->cl_vtoff = 0; cl->cl_cvtmin = 0; - cl->cl_cvtmax = 0; cl->cl_cvtoff = 0; - cl->cl_pcvtoff = 0; cl->cl_vtperiod = 0; cl->cl_parentperiod = 0; cl->cl_f = 0; cl->cl_myf = 0; - cl->cl_myfadj = 0; cl->cl_cfmin = 0; cl->cl_nactive = 0; diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c index b9439827c172..2bc8d7f8df16 100644 --- a/net/sched/sch_mq.c +++ b/net/sched/sch_mq.c @@ -88,7 +88,7 @@ static void mq_attach(struct Qdisc *sch) qdisc_destroy(old); #ifdef CONFIG_NET_SCHED if (ntx < dev->real_num_tx_queues) - qdisc_list_add(qdisc); + qdisc_hash_add(qdisc); 
#endif } diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c index 549c66359924..b5c502c78143 100644 --- a/net/sched/sch_mqprio.c +++ b/net/sched/sch_mqprio.c @@ -182,7 +182,7 @@ static void mqprio_attach(struct Qdisc *sch) if (old) qdisc_destroy(old); if (ntx < dev->real_num_tx_queues) - qdisc_list_add(qdisc); + qdisc_hash_add(qdisc); } kfree(priv->qdiscs); priv->qdiscs = NULL; diff --git a/net/strparser/Kconfig b/net/strparser/Kconfig new file mode 100644 index 000000000000..6cff3f6d0c3a --- /dev/null +++ b/net/strparser/Kconfig @@ -0,0 +1,4 @@ + +config STREAM_PARSER + tristate + default n diff --git a/net/strparser/Makefile b/net/strparser/Makefile new file mode 100644 index 000000000000..858a126ebaa0 --- /dev/null +++ b/net/strparser/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_STREAM_PARSER) += strparser.o diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c new file mode 100644 index 000000000000..5c7549b5b92c --- /dev/null +++ b/net/strparser/strparser.c @@ -0,0 +1,510 @@ +/* + * Stream Parser + * + * Copyright (c) 2016 Tom Herbert <tom@herbertland.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + */ + +#include <linux/bpf.h> +#include <linux/errno.h> +#include <linux/errqueue.h> +#include <linux/file.h> +#include <linux/in.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/net.h> +#include <linux/netdevice.h> +#include <linux/poll.h> +#include <linux/rculist.h> +#include <linux/skbuff.h> +#include <linux/socket.h> +#include <linux/uaccess.h> +#include <linux/workqueue.h> +#include <net/strparser.h> +#include <net/netns/generic.h> +#include <net/sock.h> + +static struct workqueue_struct *strp_wq; + +struct _strp_rx_msg { + /* Internal cb structure. struct strp_rx_msg must be first for passing + * to upper layer. 
+ */ + struct strp_rx_msg strp; + int accum_len; + int early_eaten; +}; + +static inline struct _strp_rx_msg *_strp_rx_msg(struct sk_buff *skb) +{ + return (struct _strp_rx_msg *)((void *)skb->cb + + offsetof(struct qdisc_skb_cb, data)); +} + +/* Lower lock held */ +static void strp_abort_rx_strp(struct strparser *strp, int err) +{ + struct sock *csk = strp->sk; + + /* Unrecoverable error in receive */ + + del_timer(&strp->rx_msg_timer); + + if (strp->rx_stopped) + return; + + strp->rx_stopped = 1; + + /* Report an error on the lower socket */ + csk->sk_err = err; + csk->sk_error_report(csk); +} + +static void strp_start_rx_timer(struct strparser *strp) +{ + if (strp->sk->sk_rcvtimeo) + mod_timer(&strp->rx_msg_timer, strp->sk->sk_rcvtimeo); +} + +/* Lower lock held */ +static void strp_parser_err(struct strparser *strp, int err, + read_descriptor_t *desc) +{ + desc->error = err; + kfree_skb(strp->rx_skb_head); + strp->rx_skb_head = NULL; + strp->cb.abort_parser(strp, err); +} + +static inline int strp_peek_len(struct strparser *strp) +{ + struct socket *sock = strp->sk->sk_socket; + + return sock->ops->peek_len(sock); +} + +/* Lower socket lock held */ +static int strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, + unsigned int orig_offset, size_t orig_len) +{ + struct strparser *strp = (struct strparser *)desc->arg.data; + struct _strp_rx_msg *rxm; + struct sk_buff *head, *skb; + size_t eaten = 0, cand_len; + ssize_t extra; + int err; + bool cloned_orig = false; + + if (strp->rx_paused) + return 0; + + head = strp->rx_skb_head; + if (head) { + /* Message already in progress */ + + rxm = _strp_rx_msg(head); + if (unlikely(rxm->early_eaten)) { + /* Already some number of bytes on the receive sock + * data saved in rx_skb_head, just indicate they + * are consumed. + */ + eaten = orig_len <= rxm->early_eaten ? + orig_len : rxm->early_eaten; + rxm->early_eaten -= eaten; + + return eaten; + } + + if (unlikely(orig_offset)) { + /* Getting data with a non-zero offset when a message is + * in progress is not expected. If it does happen, we + * need to clone and pull since we can't deal with + * offsets in the skbs for a message expect in the head. + */ + orig_skb = skb_clone(orig_skb, GFP_ATOMIC); + if (!orig_skb) { + STRP_STATS_INCR(strp->stats.rx_mem_fail); + desc->error = -ENOMEM; + return 0; + } + if (!pskb_pull(orig_skb, orig_offset)) { + STRP_STATS_INCR(strp->stats.rx_mem_fail); + kfree_skb(orig_skb); + desc->error = -ENOMEM; + return 0; + } + cloned_orig = true; + orig_offset = 0; + } + + if (!strp->rx_skb_nextp) { + /* We are going to append to the frags_list of head. + * Need to unshare the frag_list. + */ + err = skb_unclone(head, GFP_ATOMIC); + if (err) { + STRP_STATS_INCR(strp->stats.rx_mem_fail); + desc->error = err; + return 0; + } + + if (unlikely(skb_shinfo(head)->frag_list)) { + /* We can't append to an sk_buff that already + * has a frag_list. We create a new head, point + * the frag_list of that to the old head, and + * then are able to use the old head->next for + * appending to the message. 
+ */ + if (WARN_ON(head->next)) { + desc->error = -EINVAL; + return 0; + } + + skb = alloc_skb(0, GFP_ATOMIC); + if (!skb) { + STRP_STATS_INCR(strp->stats.rx_mem_fail); + desc->error = -ENOMEM; + return 0; + } + skb->len = head->len; + skb->data_len = head->len; + skb->truesize = head->truesize; + *_strp_rx_msg(skb) = *_strp_rx_msg(head); + strp->rx_skb_nextp = &head->next; + skb_shinfo(skb)->frag_list = head; + strp->rx_skb_head = skb; + head = skb; + } else { + strp->rx_skb_nextp = + &skb_shinfo(head)->frag_list; + } + } + } + + while (eaten < orig_len) { + /* Always clone since we will consume something */ + skb = skb_clone(orig_skb, GFP_ATOMIC); + if (!skb) { + STRP_STATS_INCR(strp->stats.rx_mem_fail); + desc->error = -ENOMEM; + break; + } + + cand_len = orig_len - eaten; + + head = strp->rx_skb_head; + if (!head) { + head = skb; + strp->rx_skb_head = head; + /* Will set rx_skb_nextp on next packet if needed */ + strp->rx_skb_nextp = NULL; + rxm = _strp_rx_msg(head); + memset(rxm, 0, sizeof(*rxm)); + rxm->strp.offset = orig_offset + eaten; + } else { + /* Unclone since we may be appending to an skb that we + * already share a frag_list with. + */ + err = skb_unclone(skb, GFP_ATOMIC); + if (err) { + STRP_STATS_INCR(strp->stats.rx_mem_fail); + desc->error = err; + break; + } + + rxm = _strp_rx_msg(head); + *strp->rx_skb_nextp = skb; + strp->rx_skb_nextp = &skb->next; + head->data_len += skb->len; + head->len += skb->len; + head->truesize += skb->truesize; + } + + if (!rxm->strp.full_len) { + ssize_t len; + + len = (*strp->cb.parse_msg)(strp, head); + + if (!len) { + /* Need more header to determine length */ + if (!rxm->accum_len) { + /* Start RX timer for new message */ + strp_start_rx_timer(strp); + } + rxm->accum_len += cand_len; + eaten += cand_len; + STRP_STATS_INCR(strp->stats.rx_need_more_hdr); + WARN_ON(eaten != orig_len); + break; + } else if (len < 0) { + if (len == -ESTRPIPE && rxm->accum_len) { + len = -ENODATA; + strp->rx_unrecov_intr = 1; + } else { + strp->rx_interrupted = 1; + } + strp_parser_err(strp, err, desc); + break; + } else if (len > strp->sk->sk_rcvbuf) { + /* Message length exceeds maximum allowed */ + STRP_STATS_INCR(strp->stats.rx_msg_too_big); + strp_parser_err(strp, -EMSGSIZE, desc); + break; + } else if (len <= (ssize_t)head->len - + skb->len - rxm->strp.offset) { + /* Length must be into new skb (and also + * greater than zero) + */ + STRP_STATS_INCR(strp->stats.rx_bad_hdr_len); + strp_parser_err(strp, -EPROTO, desc); + break; + } + + rxm->strp.full_len = len; + } + + extra = (ssize_t)(rxm->accum_len + cand_len) - + rxm->strp.full_len; + + if (extra < 0) { + /* Message not complete yet. */ + if (rxm->strp.full_len - rxm->accum_len > + strp_peek_len(strp)) { + /* Don't have the whole messages in the socket + * buffer. Set strp->rx_need_bytes to wait for + * the rest of the message. Also, set "early + * eaten" since we've already buffered the skb + * but don't consume yet per strp_read_sock. 
+ */ + + if (!rxm->accum_len) { + /* Start RX timer for new message */ + strp_start_rx_timer(strp); + } + + strp->rx_need_bytes = rxm->strp.full_len - + rxm->accum_len; + rxm->accum_len += cand_len; + rxm->early_eaten = cand_len; + STRP_STATS_ADD(strp->stats.rx_bytes, cand_len); + desc->count = 0; /* Stop reading socket */ + break; + } + rxm->accum_len += cand_len; + eaten += cand_len; + WARN_ON(eaten != orig_len); + break; + } + + /* Positive extra indicates ore bytes than needed for the + * message + */ + + WARN_ON(extra > cand_len); + + eaten += (cand_len - extra); + + /* Hurray, we have a new message! */ + del_timer(&strp->rx_msg_timer); + strp->rx_skb_head = NULL; + STRP_STATS_INCR(strp->stats.rx_msgs); + + /* Give skb to upper layer */ + strp->cb.rcv_msg(strp, head); + + if (unlikely(strp->rx_paused)) { + /* Upper layer paused strp */ + break; + } + } + + if (cloned_orig) + kfree_skb(orig_skb); + + STRP_STATS_ADD(strp->stats.rx_bytes, eaten); + + return eaten; +} + +static int default_read_sock_done(struct strparser *strp, int err) +{ + return err; +} + +/* Called with lock held on lower socket */ +static int strp_read_sock(struct strparser *strp) +{ + struct socket *sock = strp->sk->sk_socket; + read_descriptor_t desc; + + desc.arg.data = strp; + desc.error = 0; + desc.count = 1; /* give more than one skb per call */ + + /* sk should be locked here, so okay to do read_sock */ + sock->ops->read_sock(strp->sk, &desc, strp_recv); + + desc.error = strp->cb.read_sock_done(strp, desc.error); + + return desc.error; +} + +/* Lower sock lock held */ +void strp_data_ready(struct strparser *strp) +{ + if (unlikely(strp->rx_stopped)) + return; + + /* This check is needed to synchronize with do_strp_rx_work. + * do_strp_rx_work acquires a process lock (lock_sock) whereas + * the lock held here is bh_lock_sock. The two locks can be + * held by different threads at the same time, but bh_lock_sock + * allows a thread in BH context to safely check if the process + * lock is held. In this case, if the lock is held, queue work. + */ + if (sock_owned_by_user(strp->sk)) { + queue_work(strp_wq, &strp->rx_work); + return; + } + + if (strp->rx_paused) + return; + + if (strp->rx_need_bytes) { + if (strp_peek_len(strp) >= strp->rx_need_bytes) + strp->rx_need_bytes = 0; + else + return; + } + + if (strp_read_sock(strp) == -ENOMEM) + queue_work(strp_wq, &strp->rx_work); +} +EXPORT_SYMBOL_GPL(strp_data_ready); + +static void do_strp_rx_work(struct strparser *strp) +{ + read_descriptor_t rd_desc; + struct sock *csk = strp->sk; + + /* We need the read lock to synchronize with strp_data_ready. We + * need the socket lock for calling strp_read_sock. 
+ */ + lock_sock(csk); + + if (unlikely(strp->rx_stopped)) + goto out; + + if (strp->rx_paused) + goto out; + + rd_desc.arg.data = strp; + + if (strp_read_sock(strp) == -ENOMEM) + queue_work(strp_wq, &strp->rx_work); + +out: + release_sock(csk); +} + +static void strp_rx_work(struct work_struct *w) +{ + do_strp_rx_work(container_of(w, struct strparser, rx_work)); +} + +static void strp_rx_msg_timeout(unsigned long arg) +{ + struct strparser *strp = (struct strparser *)arg; + + /* Message assembly timed out */ + STRP_STATS_INCR(strp->stats.rx_msg_timeouts); + lock_sock(strp->sk); + strp->cb.abort_parser(strp, ETIMEDOUT); + release_sock(strp->sk); +} + +int strp_init(struct strparser *strp, struct sock *csk, + struct strp_callbacks *cb) +{ + struct socket *sock = csk->sk_socket; + + if (!cb || !cb->rcv_msg || !cb->parse_msg) + return -EINVAL; + + if (!sock->ops->read_sock || !sock->ops->peek_len) + return -EAFNOSUPPORT; + + memset(strp, 0, sizeof(*strp)); + + strp->sk = csk; + + setup_timer(&strp->rx_msg_timer, strp_rx_msg_timeout, + (unsigned long)strp); + + INIT_WORK(&strp->rx_work, strp_rx_work); + + strp->cb.rcv_msg = cb->rcv_msg; + strp->cb.parse_msg = cb->parse_msg; + strp->cb.read_sock_done = cb->read_sock_done ? : default_read_sock_done; + strp->cb.abort_parser = cb->abort_parser ? : strp_abort_rx_strp; + + return 0; +} +EXPORT_SYMBOL_GPL(strp_init); + +void strp_unpause(struct strparser *strp) +{ + strp->rx_paused = 0; + + /* Sync setting rx_paused with RX work */ + smp_mb(); + + queue_work(strp_wq, &strp->rx_work); +} +EXPORT_SYMBOL_GPL(strp_unpause); + +/* strp must already be stopped so that strp_recv will no longer be called. + * Note that strp_done is not called with the lower socket held. + */ +void strp_done(struct strparser *strp) +{ + WARN_ON(!strp->rx_stopped); + + del_timer_sync(&strp->rx_msg_timer); + cancel_work_sync(&strp->rx_work); + + if (strp->rx_skb_head) { + kfree_skb(strp->rx_skb_head); + strp->rx_skb_head = NULL; + } +} +EXPORT_SYMBOL_GPL(strp_done); + +void strp_stop(struct strparser *strp) +{ + strp->rx_stopped = 1; +} +EXPORT_SYMBOL_GPL(strp_stop); + +void strp_check_rcv(struct strparser *strp) +{ + queue_work(strp_wq, &strp->rx_work); +} +EXPORT_SYMBOL_GPL(strp_check_rcv); + +static int __init strp_mod_init(void) +{ + strp_wq = create_singlethread_workqueue("kstrp"); + + return 0; +} + +static void __exit strp_mod_exit(void) +{ +} +module_init(strp_mod_init); +module_exit(strp_mod_exit); +MODULE_LICENSE("GPL"); diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index a5fc9dd24aa9..1031a0327fff 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c @@ -1292,12 +1292,10 @@ bool switchdev_port_same_parent_id(struct net_device *a, struct switchdev_attr a_attr = { .orig_dev = a, .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID, - .flags = SWITCHDEV_F_NO_RECURSE, }; struct switchdev_attr b_attr = { .orig_dev = b, .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID, - .flags = SWITCHDEV_F_NO_RECURSE, }; if (switchdev_port_attr_get(a, &a_attr) || @@ -1306,89 +1304,4 @@ bool switchdev_port_same_parent_id(struct net_device *a, return netdev_phys_item_id_same(&a_attr.u.ppid, &b_attr.u.ppid); } - -static u32 switchdev_port_fwd_mark_get(struct net_device *dev, - struct net_device *group_dev) -{ - struct net_device *lower_dev; - struct list_head *iter; - - netdev_for_each_lower_dev(group_dev, lower_dev, iter) { - if (lower_dev == dev) - continue; - if (switchdev_port_same_parent_id(dev, lower_dev)) - return lower_dev->offload_fwd_mark; - return 
switchdev_port_fwd_mark_get(dev, lower_dev); - } - - return dev->ifindex; -} EXPORT_SYMBOL_GPL(switchdev_port_same_parent_id); - -static void switchdev_port_fwd_mark_reset(struct net_device *group_dev, - u32 old_mark, u32 *reset_mark) -{ - struct net_device *lower_dev; - struct list_head *iter; - - netdev_for_each_lower_dev(group_dev, lower_dev, iter) { - if (lower_dev->offload_fwd_mark == old_mark) { - if (!*reset_mark) - *reset_mark = lower_dev->ifindex; - lower_dev->offload_fwd_mark = *reset_mark; - } - switchdev_port_fwd_mark_reset(lower_dev, old_mark, reset_mark); - } -} - -/** - * switchdev_port_fwd_mark_set - Set port offload forwarding mark - * - * @dev: port device - * @group_dev: containing device - * @joining: true if dev is joining group; false if leaving group - * - * An ungrouped port's offload mark is just its ifindex. A grouped - * port's (member of a bridge, for example) offload mark is the ifindex - * of one of the ports in the group with the same parent (switch) ID. - * Ports on the same device in the same group will have the same mark. - * - * Example: - * - * br0 ifindex=9 - * sw1p1 ifindex=2 mark=2 - * sw1p2 ifindex=3 mark=2 - * sw2p1 ifindex=4 mark=5 - * sw2p2 ifindex=5 mark=5 - * - * If sw2p2 leaves the bridge, we'll have: - * - * br0 ifindex=9 - * sw1p1 ifindex=2 mark=2 - * sw1p2 ifindex=3 mark=2 - * sw2p1 ifindex=4 mark=4 - * sw2p2 ifindex=5 mark=5 - */ -void switchdev_port_fwd_mark_set(struct net_device *dev, - struct net_device *group_dev, - bool joining) -{ - u32 mark = dev->ifindex; - u32 reset_mark = 0; - - if (group_dev) { - ASSERT_RTNL(); - if (joining) - mark = switchdev_port_fwd_mark_get(dev, group_dev); - else if (dev->offload_fwd_mark == mark) - /* Ohoh, this port was the mark reference port, - * but it's leaving the group, so reset the - * mark for the remaining ports in the group. - */ - switchdev_port_fwd_mark_reset(group_dev, mark, - &reset_mark); - } - - dev->offload_fwd_mark = mark; -} -EXPORT_SYMBOL_GPL(switchdev_port_fwd_mark_set); diff --git a/net/sysctl_net.c b/net/sysctl_net.c index 46a71c701e7c..5bc1a3d57401 100644 --- a/net/sysctl_net.c +++ b/net/sysctl_net.c @@ -42,26 +42,37 @@ static int net_ctl_permissions(struct ctl_table_header *head, struct ctl_table *table) { struct net *net = container_of(head->set, struct net, sysctls); - kuid_t root_uid = make_kuid(net->user_ns, 0); - kgid_t root_gid = make_kgid(net->user_ns, 0); /* Allow network administrator to have same access as root. 
*/ - if (ns_capable_noaudit(net->user_ns, CAP_NET_ADMIN) || - uid_eq(root_uid, current_euid())) { + if (ns_capable(net->user_ns, CAP_NET_ADMIN)) { int mode = (table->mode >> 6) & 7; return (mode << 6) | (mode << 3) | mode; } - /* Allow netns root group to have the same access as the root group */ - if (in_egroup_p(root_gid)) { - int mode = (table->mode >> 3) & 7; - return (mode << 3) | mode; - } + return table->mode; } +static void net_ctl_set_ownership(struct ctl_table_header *head, + struct ctl_table *table, + kuid_t *uid, kgid_t *gid) +{ + struct net *net = container_of(head->set, struct net, sysctls); + kuid_t ns_root_uid; + kgid_t ns_root_gid; + + ns_root_uid = make_kuid(net->user_ns, 0); + if (uid_valid(ns_root_uid)) + *uid = ns_root_uid; + + ns_root_gid = make_kgid(net->user_ns, 0); + if (gid_valid(ns_root_gid)) + *gid = ns_root_gid; +} + static struct ctl_table_root net_sysctl_root = { .lookup = net_ctl_header_lookup, .permissions = net_ctl_permissions, + .set_ownership = net_ctl_set_ownership, }; static int __net_init sysctl_net_init(struct net *net) diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 65b1bbf133bd..975dbeb60ab0 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -42,6 +42,7 @@ #include "monitor.h" #include "bcast.h" #include "netlink.h" +#include "udp_media.h" #define MAX_ADDR_STR 60 @@ -56,6 +57,13 @@ static struct tipc_media * const media_info_array[] = { NULL }; +static struct tipc_bearer *bearer_get(struct net *net, int bearer_id) +{ + struct tipc_net *tn = tipc_net(net); + + return rcu_dereference_rtnl(tn->bearer_list[bearer_id]); +} + static void bearer_disable(struct net *net, struct tipc_bearer *b); /** @@ -323,6 +331,7 @@ restart: b->domain = disc_domain; b->net_plane = bearer_id + 'A'; b->priority = priority; + test_and_set_bit_lock(0, &b->up); res = tipc_disc_create(net, b, &b->bcast_addr, &skb); if (res) { @@ -360,15 +369,24 @@ static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b) */ void tipc_bearer_reset_all(struct net *net) { - struct tipc_net *tn = tipc_net(net); struct tipc_bearer *b; int i; for (i = 0; i < MAX_BEARERS; i++) { - b = rcu_dereference_rtnl(tn->bearer_list[i]); + b = bearer_get(net, i); + if (b) + clear_bit_unlock(0, &b->up); + } + for (i = 0; i < MAX_BEARERS; i++) { + b = bearer_get(net, i); if (b) tipc_reset_bearer(net, b); } + for (i = 0; i < MAX_BEARERS; i++) { + b = bearer_get(net, i); + if (b) + test_and_set_bit_lock(0, &b->up); + } } /** @@ -382,8 +400,9 @@ static void bearer_disable(struct net *net, struct tipc_bearer *b) int bearer_id = b->identity; pr_info("Disabling bearer <%s>\n", b->name); - b->media->disable_media(b); + clear_bit_unlock(0, &b->up); tipc_node_delete_links(net, bearer_id); + b->media->disable_media(b); RCU_INIT_POINTER(b->media_ptr, NULL); if (b->link_req) tipc_disc_delete(b->link_req); @@ -440,22 +459,16 @@ int tipc_l2_send_msg(struct net *net, struct sk_buff *skb, { struct net_device *dev; int delta; - void *tipc_ptr; dev = (struct net_device *)rcu_dereference_rtnl(b->media_ptr); if (!dev) return 0; - /* Send RESET message even if bearer is detached from device */ - tipc_ptr = rcu_dereference_rtnl(dev->tipc_ptr); - if (unlikely(!tipc_ptr && !msg_is_reset(buf_msg(skb)))) - goto drop; - - delta = dev->hard_header_len - skb_headroom(skb); - if ((delta > 0) && - pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) - goto drop; - + delta = SKB_DATA_ALIGN(dev->hard_header_len - skb_headroom(skb)); + if ((delta > 0) && pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) { + 
kfree_skb(skb); + return 0; + } skb_reset_network_header(skb); skb->dev = dev; skb->protocol = htons(ETH_P_TIPC); @@ -463,9 +476,6 @@ int tipc_l2_send_msg(struct net *net, struct sk_buff *skb, dev->dev_addr, skb->len); dev_queue_xmit(skb); return 0; -drop: - kfree_skb(skb); - return 0; } int tipc_bearer_mtu(struct net *net, u32 bearer_id) @@ -487,12 +497,12 @@ void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id, struct sk_buff *skb, struct tipc_media_addr *dest) { - struct tipc_net *tn = tipc_net(net); + struct tipc_msg *hdr = buf_msg(skb); struct tipc_bearer *b; rcu_read_lock(); - b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]); - if (likely(b)) + b = bearer_get(net, bearer_id); + if (likely(b && (test_bit(0, &b->up) || msg_is_reset(hdr)))) b->media->send_msg(net, skb, b, dest); else kfree_skb(skb); @@ -505,7 +515,6 @@ void tipc_bearer_xmit(struct net *net, u32 bearer_id, struct sk_buff_head *xmitq, struct tipc_media_addr *dst) { - struct tipc_net *tn = net_generic(net, tipc_net_id); struct tipc_bearer *b; struct sk_buff *skb, *tmp; @@ -513,12 +522,15 @@ void tipc_bearer_xmit(struct net *net, u32 bearer_id, return; rcu_read_lock(); - b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]); + b = bearer_get(net, bearer_id); if (unlikely(!b)) __skb_queue_purge(xmitq); skb_queue_walk_safe(xmitq, skb, tmp) { __skb_dequeue(xmitq); - b->media->send_msg(net, skb, b, dst); + if (likely(test_bit(0, &b->up) || msg_is_reset(buf_msg(skb)))) + b->media->send_msg(net, skb, b, dst); + else + kfree_skb(skb); } rcu_read_unlock(); } @@ -535,8 +547,8 @@ void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id, struct tipc_msg *hdr; rcu_read_lock(); - b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]); - if (unlikely(!b)) + b = bearer_get(net, bearer_id); + if (unlikely(!b || !test_bit(0, &b->up))) __skb_queue_purge(xmitq); skb_queue_walk_safe(xmitq, skb, tmp) { hdr = buf_msg(skb); @@ -566,7 +578,8 @@ static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev, rcu_read_lock(); b = rcu_dereference_rtnl(dev->tipc_ptr); - if (likely(b && (skb->pkt_type <= PACKET_BROADCAST))) { + if (likely(b && test_bit(0, &b->up) && + (skb->pkt_type <= PACKET_BROADCAST))) { skb->next = NULL; tipc_rcv(dev_net(dev), skb, b); rcu_read_unlock(); @@ -591,18 +604,9 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt, { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); - struct tipc_net *tn = tipc_net(net); struct tipc_bearer *b; - int i; b = rtnl_dereference(dev->tipc_ptr); - if (!b) { - for (i = 0; i < MAX_BEARERS; b = NULL, i++) { - b = rtnl_dereference(tn->bearer_list[i]); - if (b && (b->media_ptr == dev)) - break; - } - } if (!b) return NOTIFY_DONE; @@ -613,11 +617,10 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt, if (netif_carrier_ok(dev)) break; case NETDEV_UP: - rcu_assign_pointer(dev->tipc_ptr, b); + test_and_set_bit_lock(0, &b->up); break; case NETDEV_GOING_DOWN: - RCU_INIT_POINTER(dev->tipc_ptr, NULL); - synchronize_net(); + clear_bit_unlock(0, &b->up); tipc_reset_bearer(net, b); break; case NETDEV_CHANGEMTU: @@ -709,6 +712,14 @@ static int __tipc_nl_add_bearer(struct tipc_nl_msg *msg, goto prop_msg_full; nla_nest_end(msg->skb, prop); + +#ifdef CONFIG_TIPC_MEDIA_UDP + if (bearer->media->type_id == TIPC_MEDIA_TYPE_UDP) { + if (tipc_udp_nl_add_bearer_data(msg, bearer)) + goto attr_msg_full; + } +#endif + nla_nest_end(msg->skb, attrs); genlmsg_end(msg->skb, hdr); @@ -895,6 +906,49 @@ int 
tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info) return 0; } +int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info) +{ + int err; + char *name; + struct tipc_bearer *b; + struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1]; + struct net *net = sock_net(skb->sk); + + if (!info->attrs[TIPC_NLA_BEARER]) + return -EINVAL; + + err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX, + info->attrs[TIPC_NLA_BEARER], + tipc_nl_bearer_policy); + if (err) + return err; + + if (!attrs[TIPC_NLA_BEARER_NAME]) + return -EINVAL; + name = nla_data(attrs[TIPC_NLA_BEARER_NAME]); + + rtnl_lock(); + b = tipc_bearer_find(net, name); + if (!b) { + rtnl_unlock(); + return -EINVAL; + } + +#ifdef CONFIG_TIPC_MEDIA_UDP + if (attrs[TIPC_NLA_BEARER_UDP_OPTS]) { + err = tipc_udp_nl_bearer_add(b, + attrs[TIPC_NLA_BEARER_UDP_OPTS]); + if (err) { + rtnl_unlock(); + return err; + } + } +#endif + rtnl_unlock(); + + return 0; +} + int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info) { int err; diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h index 43757f1f9cb3..78892e2f53e3 100644 --- a/net/tipc/bearer.h +++ b/net/tipc/bearer.h @@ -150,6 +150,7 @@ struct tipc_bearer { u32 identity; struct tipc_link_req *link_req; char net_plane; + unsigned long up; }; struct tipc_bearer_names { @@ -180,6 +181,7 @@ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info); int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb); int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info); int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info); +int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info); int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb); int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info); diff --git a/net/tipc/link.c b/net/tipc/link.c index 877d94f34814..2c6e1b9e024b 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -807,7 +807,7 @@ void link_prepare_wakeup(struct tipc_link *l) skb_queue_walk_safe(&l->wakeupq, skb, tmp) { imp = TIPC_SKB_CB(skb)->chain_imp; - lim = l->window + l->backlog[imp].limit; + lim = l->backlog[imp].limit; pnd[imp] += TIPC_SKB_CB(skb)->chain_sz; if ((pnd[imp] + l->backlog[imp].len) >= lim) break; @@ -873,9 +873,11 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, struct sk_buff *skb, *_skb, *bskb; /* Match msg importance against this and all higher backlog limits: */ - for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) { - if (unlikely(l->backlog[i].len >= l->backlog[i].limit)) - return link_schedule_user(l, list); + if (!skb_queue_empty(backlogq)) { + for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) { + if (unlikely(l->backlog[i].len >= l->backlog[i].limit)) + return link_schedule_user(l, list); + } } if (unlikely(msg_size(hdr) > mtu)) { skb_queue_purge(list); @@ -1692,10 +1694,10 @@ void tipc_link_set_queue_limits(struct tipc_link *l, u32 win) int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE); l->window = win; - l->backlog[TIPC_LOW_IMPORTANCE].limit = win / 2; - l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = win; - l->backlog[TIPC_HIGH_IMPORTANCE].limit = win / 2 * 3; - l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2; + l->backlog[TIPC_LOW_IMPORTANCE].limit = max_t(u16, 50, win); + l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = max_t(u16, 100, win * 2); + l->backlog[TIPC_HIGH_IMPORTANCE].limit = max_t(u16, 150, win * 3); + l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4); l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = 
max_bulk; } diff --git a/net/tipc/net.h b/net/tipc/net.h index 77a7a118911d..c7c254902873 100644 --- a/net/tipc/net.h +++ b/net/tipc/net.h @@ -39,6 +39,8 @@ #include <net/genetlink.h> +extern const struct nla_policy tipc_nl_net_policy[]; + int tipc_net_start(struct net *net, u32 addr); void tipc_net_stop(struct net *net); diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c index a84daec0afe9..3200059d14b2 100644 --- a/net/tipc/netlink.c +++ b/net/tipc/netlink.c @@ -41,6 +41,7 @@ #include "link.h" #include "node.h" #include "net.h" +#include "udp_media.h" #include <net/genetlink.h> static const struct nla_policy tipc_nl_policy[TIPC_NLA_MAX + 1] = { @@ -161,6 +162,11 @@ static const struct genl_ops tipc_genl_v2_ops[] = { .policy = tipc_nl_policy, }, { + .cmd = TIPC_NL_BEARER_ADD, + .doit = tipc_nl_bearer_add, + .policy = tipc_nl_policy, + }, + { .cmd = TIPC_NL_BEARER_SET, .doit = tipc_nl_bearer_set, .policy = tipc_nl_policy, @@ -238,6 +244,18 @@ static const struct genl_ops tipc_genl_v2_ops[] = { .dumpit = tipc_nl_node_dump_monitor_peer, .policy = tipc_nl_policy, }, + { + .cmd = TIPC_NL_PEER_REMOVE, + .doit = tipc_nl_peer_rm, + .policy = tipc_nl_policy, + }, +#ifdef CONFIG_TIPC_MEDIA_UDP + { + .cmd = TIPC_NL_UDP_GET_REMOTEIP, + .dumpit = tipc_udp_nl_dump_remoteip, + .policy = tipc_nl_policy, + }, +#endif }; int tipc_nlmsg_parse(const struct nlmsghdr *nlh, struct nlattr ***attr) diff --git a/net/tipc/node.c b/net/tipc/node.c index 21974191e425..7e8b75fd1a02 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -1553,6 +1553,69 @@ discard: kfree_skb(skb); } +int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info) +{ + struct net *net = sock_net(skb->sk); + struct tipc_net *tn = net_generic(net, tipc_net_id); + struct nlattr *attrs[TIPC_NLA_NET_MAX + 1]; + struct tipc_node *peer; + u32 addr; + int err; + int i; + + /* We identify the peer by its net */ + if (!info->attrs[TIPC_NLA_NET]) + return -EINVAL; + + err = nla_parse_nested(attrs, TIPC_NLA_NET_MAX, + info->attrs[TIPC_NLA_NET], + tipc_nl_net_policy); + if (err) + return err; + + if (!attrs[TIPC_NLA_NET_ADDR]) + return -EINVAL; + + addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]); + + if (in_own_node(net, addr)) + return -ENOTSUPP; + + spin_lock_bh(&tn->node_list_lock); + peer = tipc_node_find(net, addr); + if (!peer) { + spin_unlock_bh(&tn->node_list_lock); + return -ENXIO; + } + + tipc_node_write_lock(peer); + if (peer->state != SELF_DOWN_PEER_DOWN && + peer->state != SELF_DOWN_PEER_LEAVING) { + tipc_node_write_unlock(peer); + err = -EBUSY; + goto err_out; + } + + for (i = 0; i < MAX_BEARERS; i++) { + struct tipc_link_entry *le = &peer->links[i]; + + if (le->link) { + kfree(le->link); + le->link = NULL; + peer->link_cnt--; + } + } + tipc_node_write_unlock(peer); + tipc_node_delete(peer); + + err = 0; +err_out: + tipc_node_put(peer); + spin_unlock_bh(&tn->node_list_lock); + + return err; +} + int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb) { int err; diff --git a/net/tipc/node.h b/net/tipc/node.h index d69fdfcc0ec9..4578b34c7dca 100644 --- a/net/tipc/node.h +++ b/net/tipc/node.h @@ -77,6 +77,7 @@ int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb); int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info); int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info); int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info); +int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info); int tipc_nl_node_set_monitor(struct sk_buff *skb, 
struct genl_info *info); int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info); diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c index ae7e14cae085..dd274687a53d 100644 --- a/net/tipc/udp_media.c +++ b/net/tipc/udp_media.c @@ -49,6 +49,7 @@ #include "core.h" #include "bearer.h" #include "netlink.h" +#include "msg.h" /* IANA assigned UDP port */ #define UDP_PORT_DEFAULT 6118 @@ -70,6 +71,13 @@ struct udp_media_addr { }; }; +/* struct udp_replicast - container for UDP remote addresses */ +struct udp_replicast { + struct udp_media_addr addr; + struct rcu_head rcu; + struct list_head list; +}; + /** * struct udp_bearer - ip/udp bearer data structure * @bearer: associated generic tipc bearer @@ -82,8 +90,20 @@ struct udp_bearer { struct socket *ubsock; u32 ifindex; struct work_struct work; + struct udp_replicast rcast; }; +static int tipc_udp_is_mcast_addr(struct udp_media_addr *addr) +{ + if (ntohs(addr->proto) == ETH_P_IP) + return ipv4_is_multicast(addr->ipv4.s_addr); +#if IS_ENABLED(CONFIG_IPV6) + else + return ipv6_addr_is_multicast(&addr->ipv6); +#endif + return 0; +} + /* udp_media_addr_set - convert a ip/udp address to a TIPC media address */ static void tipc_udp_media_addr_set(struct tipc_media_addr *addr, struct udp_media_addr *ua) @@ -91,15 +111,9 @@ static void tipc_udp_media_addr_set(struct tipc_media_addr *addr, memset(addr, 0, sizeof(struct tipc_media_addr)); addr->media_id = TIPC_MEDIA_TYPE_UDP; memcpy(addr->value, ua, sizeof(struct udp_media_addr)); - if (ntohs(ua->proto) == ETH_P_IP) { - if (ipv4_is_multicast(ua->ipv4.s_addr)) - addr->broadcast = 1; - } else if (ntohs(ua->proto) == ETH_P_IPV6) { - if (ipv6_addr_type(&ua->ipv6) & IPV6_ADDR_MULTICAST) - addr->broadcast = 1; - } else { - pr_err("Invalid UDP media address\n"); - } + + if (tipc_udp_is_mcast_addr(ua)) + addr->broadcast = 1; } /* tipc_udp_addr2str - convert ip/udp address to string */ @@ -140,28 +154,13 @@ static int tipc_udp_addr2msg(char *msg, struct tipc_media_addr *a) } /* tipc_send_msg - enqueue a send request */ -static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb, - struct tipc_bearer *b, - struct tipc_media_addr *dest) +static int tipc_udp_xmit(struct net *net, struct sk_buff *skb, + struct udp_bearer *ub, struct udp_media_addr *src, + struct udp_media_addr *dst) { int ttl, err = 0; - struct udp_bearer *ub; - struct udp_media_addr *dst = (struct udp_media_addr *)&dest->value; - struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value; struct rtable *rt; - if (skb_headroom(skb) < UDP_MIN_HEADROOM) { - err = pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC); - if (err) - goto tx_error; - } - - skb_set_inner_protocol(skb, htons(ETH_P_TIPC)); - ub = rcu_dereference_rtnl(b->media_ptr); - if (!ub) { - err = -ENODEV; - goto tx_error; - } if (dst->proto == htons(ETH_P_IP)) { struct flowi4 fl = { .daddr = dst->ipv4.s_addr, @@ -207,29 +206,178 @@ tx_error: return err; } +static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb, + struct tipc_bearer *b, + struct tipc_media_addr *addr) +{ + struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value; + struct udp_media_addr *dst = (struct udp_media_addr *)&addr->value; + struct udp_replicast *rcast; + struct udp_bearer *ub; + int err = 0; + + if (skb_headroom(skb) < UDP_MIN_HEADROOM) { + err = pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC); + if (err) + goto out; + } + + skb_set_inner_protocol(skb, htons(ETH_P_TIPC)); + ub = rcu_dereference_rtnl(b->media_ptr); + if (!ub) { + err = 
-ENODEV; + goto out; + } + + if (!addr->broadcast || list_empty(&ub->rcast.list)) + return tipc_udp_xmit(net, skb, ub, src, dst); + + /* Replicast, send an skb to each configured IP address */ + list_for_each_entry_rcu(rcast, &ub->rcast.list, list) { + struct sk_buff *_skb; + + _skb = pskb_copy(skb, GFP_ATOMIC); + if (!_skb) { + err = -ENOMEM; + goto out; + } + + err = tipc_udp_xmit(net, _skb, ub, src, &rcast->addr); + if (err) { + kfree_skb(_skb); + goto out; + } + } + err = 0; +out: + kfree_skb(skb); + return err; +} + +static bool tipc_udp_is_known_peer(struct tipc_bearer *b, + struct udp_media_addr *addr) +{ + struct udp_replicast *rcast, *tmp; + struct udp_bearer *ub; + + ub = rcu_dereference_rtnl(b->media_ptr); + if (!ub) { + pr_err_ratelimited("UDP bearer instance not found\n"); + return false; + } + + list_for_each_entry_safe(rcast, tmp, &ub->rcast.list, list) { + if (!memcmp(&rcast->addr, addr, sizeof(struct udp_media_addr))) + return true; + } + + return false; +} + +static int tipc_udp_rcast_add(struct tipc_bearer *b, + struct udp_media_addr *addr) +{ + struct udp_replicast *rcast; + struct udp_bearer *ub; + + ub = rcu_dereference_rtnl(b->media_ptr); + if (!ub) + return -ENODEV; + + rcast = kmalloc(sizeof(*rcast), GFP_ATOMIC); + if (!rcast) + return -ENOMEM; + + memcpy(&rcast->addr, addr, sizeof(struct udp_media_addr)); + + if (ntohs(addr->proto) == ETH_P_IP) + pr_info("New replicast peer: %pI4\n", &rcast->addr.ipv4); +#if IS_ENABLED(CONFIG_IPV6) + else if (ntohs(addr->proto) == ETH_P_IPV6) + pr_info("New replicast peer: %pI6\n", &rcast->addr.ipv6); +#endif + + list_add_rcu(&rcast->list, &ub->rcast.list); + return 0; +} + +static int tipc_udp_rcast_disc(struct tipc_bearer *b, struct sk_buff *skb) +{ + struct udp_media_addr src = {0}; + struct udp_media_addr *dst; + + dst = (struct udp_media_addr *)&b->bcast_addr.value; + if (tipc_udp_is_mcast_addr(dst)) + return 0; + + src.port = udp_hdr(skb)->source; + + if (ip_hdr(skb)->version == 4) { + struct iphdr *iphdr = ip_hdr(skb); + + src.proto = htons(ETH_P_IP); + src.ipv4.s_addr = iphdr->saddr; + if (ipv4_is_multicast(iphdr->daddr)) + return 0; +#if IS_ENABLED(CONFIG_IPV6) + } else if (ip_hdr(skb)->version == 6) { + struct ipv6hdr *iphdr = ipv6_hdr(skb); + + src.proto = htons(ETH_P_IPV6); + src.ipv6 = iphdr->saddr; + if (ipv6_addr_is_multicast(&iphdr->daddr)) + return 0; +#endif + } else { + return 0; + } + + if (likely(tipc_udp_is_known_peer(b, &src))) + return 0; + + return tipc_udp_rcast_add(b, &src); +} + /* tipc_udp_recv - read data from bearer socket */ static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb) { struct udp_bearer *ub; struct tipc_bearer *b; + struct tipc_msg *hdr; + int err; ub = rcu_dereference_sk_user_data(sk); if (!ub) { pr_err_ratelimited("Failed to get UDP bearer reference"); - kfree_skb(skb); - return 0; + goto out; } - skb_pull(skb, sizeof(struct udphdr)); + hdr = buf_msg(skb); + rcu_read_lock(); b = rcu_dereference_rtnl(ub->bearer); + if (!b) + goto rcu_out; - if (b) { + if (b && test_bit(0, &b->up)) { tipc_rcv(sock_net(sk), skb, b); rcu_read_unlock(); return 0; } + + if (unlikely(msg_user(hdr) == LINK_CONFIG)) { + err = tipc_udp_rcast_disc(b, skb); + if (err) + goto rcu_out; + } + + tipc_rcv(sock_net(sk), skb, b); rcu_read_unlock(); + return 0; + +rcu_out: + rcu_read_unlock(); +out: kfree_skb(skb); return 0; } @@ -241,15 +389,11 @@ static int enable_mcast(struct udp_bearer *ub, struct udp_media_addr *remote) struct sock *sk = ub->ubsock->sk; if (ntohs(remote->proto) == ETH_P_IP) { - if 
(!ipv4_is_multicast(remote->ipv4.s_addr)) - return 0; mreqn.imr_multiaddr = remote->ipv4; mreqn.imr_ifindex = ub->ifindex; err = ip_mc_join_group(sk, &mreqn); #if IS_ENABLED(CONFIG_IPV6) } else { - if (!ipv6_addr_is_multicast(&remote->ipv6)) - return 0; err = ipv6_stub->ipv6_sock_mc_join(sk, ub->ifindex, &remote->ipv6); #endif @@ -257,75 +401,234 @@ static int enable_mcast(struct udp_bearer *ub, struct udp_media_addr *remote) return err; } -/** - * parse_options - build local/remote addresses from configuration - * @attrs: netlink config data - * @ub: UDP bearer instance - * @local: local bearer IP address/port - * @remote: peer or multicast IP/port - */ -static int parse_options(struct nlattr *attrs[], struct udp_bearer *ub, - struct udp_media_addr *local, - struct udp_media_addr *remote) +static int __tipc_nl_add_udp_addr(struct sk_buff *skb, + struct udp_media_addr *addr, int nla_t) { - struct nlattr *opts[TIPC_NLA_UDP_MAX + 1]; - struct sockaddr_storage sa_local, sa_remote; + if (ntohs(addr->proto) == ETH_P_IP) { + struct sockaddr_in ip4; - if (!attrs[TIPC_NLA_BEARER_UDP_OPTS]) - goto err; - if (nla_parse_nested(opts, TIPC_NLA_UDP_MAX, - attrs[TIPC_NLA_BEARER_UDP_OPTS], - tipc_nl_udp_policy)) - goto err; - if (opts[TIPC_NLA_UDP_LOCAL] && opts[TIPC_NLA_UDP_REMOTE]) { - nla_memcpy(&sa_local, opts[TIPC_NLA_UDP_LOCAL], - sizeof(sa_local)); - nla_memcpy(&sa_remote, opts[TIPC_NLA_UDP_REMOTE], - sizeof(sa_remote)); + ip4.sin_family = AF_INET; + ip4.sin_port = addr->port; + ip4.sin_addr.s_addr = addr->ipv4.s_addr; + if (nla_put(skb, nla_t, sizeof(ip4), &ip4)) + return -EMSGSIZE; + +#if IS_ENABLED(CONFIG_IPV6) + } else if (ntohs(addr->proto) == ETH_P_IPV6) { + struct sockaddr_in6 ip6; + + ip6.sin6_family = AF_INET6; + ip6.sin6_port = addr->port; + memcpy(&ip6.sin6_addr, &addr->ipv6, sizeof(struct in6_addr)); + if (nla_put(skb, nla_t, sizeof(ip6), &ip6)) + return -EMSGSIZE; +#endif + } + + return 0; +} + +int tipc_udp_nl_dump_remoteip(struct sk_buff *skb, struct netlink_callback *cb) +{ + u32 bid = cb->args[0]; + u32 skip_cnt = cb->args[1]; + u32 portid = NETLINK_CB(cb->skb).portid; + struct udp_replicast *rcast, *tmp; + struct tipc_bearer *b; + struct udp_bearer *ub; + void *hdr; + int err; + int i; + + if (!bid && !skip_cnt) { + struct net *net = sock_net(skb->sk); + struct nlattr *battrs[TIPC_NLA_BEARER_MAX + 1]; + struct nlattr **attrs; + char *bname; + + err = tipc_nlmsg_parse(cb->nlh, &attrs); + if (err) + return err; + + if (!attrs[TIPC_NLA_BEARER]) + return -EINVAL; + + err = nla_parse_nested(battrs, TIPC_NLA_BEARER_MAX, + attrs[TIPC_NLA_BEARER], + tipc_nl_bearer_policy); + if (err) + return err; + + if (!battrs[TIPC_NLA_BEARER_NAME]) + return -EINVAL; + + bname = nla_data(battrs[TIPC_NLA_BEARER_NAME]); + + rtnl_lock(); + b = tipc_bearer_find(net, bname); + if (!b) { + rtnl_unlock(); + return -EINVAL; + } + bid = b->identity; } else { -err: - pr_err("Invalid UDP bearer configuration"); + struct net *net = sock_net(skb->sk); + struct tipc_net *tn = net_generic(net, tipc_net_id); + + rtnl_lock(); + b = rtnl_dereference(tn->bearer_list[bid]); + if (!b) { + rtnl_unlock(); + return -EINVAL; + } + } + + ub = rcu_dereference_rtnl(b->media_ptr); + if (!ub) { + rtnl_unlock(); return -EINVAL; } - if ((sa_local.ss_family & sa_remote.ss_family) == AF_INET) { - struct sockaddr_in *ip4; - - ip4 = (struct sockaddr_in *)&sa_local; - local->proto = htons(ETH_P_IP); - local->port = ip4->sin_port; - local->ipv4.s_addr = ip4->sin_addr.s_addr; - - ip4 = (struct sockaddr_in *)&sa_remote; - remote->proto = 
htons(ETH_P_IP); - remote->port = ip4->sin_port; - remote->ipv4.s_addr = ip4->sin_addr.s_addr; + + i = 0; + list_for_each_entry_safe(rcast, tmp, &ub->rcast.list, list) { + if (i < skip_cnt) + goto count; + + hdr = genlmsg_put(skb, portid, cb->nlh->nlmsg_seq, + &tipc_genl_family, NLM_F_MULTI, + TIPC_NL_BEARER_GET); + if (!hdr) + goto done; + + err = __tipc_nl_add_udp_addr(skb, &rcast->addr, + TIPC_NLA_UDP_REMOTE); + if (err) { + genlmsg_cancel(skb, hdr); + goto done; + } + genlmsg_end(skb, hdr); +count: + i++; + } +done: + rtnl_unlock(); + cb->args[0] = bid; + cb->args[1] = i; + + return skb->len; +} + +int tipc_udp_nl_add_bearer_data(struct tipc_nl_msg *msg, struct tipc_bearer *b) +{ + struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value; + struct udp_media_addr *dst; + struct udp_bearer *ub; + struct nlattr *nest; + + ub = rcu_dereference_rtnl(b->media_ptr); + if (!ub) + return -ENODEV; + + nest = nla_nest_start(msg->skb, TIPC_NLA_BEARER_UDP_OPTS); + if (!nest) + goto msg_full; + + if (__tipc_nl_add_udp_addr(msg->skb, src, TIPC_NLA_UDP_LOCAL)) + goto msg_full; + + dst = (struct udp_media_addr *)&b->bcast_addr.value; + if (__tipc_nl_add_udp_addr(msg->skb, dst, TIPC_NLA_UDP_REMOTE)) + goto msg_full; + + if (!list_empty(&ub->rcast.list)) { + if (nla_put_flag(msg->skb, TIPC_NLA_UDP_MULTI_REMOTEIP)) + goto msg_full; + } + + nla_nest_end(msg->skb, nest); + return 0; +msg_full: + nla_nest_cancel(msg->skb, nest); + return -EMSGSIZE; +} + +/** + * tipc_parse_udp_addr - build udp media address from netlink data + * @nlattr: netlink attribute containing sockaddr storage aligned address + * @addr: tipc media address to fill with address, port and protocol type + * @scope_id: IPv6 scope id pointer, not NULL indicates it's required + */ + +static int tipc_parse_udp_addr(struct nlattr *nla, struct udp_media_addr *addr, + u32 *scope_id) +{ + struct sockaddr_storage sa; + + nla_memcpy(&sa, nla, sizeof(sa)); + if (sa.ss_family == AF_INET) { + struct sockaddr_in *ip4 = (struct sockaddr_in *)&sa; + + addr->proto = htons(ETH_P_IP); + addr->port = ip4->sin_port; + addr->ipv4.s_addr = ip4->sin_addr.s_addr; return 0; #if IS_ENABLED(CONFIG_IPV6) - } else if ((sa_local.ss_family & sa_remote.ss_family) == AF_INET6) { - int atype; - struct sockaddr_in6 *ip6; + } else if (sa.ss_family == AF_INET6) { + struct sockaddr_in6 *ip6 = (struct sockaddr_in6 *)&sa; - ip6 = (struct sockaddr_in6 *)&sa_local; - atype = ipv6_addr_type(&ip6->sin6_addr); - if (__ipv6_addr_needs_scope_id(atype) && !ip6->sin6_scope_id) - return -EINVAL; + addr->proto = htons(ETH_P_IPV6); + addr->port = ip6->sin6_port; + memcpy(&addr->ipv6, &ip6->sin6_addr, sizeof(struct in6_addr)); + + /* Scope ID is only interesting for local addresses */ + if (scope_id) { + int atype; - local->proto = htons(ETH_P_IPV6); - local->port = ip6->sin6_port; - memcpy(&local->ipv6, &ip6->sin6_addr, sizeof(struct in6_addr)); - ub->ifindex = ip6->sin6_scope_id; + atype = ipv6_addr_type(&ip6->sin6_addr); + if (__ipv6_addr_needs_scope_id(atype) && + !ip6->sin6_scope_id) { + return -EINVAL; + } + + *scope_id = ip6->sin6_scope_id ? 
: 0; + } - ip6 = (struct sockaddr_in6 *)&sa_remote; - remote->proto = htons(ETH_P_IPV6); - remote->port = ip6->sin6_port; - memcpy(&remote->ipv6, &ip6->sin6_addr, sizeof(struct in6_addr)); return 0; #endif } return -EADDRNOTAVAIL; } +int tipc_udp_nl_bearer_add(struct tipc_bearer *b, struct nlattr *attr) +{ + int err; + struct udp_media_addr addr = {0}; + struct nlattr *opts[TIPC_NLA_UDP_MAX + 1]; + struct udp_media_addr *dst; + + if (nla_parse_nested(opts, TIPC_NLA_UDP_MAX, attr, tipc_nl_udp_policy)) + return -EINVAL; + + if (!opts[TIPC_NLA_UDP_REMOTE]) + return -EINVAL; + + err = tipc_parse_udp_addr(opts[TIPC_NLA_UDP_REMOTE], &addr, NULL); + if (err) + return err; + + dst = (struct udp_media_addr *)&b->bcast_addr.value; + if (tipc_udp_is_mcast_addr(dst)) { + pr_err("Can't add remote ip to TIPC UDP multicast bearer\n"); + return -EINVAL; + } + + if (tipc_udp_is_known_peer(b, &addr)) + return 0; + + return tipc_udp_rcast_add(b, &addr); +} + /** * tipc_udp_enable - callback to create a new udp bearer instance * @net: network namespace @@ -340,18 +643,37 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b, { int err = -EINVAL; struct udp_bearer *ub; - struct udp_media_addr *remote; + struct udp_media_addr remote = {0}; struct udp_media_addr local = {0}; struct udp_port_cfg udp_conf = {0}; struct udp_tunnel_sock_cfg tuncfg = {NULL}; + struct nlattr *opts[TIPC_NLA_UDP_MAX + 1]; ub = kzalloc(sizeof(*ub), GFP_ATOMIC); if (!ub) return -ENOMEM; - remote = (struct udp_media_addr *)&b->bcast_addr.value; - memset(remote, 0, sizeof(struct udp_media_addr)); - err = parse_options(attrs, ub, &local, remote); + INIT_LIST_HEAD(&ub->rcast.list); + + if (!attrs[TIPC_NLA_BEARER_UDP_OPTS]) + goto err; + + if (nla_parse_nested(opts, TIPC_NLA_UDP_MAX, + attrs[TIPC_NLA_BEARER_UDP_OPTS], + tipc_nl_udp_policy)) + goto err; + + if (!opts[TIPC_NLA_UDP_LOCAL] || !opts[TIPC_NLA_UDP_REMOTE]) { + pr_err("Invalid UDP bearer configuration"); + return -EINVAL; + } + + err = tipc_parse_udp_addr(opts[TIPC_NLA_UDP_LOCAL], &local, + &ub->ifindex); + if (err) + goto err; + + err = tipc_parse_udp_addr(opts[TIPC_NLA_UDP_REMOTE], &remote, NULL); if (err) goto err; @@ -396,9 +718,18 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b, tuncfg.encap_destroy = NULL; setup_udp_tunnel_sock(net, ub->ubsock, &tuncfg); - err = enable_mcast(ub, remote); + /** + * The bcast media address port is used for all peers and the ip + * is used if it's a multicast address. + */ + memcpy(&b->bcast_addr.value, &remote, sizeof(remote)); + if (tipc_udp_is_mcast_addr(&remote)) + err = enable_mcast(ub, &remote); + else + err = tipc_udp_rcast_add(b, &remote); if (err) goto err; + return 0; err: if (ub->ubsock) @@ -411,6 +742,12 @@ err: static void cleanup_bearer(struct work_struct *work) { struct udp_bearer *ub = container_of(work, struct udp_bearer, work); + struct udp_replicast *rcast, *tmp; + + list_for_each_entry_safe(rcast, tmp, &ub->rcast.list, list) { + list_del_rcu(&rcast->list); + kfree_rcu(rcast, rcu); + } if (ub->ubsock) udp_tunnel_sock_release(ub->ubsock); diff --git a/net/tipc/udp_media.h b/net/tipc/udp_media.h new file mode 100644 index 000000000000..281bbae87726 --- /dev/null +++ b/net/tipc/udp_media.h @@ -0,0 +1,46 @@ +/* + * net/tipc/udp_media.h: Include file for UDP bearer media + * + * Copyright (c) 1996-2006, 2013-2016, Ericsson AB + * Copyright (c) 2005, 2010-2011, Wind River Systems + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifdef CONFIG_TIPC_MEDIA_UDP +#ifndef _TIPC_UDP_MEDIA_H +#define _TIPC_UDP_MEDIA_H + +int tipc_udp_nl_bearer_add(struct tipc_bearer *b, struct nlattr *attr); +int tipc_udp_nl_add_bearer_data(struct tipc_nl_msg *msg, struct tipc_bearer *b); +int tipc_udp_nl_dump_remoteip(struct sk_buff *skb, struct netlink_callback *cb); + +#endif +#endif diff --git a/net/wireless/core.c b/net/wireless/core.c index 7645e97362c0..2029b49a1df3 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -906,6 +906,8 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev) if (WARN_ON(wdev->netdev)) return; + nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE); + list_del_rcu(&wdev->list); rdev->devlist_generation++; @@ -1079,6 +1081,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, wdev->iftype == NL80211_IFTYPE_P2P_CLIENT || wdev->iftype == NL80211_IFTYPE_ADHOC) && !wdev->use_4addr) dev->priv_flags |= IFF_DONT_BRIDGE; + + nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE); break; case NETDEV_GOING_DOWN: cfg80211_leave(rdev, wdev); @@ -1157,6 +1161,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, * remove and clean it up. 
*/ if (!list_empty(&wdev->list)) { + nl80211_notify_iface(rdev, wdev, + NL80211_CMD_DEL_INTERFACE); sysfs_remove_link(&dev->dev.kobj, "phy80211"); list_del_rcu(&wdev->list); rdev->devlist_generation++; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index f02653a08993..499785778983 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -2751,7 +2751,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct vif_params params; struct wireless_dev *wdev; - struct sk_buff *msg, *event; + struct sk_buff *msg; int err; enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED; u32 flags; @@ -2855,20 +2855,15 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) return -ENOBUFS; } - event = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (event) { - if (nl80211_send_iface(event, 0, 0, 0, - rdev, wdev, false) < 0) { - nlmsg_free(event); - goto out; - } - - genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), - event, 0, NL80211_MCGRP_CONFIG, - GFP_KERNEL); - } + /* + * For wdevs which have no associated netdev object (e.g. of type + * NL80211_IFTYPE_P2P_DEVICE), emit the NEW_INTERFACE event here. + * For all other types, the event will be generated from the + * netdev notifier + */ + if (!wdev->netdev) + nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE); -out: return genlmsg_reply(msg, info); } @@ -2876,18 +2871,10 @@ static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; - struct sk_buff *msg; - int status; if (!rdev->ops->del_virtual_intf) return -EOPNOTSUPP; - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (msg && nl80211_send_iface(msg, 0, 0, 0, rdev, wdev, true) < 0) { - nlmsg_free(msg); - msg = NULL; - } - /* * If we remove a wireless device without a netdev then clear * user_ptr[1] so that nl80211_post_doit won't dereference it @@ -2898,15 +2885,7 @@ static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info) if (!wdev->netdev) info->user_ptr[1] = NULL; - status = rdev_del_virtual_intf(rdev, wdev); - if (status >= 0 && msg) - genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), - msg, 0, NL80211_MCGRP_CONFIG, - GFP_KERNEL); - else - nlmsg_free(msg); - - return status; + return rdev_del_virtual_intf(rdev, wdev); } static int nl80211_set_noack_map(struct sk_buff *skb, struct genl_info *info) @@ -5374,6 +5353,18 @@ static int nl80211_check_s32(const struct nlattr *nla, s32 min, s32 max, s32 *ou return 0; } +static int nl80211_check_power_mode(const struct nlattr *nla, + enum nl80211_mesh_power_mode min, + enum nl80211_mesh_power_mode max, + enum nl80211_mesh_power_mode *out) +{ + u32 val = nla_get_u32(nla); + if (val < min || val > max) + return -EINVAL; + *out = val; + return 0; +} + static int nl80211_parse_mesh_config(struct genl_info *info, struct mesh_config *cfg, u32 *mask_out) @@ -5518,7 +5509,7 @@ do { \ NL80211_MESH_POWER_ACTIVE, NL80211_MESH_POWER_MAX, mask, NL80211_MESHCONF_POWER_MODE, - nl80211_check_u32); + nl80211_check_power_mode); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshAwakeWindowDuration, 0, 65535, mask, NL80211_MESHCONF_AWAKE_WINDOW, nl80211_check_u16); @@ -7773,12 +7764,13 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) ibss.beacon_interval = 100; - if (info->attrs[NL80211_ATTR_BEACON_INTERVAL]) { + if 
(info->attrs[NL80211_ATTR_BEACON_INTERVAL]) ibss.beacon_interval = nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]); - if (ibss.beacon_interval < 1 || ibss.beacon_interval > 10000) - return -EINVAL; - } + + err = cfg80211_validate_beacon_int(rdev, ibss.beacon_interval); + if (err) + return err; if (!rdev->ops->join_ibss) return -EOPNOTSUPP; @@ -9252,9 +9244,10 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NL80211_ATTR_BEACON_INTERVAL]) { setup.beacon_interval = nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]); - if (setup.beacon_interval < 10 || - setup.beacon_interval > 10000) - return -EINVAL; + + err = cfg80211_validate_beacon_int(rdev, setup.beacon_interval); + if (err) + return err; } if (info->attrs[NL80211_ATTR_DTIM_PERIOD]) { @@ -11847,6 +11840,29 @@ void nl80211_notify_wiphy(struct cfg80211_registered_device *rdev, NL80211_MCGRP_CONFIG, GFP_KERNEL); } +void nl80211_notify_iface(struct cfg80211_registered_device *rdev, + struct wireless_dev *wdev, + enum nl80211_commands cmd) +{ + struct sk_buff *msg; + + WARN_ON(cmd != NL80211_CMD_NEW_INTERFACE && + cmd != NL80211_CMD_DEL_INTERFACE); + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return; + + if (nl80211_send_iface(msg, 0, 0, 0, rdev, wdev, + cmd == NL80211_CMD_DEL_INTERFACE) < 0) { + nlmsg_free(msg); + return; + } + + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_CONFIG, GFP_KERNEL); +} + static int nl80211_add_scan_req(struct sk_buff *msg, struct cfg80211_registered_device *rdev) { diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h index a63f402b10b7..7e3821d7fcc5 100644 --- a/net/wireless/nl80211.h +++ b/net/wireless/nl80211.h @@ -7,6 +7,9 @@ int nl80211_init(void); void nl80211_exit(void); void nl80211_notify_wiphy(struct cfg80211_registered_device *rdev, enum nl80211_commands cmd); +void nl80211_notify_iface(struct cfg80211_registered_device *rdev, + struct wireless_dev *wdev, + enum nl80211_commands cmd); void nl80211_send_scan_start(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev); struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev, diff --git a/net/wireless/util.c b/net/wireless/util.c index b7d1592bd5b8..0675f513e7b9 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -1559,7 +1559,7 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev; int res = 0; - if (!beacon_int) + if (beacon_int < 10 || beacon_int > 10000) return -EINVAL; list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 90ebf7d35c07..db3cb061bfcd 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -24,6 +24,7 @@ hostprogs-y += test_overhead hostprogs-y += test_cgrp2_array_pin hostprogs-y += xdp1 hostprogs-y += xdp2 +hostprogs-y += test_current_task_under_cgroup test_verifier-objs := test_verifier.o libbpf.o test_maps-objs := test_maps.o libbpf.o @@ -49,6 +50,8 @@ test_cgrp2_array_pin-objs := libbpf.o test_cgrp2_array_pin.o xdp1-objs := bpf_load.o libbpf.o xdp1_user.o # reuse xdp1 source intentionally xdp2-objs := bpf_load.o libbpf.o xdp1_user.o +test_current_task_under_cgroup-objs := bpf_load.o libbpf.o \ + test_current_task_under_cgroup_user.o # Tell kbuild to always build the programs always := $(hostprogs-y) @@ -64,6 +67,7 @@ always += tracex6_kern.o always += test_probe_write_user_kern.o always += trace_output_kern.o always += tcbpf1_kern.o +always 
+= tcbpf2_kern.o always += lathist_kern.o always += offwaketime_kern.o always += spintest_kern.o @@ -74,6 +78,7 @@ always += parse_varlen.o parse_simple.o parse_ldabs.o always += test_cgrp2_tc_kern.o always += xdp1_kern.o always += xdp2_kern.o +always += test_current_task_under_cgroup_kern.o HOSTCFLAGS += -I$(objtree)/usr/include @@ -97,6 +102,7 @@ HOSTLOADLIBES_map_perf_test += -lelf -lrt HOSTLOADLIBES_test_overhead += -lelf -lrt HOSTLOADLIBES_xdp1 += -lelf HOSTLOADLIBES_xdp2 += -lelf +HOSTLOADLIBES_test_current_task_under_cgroup += -lelf # Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline: # make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h index 7927a090fa0d..bbdf62a1e45e 100644 --- a/samples/bpf/bpf_helpers.h +++ b/samples/bpf/bpf_helpers.h @@ -37,12 +37,24 @@ static int (*bpf_clone_redirect)(void *ctx, int ifindex, int flags) = (void *) BPF_FUNC_clone_redirect; static int (*bpf_redirect)(int ifindex, int flags) = (void *) BPF_FUNC_redirect; -static int (*bpf_perf_event_output)(void *ctx, void *map, int index, void *data, int size) = +static int (*bpf_perf_event_output)(void *ctx, void *map, + unsigned long long flags, void *data, + int size) = (void *) BPF_FUNC_perf_event_output; static int (*bpf_get_stackid)(void *ctx, void *map, int flags) = (void *) BPF_FUNC_get_stackid; static int (*bpf_probe_write_user)(void *dst, void *src, int size) = (void *) BPF_FUNC_probe_write_user; +static int (*bpf_current_task_under_cgroup)(void *map, int index) = + (void *) BPF_FUNC_current_task_under_cgroup; +static int (*bpf_skb_get_tunnel_key)(void *ctx, void *key, int size, int flags) = + (void *) BPF_FUNC_skb_get_tunnel_key; +static int (*bpf_skb_set_tunnel_key)(void *ctx, void *key, int size, int flags) = + (void *) BPF_FUNC_skb_set_tunnel_key; +static int (*bpf_skb_get_tunnel_opt)(void *ctx, void *md, int size) = + (void *) BPF_FUNC_skb_get_tunnel_opt; +static int (*bpf_skb_set_tunnel_opt)(void *ctx, void *md, int size) = + (void *) BPF_FUNC_skb_set_tunnel_opt; /* llvm builtin functions that eBPF C program may use to * emit BPF_LD_ABS and BPF_LD_IND instructions diff --git a/samples/bpf/tcbpf2_kern.c b/samples/bpf/tcbpf2_kern.c new file mode 100644 index 000000000000..7a15289da6cc --- /dev/null +++ b/samples/bpf/tcbpf2_kern.c @@ -0,0 +1,191 @@ +/* Copyright (c) 2016 VMware + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. 
+ */ +#include <uapi/linux/bpf.h> +#include <uapi/linux/if_ether.h> +#include <uapi/linux/if_packet.h> +#include <uapi/linux/ip.h> +#include <uapi/linux/in.h> +#include <uapi/linux/tcp.h> +#include <uapi/linux/filter.h> +#include <uapi/linux/pkt_cls.h> +#include "bpf_helpers.h" + +#define ERROR(ret) do {\ + char fmt[] = "ERROR line:%d ret:%d\n";\ + bpf_trace_printk(fmt, sizeof(fmt), __LINE__, ret); \ + } while(0) + +struct geneve_opt { + __be16 opt_class; + u8 type; + u8 length:5; + u8 r3:1; + u8 r2:1; + u8 r1:1; + u8 opt_data[8]; /* hard-coded to 8 byte */ +}; + +struct vxlan_metadata { + u32 gbp; +}; + +SEC("gre_set_tunnel") +int _gre_set_tunnel(struct __sk_buff *skb) +{ + int ret; + struct bpf_tunnel_key key; + + __builtin_memset(&key, 0x0, sizeof(key)); + key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */ + key.tunnel_id = 2; + key.tunnel_tos = 0; + key.tunnel_ttl = 64; + + ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), BPF_F_ZERO_CSUM_TX); + if (ret < 0) { + ERROR(ret); + return TC_ACT_SHOT; + } + + return TC_ACT_OK; +} + +SEC("gre_get_tunnel") +int _gre_get_tunnel(struct __sk_buff *skb) +{ + int ret; + struct bpf_tunnel_key key; + char fmt[] = "key %d remote ip 0x%x\n"; + + ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); + if (ret < 0) { + ERROR(ret); + return TC_ACT_SHOT; + } + + bpf_trace_printk(fmt, sizeof(fmt), key.tunnel_id, key.remote_ipv4); + return TC_ACT_OK; +} + +SEC("vxlan_set_tunnel") +int _vxlan_set_tunnel(struct __sk_buff *skb) +{ + int ret; + struct bpf_tunnel_key key; + struct vxlan_metadata md; + + __builtin_memset(&key, 0x0, sizeof(key)); + key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */ + key.tunnel_id = 2; + key.tunnel_tos = 0; + key.tunnel_ttl = 64; + + ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), BPF_F_ZERO_CSUM_TX); + if (ret < 0) { + ERROR(ret); + return TC_ACT_SHOT; + } + + md.gbp = 0x800FF; /* Set VXLAN Group Policy extension */ + ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md)); + if (ret < 0) { + ERROR(ret); + return TC_ACT_SHOT; + } + + return TC_ACT_OK; +} + +SEC("vxlan_get_tunnel") +int _vxlan_get_tunnel(struct __sk_buff *skb) +{ + int ret; + struct bpf_tunnel_key key; + struct vxlan_metadata md; + char fmt[] = "key %d remote ip 0x%x vxlan gbp 0x%x\n"; + + ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); + if (ret < 0) { + ERROR(ret); + return TC_ACT_SHOT; + } + + ret = bpf_skb_get_tunnel_opt(skb, &md, sizeof(md)); + if (ret < 0) { + ERROR(ret); + return TC_ACT_SHOT; + } + + bpf_trace_printk(fmt, sizeof(fmt), + key.tunnel_id, key.remote_ipv4, md.gbp); + + return TC_ACT_OK; +} + +SEC("geneve_set_tunnel") +int _geneve_set_tunnel(struct __sk_buff *skb) +{ + int ret, ret2; + struct bpf_tunnel_key key; + struct geneve_opt gopt; + + __builtin_memset(&key, 0x0, sizeof(key)); + key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */ + key.tunnel_id = 2; + key.tunnel_tos = 0; + key.tunnel_ttl = 64; + + __builtin_memset(&gopt, 0x0, sizeof(gopt)); + gopt.opt_class = 0x102; /* Open Virtual Networking (OVN) */ + gopt.type = 0x08; + gopt.r1 = 1; + gopt.r2 = 0; + gopt.r3 = 1; + gopt.length = 2; /* 4-byte multiple */ + *(int *) &gopt.opt_data = 0xdeadbeef; + + ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), BPF_F_ZERO_CSUM_TX); + if (ret < 0) { + ERROR(ret); + return TC_ACT_SHOT; + } + + ret = bpf_skb_set_tunnel_opt(skb, &gopt, sizeof(gopt)); + if (ret < 0) { + ERROR(ret); + return TC_ACT_SHOT; + } + + return TC_ACT_OK; +} + +SEC("geneve_get_tunnel") +int _geneve_get_tunnel(struct __sk_buff *skb) +{ + int ret; + struct bpf_tunnel_key key; 
+ struct geneve_opt gopt; + char fmt[] = "key %d remote ip 0x%x geneve class 0x%x\n"; + + ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); + if (ret < 0) { + ERROR(ret); + return TC_ACT_SHOT; + } + + ret = bpf_skb_get_tunnel_opt(skb, &gopt, sizeof(gopt)); + if (ret < 0) { + ERROR(ret); + return TC_ACT_SHOT; + } + + bpf_trace_printk(fmt, sizeof(fmt), + key.tunnel_id, key.remote_ipv4, gopt.opt_class); + return TC_ACT_OK; +} + +char _license[] SEC("license") = "GPL"; diff --git a/samples/bpf/test_current_task_under_cgroup_kern.c b/samples/bpf/test_current_task_under_cgroup_kern.c new file mode 100644 index 000000000000..86b28d7d6c99 --- /dev/null +++ b/samples/bpf/test_current_task_under_cgroup_kern.c @@ -0,0 +1,43 @@ +/* Copyright (c) 2016 Sargun Dhillon <sargun@sargun.me> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + */ + +#include <linux/ptrace.h> +#include <uapi/linux/bpf.h> +#include <linux/version.h> +#include "bpf_helpers.h" +#include <uapi/linux/utsname.h> + +struct bpf_map_def SEC("maps") cgroup_map = { + .type = BPF_MAP_TYPE_CGROUP_ARRAY, + .key_size = sizeof(u32), + .value_size = sizeof(u32), + .max_entries = 1, +}; + +struct bpf_map_def SEC("maps") perf_map = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(u32), + .value_size = sizeof(u64), + .max_entries = 1, +}; + +/* Writes the last PID that called sync to a map at index 0 */ +SEC("kprobe/sys_sync") +int bpf_prog1(struct pt_regs *ctx) +{ + u64 pid = bpf_get_current_pid_tgid(); + int idx = 0; + + if (!bpf_current_task_under_cgroup(&cgroup_map, 0)) + return 0; + + bpf_map_update_elem(&perf_map, &idx, &pid, BPF_ANY); + return 0; +} + +char _license[] SEC("license") = "GPL"; +u32 _version SEC("version") = LINUX_VERSION_CODE; diff --git a/samples/bpf/test_current_task_under_cgroup_user.c b/samples/bpf/test_current_task_under_cgroup_user.c new file mode 100644 index 000000000000..30b0bce884f9 --- /dev/null +++ b/samples/bpf/test_current_task_under_cgroup_user.c @@ -0,0 +1,145 @@ +/* Copyright (c) 2016 Sargun Dhillon <sargun@sargun.me> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + */ + +#define _GNU_SOURCE +#include <stdio.h> +#include <linux/bpf.h> +#include <unistd.h> +#include "libbpf.h" +#include "bpf_load.h" +#include <string.h> +#include <fcntl.h> +#include <errno.h> +#include <linux/bpf.h> +#include <sched.h> +#include <sys/mount.h> +#include <sys/stat.h> +#include <sys/types.h> +#include <linux/limits.h> + +#define CGROUP_MOUNT_PATH "/mnt" +#define CGROUP_PATH "/mnt/my-cgroup" + +#define clean_errno() (errno == 0 ? "None" : strerror(errno)) +#define log_err(MSG, ...) 
fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \
+	__FILE__, __LINE__, clean_errno(), ##__VA_ARGS__)
+
+static int join_cgroup(char *path)
+{
+	int fd, rc = 0;
+	pid_t pid = getpid();
+	char cgroup_path[PATH_MAX + 1];
+
+	snprintf(cgroup_path, sizeof(cgroup_path), "%s/cgroup.procs", path);
+
+	fd = open(cgroup_path, O_WRONLY);
+	if (fd < 0) {
+		log_err("Opening Cgroup");
+		return 1;
+	}
+
+	if (dprintf(fd, "%d\n", pid) < 0) {
+		log_err("Joining Cgroup");
+		rc = 1;
+	}
+	close(fd);
+	return rc;
+}
+
+int main(int argc, char **argv)
+{
+	char filename[256];
+	int cg2, idx = 0;
+	pid_t remote_pid, local_pid = getpid();
+
+	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+	if (load_bpf_file(filename)) {
+		printf("%s", bpf_log_buf);
+		return 1;
+	}
+
+	/*
+	 * This is to avoid interfering with existing cgroups. Unfortunately,
+	 * most people don't have cgroupv2 enabled at this point in time.
+	 * It's easier to create our own mount namespace and manage it
+	 * ourselves.
+	 */
+	if (unshare(CLONE_NEWNS)) {
+		log_err("unshare");
+		return 1;
+	}
+
+	if (mount("none", "/", NULL, MS_REC | MS_PRIVATE, NULL)) {
+		log_err("mount fakeroot");
+		return 1;
+	}
+
+	if (mount("none", CGROUP_MOUNT_PATH, "cgroup2", 0, NULL)) {
+		log_err("mount cgroup2");
+		return 1;
+	}
+
+	if (mkdir(CGROUP_PATH, 0777) && errno != EEXIST) {
+		log_err("mkdir cgroup");
+		return 1;
+	}
+
+	cg2 = open(CGROUP_PATH, O_RDONLY);
+	if (cg2 < 0) {
+		log_err("opening target cgroup");
+		goto cleanup_cgroup_err;
+	}
+
+	if (bpf_update_elem(map_fd[0], &idx, &cg2, BPF_ANY)) {
+		log_err("Adding target cgroup to map");
+		goto cleanup_cgroup_err;
+	}
+	if (join_cgroup(CGROUP_PATH)) {
+		log_err("Joining target cgroup");
+		goto cleanup_cgroup_err;
+	}
+
+	/*
+	 * The installed helper program catches the sync call, and should
+	 * write the calling process's PID to the map.
+	 */
+
+	sync();
+	bpf_lookup_elem(map_fd[1], &idx, &remote_pid);
+
+	if (local_pid != remote_pid) {
+		fprintf(stderr,
+			"BPF helper did not write the correct PID to the map (got %d)\n",
+			remote_pid);
+		goto leave_cgroup_err;
+	}
+
+	/* Verify the negative scenario; leave the cgroup */
+	if (join_cgroup(CGROUP_MOUNT_PATH))
+		goto leave_cgroup_err;
+
+	remote_pid = 0;
+	bpf_update_elem(map_fd[1], &idx, &remote_pid, BPF_ANY);
+
+	sync();
+	bpf_lookup_elem(map_fd[1], &idx, &remote_pid);
+
+	if (local_pid == remote_pid) {
+		fprintf(stderr, "BPF cgroup negative test did not work\n");
+		goto cleanup_cgroup_err;
+	}
+
+	rmdir(CGROUP_PATH);
+	return 0;
+
+	/* Error condition, cleanup */
+leave_cgroup_err:
+	join_cgroup(CGROUP_MOUNT_PATH);
+cleanup_cgroup_err:
+	rmdir(CGROUP_PATH);
+	return 1;
+} diff --git a/samples/bpf/test_tunnel_bpf.sh b/samples/bpf/test_tunnel_bpf.sh new file mode 100755 index 000000000000..4956589a83ae --- /dev/null +++ b/samples/bpf/test_tunnel_bpf.sh @@ -0,0 +1,127 @@ +#!/bin/bash
+# In Namespace 0 (at_ns0) using native tunnel
+# Overlay IP: 10.1.1.100
+# local 172.16.1.100 remote 172.16.1.200
+# veth0 IP: 172.16.1.100, tunnel dev <type>00
+
+# Out of Namespace using BPF set/get on lwtunnel
+# Overlay IP: 10.1.1.200
+# local 172.16.1.200 remote 172.16.1.100
+# veth1 IP: 172.16.1.200, tunnel dev <type>11
+
+set -e
+
+function config_device {
+	ip netns add at_ns0
+	ip link add veth0 type veth peer name veth1
+	ip link set veth0 netns at_ns0
+	ip netns exec at_ns0 ip addr add 172.16.1.100/24 dev veth0
+	ip netns exec at_ns0 ip link set dev veth0 up
+	ip link set dev veth1 up
+	ip addr add dev veth1 172.16.1.200/24
+}
+
+function add_gre_tunnel {
+	# in namespace
+	ip netns exec at_ns0 \
+		ip link add dev $DEV_NS type $TYPE key 2 local 172.16.1.100 remote 172.16.1.200
+	ip netns exec at_ns0 ip link set dev $DEV_NS up
+	ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
+
+	# out of namespace
+	ip link add dev $DEV type $TYPE key 2 external
+	ip link set dev $DEV up
+	ip addr add dev $DEV 10.1.1.200/24
+}
+
+function add_vxlan_tunnel {
+	# Set a static ARP entry here because iptables set-mark works
+	# on L3 packets and therefore does not apply to ARP packets,
+	# which would otherwise cause errors at get_tunnel_{key/opt}.
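+	# Note: the 0x800FF mark set below is the same value the
+	# vxlan_set_tunnel/vxlan_get_tunnel programs in tcbpf2_kern.c use
+	# for the VXLAN Group Policy (md.gbp) metadata.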
+ + # in namespace + ip netns exec at_ns0 \ + ip link add dev $DEV_NS type $TYPE id 2 dstport 4789 gbp remote 172.16.1.200 + ip netns exec at_ns0 ip link set dev $DEV_NS address 52:54:00:d9:01:00 up + ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24 + ip netns exec at_ns0 arp -s 10.1.1.200 52:54:00:d9:02:00 + ip netns exec at_ns0 iptables -A OUTPUT -j MARK --set-mark 0x800FF + + # out of namespace + ip link add dev $DEV type $TYPE external gbp dstport 4789 + ip link set dev $DEV address 52:54:00:d9:02:00 up + ip addr add dev $DEV 10.1.1.200/24 + arp -s 10.1.1.100 52:54:00:d9:01:00 +} + +function add_geneve_tunnel { + # in namespace + ip netns exec at_ns0 \ + ip link add dev $DEV_NS type $TYPE id 2 dstport 6081 remote 172.16.1.200 + ip netns exec at_ns0 ip link set dev $DEV_NS up + ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24 + + # out of namespace + ip link add dev $DEV type $TYPE dstport 6081 external + ip link set dev $DEV up + ip addr add dev $DEV 10.1.1.200/24 +} + +function attach_bpf { + DEV=$1 + SET_TUNNEL=$2 + GET_TUNNEL=$3 + tc qdisc add dev $DEV clsact + tc filter add dev $DEV egress bpf da obj tcbpf2_kern.o sec $SET_TUNNEL + tc filter add dev $DEV ingress bpf da obj tcbpf2_kern.o sec $GET_TUNNEL +} + +function test_gre { + TYPE=gretap + DEV_NS=gretap00 + DEV=gretap11 + config_device + add_gre_tunnel + attach_bpf $DEV gre_set_tunnel gre_get_tunnel + ping -c 1 10.1.1.100 + ip netns exec at_ns0 ping -c 1 10.1.1.200 +} + +function test_vxlan { + TYPE=vxlan + DEV_NS=vxlan00 + DEV=vxlan11 + config_device + add_vxlan_tunnel + attach_bpf $DEV vxlan_set_tunnel vxlan_get_tunnel + ping -c 1 10.1.1.100 + ip netns exec at_ns0 ping -c 1 10.1.1.200 +} + +function test_geneve { + TYPE=geneve + DEV_NS=geneve00 + DEV=geneve11 + config_device + add_geneve_tunnel + attach_bpf $DEV geneve_set_tunnel geneve_get_tunnel + ping -c 1 10.1.1.100 + ip netns exec at_ns0 ping -c 1 10.1.1.200 +} + +function cleanup { + ip netns delete at_ns0 + ip link del veth1 + ip link del $DEV +} + +echo "Testing GRE tunnel..." +test_gre +cleanup +echo "Testing VXLAN tunnel..." +test_vxlan +cleanup +echo "Testing GENEVE tunnel..." 
+test_geneve +cleanup +echo "Success" diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c index fe2fcec98c1f..78c6f131d94f 100644 --- a/samples/bpf/test_verifier.c +++ b/samples/bpf/test_verifier.c @@ -1449,7 +1449,7 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { - "pkt: test1", + "direct packet access: test1", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct __sk_buff, data)), @@ -1466,7 +1466,7 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { - "pkt: test2", + "direct packet access: test2", .insns = { BPF_MOV64_IMM(BPF_REG_0, 1), BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, @@ -1499,7 +1499,7 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { - "pkt: test3", + "direct packet access: test3", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct __sk_buff, data)), @@ -1511,7 +1511,7 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, }, { - "pkt: test4", + "direct packet access: test4", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct __sk_buff, data)), @@ -1528,6 +1528,112 @@ static struct bpf_test tests[] = { .result = REJECT, .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, + { + "helper access to packet: test1, valid packet_ptr range", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup = {5}, + .result_unpriv = ACCEPT, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + }, + { + "helper access to packet: test2, unchecked packet_ptr", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data)), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup = {1}, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_XDP, + }, + { + "helper access to packet: test3, variable add", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10), + BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5), + BPF_MOV64_REG(BPF_REG_5, BPF_REG_4), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_4), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup = {11}, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + }, + { + "helper access to packet: test4, packet_ptr with bad range", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_4, 
BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), + BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup = {7}, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_XDP, + }, + { + "helper access to packet: test5, packet_ptr with too short range", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7), + BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup = {6}, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_XDP, + }, }; static int probe_filter_length(struct bpf_insn *fp) |