From 2121c2712f8249e4d2555a4c989e4666aba34031 Mon Sep 17 00:00:00 2001
From: Sasha Neftin
Date: Wed, 6 Feb 2019 09:48:37 +0200
Subject: igc: Add multiple receive queue control support

Enable multiple receive queues. Program the direction of packets to
the specified queues according to the mode selected in the MRQC
register. Multiple receive queues are defined by filters and by RSS
for 4 queues. Enable/disable RSS hashing and enable multiple receive
queues. This patch allows further ethtool support development.

Signed-off-by: Sasha Neftin
Tested-by: Aaron Brown
Signed-off-by: Jeff Kirsher
---
 drivers/net/ethernet/intel/igc/igc_main.c | 49 +++++++++++++++++++++++++++
 1 file changed, 49 insertions(+)
(limited to 'drivers/net/ethernet/intel/igc/igc_main.c')

diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 87a11879bf2d..a6fe614820b6 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -620,6 +620,55 @@ static void igc_configure_tx(struct igc_adapter *adapter)
  */
 static void igc_setup_mrqc(struct igc_adapter *adapter)
 {
+	struct igc_hw *hw = &adapter->hw;
+	u32 j, num_rx_queues;
+	u32 mrqc, rxcsum;
+	u32 rss_key[10];
+
+	netdev_rss_key_fill(rss_key, sizeof(rss_key));
+	for (j = 0; j < 10; j++)
+		wr32(IGC_RSSRK(j), rss_key[j]);
+
+	num_rx_queues = adapter->rss_queues;
+
+	if (adapter->rss_indir_tbl_init != num_rx_queues) {
+		for (j = 0; j < IGC_RETA_SIZE; j++)
+			adapter->rss_indir_tbl[j] =
+				(j * num_rx_queues) / IGC_RETA_SIZE;
+		adapter->rss_indir_tbl_init = num_rx_queues;
+	}
+	igc_write_rss_indir_tbl(adapter);
+
+	/* Disable raw packet checksumming so that RSS hash is placed in
+	 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
+	 * offloads as they are enabled by default
+	 */
+	rxcsum = rd32(IGC_RXCSUM);
+	rxcsum |= IGC_RXCSUM_PCSD;
+
+	/* Enable Receive Checksum Offload for SCTP */
+	rxcsum |= IGC_RXCSUM_CRCOFL;
+
+	/* Don't need to set TUOFL or IPOFL, they default to 1 */
+	wr32(IGC_RXCSUM, rxcsum);
+
+	/* Generate RSS hash based on packet types, TCP/UDP
+	 * port numbers and/or IPv4/v6 src and dst addresses
+	 */
+	mrqc = IGC_MRQC_RSS_FIELD_IPV4 |
+	       IGC_MRQC_RSS_FIELD_IPV4_TCP |
+	       IGC_MRQC_RSS_FIELD_IPV6 |
+	       IGC_MRQC_RSS_FIELD_IPV6_TCP |
+	       IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;
+
+	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
+		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
+	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
+		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;
+
+	mrqc |= IGC_MRQC_ENABLE_RSS_MQ;
+
+	wr32(IGC_MRQC, mrqc);
 }

 /**
-- cgit v1.2.3

From 6245c8483ae0110d2eb7e7cd2922dba1a5fce720 Mon Sep 17 00:00:00 2001
From: Sasha Neftin
Date: Thu, 14 Feb 2019 13:31:37 +0200
Subject: igc: Extend ethtool support

Add methods to show and configure network flow classification (NFC)
via ethtool: show the specified Rx ntuple filters, and configure
receive network flow classification options or rules.
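
For context, the new handlers below are reached through the standard
SIOCETHTOOL ioctl that the ethtool utility uses. A minimal user-space
sketch that installs one Rx ntuple rule of the kind this patch parses
(roughly equivalent to "ethtool -N <dev> flow-type ether proto 0x88f7
action 1 loc 0"); the helper name and the chosen EtherType are
illustrative only, not part of this patch:

/* Sketch: install an EtherType steering rule, handled by the driver's
 * ETHTOOL_SRXCLSRLINS path (igc_set_rxnfc() below).
 */
#include <arpa/inet.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

static int add_etype_rule(const char *ifname)	/* hypothetical helper */
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd, err;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = ETHER_FLOW;
	nfc.fs.h_u.ether_spec.h_proto = htons(0x88f7);	/* example EtherType */
	nfc.fs.m_u.ether_spec.h_proto = htons(0xffff);	/* full mask only */
	nfc.fs.ring_cookie = 1;		/* steer matches to Rx queue 1 */
	nfc.fs.location = 0;		/* sw_idx, < IGC_MAX_RXNFC_FILTERS */

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;
	err = ioctl(fd, SIOCETHTOOL, &ifr);
	close(fd);
	return err;
}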
Signed-off-by: Sasha Neftin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igc/igc.h | 55 ++- drivers/net/ethernet/intel/igc/igc_defines.h | 4 + drivers/net/ethernet/intel/igc/igc_ethtool.c | 602 +++++++++++++++++++++++++++ drivers/net/ethernet/intel/igc/igc_main.c | 145 +++++++ drivers/net/ethernet/intel/igc/igc_regs.h | 11 + 5 files changed, 814 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet/intel/igc/igc_main.c') diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 473a65c51382..7eee12972d86 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -33,6 +33,10 @@ void igc_write_rss_indir_tbl(struct igc_adapter *adapter); bool igc_has_link(struct igc_adapter *adapter); void igc_reset(struct igc_adapter *adapter); int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx); +int igc_add_mac_steering_filter(struct igc_adapter *adapter, + const u8 *addr, u8 queue, u8 flags); +int igc_del_mac_steering_filter(struct igc_adapter *adapter, + const u8 *addr, u8 queue, u8 flags); extern char igc_driver_name[]; extern char igc_driver_version[]; @@ -292,15 +296,50 @@ struct igc_q_vector { struct igc_ring ring[0] ____cacheline_internodealigned_in_smp; }; +#define MAX_ETYPE_FILTER (4 - 1) + +enum igc_filter_match_flags { + IGC_FILTER_FLAG_ETHER_TYPE = 0x1, + IGC_FILTER_FLAG_VLAN_TCI = 0x2, + IGC_FILTER_FLAG_SRC_MAC_ADDR = 0x4, + IGC_FILTER_FLAG_DST_MAC_ADDR = 0x8, +}; + +/* RX network flow classification data structure */ +struct igc_nfc_input { + /* Byte layout in order, all values with MSB first: + * match_flags - 1 byte + * etype - 2 bytes + * vlan_tci - 2 bytes + */ + u8 match_flags; + __be16 etype; + __be16 vlan_tci; + u8 src_addr[ETH_ALEN]; + u8 dst_addr[ETH_ALEN]; +}; + +struct igc_nfc_filter { + struct hlist_node nfc_node; + struct igc_nfc_input filter; + unsigned long cookie; + u16 etype_reg_index; + u16 sw_idx; + u16 action; +}; + struct igc_mac_addr { u8 addr[ETH_ALEN]; u8 queue; u8 state; /* bitmask */ }; -#define IGC_MAC_STATE_DEFAULT 0x1 -#define IGC_MAC_STATE_MODIFIED 0x2 -#define IGC_MAC_STATE_IN_USE 0x4 +#define IGC_MAC_STATE_DEFAULT 0x1 +#define IGC_MAC_STATE_IN_USE 0x2 +#define IGC_MAC_STATE_SRC_ADDR 0x4 +#define IGC_MAC_STATE_QUEUE_STEERING 0x8 + +#define IGC_MAX_RXNFC_FILTERS 16 /* Board specific private data structure */ struct igc_adapter { @@ -369,8 +408,14 @@ struct igc_adapter { u32 rss_queues; u32 rss_indir_tbl_init; + /* RX network flow classification support */ + struct hlist_head nfc_filter_list; + struct hlist_head cls_flower_list; + unsigned int nfc_filter_count; + /* lock for RX network flow classification filter */ spinlock_t nfc_lock; + bool etype_bitmap[MAX_ETYPE_FILTER]; struct igc_mac_addr *mac_table; @@ -456,6 +501,10 @@ static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data) /* forward declaration */ void igc_reinit_locked(struct igc_adapter *); +int igc_add_filter(struct igc_adapter *adapter, + struct igc_nfc_filter *input); +int igc_erase_filter(struct igc_adapter *adapter, + struct igc_nfc_filter *input); #define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring)) diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index 3666f8837cc8..925c89b57ec5 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -400,4 +400,8 @@ #define IGC_N0_QUEUE -1 +#define IGC_VLAPQF_QUEUE_SEL(_n, q_idx) 
((q_idx) << ((_n) * 4)) +#define IGC_VLAPQF_P_VALID(_n) (0x1 << (3 + (_n) * 4)) +#define IGC_VLAPQF_QUEUE_MASK 0x03 + #endif /* _IGC_DEFINES_H_ */ diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index eff37a6c0afa..25d14fc82bf8 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -2,6 +2,7 @@ /* Copyright (c) 2018 Intel Corporation */ /* ethtool support for igc */ +#include #include #include "igc.h" @@ -643,6 +644,605 @@ static int igc_set_coalesce(struct net_device *netdev, return 0; } +#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +static int igc_get_ethtool_nfc_entry(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct igc_nfc_filter *rule = NULL; + + /* report total rule count */ + cmd->data = IGC_MAX_RXNFC_FILTERS; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (fsp->location <= rule->sw_idx) + break; + } + + if (!rule || fsp->location != rule->sw_idx) + return -EINVAL; + + if (rule->filter.match_flags) { + fsp->flow_type = ETHER_FLOW; + fsp->ring_cookie = rule->action; + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + fsp->h_u.ether_spec.h_proto = rule->filter.etype; + fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK; + } + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + fsp->flow_type |= FLOW_EXT; + fsp->h_ext.vlan_tci = rule->filter.vlan_tci; + fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK); + } + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_dest, + rule->filter.dst_addr); + /* As we only support matching by the full + * mask, return the mask to userspace + */ + eth_broadcast_addr(fsp->m_u.ether_spec.h_dest); + } + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_source, + rule->filter.src_addr); + /* As we only support matching by the full + * mask, return the mask to userspace + */ + eth_broadcast_addr(fsp->m_u.ether_spec.h_source); + } + + return 0; + } + return -EINVAL; +} + +static int igc_get_ethtool_nfc_all(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct igc_nfc_filter *rule; + int cnt = 0; + + /* report total rule count */ + cmd->data = IGC_MAX_RXNFC_FILTERS; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + rule_locs[cnt] = rule->sw_idx; + cnt++; + } + + cmd->rule_cnt = cnt; + + return 0; +} + +static int igc_get_rss_hash_opts(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on igc */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* Fall through */ + case UDP_V4_FLOW: + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* Fall through */ + case SCTP_V4_FLOW: + /* Fall through */ + case AH_ESP_V4_FLOW: + /* Fall through */ + case AH_V4_FLOW: + /* Fall through */ + case ESP_V4_FLOW: + /* Fall through */ + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* Fall through */ + case UDP_V6_FLOW: + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* Fall through */ + case SCTP_V6_FLOW: + /* Fall through */ + case AH_ESP_V6_FLOW: + /* Fall 
through */ + case AH_V6_FLOW: + /* Fall through */ + case ESP_V6_FLOW: + /* Fall through */ + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int igc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct igc_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_rx_queues; + ret = 0; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = adapter->nfc_filter_count; + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = igc_get_ethtool_nfc_entry(adapter, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = igc_get_ethtool_nfc_all(adapter, cmd, rule_locs); + break; + case ETHTOOL_GRXFH: + ret = igc_get_rss_hash_opts(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +#define UDP_RSS_FLAGS (IGC_FLAG_RSS_FIELD_IPV4_UDP | \ + IGC_FLAG_RSS_FIELD_IPV6_UDP) +static int igc_set_rss_hash_opt(struct igc_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + u32 flags = adapter->flags; + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGC_FLAG_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGC_FLAG_RSS_FIELD_IPV4_UDP; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGC_FLAG_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGC_FLAG_RSS_FIELD_IPV6_UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (flags != adapter->flags) { + struct igc_hw *hw = &adapter->hw; + u32 mrqc = rd32(IGC_MRQC); + + if ((flags & UDP_RSS_FLAGS) && + !(adapter->flags & UDP_RSS_FLAGS)) + dev_err(&adapter->pdev->dev, + "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); + + adapter->flags = flags; + + /* Perform hash on these packet types */ + mrqc |= IGC_MRQC_RSS_FIELD_IPV4 | + IGC_MRQC_RSS_FIELD_IPV4_TCP | + IGC_MRQC_RSS_FIELD_IPV6 | + IGC_MRQC_RSS_FIELD_IPV6_TCP; + + mrqc &= ~(IGC_MRQC_RSS_FIELD_IPV4_UDP | + IGC_MRQC_RSS_FIELD_IPV6_UDP); + + if (flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP; + + if (flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP; + + wr32(IGC_MRQC, mrqc); + } + + return 0; +} + +static int igc_rxnfc_write_etype_filter(struct igc_adapter *adapter, + struct igc_nfc_filter *input) +{ + struct igc_hw *hw = &adapter->hw; + u8 
i; + u32 etqf; + u16 etype; + + /* find an empty etype filter register */ + for (i = 0; i < MAX_ETYPE_FILTER; ++i) { + if (!adapter->etype_bitmap[i]) + break; + } + if (i == MAX_ETYPE_FILTER) { + dev_err(&adapter->pdev->dev, "ethtool -N: etype filters are all used.\n"); + return -EINVAL; + } + + adapter->etype_bitmap[i] = true; + + etqf = rd32(IGC_ETQF(i)); + etype = ntohs(input->filter.etype & ETHER_TYPE_FULL_MASK); + + etqf |= IGC_ETQF_FILTER_ENABLE; + etqf &= ~IGC_ETQF_ETYPE_MASK; + etqf |= (etype & IGC_ETQF_ETYPE_MASK); + + etqf &= ~IGC_ETQF_QUEUE_MASK; + etqf |= ((input->action << IGC_ETQF_QUEUE_SHIFT) + & IGC_ETQF_QUEUE_MASK); + etqf |= IGC_ETQF_QUEUE_ENABLE; + + wr32(IGC_ETQF(i), etqf); + + input->etype_reg_index = i; + + return 0; +} + +static int igc_rxnfc_write_vlan_prio_filter(struct igc_adapter *adapter, + struct igc_nfc_filter *input) +{ + struct igc_hw *hw = &adapter->hw; + u8 vlan_priority; + u16 queue_index; + u32 vlapqf; + + vlapqf = rd32(IGC_VLAPQF); + vlan_priority = (ntohs(input->filter.vlan_tci) & VLAN_PRIO_MASK) + >> VLAN_PRIO_SHIFT; + queue_index = (vlapqf >> (vlan_priority * 4)) & IGC_VLAPQF_QUEUE_MASK; + + /* check whether this vlan prio is already set */ + if (vlapqf & IGC_VLAPQF_P_VALID(vlan_priority) && + queue_index != input->action) { + dev_err(&adapter->pdev->dev, "ethtool rxnfc set vlan prio filter failed.\n"); + return -EEXIST; + } + + vlapqf |= IGC_VLAPQF_P_VALID(vlan_priority); + vlapqf |= IGC_VLAPQF_QUEUE_SEL(vlan_priority, input->action); + + wr32(IGC_VLAPQF, vlapqf); + + return 0; +} + +int igc_add_filter(struct igc_adapter *adapter, struct igc_nfc_filter *input) +{ + struct igc_hw *hw = &adapter->hw; + int err = -EINVAL; + + if (hw->mac.type == igc_i225 && + !(input->filter.match_flags & ~IGC_FILTER_FLAG_SRC_MAC_ADDR)) { + dev_err(&adapter->pdev->dev, + "i225 doesn't support flow classification rules specifying only source addresses.\n"); + return -EOPNOTSUPP; + } + + if (input->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + err = igc_rxnfc_write_etype_filter(adapter, input); + if (err) + return err; + } + + if (input->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { + err = igc_add_mac_steering_filter(adapter, + input->filter.dst_addr, + input->action, 0); + err = min_t(int, err, 0); + if (err) + return err; + } + + if (input->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { + err = igc_add_mac_steering_filter(adapter, + input->filter.src_addr, + input->action, + IGC_MAC_STATE_SRC_ADDR); + err = min_t(int, err, 0); + if (err) + return err; + } + + if (input->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) + err = igc_rxnfc_write_vlan_prio_filter(adapter, input); + + return err; +} + +static void igc_clear_etype_filter_regs(struct igc_adapter *adapter, + u16 reg_index) +{ + struct igc_hw *hw = &adapter->hw; + u32 etqf = rd32(IGC_ETQF(reg_index)); + + etqf &= ~IGC_ETQF_QUEUE_ENABLE; + etqf &= ~IGC_ETQF_QUEUE_MASK; + etqf &= ~IGC_ETQF_FILTER_ENABLE; + + wr32(IGC_ETQF(reg_index), etqf); + + adapter->etype_bitmap[reg_index] = false; +} + +static void igc_clear_vlan_prio_filter(struct igc_adapter *adapter, + u16 vlan_tci) +{ + struct igc_hw *hw = &adapter->hw; + u8 vlan_priority; + u32 vlapqf; + + vlan_priority = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + + vlapqf = rd32(IGC_VLAPQF); + vlapqf &= ~IGC_VLAPQF_P_VALID(vlan_priority); + vlapqf &= ~IGC_VLAPQF_QUEUE_SEL(vlan_priority, + IGC_VLAPQF_QUEUE_MASK); + + wr32(IGC_VLAPQF, vlapqf); +} + +int igc_erase_filter(struct igc_adapter *adapter, struct igc_nfc_filter *input) +{ + if 
(input->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) + igc_clear_etype_filter_regs(adapter, + input->etype_reg_index); + + if (input->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) + igc_clear_vlan_prio_filter(adapter, + ntohs(input->filter.vlan_tci)); + + if (input->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) + igc_del_mac_steering_filter(adapter, input->filter.src_addr, + input->action, + IGC_MAC_STATE_SRC_ADDR); + + if (input->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) + igc_del_mac_steering_filter(adapter, input->filter.dst_addr, + input->action, 0); + + return 0; +} + +static int igc_update_ethtool_nfc_entry(struct igc_adapter *adapter, + struct igc_nfc_filter *input, + u16 sw_idx) +{ + struct igc_nfc_filter *rule, *parent; + int err = -EINVAL; + + parent = NULL; + rule = NULL; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + /* hash found, or no matching entry */ + if (rule->sw_idx >= sw_idx) + break; + parent = rule; + } + + /* if there is an old rule occupying our place remove it */ + if (rule && rule->sw_idx == sw_idx) { + if (!input) + err = igc_erase_filter(adapter, rule); + + hlist_del(&rule->nfc_node); + kfree(rule); + adapter->nfc_filter_count--; + } + + /* If no input this was a delete, err should be 0 if a rule was + * successfully found and removed from the list else -EINVAL + */ + if (!input) + return err; + + /* initialize node */ + INIT_HLIST_NODE(&input->nfc_node); + + /* add filter to the list */ + if (parent) + hlist_add_behind(&input->nfc_node, &parent->nfc_node); + else + hlist_add_head(&input->nfc_node, &adapter->nfc_filter_list); + + /* update counts */ + adapter->nfc_filter_count++; + + return 0; +} + +static int igc_add_ethtool_nfc_entry(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct igc_nfc_filter *input, *rule; + int err = 0; + + if (!(netdev->hw_features & NETIF_F_NTUPLE)) + return -EOPNOTSUPP; + + /* Don't allow programming if the action is a queue greater than + * the number of online Rx queues. 
+ */ + if (fsp->ring_cookie == RX_CLS_FLOW_DISC || + fsp->ring_cookie >= adapter->num_rx_queues) { + dev_err(&adapter->pdev->dev, "ethtool -N: The specified action is invalid\n"); + return -EINVAL; + } + + /* Don't allow indexes to exist outside of available space */ + if (fsp->location >= IGC_MAX_RXNFC_FILTERS) { + dev_err(&adapter->pdev->dev, "Location out of range\n"); + return -EINVAL; + } + + if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW) + return -EINVAL; + + input = kzalloc(sizeof(*input), GFP_KERNEL); + if (!input) + return -ENOMEM; + + if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) { + input->filter.etype = fsp->h_u.ether_spec.h_proto; + input->filter.match_flags = IGC_FILTER_FLAG_ETHER_TYPE; + } + + /* Only support matching addresses by the full mask */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) { + input->filter.match_flags |= IGC_FILTER_FLAG_SRC_MAC_ADDR; + ether_addr_copy(input->filter.src_addr, + fsp->h_u.ether_spec.h_source); + } + + /* Only support matching addresses by the full mask */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) { + input->filter.match_flags |= IGC_FILTER_FLAG_DST_MAC_ADDR; + ether_addr_copy(input->filter.dst_addr, + fsp->h_u.ether_spec.h_dest); + } + + if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) { + if (fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) { + err = -EINVAL; + goto err_out; + } + input->filter.vlan_tci = fsp->h_ext.vlan_tci; + input->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI; + } + + input->action = fsp->ring_cookie; + input->sw_idx = fsp->location; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (!memcmp(&input->filter, &rule->filter, + sizeof(input->filter))) { + err = -EEXIST; + dev_err(&adapter->pdev->dev, + "ethtool: this filter is already set\n"); + goto err_out_w_lock; + } + } + + err = igc_add_filter(adapter, input); + if (err) + goto err_out_w_lock; + + igc_update_ethtool_nfc_entry(adapter, input, input->sw_idx); + + spin_unlock(&adapter->nfc_lock); + return 0; + +err_out_w_lock: + spin_unlock(&adapter->nfc_lock); +err_out: + kfree(input); + return err; +} + +static int igc_del_ethtool_nfc_entry(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + int err; + + spin_lock(&adapter->nfc_lock); + err = igc_update_ethtool_nfc_entry(adapter, NULL, fsp->location); + spin_unlock(&adapter->nfc_lock); + + return err; +} + +static int igc_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct igc_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + ret = igc_set_rss_hash_opt(adapter, cmd); + break; + case ETHTOOL_SRXCLSRLINS: + ret = igc_add_ethtool_nfc_entry(adapter, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = igc_del_ethtool_nfc_entry(adapter, cmd); + default: + break; + } + + return ret; +} + void igc_write_rss_indir_tbl(struct igc_adapter *adapter) { struct igc_hw *hw = &adapter->hw; @@ -1013,6 +1613,8 @@ static const struct ethtool_ops igc_ethtool_ops = { .set_pauseparam = igc_set_pauseparam, .get_coalesce = igc_get_coalesce, .set_coalesce = igc_set_coalesce, + .get_rxnfc = igc_get_rxnfc, + .set_rxnfc = igc_set_rxnfc, .get_rxfh_indir_size = igc_get_rxfh_indir_size, .get_rxfh = igc_get_rxfh, .set_rxfh = igc_set_rxfh, diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index a6fe614820b6..8460894829cb 100644 
--- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -1793,6 +1793,29 @@ static void igc_update_stats(struct igc_adapter *adapter) static void igc_nfc_filter_exit(struct igc_adapter *adapter) { + struct igc_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) + igc_erase_filter(adapter, rule); + + hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node) + igc_erase_filter(adapter, rule); + + spin_unlock(&adapter->nfc_lock); +} + +static void igc_nfc_filter_restore(struct igc_adapter *adapter) +{ + struct igc_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) + igc_add_filter(adapter, rule); + + spin_unlock(&adapter->nfc_lock); } /** @@ -1955,6 +1978,7 @@ static void igc_configure(struct igc_adapter *adapter) igc_setup_mrqc(adapter); igc_setup_rctl(adapter); + igc_nfc_filter_restore(adapter); igc_configure_tx(adapter); igc_configure_rx(adapter); @@ -2016,6 +2040,127 @@ static void igc_set_default_mac_filter(struct igc_adapter *adapter) igc_rar_set_index(adapter, 0); } +/* If the filter to be added and an already existing filter express + * the same address and address type, it should be possible to only + * override the other configurations, for example the queue to steer + * traffic. + */ +static bool igc_mac_entry_can_be_used(const struct igc_mac_addr *entry, + const u8 *addr, const u8 flags) +{ + if (!(entry->state & IGC_MAC_STATE_IN_USE)) + return true; + + if ((entry->state & IGC_MAC_STATE_SRC_ADDR) != + (flags & IGC_MAC_STATE_SRC_ADDR)) + return false; + + if (!ether_addr_equal(addr, entry->addr)) + return false; + + return true; +} + +/* Add a MAC filter for 'addr' directing matching traffic to 'queue', + * 'flags' is used to indicate what kind of match is made, match is by + * default for the destination address, if matching by source address + * is desired the flag IGC_MAC_STATE_SRC_ADDR can be used. + */ +static int igc_add_mac_filter_flags(struct igc_adapter *adapter, + const u8 *addr, const u8 queue, + const u8 flags) +{ + struct igc_hw *hw = &adapter->hw; + int rar_entries = hw->mac.rar_entry_count; + int i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + /* Search for the first empty entry in the MAC table. + * Do not touch entries at the end of the table reserved for the VF MAC + * addresses. + */ + for (i = 0; i < rar_entries; i++) { + if (!igc_mac_entry_can_be_used(&adapter->mac_table[i], + addr, flags)) + continue; + + ether_addr_copy(adapter->mac_table[i].addr, addr); + adapter->mac_table[i].queue = queue; + adapter->mac_table[i].state |= IGC_MAC_STATE_IN_USE | flags; + + igc_rar_set_index(adapter, i); + return i; + } + + return -ENOSPC; +} + +int igc_add_mac_steering_filter(struct igc_adapter *adapter, + const u8 *addr, u8 queue, u8 flags) +{ + return igc_add_mac_filter_flags(adapter, addr, queue, + IGC_MAC_STATE_QUEUE_STEERING | flags); +} + +/* Remove a MAC filter for 'addr' directing matching traffic to + * 'queue', 'flags' is used to indicate what kind of match need to be + * removed, match is by default for the destination address, if + * matching by source address is to be removed the flag + * IGC_MAC_STATE_SRC_ADDR can be used. 
+ */ +static int igc_del_mac_filter_flags(struct igc_adapter *adapter, + const u8 *addr, const u8 queue, + const u8 flags) +{ + struct igc_hw *hw = &adapter->hw; + int rar_entries = hw->mac.rar_entry_count; + int i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + /* Search for matching entry in the MAC table based on given address + * and queue. Do not touch entries at the end of the table reserved + * for the VF MAC addresses. + */ + for (i = 0; i < rar_entries; i++) { + if (!(adapter->mac_table[i].state & IGC_MAC_STATE_IN_USE)) + continue; + if ((adapter->mac_table[i].state & flags) != flags) + continue; + if (adapter->mac_table[i].queue != queue) + continue; + if (!ether_addr_equal(adapter->mac_table[i].addr, addr)) + continue; + + /* When a filter for the default address is "deleted", + * we return it to its initial configuration + */ + if (adapter->mac_table[i].state & IGC_MAC_STATE_DEFAULT) { + adapter->mac_table[i].state = + IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE; + } else { + adapter->mac_table[i].state = 0; + adapter->mac_table[i].queue = 0; + memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + } + + igc_rar_set_index(adapter, i); + return 0; + } + + return -ENOENT; +} + +int igc_del_mac_steering_filter(struct igc_adapter *adapter, + const u8 *addr, u8 queue, u8 flags) +{ + return igc_del_mac_filter_flags(adapter, addr, queue, + IGC_MAC_STATE_QUEUE_STEERING | flags); +} + /** * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set * @netdev: network interface device structure diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index 325109cb20cc..50d7c04dccf5 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -83,6 +83,16 @@ /* RSS registers */ #define IGC_MRQC 0x05818 /* Multiple Receive Control - RW */ +/* Filtering Registers */ +#define IGC_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ + +/* ETQF register bit definitions */ +#define IGC_ETQF_FILTER_ENABLE BIT(26) +#define IGC_ETQF_QUEUE_ENABLE BIT(31) +#define IGC_ETQF_QUEUE_SHIFT 16 +#define IGC_ETQF_QUEUE_MASK 0x00070000 +#define IGC_ETQF_ETYPE_MASK 0x0000FFFF + /* Redirection Table - RW Array */ #define IGC_RETA(_i) (0x05C00 + ((_i) * 4)) /* RSS Random Key - RW Array */ @@ -106,6 +116,7 @@ #define IGC_UTA 0x0A000 /* Unicast Table Array - RW */ #define IGC_RAL(_n) (0x05400 + ((_n) * 0x08)) #define IGC_RAH(_n) (0x05404 + ((_n) * 0x08)) +#define IGC_VLAPQF 0x055B0 /* VLAN Priority Queue Filter VLAPQF */ /* Transmit Register Descriptions */ #define IGC_TCTL 0x00400 /* Tx Control - RW */ -- cgit v1.2.3 From 36b9fea60961d7426b6d4b0faaf609e5d820482d Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Mon, 18 Feb 2019 10:37:31 +0200 Subject: igc: Add support for statistics Add support for statistics and show basic counters. 
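
The statistics table added below relies on a name/size/offset
descriptor pattern (IGC_STAT with FIELD_SIZEOF and offsetof), so one
loop can copy counters of mixed widths out of struct igc_adapter. A
self-contained sketch of the same idea, with invented structure and
field names:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_adapter {		/* stand-in for struct igc_adapter */
	uint64_t gprc;
	uint32_t tx_timeout_count;
};

struct demo_stat {
	const char *name;
	size_t size;		/* width of the counter field */
	size_t offset;		/* its position inside the adapter */
};

#define DEMO_STAT(n, f) { n, sizeof(((struct demo_adapter *)0)->f), \
			  offsetof(struct demo_adapter, f) }

static const struct demo_stat stats[] = {
	DEMO_STAT("rx_packets", gprc),
	DEMO_STAT("tx_timeout_count", tx_timeout_count),
};

int main(void)
{
	struct demo_adapter a = { .gprc = 42, .tx_timeout_count = 3 };
	size_t i;

	/* Same copy loop shape as igc_get_ethtool_stats(): pick the
	 * field by offset, dereference by recorded width.
	 */
	for (i = 0; i < sizeof(stats) / sizeof(stats[0]); i++) {
		char *p = (char *)&a + stats[i].offset;
		uint64_t v = (stats[i].size == sizeof(uint64_t)) ?
			     *(uint64_t *)p : *(uint32_t *)p;
		printf("%s: %llu\n", stats[i].name, (unsigned long long)v);
	}
	return 0;
}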
Signed-off-by: Sasha Neftin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igc/igc.h | 4 + drivers/net/ethernet/intel/igc/igc_ethtool.c | 233 +++++++++++++++++++++++++++ drivers/net/ethernet/intel/igc/igc_main.c | 167 ++++++++++++++++++- 3 files changed, 403 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet/intel/igc/igc_main.c') diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 7eee12972d86..0f5534ce27b0 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -37,6 +37,7 @@ int igc_add_mac_steering_filter(struct igc_adapter *adapter, const u8 *addr, u8 queue, u8 flags); int igc_del_mac_steering_filter(struct igc_adapter *adapter, const u8 *addr, u8 queue, u8 flags); +void igc_update_stats(struct igc_adapter *adapter); extern char igc_driver_name[]; extern char igc_driver_version[]; @@ -403,6 +404,9 @@ struct igc_adapter { u16 tx_ring_count; u16 rx_ring_count; + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + u32 rx_hwtstamp_cleared; u32 *shadow_vfta; u32 rss_queues; diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index 25d14fc82bf8..aaa1f97d5920 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -7,6 +7,115 @@ #include "igc.h" +/* forward declaration */ +struct igc_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define IGC_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = FIELD_SIZEOF(struct igc_adapter, _stat), \ + .stat_offset = offsetof(struct igc_adapter, _stat) \ +} + +static const struct igc_stats igc_gstrings_stats[] = { + IGC_STAT("rx_packets", stats.gprc), + IGC_STAT("tx_packets", stats.gptc), + IGC_STAT("rx_bytes", stats.gorc), + IGC_STAT("tx_bytes", stats.gotc), + IGC_STAT("rx_broadcast", stats.bprc), + IGC_STAT("tx_broadcast", stats.bptc), + IGC_STAT("rx_multicast", stats.mprc), + IGC_STAT("tx_multicast", stats.mptc), + IGC_STAT("multicast", stats.mprc), + IGC_STAT("collisions", stats.colc), + IGC_STAT("rx_crc_errors", stats.crcerrs), + IGC_STAT("rx_no_buffer_count", stats.rnbc), + IGC_STAT("rx_missed_errors", stats.mpc), + IGC_STAT("tx_aborted_errors", stats.ecol), + IGC_STAT("tx_carrier_errors", stats.tncrs), + IGC_STAT("tx_window_errors", stats.latecol), + IGC_STAT("tx_abort_late_coll", stats.latecol), + IGC_STAT("tx_deferred_ok", stats.dc), + IGC_STAT("tx_single_coll_ok", stats.scc), + IGC_STAT("tx_multi_coll_ok", stats.mcc), + IGC_STAT("tx_timeout_count", tx_timeout_count), + IGC_STAT("rx_long_length_errors", stats.roc), + IGC_STAT("rx_short_length_errors", stats.ruc), + IGC_STAT("rx_align_errors", stats.algnerrc), + IGC_STAT("tx_tcp_seg_good", stats.tsctc), + IGC_STAT("tx_tcp_seg_failed", stats.tsctfc), + IGC_STAT("rx_flow_control_xon", stats.xonrxc), + IGC_STAT("rx_flow_control_xoff", stats.xoffrxc), + IGC_STAT("tx_flow_control_xon", stats.xontxc), + IGC_STAT("tx_flow_control_xoff", stats.xofftxc), + IGC_STAT("rx_long_byte_count", stats.gorc), + IGC_STAT("tx_dma_out_of_sync", stats.doosync), + IGC_STAT("tx_smbus", stats.mgptc), + IGC_STAT("rx_smbus", stats.mgprc), + IGC_STAT("dropped_smbus", stats.mgpdc), + IGC_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + IGC_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + IGC_STAT("os2bmc_tx_by_host", stats.o2bspc), + IGC_STAT("os2bmc_rx_by_host", stats.b2ogprc), + IGC_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + 
IGC_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped), + IGC_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), +}; + +#define IGC_NETDEV_STAT(_net_stat) { \ + .stat_string = __stringify(_net_stat), \ + .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \ + .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \ +} + +static const struct igc_stats igc_gstrings_net_stats[] = { + IGC_NETDEV_STAT(rx_errors), + IGC_NETDEV_STAT(tx_errors), + IGC_NETDEV_STAT(tx_dropped), + IGC_NETDEV_STAT(rx_length_errors), + IGC_NETDEV_STAT(rx_over_errors), + IGC_NETDEV_STAT(rx_frame_errors), + IGC_NETDEV_STAT(rx_fifo_errors), + IGC_NETDEV_STAT(tx_fifo_errors), + IGC_NETDEV_STAT(tx_heartbeat_errors) +}; + +enum igc_diagnostics_results { + TEST_REG = 0, + TEST_EEP, + TEST_IRQ, + TEST_LOOP, + TEST_LINK +}; + +static const char igc_gstrings_test[][ETH_GSTRING_LEN] = { + [TEST_REG] = "Register test (offline)", + [TEST_EEP] = "Eeprom test (offline)", + [TEST_IRQ] = "Interrupt test (offline)", + [TEST_LOOP] = "Loopback test (offline)", + [TEST_LINK] = "Link test (on/offline)" +}; + +#define IGC_TEST_LEN (sizeof(igc_gstrings_test) / ETH_GSTRING_LEN) + +#define IGC_GLOBAL_STATS_LEN \ + (sizeof(igc_gstrings_stats) / sizeof(struct igc_stats)) +#define IGC_NETDEV_STATS_LEN \ + (sizeof(igc_gstrings_net_stats) / sizeof(struct igc_stats)) +#define IGC_RX_QUEUE_STATS_LEN \ + (sizeof(struct igc_rx_queue_stats) / sizeof(u64)) +#define IGC_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */ +#define IGC_QUEUE_STATS_LEN \ + ((((struct igc_adapter *)netdev_priv(netdev))->num_rx_queues * \ + IGC_RX_QUEUE_STATS_LEN) + \ + (((struct igc_adapter *)netdev_priv(netdev))->num_tx_queues * \ + IGC_TX_QUEUE_STATS_LEN)) +#define IGC_STATS_LEN \ + (IGC_GLOBAL_STATS_LEN + IGC_NETDEV_STATS_LEN + IGC_QUEUE_STATS_LEN) + static const char igc_priv_flags_strings[][ETH_GSTRING_LEN] = { #define IGC_PRIV_FLAGS_LEGACY_RX BIT(0) "legacy-rx", @@ -546,6 +655,127 @@ static int igc_set_pauseparam(struct net_device *netdev, return retval; } +static void igc_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *igc_gstrings_test, + IGC_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) { + memcpy(p, igc_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < IGC_NETDEV_STATS_LEN; i++) { + memcpy(p, igc_gstrings_net_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_tx_queues; i++) { + sprintf(p, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_restart", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_rx_queues; i++) { + sprintf(p, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_drops", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_csum_err", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_alloc_failed", i); + p += ETH_GSTRING_LEN; + } + /* BUG_ON(p - data != IGC_STATS_LEN * ETH_GSTRING_LEN); */ + break; + case ETH_SS_PRIV_FLAGS: + memcpy(data, igc_priv_flags_strings, + IGC_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); + break; + } +} + +static int igc_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + 
return IGC_STATS_LEN; + case ETH_SS_TEST: + return IGC_TEST_LEN; + case ETH_SS_PRIV_FLAGS: + return IGC_PRIV_FLAGS_STR_LEN; + default: + return -ENOTSUPP; + } +} + +static void igc_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + unsigned int start; + struct igc_ring *ring; + int i, j; + char *p; + + spin_lock(&adapter->stats64_lock); + igc_update_stats(adapter); + + for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) { + p = (char *)adapter + igc_gstrings_stats[i].stat_offset; + data[i] = (igc_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < IGC_NETDEV_STATS_LEN; j++, i++) { + p = (char *)net_stats + igc_gstrings_net_stats[j].stat_offset; + data[i] = (igc_gstrings_net_stats[j].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < adapter->num_tx_queues; j++) { + u64 restart2; + + ring = adapter->tx_ring[j]; + do { + start = u64_stats_fetch_begin_irq(&ring->tx_syncp); + data[i] = ring->tx_stats.packets; + data[i + 1] = ring->tx_stats.bytes; + data[i + 2] = ring->tx_stats.restart_queue; + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); + do { + start = u64_stats_fetch_begin_irq(&ring->tx_syncp2); + restart2 = ring->tx_stats.restart_queue2; + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start)); + data[i + 2] += restart2; + + i += IGC_TX_QUEUE_STATS_LEN; + } + for (j = 0; j < adapter->num_rx_queues; j++) { + ring = adapter->rx_ring[j]; + do { + start = u64_stats_fetch_begin_irq(&ring->rx_syncp); + data[i] = ring->rx_stats.packets; + data[i + 1] = ring->rx_stats.bytes; + data[i + 2] = ring->rx_stats.drops; + data[i + 3] = ring->rx_stats.csum_err; + data[i + 4] = ring->rx_stats.alloc_failed; + } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); + i += IGC_RX_QUEUE_STATS_LEN; + } + spin_unlock(&adapter->stats64_lock); +} + static int igc_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) { @@ -1611,6 +1841,9 @@ static const struct ethtool_ops igc_ethtool_ops = { .set_ringparam = igc_set_ringparam, .get_pauseparam = igc_get_pauseparam, .set_pauseparam = igc_set_pauseparam, + .get_strings = igc_get_strings, + .get_sset_count = igc_get_sset_count, + .get_ethtool_stats = igc_get_ethtool_stats, .get_coalesce = igc_get_coalesce, .set_coalesce = igc_set_coalesce, .get_rxnfc = igc_get_rxnfc, diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 8460894829cb..1d21b95d9e2c 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -1787,8 +1787,173 @@ void igc_up(struct igc_adapter *adapter) * igc_update_stats - Update the board statistics counters * @adapter: board private structure */ -static void igc_update_stats(struct igc_adapter *adapter) +void igc_update_stats(struct igc_adapter *adapter) { + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + u64 _bytes, _packets; + u64 bytes, packets; + unsigned int start; + u32 mpc; + int i; + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. 
+ */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + packets = 0; + bytes = 0; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igc_ring *ring = adapter->rx_ring[i]; + u32 rqdpc = rd32(IGC_RQDPC(i)); + + if (hw->mac.type >= igc_i225) + wr32(IGC_RQDPC(i), 0); + + if (rqdpc) { + ring->rx_stats.drops += rqdpc; + net_stats->rx_fifo_errors += rqdpc; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->rx_syncp); + _bytes = ring->rx_stats.bytes; + _packets = ring->rx_stats.packets; + } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; + + packets = 0; + bytes = 0; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + do { + start = u64_stats_fetch_begin_irq(&ring->tx_syncp); + _bytes = ring->tx_stats.bytes; + _packets = ring->tx_stats.packets; + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + rcu_read_unlock(); + + /* read stats registers */ + adapter->stats.crcerrs += rd32(IGC_CRCERRS); + adapter->stats.gprc += rd32(IGC_GPRC); + adapter->stats.gorc += rd32(IGC_GORCL); + rd32(IGC_GORCH); /* clear GORCL */ + adapter->stats.bprc += rd32(IGC_BPRC); + adapter->stats.mprc += rd32(IGC_MPRC); + adapter->stats.roc += rd32(IGC_ROC); + + adapter->stats.prc64 += rd32(IGC_PRC64); + adapter->stats.prc127 += rd32(IGC_PRC127); + adapter->stats.prc255 += rd32(IGC_PRC255); + adapter->stats.prc511 += rd32(IGC_PRC511); + adapter->stats.prc1023 += rd32(IGC_PRC1023); + adapter->stats.prc1522 += rd32(IGC_PRC1522); + adapter->stats.symerrs += rd32(IGC_SYMERRS); + adapter->stats.sec += rd32(IGC_SEC); + + mpc = rd32(IGC_MPC); + adapter->stats.mpc += mpc; + net_stats->rx_fifo_errors += mpc; + adapter->stats.scc += rd32(IGC_SCC); + adapter->stats.ecol += rd32(IGC_ECOL); + adapter->stats.mcc += rd32(IGC_MCC); + adapter->stats.latecol += rd32(IGC_LATECOL); + adapter->stats.dc += rd32(IGC_DC); + adapter->stats.rlec += rd32(IGC_RLEC); + adapter->stats.xonrxc += rd32(IGC_XONRXC); + adapter->stats.xontxc += rd32(IGC_XONTXC); + adapter->stats.xoffrxc += rd32(IGC_XOFFRXC); + adapter->stats.xofftxc += rd32(IGC_XOFFTXC); + adapter->stats.fcruc += rd32(IGC_FCRUC); + adapter->stats.gptc += rd32(IGC_GPTC); + adapter->stats.gotc += rd32(IGC_GOTCL); + rd32(IGC_GOTCH); /* clear GOTCL */ + adapter->stats.rnbc += rd32(IGC_RNBC); + adapter->stats.ruc += rd32(IGC_RUC); + adapter->stats.rfc += rd32(IGC_RFC); + adapter->stats.rjc += rd32(IGC_RJC); + adapter->stats.tor += rd32(IGC_TORH); + adapter->stats.tot += rd32(IGC_TOTH); + adapter->stats.tpr += rd32(IGC_TPR); + + adapter->stats.ptc64 += rd32(IGC_PTC64); + adapter->stats.ptc127 += rd32(IGC_PTC127); + adapter->stats.ptc255 += rd32(IGC_PTC255); + adapter->stats.ptc511 += rd32(IGC_PTC511); + adapter->stats.ptc1023 += rd32(IGC_PTC1023); + adapter->stats.ptc1522 += rd32(IGC_PTC1522); + + adapter->stats.mptc += rd32(IGC_MPTC); + adapter->stats.bptc += rd32(IGC_BPTC); + + adapter->stats.tpt += rd32(IGC_TPT); + adapter->stats.colc += rd32(IGC_COLC); + + adapter->stats.algnerrc += rd32(IGC_ALGNERRC); + + adapter->stats.tsctc += rd32(IGC_TSCTC); + adapter->stats.tsctfc += rd32(IGC_TSCTFC); + + adapter->stats.iac += rd32(IGC_IAC); + adapter->stats.icrxoc += rd32(IGC_ICRXOC); + adapter->stats.icrxptc += rd32(IGC_ICRXPTC); + 
adapter->stats.icrxatc += rd32(IGC_ICRXATC); + adapter->stats.ictxptc += rd32(IGC_ICTXPTC); + adapter->stats.ictxatc += rd32(IGC_ICTXATC); + adapter->stats.ictxqec += rd32(IGC_ICTXQEC); + adapter->stats.ictxqmtc += rd32(IGC_ICTXQMTC); + adapter->stats.icrxdmtc += rd32(IGC_ICRXDMTC); + + /* Fill out the OS statistics structure */ + net_stats->multicast = adapter->stats.mprc; + net_stats->collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + net_stats->rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + net_stats->rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + net_stats->rx_crc_errors = adapter->stats.crcerrs; + net_stats->rx_frame_errors = adapter->stats.algnerrc; + net_stats->rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + net_stats->tx_errors = adapter->stats.ecol + + adapter->stats.latecol; + net_stats->tx_aborted_errors = adapter->stats.ecol; + net_stats->tx_window_errors = adapter->stats.latecol; + net_stats->tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Management Stats */ + adapter->stats.mgptc += rd32(IGC_MGTPTC); + adapter->stats.mgprc += rd32(IGC_MGTPRC); + adapter->stats.mgpdc += rd32(IGC_MGTPDC); } static void igc_nfc_filter_exit(struct igc_adapter *adapter) -- cgit v1.2.3 From 65cd3a725e908fe5a93d507411c3ea83157d10c4 Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Wed, 20 Feb 2019 14:39:31 +0200 Subject: igc: Add support for the ntuple feature Copy the ntuple feature into list of user selectable features. Enable the ntuple feature. Signed-off-by: Sasha Neftin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igc/igc_defines.h | 3 + drivers/net/ethernet/intel/igc/igc_main.c | 86 ++++++++++++++++++++++++++++ 2 files changed, 89 insertions(+) (limited to 'drivers/net/ethernet/intel/igc/igc_main.c') diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index 925c89b57ec5..a9a30268de59 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -400,6 +400,9 @@ #define IGC_N0_QUEUE -1 +#define IGC_MAX_MAC_HDR_LEN 127 +#define IGC_MAX_NETWORK_HDR_LEN 511 + #define IGC_VLAPQF_QUEUE_SEL(_n, q_idx) ((q_idx) << ((_n) * 4)) #define IGC_VLAPQF_P_VALID(_n) (0x1 << (3 + (_n) * 4)) #define IGC_VLAPQF_QUEUE_MASK 0x03 diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 1d21b95d9e2c..a883b3f357e7 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -2127,6 +2127,86 @@ static struct net_device_stats *igc_get_stats(struct net_device *netdev) return &netdev->stats; } +static netdev_features_t igc_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. 
+ */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int igc_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + struct igc_adapter *adapter = netdev_priv(netdev); + + /* Add VLAN support */ + if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) + return 0; + + if (!(features & NETIF_F_NTUPLE)) { + struct hlist_node *node2; + struct igc_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + hlist_for_each_entry_safe(rule, node2, + &adapter->nfc_filter_list, nfc_node) { + igc_erase_filter(adapter, rule); + hlist_del(&rule->nfc_node); + kfree(rule); + } + spin_unlock(&adapter->nfc_lock); + adapter->nfc_filter_count = 0; + } + + netdev->features = features; + + if (netif_running(netdev)) + igc_reinit_locked(adapter); + else + igc_reset(adapter); + + return 1; +} + +static netdev_features_t +igc_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + unsigned int network_hdr_len, mac_hdr_len; + + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_TSO | + NETIF_F_TSO6); + + /* We can only support IPv4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported. + */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; + + return features; +} + /** * igc_configure - configure the hardware for RX and TX * @adapter: private board structure @@ -3793,6 +3873,9 @@ static const struct net_device_ops igc_netdev_ops = { .ndo_set_mac_address = igc_set_mac, .ndo_change_mtu = igc_change_mtu, .ndo_get_stats = igc_get_stats, + .ndo_fix_features = igc_fix_features, + .ndo_set_features = igc_set_features, + .ndo_features_check = igc_features_check, }; /* PCIe configuration access */ @@ -4022,6 +4105,9 @@ static int igc_probe(struct pci_dev *pdev, if (err) goto err_sw_init; + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= NETIF_F_NTUPLE; + /* MTU range: 68 - 9216 */ netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; -- cgit v1.2.3 From 6b16f9ee89b8d5709f24bc3ac89ae8b5452c0d7c Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 1 Apr 2019 16:42:14 +0200 Subject: net: move skb->xmit_more hint to softnet data There are two reasons for this. First, the xmit_more flag conceptually doesn't fit into the skb, as xmit_more is not a property related to the skb. Its only a hint to the driver that the stack is about to transmit another packet immediately. Second, it was only done this way to not have to pass another argument to ndo_start_xmit(). We can place xmit_more in the softnet data, next to the device recursion. The recursion counter is already written to on each transmit. The "more" indicator is placed right next to it. Drivers can use the netdev_xmit_more() helper instead of skb->xmit_more to check the "more packets coming" hint. 
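
The conversion pattern the per-driver hunks below all follow looks
roughly like this in an invented driver (foo_* names and types are
hypothetical; only netdev_xmit_more(), netif_xmit_stopped() and the
tail write mirror the real code):

#include <linux/netdevice.h>
#include <linux/io.h>

struct foo_ring {		/* hypothetical driver state */
	u16 queue_index;
	u16 next_to_use;
	void __iomem *tail;
};

/* Assumed to exist elsewhere in this hypothetical driver. */
struct foo_ring *foo_select_ring(struct net_device *dev, struct sk_buff *skb);
void foo_map_and_post(struct foo_ring *ring, struct sk_buff *skb);

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_ring *ring = foo_select_ring(dev, skb);
	struct netdev_queue *txq = netdev_get_tx_queue(dev, ring->queue_index);

	foo_map_and_post(ring, skb);

	/* Ring the doorbell (expensive MMIO) only when the stack says
	 * no more packets are coming, or the queue just stopped and
	 * nothing else would flush the posted descriptors.
	 */
	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		writel(ring->next_to_use, ring->tail);

	return NETDEV_TX_OK;
}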
skb->xmit_more is retained (but always 0) to not cause build breakage. This change takes care of the simple s/skb->xmit_more/netdev_xmit_more()/ conversions. Remaining drivers are converted in the next patches. Suggested-by: Eric Dumazet Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 2 +- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 4 ++-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 2 +- drivers/net/ethernet/broadcom/tg3.c | 2 +- drivers/net/ethernet/cavium/liquidio/lio_main.c | 2 +- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 2 +- drivers/net/ethernet/cisco/enic/enic_main.c | 2 +- drivers/net/ethernet/emulex/benet/be_main.c | 2 +- drivers/net/ethernet/huawei/hinic/hinic_tx.c | 2 +- drivers/net/ethernet/intel/e1000/e1000_main.c | 2 +- drivers/net/ethernet/intel/e1000e/netdev.c | 2 +- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 2 +- drivers/net/ethernet/intel/iavf/iavf_txrx.c | 2 +- drivers/net/ethernet/intel/ice/ice_txrx.c | 2 +- drivers/net/ethernet/intel/igb/igb_main.c | 2 +- drivers/net/ethernet/intel/igc/igc_main.c | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 +- drivers/net/ethernet/marvell/mvneta.c | 2 +- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 3 ++- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 2 +- drivers/net/ethernet/qlogic/qede/qede_fp.c | 4 ++-- drivers/net/ethernet/rdc/r6040.c | 2 +- drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c | 2 +- drivers/net/hyperv/netvsc.c | 2 +- drivers/net/virtio_net.c | 2 +- drivers/staging/mt7621-eth/mtk_eth_soc.c | 6 ++++-- include/linux/netdevice.h | 2 +- 29 files changed, 35 insertions(+), 32 deletions(-) (limited to 'drivers/net/ethernet/intel/igc/igc_main.c') diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 71c8cac6e44e..7e40d14682f7 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2236,7 +2236,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) } } - if (netif_xmit_stopped(txq) || !skb->xmit_more) { + if (netif_xmit_stopped(txq) || !netdev_xmit_more()) { /* trigger the dma engine. 
ena_com_write_sq_doorbell()
	 * has a mb
	 */
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 4666084eda16..d5fd49dd25f3 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1887,7 +1887,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
	smp_wmb();

	ring->cur = cur_index + 1;

-	if (!packet->skb->xmit_more ||
+	if (!netdev_xmit_more() ||
	    netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
						   channel->queue_index)))
		xgbe_tx_start_xmit(channel, ring);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 35e34e23ba33..d22691403d28 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -551,7 +551,7 @@ normal_tx:
	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

-	if (!skb->xmit_more || netif_xmit_stopped(txq))
+	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		bnxt_db_write(bp, &txr->tx_db, prod);

 tx_done:
@@ -559,7 +559,7 @@ tx_done:
	mmiowb();

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
-		if (skb->xmit_more && !tx_buf->is_push)
+		if (netdev_xmit_more() && !tx_buf->is_push)
			bnxt_db_write(bp, &txr->tx_db, prod);

		netif_tx_stop_queue(txq);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 983245c0867c..4fd973571e4c 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1665,7 +1665,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
		netif_tx_stop_queue(txq);

-	if (!skb->xmit_more || netif_xmit_stopped(txq))
+	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		/* Packets are ready, update producer index */
		bcmgenet_tdma_ring_writel(priv, ring->index,
					  ring->prod_index, TDMA_PROD_INDEX);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 328373e0578f..45ccadee02af 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8156,7 +8156,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
		netif_tx_wake_queue(txq);
	}

-	if (!skb->xmit_more || netif_xmit_stopped(txq)) {
+	if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
		/* Packets are ready, update Tx producer idx on card.
		 */
		tw32_tx_mbox(tnapi->prodmbox, entry);
		mmiowb();
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index fb6f813cff65..eab805579f96 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -2522,7 +2522,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
		irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
	}

-	xmit_more = skb->xmit_more;
+	xmit_more = netdev_xmit_more();

	if (unlikely(cmdsetup.s.timestamp))
		status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 54b245797d2e..db0b90555acb 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -1585,7 +1585,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
		irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
	}

-	xmit_more = skb->xmit_more;
+	xmit_more = netdev_xmit_more();

	if (unlikely(cmdsetup.s.timestamp))
		status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 733d9172425b..acb2856936d2 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -897,7 +897,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_tx_stop_queue(txq);
	skb_tx_timestamp(skb);
-	if (!skb->xmit_more || netif_xmit_stopped(txq))
+	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		vnic_wq_doorbell(wq);

	spin_unlock(&enic->wq_lock[txq_map]);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 3c7c04406a2b..e2f9fbced174 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1376,7 +1376,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
-	bool flush = !skb->xmit_more;
+	bool flush = !netdev_xmit_more();
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index e17bf33eba0c..0fbe8046824b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -518,7 +518,7 @@ process_sq_wqe:
 flush_skbs:
	netdev_txq = netdev_get_tx_queue(netdev, q_id);
-	if ((!skb->xmit_more) || (netif_xmit_stopped(netdev_txq)))
+	if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq)))
		hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);

	return err;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index a7c76732849f..6f72ab139fd9 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3267,7 +3267,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
		/* Make sure there is space in the ring for the next send.
		 */
		e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);

-		if (!skb->xmit_more ||
+		if (!netdev_xmit_more() ||
		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
			writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
			/* we need this if more than one processor can write to
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 745c1242a2d9..a8fa4a1628f5 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5897,7 +5897,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				     DIV_ROUND_UP(PAGE_SIZE,
						  adapter->tx_fifo_limit) + 2));

-		if (!skb->xmit_more ||
+		if (!netdev_xmit_more() ||
		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
				e1000e_update_tdt_wa(tx_ring,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 5a0419421511..e2fa112bed9a 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1035,7 +1035,7 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring,
	fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* notify HW of packet */
-	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
		writel(i, tx_ring->tail);

		/* we need this if more than one processor can write to our tail
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 6c97667d20ef..1a95223c9f99 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3469,7 +3469,7 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
	first->next_to_watch = tx_desc;

	/* notify HW of packet */
-	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
		writel(i, tx_ring->tail);

		/* we need this if more than one processor can write to our tail
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 9b4d7cec2e18..b64187753ad6 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -2358,7 +2358,7 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
	first->next_to_watch = tx_desc;

	/* notify HW of packet */
-	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
		writel(i, tx_ring->tail);

		/* we need this if more than one processor can write to our tail
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index f2462799154a..a6f7b7feaf3c 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1646,7 +1646,7 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* notify HW of packet */
-	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
		writel(i, tx_ring->tail);

		/* we need this if more than one processor can write to our tail
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index bea7175d171b..32d61d5a2706 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -6029,7 +6029,7 @@ static int igb_tx_map(struct igb_ring *tx_ring,
	/* Make sure there is space in the ring for the next send. */
	igb_maybe_stop_tx(tx_ring, DESC_NEEDED);

-	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
		writel(i, tx_ring->tail);

		/* we need this if more than one processor can write to our tail
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index a883b3f357e7..f79728381e8a 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -939,7 +939,7 @@ static int igc_tx_map(struct igc_ring *tx_ring,
	/* Make sure there is space in the ring for the next send. */
	igc_maybe_stop_tx(tx_ring, DESC_NEEDED);

-	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
		writel(i, tx_ring->tail);

		/* we need this if more than one processor can write to our tail
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 16c728984164..60cec3540dd7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -8297,7 +8297,7 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
	ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);

-	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
		writel(i, tx_ring->tail);

		/* we need this if more than one processor can write to our tail
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index a944be3c57b1..bb68737dce56 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2467,7 +2467,7 @@ out:
		if (txq->count >= txq->tx_stop_threshold)
			netif_tx_stop_queue(nq);

-		if (!skb->xmit_more || netif_xmit_stopped(nq) ||
+		if (!netdev_xmit_more() || netif_xmit_stopped(nq) ||
		    txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
			mvneta_txq_pend_desc_add(pp, txq, frags);
		else
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 549d36497b8c..53abe925ecb1 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -767,7 +767,8 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
	 */
	wmb();

-	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
+	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
+	    !netdev_xmit_more())
		mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);

	return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 99200b5dac76..961cd5e7bf2b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -909,7 +909,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
		nfp_net_tx_ring_stop(nd_q, tx_ring);

	tx_ring->wr_ptr_add += nr_frags + 1;
-	if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, skb->xmit_more))
+	if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, netdev_xmit_more()))
		nfp_net_tx_xmit_more_flush(tx_ring);

	return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index c342b07e3a93..954015d2011a 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1665,12 +1665,12 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
	txq->tx_db.data.bd_prod =
		cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));

-	if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
+	if (!netdev_xmit_more() || netif_xmit_stopped(netdev_txq))
		qede_update_tx_producer(txq);

	if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
		      < (MAX_SKB_FRAGS + 1))) {
-		if (skb->xmit_more)
+		if (netdev_xmit_more())
			qede_update_tx_producer(txq);

		netif_tx_stop_queue(netdev_txq);
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 04aa592f35c3..ad335bca3273 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -840,7 +840,7 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
	skb_tx_timestamp(skb);

	/* Trigger the MAC to check the TX descriptor */
-	if (!skb->xmit_more || netif_queue_stopped(dev))
+	if (!netdev_xmit_more() || netif_queue_stopped(dev))
		iowrite16(TM2TX, ioaddr + MTPR);

	lp->tx_insert_ptr = descptr->vndescp;
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
index 99d86e39ff54..bf6c1c6779ff 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
@@ -995,7 +995,7 @@ static void xlgmac_dev_xmit(struct xlgmac_channel *channel)
	smp_wmb();

	ring->cur = cur_index + 1;

-	if (!pkt_info->skb->xmit_more ||
+	if (!netdev_xmit_more() ||
	    netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
						   channel->queue_index)))
		xlgmac_tx_start_xmit(channel, ring);
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 813d195bbd57..9a022539d305 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -964,7 +964,7 @@ int netvsc_send(struct net_device *ndev,
	/* Keep aggregating only if stack says more data is coming
	 * and not doing mixed modes send and not flow blocked
	 */
-	xmit_more = skb->xmit_more &&
+	xmit_more = netdev_xmit_more() &&
		!packet->cp_partial &&
		!netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 1b03c4b6ebff..ba246fc475ae 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1568,7 +1568,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
	struct send_queue *sq = &vi->sq[qnum];
	int err;
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
-	bool kick = !skb->xmit_more;
+	bool kick = !netdev_xmit_more();
	bool use_napi = sq->napi.weight;

	/* Free up any pending old buffers before queueing new ones.
	 */
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.c b/drivers/staging/mt7621-eth/mtk_eth_soc.c
index 6027b19f7bc2..02a8584b3d1d 100644
--- a/drivers/staging/mt7621-eth/mtk_eth_soc.c
+++ b/drivers/staging/mt7621-eth/mtk_eth_soc.c
@@ -741,7 +741,8 @@ static int mtk_pdma_tx_map(struct sk_buff *skb, struct net_device *dev,
	wmb();
	atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));

-	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
+	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
+	    !netdev_xmit_more())
		mtk_reg_w32(eth, ring->tx_next_idx, MTK_REG_TX_CTX_IDX0);

	return 0;
@@ -935,7 +936,8 @@ static int mtk_qdma_tx_map(struct sk_buff *skb, struct net_device *dev,
	 */
	wmb();

-	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
+	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
+	    !netdev_xmit_more())
		mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);

	return 0;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 2b25824642fa..eb9f05e0863d 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -4424,7 +4424,7 @@ static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
					      struct sk_buff *skb, struct net_device *dev,
					      bool more)
 {
-	skb->xmit_more = more ? 1 : 0;
+	__this_cpu_write(softnet_data.xmit.more, more);
	return ops->ndo_start_xmit(skb, dev);
 }
--
cgit v1.2.3


From c43f1255b866b423d2381f77eaa2cbc64a9c49aa Mon Sep 17 00:00:00 2001
From: Stanislav Fomichev
Date: Mon, 22 Apr 2019 08:55:48 -0700
Subject: net: pass net_device argument to the eth_get_headlen

Update all users of eth_get_headlen to pass the network device, fetch
the network namespace from it, and pass the namespace down to the flow
dissector. This commit is a no-op until an administrator installs a BPF
flow dissector program.
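For illustration only (not part of the upstream patch), a converted RX-path
caller follows this pattern after the change; RX_HDR_SIZE stands in for a
driver-specific copy budget such as IGC_RX_HDR_LEN, and va/size/skb mirror the
locals seen in the per-driver hunks below:

	/* Sketch: trim the header copy with the new three-argument helper.
	 * Passing skb->dev lets eth_get_headlen() resolve dev_net(dev) and
	 * consult a BPF flow dissector attached to that namespace.
	 */
	headlen = size;
	if (headlen > RX_HDR_SIZE)	/* RX_HDR_SIZE: hypothetical budget */
		headlen = eth_get_headlen(skb->dev, va, RX_HDR_SIZE);

	/* copy only the parsed headers into the skb's linear area */
	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));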
Cc: Maxim Krasnyansky
Cc: Saeed Mahameed
Cc: Jeff Kirsher
Cc: intel-wired-lan@lists.osuosl.org
Cc: Yisen Zhuang
Cc: Salil Mehta
Cc: Michael Chan
Cc: Igor Russkikh
Signed-off-by: Stanislav Fomichev
Signed-off-by: Daniel Borkmann
---
 drivers/net/ethernet/aquantia/atlantic/aq_ring.c  | 3 ++-
 drivers/net/ethernet/broadcom/bnxt/bnxt.c         | 2 +-
 drivers/net/ethernet/hisilicon/hns/hns_enet.c     | 2 +-
 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c   | 2 +-
 drivers/net/ethernet/intel/fm10k/fm10k_main.c     | 2 +-
 drivers/net/ethernet/intel/i40e/i40e_txrx.c       | 3 ++-
 drivers/net/ethernet/intel/iavf/iavf_txrx.c       | 2 +-
 drivers/net/ethernet/intel/ice/ice_txrx.c         | 2 +-
 drivers/net/ethernet/intel/igb/igb_main.c         | 2 +-
 drivers/net/ethernet/intel/igc/igc_main.c         | 2 +-
 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c     | 2 +-
 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 3 ++-
 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c   | 2 +-
 drivers/net/tun.c                                 | 3 ++-
 include/linux/etherdevice.h                       | 2 +-
 net/ethernet/eth.c                                | 5 +++--
 16 files changed, 22 insertions(+), 17 deletions(-)

(limited to 'drivers/net/ethernet/intel/igc/igc_main.c')

diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index c64e2fb5a4f1..350e385528fd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -354,7 +354,8 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
			hdr_len = buff->len;
			if (hdr_len > AQ_CFG_RX_HDR_SIZE)
-				hdr_len = eth_get_headlen(aq_buf_vaddr(&buff->rxdata),
+				hdr_len = eth_get_headlen(skb->dev,
+							  aq_buf_vaddr(&buff->rxdata),
							  AQ_CFG_RX_HDR_SIZE);

			memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 6528a597367b..526f36dcb204 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -899,7 +899,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
			     DMA_ATTR_WEAK_ORDERING);

	if (unlikely(!payload))
-		payload = eth_get_headlen(data_ptr, len);
+		payload = eth_get_headlen(bp->dev, data_ptr, len);

	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
	if (!skb) {
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 297b95c1b3c1..65b985acae38 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -598,7 +598,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
	} else {
		ring->stats.seg_pkt_cnt++;

-		pull_len = eth_get_headlen(va, HNS_RX_HEAD_SIZE);
+		pull_len = eth_get_headlen(ndev, va, HNS_RX_HEAD_SIZE);
		memcpy(__skb_put(skb, pull_len), va,
		       ALIGN(pull_len, sizeof(long)));
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 176d4b965709..5f7b51c6ee91 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2580,7 +2580,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
	ring->stats.seg_pkt_cnt++;
	u64_stats_update_end(&ring->syncp);

-	ring->pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
+	ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE);
	__skb_put(skb, ring->pull_len);
	hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
			    desc_cb);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 2325cee76211..b4d970e44163 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -280,7 +280,7 @@ static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
-	pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN);
+	pull_len = eth_get_headlen(skb->dev, va, FM10K_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 1a95223c9f99..e1931701cd7e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2035,7 +2035,8 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > I40E_RX_HDR_SIZE)
-		headlen = eth_get_headlen(xdp->data, I40E_RX_HDR_SIZE);
+		headlen = eth_get_headlen(skb->dev, xdp->data,
+					  I40E_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index b64187753ad6..cf8be63a8a4f 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1315,7 +1315,7 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > IAVF_RX_HDR_SIZE)
-		headlen = eth_get_headlen(va, IAVF_RX_HDR_SIZE);
+		headlen = eth_get_headlen(skb->dev, va, IAVF_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 79043fec0187..259f118c7d8b 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -699,7 +699,7 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > ICE_RX_HDR_SIZE)
-		headlen = eth_get_headlen(va, ICE_RX_HDR_SIZE);
+		headlen = eth_get_headlen(skb->dev, va, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index acbb5b4f333d..9b8a4bb25327 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -8051,7 +8051,7 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > IGB_RX_HDR_LEN)
-		headlen = eth_get_headlen(va, IGB_RX_HDR_LEN);
+		headlen = eth_get_headlen(skb->dev, va, IGB_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index f79728381e8a..e58a6e0dc4d9 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -1199,7 +1199,7 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > IGC_RX_HDR_LEN)
-		headlen = eth_get_headlen(va, IGC_RX_HDR_LEN);
+		headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 60cec3540dd7..7b903206b534 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1800,7 +1800,7 @@ static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
	 * we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
-	pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE);
+	pull_len = eth_get_headlen(skb->dev, va, IXGBE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 49e23afa05a2..d189ed247665 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -895,7 +895,8 @@ struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > IXGBEVF_RX_HDR_SIZE)
-		headlen = eth_get_headlen(xdp->data, IXGBEVF_RX_HDR_SIZE);
+		headlen = eth_get_headlen(skb->dev, xdp->data,
+					  IXGBEVF_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 40f3f98aa279..7b61126fcec9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -163,7 +163,7 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
	case MLX5_INLINE_MODE_NONE:
		return 0;
	case MLX5_INLINE_MODE_TCP_UDP:
-		hlen = eth_get_headlen(skb->data, skb_headlen(skb));
+		hlen = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
		if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
			hlen += VLAN_HLEN;
		break;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 24d0220b9ba0..9d72f8c76c15 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1965,7 +1965,8 @@ drop:
	if (frags) {
		/* Exercise flow dissector code path.
		 */
-		u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));
+		u32 headlen = eth_get_headlen(tun->dev, skb->data,
+					      skb_headlen(skb));

		if (unlikely(headlen > skb_headlen(skb))) {
			this_cpu_inc(tun->pcpu_stats->rx_dropped);
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index e2f3b21cd72a..c6c1930e28a0 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -33,7 +33,7 @@ struct device;
 int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr);
 unsigned char *arch_get_platform_mac_address(void);
 int nvmem_get_mac_address(struct device *dev, void *addrbuf);
-u32 eth_get_headlen(void *data, unsigned int max_len);
+u32 eth_get_headlen(const struct net_device *dev, void *data, unsigned int len);
 __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
 extern const struct header_ops eth_header_ops;
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 1e439549c419..0f9863dc4d44 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -119,13 +119,14 @@ EXPORT_SYMBOL(eth_header);

 /**
  * eth_get_headlen - determine the length of header for an ethernet frame
+ * @dev: pointer to network device
  * @data: pointer to start of frame
  * @len: total length of frame
  *
  * Make a best effort attempt to pull the length for all of the headers for
  * a given frame in a linear buffer.
  */
-u32 eth_get_headlen(void *data, unsigned int len)
+u32 eth_get_headlen(const struct net_device *dev, void *data, unsigned int len)
 {
	const unsigned int flags = FLOW_DISSECTOR_F_PARSE_1ST_FRAG;
	const struct ethhdr *eth = (const struct ethhdr *)data;
@@ -136,7 +137,7 @@ u32 eth_get_headlen(void *data, unsigned int len)
		return len;

	/* parse any remaining L2/L3 headers, check for L4 */
-	if (!skb_flow_dissect_flow_keys_basic(NULL, NULL, &keys, data,
+	if (!skb_flow_dissect_flow_keys_basic(dev_net(dev), NULL, &keys, data,
					      eth->h_proto, sizeof(*eth), len,
					      flags))
		return max_t(u32, keys.control.thoff, sizeof(*eth));
--
cgit v1.2.3
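
For reference, the first patch above converts drivers from the per-skb
skb->xmit_more bit to the per-cpu hint read by netdev_xmit_more(); the apparent
motivation is that the hint no longer occupies space in (or dirties) struct
sk_buff, and it remains valid even after a driver has handed off or freed the
skb. A minimal sketch of the doorbell-batching pattern this preserves follows;
foo_post_descriptors() and foo_ring_doorbell() are hypothetical placeholders,
while netdev_xmit_more(), netif_xmit_stopped() and netdev_get_tx_queue() are
the real APIs used throughout the patch:

	static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		foo_post_descriptors(skb);	/* hypothetical: queue skb on the TX ring */

		/* Ring the (expensive, MMIO) doorbell only when the stack has
		 * no follow-up frame queued, or when the queue just stopped
		 * and no later xmit call would come along to flush the ring.
		 */
		if (!netdev_xmit_more() || netif_xmit_stopped(txq))
			foo_ring_doorbell();	/* hypothetical MMIO write */

		return NETDEV_TX_OK;
	}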