Diffstat (limited to 'drivers/net/ethernet/intel/ixgbevf')
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/defines.h      |  24
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ethtool.c      | 230
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h      |  27
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 270
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c           |   5
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.h           |   2
6 files changed, 320 insertions(+), 238 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 58434584b16d..74901f7ef391 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -74,7 +74,7 @@ typedef u32 ixgbe_link_speed;
 #define IXGBE_RXDCTL_RLPML_EN	0x00008000
 
 /* DCA Control */
-#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */
 
 /* PSRTYPE bit definitions */
 #define IXGBE_PSRTYPE_TCPHDR	0x00000010
@@ -296,16 +296,16 @@ struct ixgbe_adv_tx_context_desc {
 #define IXGBE_TXDCTL_SWFLSH	0x04000000 /* Tx Desc. wr-bk flushing */
 #define IXGBE_TXDCTL_WTHRESH_SHIFT	16 /* shift to WTHRESH bits */
 
-#define IXGBE_DCA_RXCTRL_DESC_DCA_EN	(1 << 5) /* Rx Desc enable */
-#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN	(1 << 6) /* Rx Desc header ena */
-#define IXGBE_DCA_RXCTRL_DATA_DCA_EN	(1 << 7) /* Rx Desc payload ena */
-#define IXGBE_DCA_RXCTRL_DESC_RRO_EN	(1 << 9) /* Rx rd Desc Relax Order */
-#define IXGBE_DCA_RXCTRL_DATA_WRO_EN	(1 << 13) /* Rx wr data Relax Order */
-#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN	(1 << 15) /* Rx wr header RO */
-
-#define IXGBE_DCA_TXCTRL_DESC_DCA_EN	(1 << 5) /* DCA Tx Desc enable */
-#define IXGBE_DCA_TXCTRL_DESC_RRO_EN	(1 << 9) /* Tx rd Desc Relax Order */
-#define IXGBE_DCA_TXCTRL_DESC_WRO_EN	(1 << 11) /* Tx Desc writeback RO bit */
-#define IXGBE_DCA_TXCTRL_DATA_RRO_EN	(1 << 13) /* Tx rd data Relax Order */
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN	BIT(5)	/* Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN	BIT(6)	/* Rx Desc header ena */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN	BIT(7)	/* Rx Desc payload ena */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN	BIT(9)	/* Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN	BIT(13)	/* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN	BIT(15)	/* Rx wr header RO */
+
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN	BIT(5)	/* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN	BIT(9)	/* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN	BIT(11)	/* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN	BIT(13)	/* Tx rd data Relax Order */
 
 #endif /* _IXGBEVF_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index d7aa4b203f40..508e72c5f1c2 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -42,65 +42,54 @@
 #define IXGBE_ALL_RAR_ENTRIES 16
 
+enum {NETDEV_STATS, IXGBEVF_STATS};
+
 struct ixgbe_stats {
 	char stat_string[ETH_GSTRING_LEN];
-	struct {
-		int sizeof_stat;
-		int stat_offset;
-		int base_stat_offset;
-		int saved_reset_offset;
-	};
+	int type;
+	int sizeof_stat;
+	int stat_offset;
 };
 
-#define IXGBEVF_STAT(m, b, r) { \
-	.sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
-	.stat_offset = offsetof(struct ixgbevf_adapter, m), \
-	.base_stat_offset = offsetof(struct ixgbevf_adapter, b), \
-	.saved_reset_offset = offsetof(struct ixgbevf_adapter, r) \
+#define IXGBEVF_STAT(_name, _stat) { \
+	.stat_string = _name, \
+	.type = IXGBEVF_STATS, \
+	.sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, _stat), \
+	.stat_offset = offsetof(struct ixgbevf_adapter, _stat) \
 }
 
-#define IXGBEVF_ZSTAT(m) { \
-	.sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
-	.stat_offset = offsetof(struct ixgbevf_adapter, m), \
-	.base_stat_offset = -1, \
-	.saved_reset_offset = -1 \
+#define IXGBEVF_NETDEV_STAT(_net_stat) { \
+	.stat_string = #_net_stat, \
+	.type = NETDEV_STATS, \
+	.sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
+	.stat_offset = offsetof(struct net_device_stats, _net_stat) \
 }
 
-static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
-	{"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
-				    stats.saved_reset_vfgprc)},
-	{"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc,
-				    stats.saved_reset_vfgptc)},
-	{"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc,
-				  stats.saved_reset_vfgorc)},
-	{"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
-				  stats.saved_reset_vfgotc)},
-	{"tx_busy", IXGBEVF_ZSTAT(tx_busy)},
-	{"tx_restart_queue", IXGBEVF_ZSTAT(restart_queue)},
-	{"tx_timeout_count", IXGBEVF_ZSTAT(tx_timeout_count)},
-	{"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
-				   stats.saved_reset_vfmprc)},
-	{"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)},
-#ifdef BP_EXTENDED_STATS
-	{"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)},
-	{"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)},
-	{"rx_bp_misses", IXGBEVF_ZSTAT(bp_rx_missed)},
-	{"tx_bp_napi_yield", IXGBEVF_ZSTAT(bp_tx_yields)},
-	{"tx_bp_cleaned", IXGBEVF_ZSTAT(bp_tx_cleaned)},
-	{"tx_bp_misses", IXGBEVF_ZSTAT(bp_tx_missed)},
-#endif
+static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
+	IXGBEVF_NETDEV_STAT(rx_packets),
+	IXGBEVF_NETDEV_STAT(tx_packets),
+	IXGBEVF_NETDEV_STAT(rx_bytes),
+	IXGBEVF_NETDEV_STAT(tx_bytes),
+	IXGBEVF_STAT("tx_busy", tx_busy),
+	IXGBEVF_STAT("tx_restart_queue", restart_queue),
+	IXGBEVF_STAT("tx_timeout_count", tx_timeout_count),
+	IXGBEVF_NETDEV_STAT(multicast),
+	IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error),
 };
 
-#define IXGBE_QUEUE_STATS_LEN 0
-#define IXGBE_GLOBAL_STATS_LEN	ARRAY_SIZE(ixgbe_gstrings_stats)
+#define IXGBEVF_QUEUE_STATS_LEN ( \
+	(((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \
+	 ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \
+	 (sizeof(struct ixgbe_stats) / sizeof(u64)))
+#define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats)
 
-#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
+#define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN)
 
 static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
 	"Register test  (offline)",
 	"Link test   (on/offline)"
 };
 
-#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
+#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
 
 static int ixgbevf_get_settings(struct net_device *netdev,
 				struct ethtool_cmd *ecmd)
@@ -177,7 +166,8 @@ static void ixgbevf_get_regs(struct net_device *netdev,
 
 	memset(p, 0, regs_len);
 
-	regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
+	/* generate a number suitable for ethtool's register version */
+	regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;
 
 	/* General Registers */
 	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
@@ -392,13 +382,13 @@ clear_reset:
 	return err;
 }
 
-static int ixgbevf_get_sset_count(struct net_device *dev, int stringset)
+static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset)
 {
 	switch (stringset) {
 	case ETH_SS_TEST:
-		return IXGBE_TEST_LEN;
+		return IXGBEVF_TEST_LEN;
 	case ETH_SS_STATS:
-		return IXGBE_GLOBAL_STATS_LEN;
+		return IXGBEVF_STATS_LEN;
 	default:
 		return -EINVAL;
 	}
@@ -408,70 +398,138 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
 				      struct ethtool_stats *stats, u64 *data)
 {
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-	char *base = (char *)adapter;
-	int i;
-#ifdef BP_EXTENDED_STATS
-	u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0,
-	    tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
+	struct rtnl_link_stats64 temp;
+	const struct rtnl_link_stats64 *net_stats;
+	unsigned int start;
+	struct ixgbevf_ring *ring;
+	int i, j;
+	char *p;
 
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		rx_yields += adapter->rx_ring[i]->stats.yields;
-		rx_cleaned += adapter->rx_ring[i]->stats.cleaned;
-		rx_yields += adapter->rx_ring[i]->stats.yields;
-	}
+	ixgbevf_update_stats(adapter);
+	net_stats = dev_get_stats(netdev, &temp);
+	for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
+		switch (ixgbevf_gstrings_stats[i].type) {
+		case NETDEV_STATS:
+			p = (char *)net_stats +
+					ixgbevf_gstrings_stats[i].stat_offset;
+			break;
+		case IXGBEVF_STATS:
+			p = (char *)adapter +
+					ixgbevf_gstrings_stats[i].stat_offset;
+			break;
+		default:
+			data[i] = 0;
+			continue;
+		}
 
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		tx_yields += adapter->tx_ring[i]->stats.yields;
-		tx_cleaned += adapter->tx_ring[i]->stats.cleaned;
-		tx_yields += adapter->tx_ring[i]->stats.yields;
+		data[i] = (ixgbevf_gstrings_stats[i].sizeof_stat ==
+			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
 
-	adapter->bp_rx_yields = rx_yields;
-	adapter->bp_rx_cleaned = rx_cleaned;
-	adapter->bp_rx_missed = rx_missed;
+	/* populate Tx queue data */
+	for (j = 0; j < adapter->num_tx_queues; j++) {
+		ring = adapter->tx_ring[j];
+		if (!ring) {
+			data[i++] = 0;
+			data[i++] = 0;
+#ifdef BP_EXTENDED_STATS
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+#endif
+			continue;
+		}
 
-	adapter->bp_tx_yields = tx_yields;
-	adapter->bp_tx_cleaned = tx_cleaned;
-	adapter->bp_tx_missed = tx_missed;
+		do {
+			start = u64_stats_fetch_begin_irq(&ring->syncp);
+			data[i] = ring->stats.packets;
+			data[i + 1] = ring->stats.bytes;
+		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+		i += 2;
+#ifdef BP_EXTENDED_STATS
+		data[i] = ring->stats.yields;
+		data[i + 1] = ring->stats.misses;
+		data[i + 2] = ring->stats.cleaned;
+		i += 3;
 #endif
+	}
 
-	ixgbevf_update_stats(adapter);
-	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
-		char *p = base + ixgbe_gstrings_stats[i].stat_offset;
-		char *b = base + ixgbe_gstrings_stats[i].base_stat_offset;
-		char *r = base + ixgbe_gstrings_stats[i].saved_reset_offset;
-
-		if (ixgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) {
-			if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
-				data[i] = *(u64 *)p - *(u64 *)b + *(u64 *)r;
-			else
-				data[i] = *(u64 *)p;
-		} else {
-			if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
-				data[i] = *(u32 *)p - *(u32 *)b + *(u32 *)r;
-			else
-				data[i] = *(u32 *)p;
+	/* populate Rx queue data */
+	for (j = 0; j < adapter->num_rx_queues; j++) {
+		ring = adapter->rx_ring[j];
+		if (!ring) {
+			data[i++] = 0;
+			data[i++] = 0;
+#ifdef BP_EXTENDED_STATS
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+#endif
+			continue;
 		}
+
+		do {
+			start = u64_stats_fetch_begin_irq(&ring->syncp);
+			data[i] = ring->stats.packets;
+			data[i + 1] = ring->stats.bytes;
+		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+		i += 2;
+#ifdef BP_EXTENDED_STATS
+		data[i] = ring->stats.yields;
+		data[i + 1] = ring->stats.misses;
+		data[i + 2] = ring->stats.cleaned;
+		i += 3;
+#endif
 	}
 }
 
 static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
 				u8 *data)
 {
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 	char *p = (char *)data;
 	int i;
 
 	switch (stringset) {
 	case ETH_SS_TEST:
 		memcpy(data, *ixgbe_gstrings_test,
-		       IXGBE_TEST_LEN * ETH_GSTRING_LEN);
+		       IXGBEVF_TEST_LEN * ETH_GSTRING_LEN);
 		break;
 	case ETH_SS_STATS:
-		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
-			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
+		for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
+			memcpy(p, ixgbevf_gstrings_stats[i].stat_string,
 			       ETH_GSTRING_LEN);
 			p += ETH_GSTRING_LEN;
 		}
+
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			sprintf(p, "tx_queue_%u_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "tx_queue_%u_bytes", i);
+			p += ETH_GSTRING_LEN;
+#ifdef BP_EXTENDED_STATS
+			sprintf(p, "tx_queue_%u_bp_napi_yield", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "tx_queue_%u_bp_misses", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "tx_queue_%u_bp_cleaned", i);
+			p += ETH_GSTRING_LEN;
+#endif /* BP_EXTENDED_STATS */
+		}
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			sprintf(p, "rx_queue_%u_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_bytes", i);
+			p += ETH_GSTRING_LEN;
+#ifdef BP_EXTENDED_STATS
+			sprintf(p, "rx_queue_%u_bp_poll_yield", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_bp_misses", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_bp_cleaned", i);
+			p += ETH_GSTRING_LEN;
+#endif /* BP_EXTENDED_STATS */
+		}
 		break;
 	}
 }
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 991eeae81473..aa28c4fb1a43 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -166,10 +166,10 @@ struct ixgbevf_ring {
 
 #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
 
-#define IXGBE_TX_FLAGS_CSUM		(u32)(1)
-#define IXGBE_TX_FLAGS_VLAN		(u32)(1 << 1)
-#define IXGBE_TX_FLAGS_TSO		(u32)(1 << 2)
-#define IXGBE_TX_FLAGS_IPV4		(u32)(1 << 3)
+#define IXGBE_TX_FLAGS_CSUM		BIT(0)
+#define IXGBE_TX_FLAGS_VLAN		BIT(1)
+#define IXGBE_TX_FLAGS_TSO		BIT(2)
+#define IXGBE_TX_FLAGS_IPV4		BIT(3)
 #define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0x0000e000
 #define IXGBE_TX_FLAGS_VLAN_SHIFT	16
@@ -403,13 +403,6 @@ struct ixgbevf_adapter {
 	u32 alloc_rx_page_failed;
 	u32 alloc_rx_buff_failed;
 
-	/* Some features need tri-state capability,
-	 * thus the additional *_CAPABLE flags.
-	 */
-	u32 flags;
-#define IXGBEVF_FLAG_RESET_REQUESTED		(u32)(1)
-#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED	(u32)(1 << 2)
-
 	struct msix_entry *msix_entries;
 
 	/* OS defined structs */
@@ -429,16 +422,6 @@ struct ixgbevf_adapter {
 	unsigned int tx_ring_count;
 	unsigned int rx_ring_count;
 
-#ifdef BP_EXTENDED_STATS
-	u64 bp_rx_yields;
-	u64 bp_rx_cleaned;
-	u64 bp_rx_missed;
-
-	u64 bp_tx_yields;
-	u64 bp_tx_cleaned;
-	u64 bp_tx_missed;
-#endif
-
 	u8 __iomem *io_addr; /* Mainly for iounmap use */
 	u32 link_speed;
 	bool link_up;
@@ -461,6 +444,8 @@ enum ixbgevf_state_t {
 	__IXGBEVF_REMOVING,
 	__IXGBEVF_SERVICE_SCHED,
 	__IXGBEVF_SERVICE_INITED,
+	__IXGBEVF_RESET_REQUESTED,
+	__IXGBEVF_QUEUE_RESET_REQUESTED,
 };
 
 enum ixgbevf_boards {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index b0edae94d73d..319e25f29883 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -268,7 +268,7 @@ static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
 {
 	/* Do the reset outside of interrupt context */
 	if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
-		adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
+		set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
 		ixgbevf_service_event_schedule(adapter);
 	}
 }
@@ -288,9 +288,10 @@ static void ixgbevf_tx_timeout(struct net_device *netdev)
  * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
  * @q_vector: board private structure
  * @tx_ring: tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
  **/
 static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
-				 struct ixgbevf_ring *tx_ring)
+				 struct ixgbevf_ring *tx_ring, int napi_budget)
 {
 	struct ixgbevf_adapter *adapter = q_vector->adapter;
 	struct ixgbevf_tx_buffer *tx_buffer;
@@ -328,7 +329,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 		total_packets += tx_buffer->gso_segs;
 
 		/* free the skb */
-		dev_kfree_skb_any(tx_buffer->skb);
+		napi_consume_skb(tx_buffer->skb, napi_budget);
 
 		/* unmap skb header data */
 		dma_unmap_single(tx_ring->dev,
@@ -1013,8 +1014,10 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
 	int per_ring_budget, work_done = 0;
 	bool clean_complete = true;
 
-	ixgbevf_for_each_ring(ring, q_vector->tx)
-		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
+	ixgbevf_for_each_ring(ring, q_vector->tx) {
+		if (!ixgbevf_clean_tx_irq(q_vector, ring, budget))
+			clean_complete = false;
+	}
 
 	if (budget <= 0)
 		return budget;
@@ -1035,7 +1038,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
 		int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
 						   per_ring_budget);
 		work_done += cleaned;
-		clean_complete &= (cleaned < per_ring_budget);
+		if (cleaned >= per_ring_budget)
+			clean_complete = false;
 	}
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
@@ -1052,7 +1056,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
 	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
 	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
 		ixgbevf_irq_enable_queues(adapter,
-					  1 << q_vector->v_idx);
+					  BIT(q_vector->v_idx));
 
 	return 0;
 }
@@ -1154,14 +1158,14 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
 		}
 
 		/* add q_vector eims value to global eims_enable_mask */
-		adapter->eims_enable_mask |= 1 << v_idx;
+		adapter->eims_enable_mask |= BIT(v_idx);
 
 		ixgbevf_write_eitr(q_vector);
 	}
 
 	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
 	/* setup eims_other and add value to global eims_enable_mask */
-	adapter->eims_other = 1 << v_idx;
+	adapter->eims_other = BIT(v_idx);
 	adapter->eims_enable_mask |= adapter->eims_other;
 }
@@ -1585,8 +1589,8 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
 	txdctl |= (8 << 16);    /* WTHRESH = 8 */
 
 	/* Setting PTHRESH to 32 both improves performance */
-	txdctl |= (1 << 8) |    /* HTHRESH = 1 */
-		  32;           /* PTHRESH = 32 */
+	txdctl |= (1u << 8) |   /* HTHRESH = 1 */
+		   32;          /* PTHRESH = 32 */
 
 	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
 
@@ -1642,7 +1646,7 @@ static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
 		      IXGBE_PSRTYPE_L2HDR;
 
 	if (adapter->num_rx_queues > 1)
-		psrtype |= 1 << 29;
+		psrtype |= BIT(29);
 
 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
 }
@@ -1984,7 +1988,7 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
 		hw->mbx.timeout = 0;
 
 		/* wait for watchdog to come around and bail us out */
-		adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
+		set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state);
 	}
 
 	return 0;
@@ -2052,7 +2056,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
 	spin_lock_bh(&adapter->mbx_lock);
 
 	while (api[idx] != ixgbe_mbox_api_unknown) {
-		err = ixgbevf_negotiate_api_version(hw, api[idx]);
+		err = hw->mac.ops.negotiate_api_version(hw, api[idx]);
 		if (!err)
 			break;
 		idx++;
@@ -2749,11 +2753,9 @@ static void ixgbevf_service_timer(unsigned long data)
 
 static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
 {
-	if (!(adapter->flags & IXGBEVF_FLAG_RESET_REQUESTED))
+	if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state))
 		return;
 
-	adapter->flags &= ~IXGBEVF_FLAG_RESET_REQUESTED;
-
 	/* If we're already down or resetting, just bail */
 	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
 	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
@@ -2795,7 +2797,7 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
 		struct ixgbevf_q_vector *qv = adapter->q_vector[i];
 
 		if (qv->rx.ring || qv->tx.ring)
-			eics |= 1 << i;
+			eics |= BIT(i);
 	}
 
 	/* Cause software interrupt to ensure rings are cleaned */
@@ -2821,7 +2823,7 @@ static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
 
 	/* if check for link returns error we will need to reset */
 	if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
-		adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
+		set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
 		link_up = false;
 	}
 
@@ -3222,11 +3224,10 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
 {
 	struct net_device *dev = adapter->netdev;
 
-	if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
+	if (!test_and_clear_bit(__IXGBEVF_QUEUE_RESET_REQUESTED,
+				&adapter->state))
 		return;
 
-	adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
-
 	/* if interface is down do nothing */
 	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
 	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
@@ -3271,9 +3272,18 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
 		       struct ixgbevf_tx_buffer *first,
 		       u8 *hdr_len)
 {
+	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
 	struct sk_buff *skb = first->skb;
-	u32 vlan_macip_lens, type_tucmd;
-	u32 mss_l4len_idx, l4len;
+	union {
+		struct iphdr *v4;
+		struct ipv6hdr *v6;
+		unsigned char *hdr;
+	} ip;
+	union {
+		struct tcphdr *tcp;
+		unsigned char *hdr;
+	} l4;
+	u32 paylen, l4_offset;
 	int err;
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -3286,49 +3296,53 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
 	if (err < 0)
 		return err;
 
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_checksum_start(skb);
+
 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
 	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
 
-	if (first->protocol == htons(ETH_P_IP)) {
-		struct iphdr *iph = ip_hdr(skb);
-
-		iph->tot_len = 0;
-		iph->check = 0;
-		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-							 iph->daddr, 0,
-							 IPPROTO_TCP,
-							 0);
+	/* initialize outer IP header fields */
+	if (ip.v4->version == 4) {
+		/* IP header will have to cancel out any data that
+		 * is not a part of the outer IP header
+		 */
+		ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+						  csum_unfold(l4.tcp->check)));
 		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+
+		ip.v4->tot_len = 0;
 		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
 				   IXGBE_TX_FLAGS_CSUM |
 				   IXGBE_TX_FLAGS_IPV4;
-	} else if (skb_is_gso_v6(skb)) {
-		ipv6_hdr(skb)->payload_len = 0;
-		tcp_hdr(skb)->check =
-		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-				     &ipv6_hdr(skb)->daddr,
-				     0, IPPROTO_TCP, 0);
+	} else {
+		ip.v6->payload_len = 0;
 		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
 				   IXGBE_TX_FLAGS_CSUM;
 	}
 
-	/* compute header lengths */
-	l4len = tcp_hdrlen(skb);
-	*hdr_len += l4len;
-	*hdr_len = skb_transport_offset(skb) + l4len;
+	/* determine offset of inner transport header */
+	l4_offset = l4.hdr - skb->data;
 
-	/* update GSO size and bytecount with header size */
+	/* compute length of segmentation header */
+	*hdr_len = (l4.tcp->doff * 4) + l4_offset;
+
+	/* remove payload length from inner checksum */
+	paylen = skb->len - l4_offset;
+	csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
+
+	/* update gso size and bytecount with header size */
 	first->gso_segs = skb_shinfo(skb)->gso_segs;
 	first->bytecount += (first->gso_segs - 1) * *hdr_len;
 
 	/* mss_l4len_id: use 1 as index for TSO */
-	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+	mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
 	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
-	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+	mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);
 
 	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
-	vlan_macip_lens = skb_network_header_len(skb);
-	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+	vlan_macip_lens = l4.hdr - ip.hdr;
+	vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
 	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
 	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
@@ -3337,76 +3351,55 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
 	return 1;
 }
 
+static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb)
+{
+	unsigned int offset = 0;
+
+	ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
+
+	return offset == skb_checksum_start_offset(skb);
+}
+
 static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
 			    struct ixgbevf_tx_buffer *first)
 {
 	struct sk_buff *skb = first->skb;
 	u32 vlan_macip_lens = 0;
-	u32 mss_l4len_idx = 0;
 	u32 type_tucmd = 0;
 
-	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		u8 l4_hdr = 0;
-		__be16 frag_off;
-
-		switch (first->protocol) {
-		case htons(ETH_P_IP):
-			vlan_macip_lens |= skb_network_header_len(skb);
-			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
-			l4_hdr = ip_hdr(skb)->protocol;
-			break;
-		case htons(ETH_P_IPV6):
-			vlan_macip_lens |= skb_network_header_len(skb);
-			l4_hdr = ipv6_hdr(skb)->nexthdr;
-			if (likely(skb_network_header_len(skb) ==
-				   sizeof(struct ipv6hdr)))
-				break;
-			ipv6_skip_exthdr(skb, skb_network_offset(skb) +
-					      sizeof(struct ipv6hdr),
-					 &l4_hdr, &frag_off);
-			if (unlikely(frag_off))
-				l4_hdr = NEXTHDR_FRAGMENT;
-			break;
-		default:
-			break;
-		}
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		goto no_csum;
 
-		switch (l4_hdr) {
-		case IPPROTO_TCP:
-			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
-			mss_l4len_idx = tcp_hdrlen(skb) <<
-					IXGBE_ADVTXD_L4LEN_SHIFT;
-			break;
-		case IPPROTO_SCTP:
-			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
-			mss_l4len_idx = sizeof(struct sctphdr) <<
-					IXGBE_ADVTXD_L4LEN_SHIFT;
-			break;
-		case IPPROTO_UDP:
-			mss_l4len_idx = sizeof(struct udphdr) <<
-					IXGBE_ADVTXD_L4LEN_SHIFT;
+	switch (skb->csum_offset) {
+	case offsetof(struct tcphdr, check):
+		type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+		/* fall through */
+	case offsetof(struct udphdr, check):
+		break;
+	case offsetof(struct sctphdr, checksum):
+		/* validate that this is actually an SCTP request */
+		if (((first->protocol == htons(ETH_P_IP)) &&
+		     (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
+		    ((first->protocol == htons(ETH_P_IPV6)) &&
+		     ixgbevf_ipv6_csum_is_sctp(skb))) {
+			type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
 			break;
-		default:
-			if (unlikely(net_ratelimit())) {
-				dev_warn(tx_ring->dev,
-					 "partial checksum, l3 proto=%x, l4 proto=%x\n",
-					 first->protocol, l4_hdr);
-			}
-			skb_checksum_help(skb);
-			goto no_csum;
 		}
-
-		/* update TX checksum flag */
-		first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
+		/* fall through */
+	default:
+		skb_checksum_help(skb);
+		goto no_csum;
 	}
-
+	/* update TX checksum flag */
+	first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
+	vlan_macip_lens = skb_checksum_start_offset(skb) -
+			  skb_network_offset(skb);
 no_csum:
 	/* vlan_macip_lens: MACLEN, VLAN tag */
 	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
 	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
-	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
-			    type_tucmd, mss_l4len_idx);
+	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
 }
 
 static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
@@ -3442,7 +3435,7 @@ static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
 
 	/* use index 1 context for TSO/FSO/FCOE */
 	if (tx_flags & IXGBE_TX_FLAGS_TSO)
-		olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
+		olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);
 
 	/* Check Context must be set if Tx switch is enabled, which it
 	 * always is for case where virtual functions are running
@@ -3890,6 +3883,40 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
 	return stats;
 }
 
+#define IXGBEVF_MAX_MAC_HDR_LEN		127
+#define IXGBEVF_MAX_NETWORK_HDR_LEN	511
+
+static netdev_features_t
+ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
+		       netdev_features_t features)
+{
+	unsigned int network_hdr_len, mac_hdr_len;
+
+	/* Make certain the headers can be described by a context descriptor */
+	mac_hdr_len = skb_network_header(skb) - skb->data;
+	if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
+		return features & ~(NETIF_F_HW_CSUM |
+				    NETIF_F_SCTP_CRC |
+				    NETIF_F_HW_VLAN_CTAG_TX |
+				    NETIF_F_TSO |
+				    NETIF_F_TSO6);
+
+	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+	if (unlikely(network_hdr_len > IXGBEVF_MAX_NETWORK_HDR_LEN))
+		return features & ~(NETIF_F_HW_CSUM |
+				    NETIF_F_SCTP_CRC |
+				    NETIF_F_TSO |
+				    NETIF_F_TSO6);
+
+	/* We can only support IPV4 TSO in tunnels if we can mangle the
+	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
+	 */
+	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+		features &= ~NETIF_F_TSO;
+
+	return features;
+}
+
 static const struct net_device_ops ixgbevf_netdev_ops = {
 	.ndo_open		= ixgbevf_open,
 	.ndo_stop		= ixgbevf_close,
@@ -3908,7 +3935,7 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= ixgbevf_netpoll,
 #endif
-	.ndo_features_check	= passthru_features_check,
+	.ndo_features_check	= ixgbevf_features_check,
 };
 
 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
@@ -4013,26 +4040,37 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 
 	netdev->hw_features = NETIF_F_SG |
-			      NETIF_F_IP_CSUM |
-			      NETIF_F_IPV6_CSUM |
 			      NETIF_F_TSO |
 			      NETIF_F_TSO6 |
-			      NETIF_F_RXCSUM;
+			      NETIF_F_RXCSUM |
+			      NETIF_F_HW_CSUM |
+			      NETIF_F_SCTP_CRC;
+
+#define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+				      NETIF_F_GSO_GRE_CSUM | \
+				      NETIF_F_GSO_IPIP | \
+				      NETIF_F_GSO_SIT | \
+				      NETIF_F_GSO_UDP_TUNNEL | \
+				      NETIF_F_GSO_UDP_TUNNEL_CSUM)
 
-	netdev->features = netdev->hw_features |
-			   NETIF_F_HW_VLAN_CTAG_TX |
-			   NETIF_F_HW_VLAN_CTAG_RX |
-			   NETIF_F_HW_VLAN_CTAG_FILTER;
+	netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES;
+	netdev->hw_features |= NETIF_F_GSO_PARTIAL |
+			       IXGBEVF_GSO_PARTIAL_FEATURES;
 
-	netdev->vlan_features |= NETIF_F_TSO |
-				 NETIF_F_TSO6 |
-				 NETIF_F_IP_CSUM |
-				 NETIF_F_IPV6_CSUM |
-				 NETIF_F_SG;
+	netdev->features = netdev->hw_features;
 
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
+	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+	netdev->mpls_features |= NETIF_F_HW_CSUM;
+	netdev->hw_enc_features |= netdev->vlan_features;
+
+	/* set this bit last since it cannot be part of vlan_features */
+	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+			    NETIF_F_HW_VLAN_CTAG_RX |
+			    NETIF_F_HW_VLAN_CTAG_TX;
+
 	netdev->priv_flags |= IFF_UNICAST_FLT;
 
 	if (IXGBE_REMOVED(hw->hw_addr)) {
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 4d613a4f2a7f..987ad69d4918 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -670,11 +670,11 @@ void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
 }
 
 /**
- * ixgbevf_negotiate_api_version - Negotiate supported API version
+ * ixgbevf_negotiate_api_version_vf - Negotiate supported API version
  * @hw: pointer to the HW structure
  * @api: integer containing requested API version
  **/
-int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
+static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
 {
 	int err;
 	u32 msg[3];
@@ -769,6 +769,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
 	.stop_adapter		= ixgbevf_stop_hw_vf,
 	.setup_link		= ixgbevf_setup_mac_link_vf,
 	.check_link		= ixgbevf_check_mac_link_vf,
+	.negotiate_api_version	= ixgbevf_negotiate_api_version_vf,
 	.set_rar		= ixgbevf_set_rar_vf,
 	.update_mc_addr_list	= ixgbevf_update_mc_addr_list_vf,
 	.update_xcast_mode	= ixgbevf_update_xcast_mode,
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index ef9f7736b4dc..8e623f9327ae 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -51,6 +51,7 @@ struct ixgbe_mac_operations {
 	s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
 	s32 (*stop_adapter)(struct ixgbe_hw *);
 	s32 (*get_bus_info)(struct ixgbe_hw *);
+	s32 (*negotiate_api_version)(struct ixgbe_hw *hw, int api);
 
 	/* Link */
 	s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
@@ -208,7 +209,6 @@ static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg,
 #define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o)
 
 void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
-int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
 int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
 		       unsigned int *default_tc);
 int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues);
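
A note on the statistics rework above: ixgbevf_get_ethtool_stats() now snapshots each ring's 64-bit packet and byte counters inside a u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() loop, so a reader on a 32-bit kernel can never observe a torn counter while the Tx/Rx hot path is updating it. A minimal sketch of the same discipline, with a hypothetical struct pkt_ring standing in for struct ixgbevf_ring:

    /* Minimal sketch of the u64_stats retry pattern; struct pkt_ring and
     * its fields are hypothetical stand-ins for struct ixgbevf_ring.
     */
    #include <linux/types.h>
    #include <linux/u64_stats_sync.h>

    struct pkt_ring {
            u64 packets;
            u64 bytes;
            struct u64_stats_sync syncp;
    };

    static void pkt_ring_read(struct pkt_ring *ring, u64 *packets, u64 *bytes)
    {
            unsigned int start;

            do {
                    /* snapshot the sequence count before reading */
                    start = u64_stats_fetch_begin_irq(&ring->syncp);
                    *packets = ring->packets;
                    *bytes = ring->bytes;
                    /* retry if a writer updated the counters in between */
            } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
    }

The writer side pairs this with u64_stats_update_begin()/u64_stats_update_end() around the counter increments; on 64-bit kernels both sides compile down to plain loads and stores.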
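The ixgbevf_tso() rework drops the old per-protocol pseudo-header recomputation in favor of checksum arithmetic on what the stack already provided: for CHECKSUM_PARTIAL the TCP checksum field holds a pseudo-header sum that includes the payload length, and the hardware wants that length folded back out before segmentation. A sketch of the core fix-up, assuming a linear TCP skb; the helper name is illustrative, not driver code:

    /* Sketch of the TSO checksum fix-up, assuming a linear TCP skb marked
     * CHECKSUM_PARTIAL; tso_strip_paylen() is an illustrative name.
     */
    #include <linux/skbuff.h>
    #include <linux/tcp.h>
    #include <net/checksum.h>

    static void tso_strip_paylen(struct tcphdr *tcp, const struct sk_buff *skb,
                                 unsigned int l4_offset)
    {
            u32 paylen = skb->len - l4_offset;

            /* the stack's pseudo-header sum includes the payload length;
             * fold it back out so the value covers a zero-length payload
             */
            csum_replace_by_diff(&tcp->check, (__force __wsum)htonl(paylen));
    }

The IPv4 header check is rebuilt the same way, from lco_csum() plus the inverted TCP checksum, instead of zeroing the header fields and recomputing from scratch.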
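Likewise, ixgbevf_tx_csum() now keys off skb->csum_offset instead of parsing L3/L4 headers, which is what lets the probe path advertise the protocol-agnostic NETIF_F_HW_CSUM (plus NETIF_F_SCTP_CRC) in place of NETIF_F_IP_CSUM/NETIF_F_IPV6_CSUM. A rough sketch of the dispatch idea; enum l4_kind and classify_l4() are hypothetical, and the real code falls back to skb_checksum_help() for anything it cannot offload:

    /* Rough analogue of the csum_offset dispatch in ixgbevf_tx_csum();
     * enum l4_kind and classify_l4() are hypothetical.
     */
    #include <linux/skbuff.h>
    #include <linux/tcp.h>
    #include <linux/udp.h>

    enum l4_kind { L4_TCP, L4_UDP, L4_OTHER };

    static enum l4_kind classify_l4(const struct sk_buff *skb)
    {
            /* csum_offset says where, relative to csum_start, the stack
             * wants the checksum written -- enough to infer the protocol
             */
            switch (skb->csum_offset) {
            case offsetof(struct tcphdr, check):
                    return L4_TCP;
            case offsetof(struct udphdr, check):
                    return L4_UDP;
            default:
                    /* e.g. SCTP CRC32c: needs extra validation or a
                     * software fallback via skb_checksum_help()
                     */
                    return L4_OTHER;
            }
    }

SCTP still needs the extra validation seen in the patch because a matching checksum field offset alone does not prove the packet is actually SCTP.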