Diffstat (limited to 'drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c')
-rw-r--r--	drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c	459
1 file changed, 321 insertions(+), 138 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index de1ad506665d..c3db6cd69b68 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -58,7 +58,7 @@ const char ixgbevf_driver_name[] = "ixgbevf";
 static const char ixgbevf_driver_string[] =
         "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
 
-#define DRV_VERSION "2.6.0-k"
+#define DRV_VERSION "2.7.12-k"
 const char ixgbevf_driver_version[] = DRV_VERSION;
 static char ixgbevf_copyright[] =
         "Copyright (c) 2009 - 2012 Intel Corporation.";
@@ -99,6 +99,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
 /* forward decls */
 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
+static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
 
 static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
                                            struct ixgbevf_ring *rx_ring,
@@ -120,7 +121,6 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
  * @direction: 0 for Rx, 1 for Tx, -1 for other causes
  * @queue: queue to map the corresponding interrupt to
  * @msix_vector: the vector to map to the corresponding queue
- *
  */
 static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
                              u8 queue, u8 msix_vector)
@@ -287,17 +287,19 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
         if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
                 __vlan_hwaccel_put_tag(skb, tag);
 
-        napi_gro_receive(&q_vector->napi, skb);
+        if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
+                napi_gro_receive(&q_vector->napi, skb);
+        else
+                netif_rx(skb);
 }
 
 /**
  * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
- * @adapter: address of board private structure
+ * @ring: pointer to Rx descriptor ring structure
  * @status_err: hardware indication of status of receive
  * @skb: skb currently being received and modified
  **/
-static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
-                                       struct ixgbevf_ring *ring,
+static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
                                        u32 status_err, struct sk_buff *skb)
 {
         skb_checksum_none_assert(skb);
@@ -309,7 +311,7 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
         /* if IP and error */
         if ((status_err & IXGBE_RXD_STAT_IPCS) &&
             (status_err & IXGBE_RXDADV_ERR_IPE)) {
-                adapter->hw_csum_rx_error++;
+                ring->hw_csum_rx_error++;
                 return;
         }
 
@@ -317,13 +319,13 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
                 return;
 
         if (status_err & IXGBE_RXDADV_ERR_TCPE) {
-                adapter->hw_csum_rx_error++;
+                ring->hw_csum_rx_error++;
                 return;
         }
 
         /* It must be a TCP or UDP packet with a valid checksum */
         skb->ip_summed = CHECKSUM_UNNECESSARY;
-        adapter->hw_csum_rx_good++;
+        ring->hw_csum_rx_good++;
 }
 
 /**
@@ -337,15 +339,16 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
         struct pci_dev *pdev = adapter->pdev;
         union ixgbe_adv_rx_desc *rx_desc;
         struct ixgbevf_rx_buffer *bi;
-        struct sk_buff *skb;
         unsigned int i = rx_ring->next_to_use;
 
         bi = &rx_ring->rx_buffer_info[i];
 
         while (cleaned_count--) {
                 rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
-                skb = bi->skb;
-                if (!skb) {
+
+                if (!bi->skb) {
+                        struct sk_buff *skb;
+
                         skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
                                                         rx_ring->rx_buf_len);
                         if (!skb) {
@@ -353,11 +356,16 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
                                 goto no_buffers;
                         }
                         bi->skb = skb;
-                }
-                if (!bi->dma) {
+
                         bi->dma = dma_map_single(&pdev->dev, skb->data,
                                                  rx_ring->rx_buf_len,
                                                  DMA_FROM_DEVICE);
+                        if (dma_mapping_error(&pdev->dev, bi->dma)) {
+                                dev_kfree_skb(skb);
+                                bi->skb = NULL;
+                                dev_err(&pdev->dev, "RX DMA map failed\n");
+                                break;
+                        }
                 }
                 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
 
@@ -370,7 +378,6 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
 no_buffers:
         if (rx_ring->next_to_use != i) {
                 rx_ring->next_to_use = i;
-                ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
         }
 }
 
@@ -454,7 +461,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                         goto next_desc;
                 }
 
-                ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb);
+                ixgbevf_rx_checksum(rx_ring, staterr, skb);
 
                 /* probably a little skewed due to removing CRC */
                 total_rx_bytes += skb->len;
@@ -471,6 +478,16 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                 }
                 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 
+                /* Workaround hardware that can't do proper VEPA multicast
+                 * source pruning.
+                 */
+                if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
+                    !(compare_ether_addr(adapter->netdev->dev_addr,
+                                         eth_hdr(skb)->h_source))) {
+                        dev_kfree_skb_irq(skb);
+                        goto next_desc;
+                }
+
                 ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);
 
 next_desc:
@@ -533,9 +550,11 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
         else
                 per_ring_budget = budget;
 
+        adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
         ixgbevf_for_each_ring(ring, q_vector->rx)
                 clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
                                                        per_ring_budget);
+        adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
 
         /* If all work not completed, return budget and keep polling */
         if (!clean_complete)
@@ -731,19 +750,43 @@ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
 static irqreturn_t ixgbevf_msix_other(int irq, void *data)
 {
         struct ixgbevf_adapter *adapter = data;
+        struct pci_dev *pdev = adapter->pdev;
         struct ixgbe_hw *hw = &adapter->hw;
+        u32 msg;
+        bool got_ack = false;
 
         hw->mac.get_link_status = 1;
+        if (!hw->mbx.ops.check_for_ack(hw))
+                got_ack = true;
 
-        if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
-                mod_timer(&adapter->watchdog_timer, jiffies);
+        if (!hw->mbx.ops.check_for_msg(hw)) {
+                hw->mbx.ops.read(hw, &msg, 1);
+
+                if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) {
+                        mod_timer(&adapter->watchdog_timer,
+                                  round_jiffies(jiffies + 1));
+                        adapter->link_up = false;
+                }
+
+                if (msg & IXGBE_VT_MSGTYPE_NACK)
+                        dev_info(&pdev->dev,
+                                 "Last Request of type %2.2x to PF Nacked\n",
+                                 msg & 0xFF);
+                hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
+        }
+
+        /* checking for the ack clears the PFACK bit.  Place
+         * it back in the v2p_mailbox cache so that anyone
+         * polling for an ack will not miss it
+         */
+        if (got_ack)
+                hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
 
         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
 
         return IRQ_HANDLED;
 }
 
-
 /**
  * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
  * @irq: unused
@@ -1065,20 +1108,20 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
                 max_frame += VLAN_HLEN;
 
         /*
-         * Make best use of allocation by using all but 1K of a
-         * power of 2 allocation that will be used for skb->head.
+         * Allocate buffer sizes that fit well into 32K and
+         * take into account max frame size of 9.5K
          */
         if ((hw->mac.type == ixgbe_mac_X540_vf) &&
             (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
                 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
-        else if (max_frame <= IXGBEVF_RXBUFFER_3K)
-                rx_buf_len = IXGBEVF_RXBUFFER_3K;
-        else if (max_frame <= IXGBEVF_RXBUFFER_7K)
-                rx_buf_len = IXGBEVF_RXBUFFER_7K;
-        else if (max_frame <= IXGBEVF_RXBUFFER_15K)
-                rx_buf_len = IXGBEVF_RXBUFFER_15K;
+        else if (max_frame <= IXGBEVF_RXBUFFER_2K)
+                rx_buf_len = IXGBEVF_RXBUFFER_2K;
+        else if (max_frame <= IXGBEVF_RXBUFFER_4K)
+                rx_buf_len = IXGBEVF_RXBUFFER_4K;
+        else if (max_frame <= IXGBEVF_RXBUFFER_8K)
+                rx_buf_len = IXGBEVF_RXBUFFER_8K;
         else
-                rx_buf_len = IXGBEVF_MAX_RXBUFFER;
+                rx_buf_len = IXGBEVF_RXBUFFER_10K;
 
         for (i = 0; i < adapter->num_rx_queues; i++)
                 adapter->rx_ring[i].rx_buf_len = rx_buf_len;
@@ -1128,15 +1171,12 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
         struct ixgbe_hw *hw = &adapter->hw;
         int err;
 
-        if (!hw->mac.ops.set_vfta)
-                return -EOPNOTSUPP;
-
-        spin_lock(&adapter->mbx_lock);
+        spin_lock_bh(&adapter->mbx_lock);
 
         /* add VID to filter table */
         err = hw->mac.ops.set_vfta(hw, vid, 0, true);
 
-        spin_unlock(&adapter->mbx_lock);
+        spin_unlock_bh(&adapter->mbx_lock);
 
         /* translate error return types so error makes sense */
         if (err == IXGBE_ERR_MBX)
@@ -1156,13 +1196,12 @@ static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
         struct ixgbe_hw *hw = &adapter->hw;
         int err = -EOPNOTSUPP;
 
-        spin_lock(&adapter->mbx_lock);
+        spin_lock_bh(&adapter->mbx_lock);
 
         /* remove VID from filter table */
-        if (hw->mac.ops.set_vfta)
-                err = hw->mac.ops.set_vfta(hw, vid, 0, false);
+        err = hw->mac.ops.set_vfta(hw, vid, 0, false);
 
-        spin_unlock(&adapter->mbx_lock);
+        spin_unlock_bh(&adapter->mbx_lock);
 
         clear_bit(vid, adapter->active_vlans);
 
@@ -1206,27 +1245,27 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
 }
 
 /**
- * ixgbevf_set_rx_mode - Multicast set
+ * ixgbevf_set_rx_mode - Multicast and unicast set
  * @netdev: network interface device structure
  *
  * The set_rx_method entry point is called whenever the multicast address
- * list or the network interface flags are updated.  This routine is
- * responsible for configuring the hardware for proper multicast mode.
+ * list, unicast address list or the network interface flags are updated.
+ * This routine is responsible for configuring the hardware for proper
+ * multicast mode and configuring requested unicast filters.
 **/
 static void ixgbevf_set_rx_mode(struct net_device *netdev)
 {
         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
         struct ixgbe_hw *hw = &adapter->hw;
 
-        spin_lock(&adapter->mbx_lock);
+        spin_lock_bh(&adapter->mbx_lock);
 
         /* reprogram multicast list */
-        if (hw->mac.ops.update_mc_addr_list)
-                hw->mac.ops.update_mc_addr_list(hw, netdev);
+        hw->mac.ops.update_mc_addr_list(hw, netdev);
 
         ixgbevf_write_uc_addr_list(netdev);
 
-        spin_unlock(&adapter->mbx_lock);
+        spin_unlock_bh(&adapter->mbx_lock);
 }
 
 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
@@ -1290,8 +1329,8 @@ static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
                        "not set within the polling period\n", rxr);
         }
 
-        ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
-                                (adapter->rx_ring[rxr].count - 1));
+        ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr],
+                                adapter->rx_ring[rxr].count - 1);
 }
 
 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
@@ -1335,11 +1374,12 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
 static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
 {
         struct ixgbe_hw *hw = &adapter->hw;
-        int api[] = { ixgbe_mbox_api_10,
+        int api[] = { ixgbe_mbox_api_11,
+                      ixgbe_mbox_api_10,
                       ixgbe_mbox_api_unknown };
         int err = 0, idx = 0;
 
-        spin_lock(&adapter->mbx_lock);
+        spin_lock_bh(&adapter->mbx_lock);
 
         while (api[idx] != ixgbe_mbox_api_unknown) {
                 err = ixgbevf_negotiate_api_version(hw, api[idx]);
@@ -1348,7 +1388,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
                 idx++;
         }
 
-        spin_unlock(&adapter->mbx_lock);
+        spin_unlock_bh(&adapter->mbx_lock);
 }
 
 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
@@ -1389,16 +1429,14 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 
         ixgbevf_configure_msix(adapter);
 
-        spin_lock(&adapter->mbx_lock);
+        spin_lock_bh(&adapter->mbx_lock);
 
-        if (hw->mac.ops.set_rar) {
-                if (is_valid_ether_addr(hw->mac.addr))
-                        hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
-                else
-                        hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
-        }
+        if (is_valid_ether_addr(hw->mac.addr))
+                hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+        else
+                hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
 
-        spin_unlock(&adapter->mbx_lock);
+        spin_unlock_bh(&adapter->mbx_lock);
 
         clear_bit(__IXGBEVF_DOWN, &adapter->state);
         ixgbevf_napi_enable_all(adapter);
@@ -1413,12 +1451,87 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
         mod_timer(&adapter->watchdog_timer, jiffies);
 }
 
+static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
+{
+        struct ixgbe_hw *hw = &adapter->hw;
+        struct ixgbevf_ring *rx_ring;
+        unsigned int def_q = 0;
+        unsigned int num_tcs = 0;
+        unsigned int num_rx_queues = 1;
+        int err, i;
+
+        spin_lock_bh(&adapter->mbx_lock);
+
+        /* fetch queue configuration from the PF */
+        err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
+
+        spin_unlock_bh(&adapter->mbx_lock);
+
+        if (err)
+                return err;
+
+        if (num_tcs > 1) {
+                /* update default Tx ring register index */
+                adapter->tx_ring[0].reg_idx = def_q;
+
+                /* we need as many queues as traffic classes */
+                num_rx_queues = num_tcs;
+        }
+
+        /* nothing to do if we have the correct number of queues */
+        if (adapter->num_rx_queues == num_rx_queues)
+                return 0;
+
+        /* allocate new rings */
+        rx_ring = kcalloc(num_rx_queues,
+                          sizeof(struct ixgbevf_ring), GFP_KERNEL);
+        if (!rx_ring)
+                return -ENOMEM;
+
+        /* setup ring fields */
+        for (i = 0; i < num_rx_queues; i++) {
+                rx_ring[i].count = adapter->rx_ring_count;
+                rx_ring[i].queue_index = i;
+                rx_ring[i].reg_idx = i;
+                rx_ring[i].dev = &adapter->pdev->dev;
+                rx_ring[i].netdev = adapter->netdev;
+
+                /* allocate resources on the ring */
+                err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
+                if (err) {
+                        while (i) {
+                                i--;
+                                ixgbevf_free_rx_resources(adapter,
+                                                          &rx_ring[i]);
+                        }
+                        kfree(rx_ring);
+                        return err;
+                }
+        }
+
+        /* free the existing rings and queues */
+        ixgbevf_free_all_rx_resources(adapter);
+        adapter->num_rx_queues = 0;
+        kfree(adapter->rx_ring);
+
+        /* move new rings into position on the adapter struct */
+        adapter->rx_ring = rx_ring;
+        adapter->num_rx_queues = num_rx_queues;
+
+        /* reset ring to vector mapping */
+        ixgbevf_reset_q_vectors(adapter);
+        ixgbevf_map_rings_to_vectors(adapter);
+
+        return 0;
+}
+
 void ixgbevf_up(struct ixgbevf_adapter *adapter)
 {
         struct ixgbe_hw *hw = &adapter->hw;
 
         ixgbevf_negotiate_api(adapter);
 
+        ixgbevf_reset_queues(adapter);
+
         ixgbevf_configure(adapter);
 
         ixgbevf_up_complete(adapter);
@@ -1497,7 +1610,6 @@ static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
                 return;
 
         /* Free all the Tx ring sk_buffs */
-
         for (i = 0; i < tx_ring->count; i++) {
                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
                 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
@@ -1593,13 +1705,6 @@ void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
         while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
                 msleep(1);
 
-        /*
-         * Check if PF is up before re-init.  If not then skip until
-         * later when the PF is up and ready to service requests from
-         * the VF via mailbox.  If the VF is up and running then the
-         * watchdog task will continue to schedule reset tasks until
-         * the PF is up and running.
-         */
         ixgbevf_down(adapter);
         ixgbevf_up(adapter);
 
@@ -1611,15 +1716,11 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
         struct ixgbe_hw *hw = &adapter->hw;
         struct net_device *netdev = adapter->netdev;
 
-        spin_lock(&adapter->mbx_lock);
-
         if (hw->mac.ops.reset_hw(hw))
                 hw_dbg(hw, "PF still resetting\n");
         else
                 hw->mac.ops.init_hw(hw);
 
-        spin_unlock(&adapter->mbx_lock);
-
         if (is_valid_ether_addr(adapter->hw.mac.addr)) {
                 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
                        netdev->addr_len);
@@ -1628,10 +1729,11 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
         }
 }
 
-static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
-                                         int vectors)
+static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
+                                        int vectors)
 {
-        int err, vector_threshold;
+        int err = 0;
+        int vector_threshold;
 
         /* We'll want at least 2 (vector_threshold):
          * 1) TxQ[0] + RxQ[0] handler
@@ -1647,21 +1749,18 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
         while (vectors >= vector_threshold) {
                 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
                                       vectors);
-                if (!err) /* Success in acquiring all requested vectors. */
+                if (!err || err < 0) /* Success or a nasty failure. */
                         break;
-                else if (err < 0)
-                        vectors = 0; /* Nasty failure, quit now */
                 else /* err == number of vectors we should try again with */
                         vectors = err;
         }
 
-        if (vectors < vector_threshold) {
-                /* Can't allocate enough MSI-X interrupts?  Oh well.
-                 * This just means we'll go with either a single MSI
-                 * vector or fall back to legacy interrupts.
-                 */
-                hw_dbg(&adapter->hw,
-                       "Unable to allocate MSI-X interrupts\n");
+        if (vectors < vector_threshold)
+                err = -ENOMEM;
+
+        if (err) {
+                dev_err(&adapter->pdev->dev,
+                        "Unable to allocate MSI-X interrupts\n");
                 kfree(adapter->msix_entries);
                 adapter->msix_entries = NULL;
         } else {
@@ -1672,6 +1771,8 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
                  */
                 adapter->num_msix_vectors = vectors;
         }
+
+        return err;
 }
 
 /**
@@ -1717,6 +1818,7 @@ static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
         for (i = 0; i < adapter->num_tx_queues; i++) {
                 adapter->tx_ring[i].count = adapter->tx_ring_count;
                 adapter->tx_ring[i].queue_index = i;
+                /* reg_idx may be remapped later by DCB config */
                 adapter->tx_ring[i].reg_idx = i;
                 adapter->tx_ring[i].dev = &adapter->pdev->dev;
                 adapter->tx_ring[i].netdev = adapter->netdev;
@@ -1774,7 +1876,9 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
         for (vector = 0; vector < v_budget; vector++)
                 adapter->msix_entries[vector].entry = vector;
 
-        ixgbevf_acquire_msix_vectors(adapter, v_budget);
+        err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
+        if (err)
+                goto out;
 
         err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
         if (err)
@@ -1834,18 +1938,13 @@ err_out:
 **/
 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
 {
-        int q_idx, num_q_vectors;
-        int napi_vectors;
-
-        num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-        napi_vectors = adapter->num_rx_queues;
+        int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
                 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
 
                 adapter->q_vector[q_idx] = NULL;
-                if (q_idx < napi_vectors)
-                        netif_napi_del(&q_vector->napi);
+                netif_napi_del(&q_vector->napi);
                 kfree(q_vector);
         }
 }
@@ -1935,7 +2034,7 @@ static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
  * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
-static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
+static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
 {
         struct ixgbe_hw *hw = &adapter->hw;
         struct pci_dev *pdev = adapter->pdev;
@@ -1950,8 +2049,11 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
         hw->subsystem_device_id = pdev->subsystem_device;
 
         hw->mbx.ops.init_params(hw);
-        hw->mac.max_tx_queues = MAX_TX_QUEUES;
-        hw->mac.max_rx_queues = MAX_RX_QUEUES;
+
+        /* assume legacy case in which PF would only give VF 2 queues */
+        hw->mac.max_tx_queues = 2;
+        hw->mac.max_rx_queues = 2;
+
         err = hw->mac.ops.reset_hw(hw);
         if (err) {
                 dev_info(&pdev->dev,
@@ -1966,7 +2068,7 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
                         goto out;
                 }
                 memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr,
-                        adapter->netdev->addr_len);
+                       adapter->netdev->addr_len);
         }
 
         /* lock to protect mailbox accesses */
@@ -2016,6 +2118,10 @@ out:
 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
 {
         struct ixgbe_hw *hw = &adapter->hw;
+        int i;
+
+        if (!adapter->link_up)
+                return;
 
         UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
                                 adapter->stats.vfgprc);
@@ -2029,6 +2135,15 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
                                 adapter->stats.vfgotc);
         UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
                                 adapter->stats.vfmprc);
+
+        for (i = 0; i < adapter->num_rx_queues; i++) {
+                adapter->hw_csum_rx_error +=
+                        adapter->rx_ring[i].hw_csum_rx_error;
+                adapter->hw_csum_rx_good +=
+                        adapter->rx_ring[i].hw_csum_rx_good;
+                adapter->rx_ring[i].hw_csum_rx_error = 0;
+                adapter->rx_ring[i].hw_csum_rx_good = 0;
+        }
 }
 
 /**
@@ -2103,6 +2218,7 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
         struct ixgbe_hw *hw = &adapter->hw;
         u32 link_speed = adapter->link_speed;
         bool link_up = adapter->link_up;
+        s32 need_reset;
 
         adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
 
@@ -2110,38 +2226,42 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
          * Always check the link on the watchdog because we have
          * no LSC interrupt
          */
-        if (hw->mac.ops.check_link) {
-                s32 need_reset;
-
-                spin_lock(&adapter->mbx_lock);
+        spin_lock_bh(&adapter->mbx_lock);
 
-                need_reset = hw->mac.ops.check_link(hw, &link_speed,
-                                                    &link_up, false);
+        need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
 
-                spin_unlock(&adapter->mbx_lock);
-
-                if (need_reset) {
-                        adapter->link_up = link_up;
-                        adapter->link_speed = link_speed;
-                        netif_carrier_off(netdev);
-                        netif_tx_stop_all_queues(netdev);
-                        schedule_work(&adapter->reset_task);
-                        goto pf_has_reset;
-                }
-        } else {
-                /* always assume link is up, if no check link
-                 * function */
-                link_speed = IXGBE_LINK_SPEED_10GB_FULL;
-                link_up = true;
+        spin_unlock_bh(&adapter->mbx_lock);
+
+        if (need_reset) {
+                adapter->link_up = link_up;
+                adapter->link_speed = link_speed;
+                netif_carrier_off(netdev);
+                netif_tx_stop_all_queues(netdev);
+                schedule_work(&adapter->reset_task);
+                goto pf_has_reset;
         }
         adapter->link_up = link_up;
         adapter->link_speed = link_speed;
 
         if (link_up) {
                 if (!netif_carrier_ok(netdev)) {
-                        hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
-                               (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
-                               10 : 1);
+                        char *link_speed_string;
+                        switch (link_speed) {
+                        case IXGBE_LINK_SPEED_10GB_FULL:
+                                link_speed_string = "10 Gbps";
+                                break;
+                        case IXGBE_LINK_SPEED_1GB_FULL:
+                                link_speed_string = "1 Gbps";
+                                break;
+                        case IXGBE_LINK_SPEED_100_FULL:
+                                link_speed_string = "100 Mbps";
+                                break;
+                        default:
+                                link_speed_string = "unknown speed";
+                                break;
+                        }
+                        dev_info(&adapter->pdev->dev,
+                                 "NIC Link is Up, %s\n", link_speed_string);
                         netif_carrier_on(netdev);
                         netif_tx_wake_all_queues(netdev);
                 }
@@ -2149,7 +2269,7 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
                 adapter->link_up = false;
                 adapter->link_speed = 0;
                 if (netif_carrier_ok(netdev)) {
-                        hw_dbg(&adapter->hw, "NIC Link is Down\n");
+                        dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
                         netif_carrier_off(netdev);
                         netif_tx_stop_all_queues(netdev);
                 }
@@ -2377,6 +2497,63 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
                         ixgbevf_free_rx_resources(adapter,
                                                   &adapter->rx_ring[i]);
 }
 
+static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
+{
+        struct ixgbe_hw *hw = &adapter->hw;
+        struct ixgbevf_ring *rx_ring;
+        unsigned int def_q = 0;
+        unsigned int num_tcs = 0;
+        unsigned int num_rx_queues = 1;
+        int err, i;
+
+        spin_lock_bh(&adapter->mbx_lock);
+
+        /* fetch queue configuration from the PF */
+        err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
+
+        spin_unlock_bh(&adapter->mbx_lock);
+
+        if (err)
+                return err;
+
+        if (num_tcs > 1) {
+                /* update default Tx ring register index */
+                adapter->tx_ring[0].reg_idx = def_q;
+
+                /* we need as many queues as traffic classes */
+                num_rx_queues = num_tcs;
+        }
+
+        /* nothing to do if we have the correct number of queues */
+        if (adapter->num_rx_queues == num_rx_queues)
+                return 0;
+
+        /* allocate new rings */
+        rx_ring = kcalloc(num_rx_queues,
+                          sizeof(struct ixgbevf_ring), GFP_KERNEL);
+        if (!rx_ring)
+                return -ENOMEM;
+
+        /* setup ring fields */
+        for (i = 0; i < num_rx_queues; i++) {
+                rx_ring[i].count = adapter->rx_ring_count;
+                rx_ring[i].queue_index = i;
+                rx_ring[i].reg_idx = i;
+                rx_ring[i].dev = &adapter->pdev->dev;
+                rx_ring[i].netdev = adapter->netdev;
+        }
+
+        /* free the existing ring and queues */
+        adapter->num_rx_queues = 0;
+        kfree(adapter->rx_ring);
+
+        /* move new rings into position on the adapter struct */
+        adapter->rx_ring = rx_ring;
+        adapter->num_rx_queues = num_rx_queues;
+
+        return 0;
+}
+
 /**
  * ixgbevf_open - Called when a network interface is made active
  * @netdev: network interface device structure
@@ -2413,6 +2590,11 @@ static int ixgbevf_open(struct net_device *netdev)
 
         ixgbevf_negotiate_api(adapter);
 
+        /* setup queue reg_idx and Rx queue count */
+        err = ixgbevf_setup_queues(adapter);
+        if (err)
+                goto err_setup_queues;
+
         /* allocate transmit descriptors */
         err = ixgbevf_setup_all_tx_resources(adapter);
         if (err)
@@ -2451,6 +2633,7 @@ err_setup_rx:
         ixgbevf_free_all_rx_resources(adapter);
 err_setup_tx:
         ixgbevf_free_all_tx_resources(adapter);
+err_setup_queues:
         ixgbevf_reset(adapter);
 
 err_setup_reset:
@@ -2562,9 +2745,6 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
 static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
                             struct sk_buff *skb, u32 tx_flags)
 {
-
-
-
         u32 vlan_macip_lens = 0;
         u32 mss_l4len_idx = 0;
         u32 type_tucmd = 0;
@@ -2678,10 +2858,10 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
                         tx_buffer_info->dma =
                                 skb_frag_dma_map(tx_ring->dev, frag,
                                                  offset, size, DMA_TO_DEVICE);
-                        tx_buffer_info->mapped_as_page = true;
                         if (dma_mapping_error(tx_ring->dev,
                                               tx_buffer_info->dma))
                                 goto dma_error;
+                        tx_buffer_info->mapped_as_page = true;
                         tx_buffer_info->next_to_watch = i;
 
                         len -= size;
@@ -2754,7 +2934,6 @@ static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
                 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
                 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
                         olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
-
         }
 
         /*
@@ -2823,6 +3002,11 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
         unsigned short f;
 #endif
+        u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
+        if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
+                dev_kfree_skb(skb);
+                return NETDEV_TX_OK;
+        }
 
         tx_ring = &adapter->tx_ring[r_idx];
 
@@ -2902,12 +3086,11 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
         memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
 
-        spin_lock(&adapter->mbx_lock);
+        spin_lock_bh(&adapter->mbx_lock);
 
-        if (hw->mac.ops.set_rar)
-                hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+        hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
 
-        spin_unlock(&adapter->mbx_lock);
+        spin_unlock_bh(&adapter->mbx_lock);
 
         return 0;
 }
@@ -2925,8 +3108,15 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
         int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
 
-        if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
+        switch (adapter->hw.api_version) {
+        case ixgbe_mbox_api_11:
                 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
+                break;
+        default:
+                if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
+                        max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
+                break;
+        }
 
         /* MTU < 68 is an error and causes problems on some kernels */
         if ((new_mtu < 68) || (max_frame > max_possible_frame))
@@ -3094,8 +3284,7 @@ static void ixgbevf_assign_netdev_ops(struct net_device *dev)
  * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
-static int __devinit ixgbevf_probe(struct pci_dev *pdev,
-                                   const struct pci_device_id *ent)
+static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
         struct net_device *netdev;
         struct ixgbevf_adapter *adapter = NULL;
@@ -3181,8 +3370,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
                 goto err_sw_init;
 
         /* The HW MAC address was set and/or determined in sw_init */
-        memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
-
         if (!is_valid_ether_addr(netdev->dev_addr)) {
                 pr_err("invalid MAC address\n");
                 err = -EIO;
@@ -3223,10 +3410,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
         if (err)
                 goto err_sw_init;
 
-        /* pick up the PCI bus settings for reporting later */
-        if (hw->mac.ops.get_bus_info)
-                hw->mac.ops.get_bus_info(hw);
-
         strcpy(netdev->name, "eth%d");
 
         err = register_netdev(netdev);
@@ -3270,7 +3453,7 @@ err_dma:
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
-static void __devexit ixgbevf_remove(struct pci_dev *pdev)
+static void ixgbevf_remove(struct pci_dev *pdev)
 {
         struct net_device *netdev = pci_get_drvdata(pdev);
         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
@@ -3384,7 +3567,7 @@ static struct pci_driver ixgbevf_driver = {
         .name = ixgbevf_driver_name,
         .id_table = ixgbevf_pci_tbl,
         .probe = ixgbevf_probe,
-        .remove = __devexit_p(ixgbevf_remove),
+        .remove = ixgbevf_remove,
 #ifdef CONFIG_PM
         /* Power Management Hooks */
         .suspend = ixgbevf_suspend,