Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ethernet/amd/amd8111e.c                         16
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c                7
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_netdev.c             12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h                     1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c              306
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c               5
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.h             9
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/metadata.c        20
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c         50
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_main.c                5
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h             2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c    1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h         2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c      45
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c   59
-rw-r--r--  drivers/net/hamradio/mkiss.c                                 2
-rw-r--r--  drivers/net/macvlan.c                                       68
17 files changed, 399 insertions, 211 deletions
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index c99e3e845ac0..a90080f12e67 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1074,16 +1074,12 @@ static int amd8111e_calc_coalesce(struct net_device *dev)
amd8111e_set_coalesce(dev,TX_INTR_COAL);
coal_conf->tx_coal_type = MEDIUM_COALESCE;
}
-
- }
- else if(tx_pkt_size >= 1024){
- if (tx_pkt_size >= 1024){
- if(coal_conf->tx_coal_type != HIGH_COALESCE){
- coal_conf->tx_timeout = 4;
- coal_conf->tx_event_count = 8;
- amd8111e_set_coalesce(dev,TX_INTR_COAL);
- coal_conf->tx_coal_type = HIGH_COALESCE;
- }
+ } else if (tx_pkt_size >= 1024) {
+ if (coal_conf->tx_coal_type != HIGH_COALESCE) {
+ coal_conf->tx_timeout = 4;
+ coal_conf->tx_event_count = 8;
+ amd8111e_set_coalesce(dev, TX_INTR_COAL);
+ coal_conf->tx_coal_type = HIGH_COALESCE;
}
}
}
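The hunk above removes a duplicated condition: tx_pkt_size >= 1024 was tested in the else-if and then again on the very next line, leaving a stray brace level behind. What remains is a simple ladder from average Tx packet size to coalescing tier. A condensed sketch of that ladder follows; only the >= 1024 tier (timeout 4, event count 8) is visible in this hunk, so the lower threshold is an illustrative assumption.

enum coal_type { LOW_COALESCE, MEDIUM_COALESCE, HIGH_COALESCE };

/* Hypothetical condensed form of the amd8111e Tx coalescing ladder:
 * larger average packets tolerate longer interrupt delays and bigger
 * event batches.
 */
static enum coal_type amd8111e_tx_tier(unsigned int tx_pkt_size)
{
	if (tx_pkt_size < 128)		/* assumed threshold */
		return LOW_COALESCE;
	if (tx_pkt_size < 1024)
		return MEDIUM_COALESCE;
	return HIGH_COALESCE;		/* tx_timeout = 4, tx_event_count = 8 */
}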
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index df8607097e4a..c51d61f5f715 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -445,15 +445,14 @@ static void fm10k_type_trans(struct fm10k_ring *rx_ring,
l2_accel = NULL;
}
- skb->protocol = eth_type_trans(skb, dev);
-
/* Record Rx queue, or update macvlan statistics */
if (!l2_accel)
skb_record_rx_queue(skb, rx_ring->queue_index);
else
macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
- (skb->pkt_type == PACKET_BROADCAST) ||
- (skb->pkt_type == PACKET_MULTICAST));
+ false);
+
+ skb->protocol = eth_type_trans(skb, dev);
}
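Both this hunk and the matching ixgbe change below stop deriving the macvlan multicast counter from skb->pkt_type. Since the offload now requires destination-MAC filtering (see the macvlan_supports_dest_filter() checks added in this series), frames landing on an offloaded macvlan's queues are unicast to its address, so the flag is hard-coded to false and eth_type_trans(), which is what assigns pkt_type, no longer has to run first. For context, macvlan_count_rx() is a small inline from include/linux/if_macvlan.h; it looks approximately like this (quoted for reference, not part of this diff):

static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
				    unsigned int len, bool success,
				    bool multicast)
{
	if (likely(success)) {
		struct vlan_pcpu_stats *pcpu_stats;

		pcpu_stats = get_cpu_ptr(vlan->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->rx_packets++;
		pcpu_stats->rx_bytes += len;
		if (multicast)
			pcpu_stats->rx_multicast++;
		u64_stats_update_end(&pcpu_stats->syncp);
		put_cpu_ptr(vlan->pcpu_stats);
	} else {
		this_cpu_inc(vlan->pcpu_stats->rx_errors);
	}
}

Passing false therefore simply means the rx_multicast counter is never bumped for offloaded traffic.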
/**
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 45793491d4ba..26e749766337 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -22,6 +22,7 @@
#include "fm10k.h"
#include <linux/vmalloc.h>
#include <net/udp_tunnel.h>
+#include <linux/if_macvlan.h>
/**
* fm10k_setup_tx_resources - allocate Tx resources (Descriptors)
@@ -1254,7 +1255,7 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
glort = l2_accel->dglort + 1 + i;
hw->mac.ops.update_xcast_mode(hw, glort,
- FM10K_XCAST_MODE_MULTI);
+ FM10K_XCAST_MODE_NONE);
fm10k_queue_mac_request(interface, glort,
sdev->dev_addr,
hw->mac.default_vid, true);
@@ -1449,6 +1450,13 @@ static void *fm10k_dfwd_add_station(struct net_device *dev,
int size = 0, i;
u16 glort;
+ /* The hardware supported by fm10k only filters on the destination MAC
+ * address. In order to avoid issues we only support offloading modes
+ * where the hardware can actually provide the functionality.
+ */
+ if (!macvlan_supports_dest_filter(sdev))
+ return ERR_PTR(-EMEDIUMTYPE);
+
/* allocate l2 accel structure if it is not available */
if (!l2_accel) {
/* verify there is enough free GLORTs to support l2_accel */
@@ -1515,7 +1523,7 @@ static void *fm10k_dfwd_add_station(struct net_device *dev,
if (fm10k_host_mbx_ready(interface)) {
hw->mac.ops.update_xcast_mode(hw, glort,
- FM10K_XCAST_MODE_MULTI);
+ FM10K_XCAST_MODE_NONE);
fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
hw->mac.default_vid, true);
}
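macvlan_supports_dest_filter() is introduced in include/linux/if_macvlan.h elsewhere in this series: only macvlan modes in which delivery is decided purely by the destination MAC can be backed by a hardware MAC filter, which rules out passthru and source-based setups. The helper most likely amounts to the following; treat the exact body as an assumption based on the modes involved:

static inline bool macvlan_supports_dest_filter(struct net_device *dev)
{
	struct macvlan_dev *macvlan = netdev_priv(dev);

	return macvlan->mode == MACVLAN_MODE_PRIVATE ||
	       macvlan->mode == MACVLAN_MODE_VEPA ||
	       macvlan->mode == MACVLAN_MODE_BRIDGE;
}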
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 7dd5038cfcc4..8fccca57cd6a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -305,7 +305,6 @@ enum ixgbe_ring_state_t {
struct ixgbe_fwd_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct net_device *netdev;
- struct ixgbe_adapter *real_adapter;
unsigned int tx_base_queue;
unsigned int rx_base_queue;
int pool;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 51e7d82a5860..b6e5cea84949 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1768,15 +1768,14 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
ixgbe_ipsec_rx(rx_ring, rx_desc, skb);
- skb->protocol = eth_type_trans(skb, dev);
-
/* record Rx queue, or update MACVLAN statistics */
if (netif_is_ixgbe(dev))
skb_record_rx_queue(skb, rx_ring->queue_index);
else
macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
- (skb->pkt_type == PACKET_BROADCAST) ||
- (skb->pkt_type == PACKET_MULTICAST));
+ false);
+
+ skb->protocol = eth_type_trans(skb, dev);
}
static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
@@ -4219,7 +4218,8 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 reg_offset, vf_shift;
+ u16 pool = adapter->num_rx_pools;
+ u32 reg_offset, vf_shift, vmolr;
u32 gcr_ext, vmdctl;
int i;
@@ -4233,6 +4233,13 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
vmdctl |= IXGBE_VT_CTL_REPLEN;
IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
+ /* accept untagged packets until a vlan tag is
+ * specifically set for the VMDQ queue/pool
+ */
+ vmolr = IXGBE_VMOLR_AUPE;
+ while (pool--)
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(pool)), vmolr);
+
vf_shift = VMDQ_P(0) % 32;
reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
@@ -4900,36 +4907,6 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
return -ENOMEM;
}
-/**
- * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
- * @netdev: network interface device structure
- * @vfn: pool to associate with unicast addresses
- *
- * Writes unicast address list to the RAR table.
- * Returns: -ENOMEM on failure/insufficient address space
- * 0 on no addresses written
- * X on writing X addresses to the RAR table
- **/
-static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- int count = 0;
-
- /* return ENOMEM indicating insufficient memory for addresses */
- if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter, vfn))
- return -ENOMEM;
-
- if (!netdev_uc_empty(netdev)) {
- struct netdev_hw_addr *ha;
- netdev_for_each_uc_addr(ha, netdev) {
- ixgbe_del_mac_filter(adapter, ha->addr, vfn);
- ixgbe_add_mac_filter(adapter, ha->addr, vfn);
- count++;
- }
- }
- return count;
-}
-
static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -5309,29 +5286,6 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
spin_unlock(&adapter->fdir_perfect_lock);
}
-static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
- struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 vmolr;
-
- /* No unicast promiscuous support for VMDQ devices. */
- vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
- vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
-
- /* clear the affected bit */
- vmolr &= ~IXGBE_VMOLR_MPE;
-
- if (dev->flags & IFF_ALLMULTI) {
- vmolr |= IXGBE_VMOLR_MPE;
- } else {
- vmolr |= IXGBE_VMOLR_ROMPE;
- hw->mac.ops.update_mc_addr_list(hw, dev);
- }
- ixgbe_write_uc_addr_list(adapter->netdev, pool);
- IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
-}
-
/**
* ixgbe_clean_rx_ring - Free Rx Buffers per Queue
* @rx_ring: ring to free buffers from
@@ -5384,21 +5338,17 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
rx_ring->next_to_use = 0;
}
-static int ixgbe_fwd_ring_up(struct net_device *vdev,
+static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
struct ixgbe_fwd_adapter *accel)
{
- struct ixgbe_adapter *adapter = accel->real_adapter;
+ struct net_device *vdev = accel->netdev;
int i, baseq, err;
- if (!test_bit(accel->pool, adapter->fwd_bitmask))
- return 0;
-
baseq = accel->pool * adapter->num_rx_queues_per_pool;
netdev_dbg(vdev, "pool %i:%i queues %i:%i\n",
accel->pool, adapter->num_rx_pools,
baseq, baseq + adapter->num_rx_queues_per_pool);
- accel->netdev = vdev;
accel->rx_base_queue = baseq;
accel->tx_base_queue = baseq;
@@ -5415,26 +5365,36 @@ static int ixgbe_fwd_ring_up(struct net_device *vdev,
*/
err = ixgbe_add_mac_filter(adapter, vdev->dev_addr,
VMDQ_P(accel->pool));
- if (err >= 0) {
- ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
+ if (err >= 0)
return 0;
- }
+
+ /* if we cannot add the MAC rule then disable the offload */
+ macvlan_release_l2fw_offload(vdev);
for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
adapter->rx_ring[baseq + i]->netdev = NULL;
+ netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n");
+
+ clear_bit(accel->pool, adapter->fwd_bitmask);
+ kfree(accel);
+
return err;
}
-static int ixgbe_upper_dev_walk(struct net_device *upper, void *data)
+static int ixgbe_macvlan_up(struct net_device *vdev, void *data)
{
- if (netif_is_macvlan(upper)) {
- struct macvlan_dev *dfwd = netdev_priv(upper);
- struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
+ struct ixgbe_adapter *adapter = data;
+ struct ixgbe_fwd_adapter *accel;
- if (dfwd->fwd_priv)
- ixgbe_fwd_ring_up(upper, vadapter);
- }
+ if (!netif_is_macvlan(vdev))
+ return 0;
+
+ accel = macvlan_accel_priv(vdev);
+ if (!accel)
+ return 0;
+
+ ixgbe_fwd_ring_up(adapter, accel);
return 0;
}
@@ -5442,7 +5402,7 @@ static int ixgbe_upper_dev_walk(struct net_device *upper, void *data)
static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
{
netdev_walk_all_upper_dev_rcu(adapter->netdev,
- ixgbe_upper_dev_walk, NULL);
+ ixgbe_macvlan_up, adapter);
}
static void ixgbe_configure(struct ixgbe_adapter *adapter)
@@ -8843,6 +8803,49 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
}
#endif /* CONFIG_IXGBE_DCB */
+static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data)
+{
+ struct ixgbe_adapter *adapter = data;
+ struct ixgbe_fwd_adapter *accel;
+ int pool;
+
+ /* we only care about macvlans... */
+ if (!netif_is_macvlan(vdev))
+ return 0;
+
+ /* that have hardware offload enabled... */
+ accel = macvlan_accel_priv(vdev);
+ if (!accel)
+ return 0;
+
+ /* If we can relocate to a different bit do so */
+ pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
+ if (pool < adapter->num_rx_pools) {
+ set_bit(pool, adapter->fwd_bitmask);
+ accel->pool = pool;
+ return 0;
+ }
+
+ /* if we cannot find a free pool then disable the offload */
+ netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n");
+ macvlan_release_l2fw_offload(vdev);
+ kfree(accel);
+
+ return 0;
+}
+
+static void ixgbe_defrag_macvlan_pools(struct net_device *dev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+ /* flush any stale bits out of the fwd bitmask */
+ bitmap_clear(adapter->fwd_bitmask, 1, 63);
+
+ /* walk through upper devices reassigning pools */
+ netdev_walk_all_upper_dev_rcu(dev, ixgbe_reassign_macvlan_pool,
+ adapter);
+}
+
/**
* ixgbe_setup_tc - configure net_device for multiple traffic classes
*
@@ -8910,6 +8913,8 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
#endif /* CONFIG_IXGBE_DCB */
ixgbe_init_interrupt_scheme(adapter);
+ ixgbe_defrag_macvlan_pools(dev);
+
if (netif_running(dev))
return ixgbe_open(dev);
@@ -9014,13 +9019,12 @@ struct upper_walk_data {
static int get_macvlan_queue(struct net_device *upper, void *_data)
{
if (netif_is_macvlan(upper)) {
- struct macvlan_dev *dfwd = netdev_priv(upper);
- struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
+ struct ixgbe_fwd_adapter *vadapter = macvlan_accel_priv(upper);
struct upper_walk_data *data = _data;
struct ixgbe_adapter *adapter = data->adapter;
int ifindex = data->ifindex;
- if (vadapter && vadapter->netdev->ifindex == ifindex) {
+ if (vadapter && upper->ifindex == ifindex) {
data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
data->action = data->queue;
return 1;
@@ -9460,6 +9464,22 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
return features;
}
+static void ixgbe_reset_l2fw_offload(struct ixgbe_adapter *adapter)
+{
+ int rss = min_t(int, ixgbe_max_rss_indices(adapter),
+ num_online_cpus());
+
+ /* go back to full RSS if we're not running SR-IOV */
+ if (!adapter->ring_feature[RING_F_VMDQ].offset)
+ adapter->flags &= ~(IXGBE_FLAG_VMDQ_ENABLED |
+ IXGBE_FLAG_SRIOV_ENABLED);
+
+ adapter->ring_feature[RING_F_RSS].limit = rss;
+ adapter->ring_feature[RING_F_VMDQ].limit = 1;
+
+ ixgbe_setup_tc(adapter->netdev, adapter->hw_tcs);
+}
+
static int ixgbe_set_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -9540,7 +9560,9 @@ static int ixgbe_set_features(struct net_device *netdev,
}
}
- if (need_reset)
+ if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && adapter->num_rx_pools > 1)
+ ixgbe_reset_l2fw_offload(adapter);
+ else if (need_reset)
ixgbe_do_reset(netdev);
else if (changed & (NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER))
@@ -9803,71 +9825,98 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
{
- struct ixgbe_fwd_adapter *fwd_adapter = NULL;
struct ixgbe_adapter *adapter = netdev_priv(pdev);
- int used_pools = adapter->num_vfs + adapter->num_rx_pools;
+ struct ixgbe_fwd_adapter *accel;
int tcs = adapter->hw_tcs ? : 1;
- unsigned int limit;
int pool, err;
- /* Hardware has a limited number of available pools. Each VF, and the
- * PF require a pool. Check to ensure we don't attempt to use more
- * then the available number of pools.
+ /* The hardware supported by ixgbe only filters on the destination MAC
+ * address. In order to avoid issues we only support offloading modes
+ * where the hardware can actually provide the functionality.
*/
- if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
- return ERR_PTR(-EINVAL);
+ if (!macvlan_supports_dest_filter(vdev))
+ return ERR_PTR(-EMEDIUMTYPE);
- if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
- adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
- (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
- return ERR_PTR(-EBUSY);
+ pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
+ if (pool == adapter->num_rx_pools) {
+ u16 used_pools = adapter->num_vfs + adapter->num_rx_pools;
+ u16 reserved_pools;
+
+ if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
+ adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
+ adapter->num_rx_pools > IXGBE_MAX_MACVLANS)
+ return ERR_PTR(-EBUSY);
+
+ /* Hardware has a limited number of available pools. Each VF,
+ * and the PF require a pool. Check to ensure we don't
+ * attempt to use more than the available number of pools.
+ */
+ if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
+ return ERR_PTR(-EBUSY);
- fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL);
- if (!fwd_adapter)
- return ERR_PTR(-ENOMEM);
+ /* Enable VMDq flag so device will be set in VM mode */
+ adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED |
+ IXGBE_FLAG_SRIOV_ENABLED;
- pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
- set_bit(pool, adapter->fwd_bitmask);
- limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools + 1);
+ /* Try to reserve as many queues per pool as possible;
+ * we start with the configurations that support 4 queues
+ * per pool, followed by 2, and then by just 1 per pool.
+ */
+ if (used_pools < 32 && adapter->num_rx_pools < 16)
+ reserved_pools = min_t(u16,
+ 32 - used_pools,
+ 16 - adapter->num_rx_pools);
+ else if (adapter->num_rx_pools < 32)
+ reserved_pools = min_t(u16,
+ 64 - used_pools,
+ 32 - adapter->num_rx_pools);
+ else
+ reserved_pools = 64 - used_pools;
- /* Enable VMDq flag so device will be set in VM mode */
- adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
- adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
- fwd_adapter->pool = pool;
- fwd_adapter->real_adapter = adapter;
+ if (!reserved_pools)
+ return ERR_PTR(-EBUSY);
- /* Force reinit of ring allocation with VMDQ enabled */
- err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
+ adapter->ring_feature[RING_F_VMDQ].limit += reserved_pools;
- if (!err && netif_running(pdev))
- err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
+ /* Force reinit of ring allocation with VMDQ enabled */
+ err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
+ if (err)
+ return ERR_PTR(err);
- if (!err)
- return fwd_adapter;
+ if (pool >= adapter->num_rx_pools)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ accel = kzalloc(sizeof(*accel), GFP_KERNEL);
+ if (!accel)
+ return ERR_PTR(-ENOMEM);
- /* unwind counter and free adapter struct */
- netdev_info(pdev,
- "%s: dfwd hardware acceleration failed\n", vdev->name);
- clear_bit(pool, adapter->fwd_bitmask);
- kfree(fwd_adapter);
- return ERR_PTR(err);
+ set_bit(pool, adapter->fwd_bitmask);
+ accel->pool = pool;
+ accel->netdev = vdev;
+
+ if (!netif_running(pdev))
+ return accel;
+
+ err = ixgbe_fwd_ring_up(adapter, accel);
+ if (err)
+ return ERR_PTR(err);
+
+ return accel;
}
static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
{
struct ixgbe_fwd_adapter *accel = priv;
- struct ixgbe_adapter *adapter = accel->real_adapter;
+ struct ixgbe_adapter *adapter = netdev_priv(pdev);
unsigned int rxbase = accel->rx_base_queue;
- unsigned int limit, i;
+ unsigned int i;
/* delete unicast filter associated with offloaded interface */
ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr,
VMDQ_P(accel->pool));
- /* disable ability to receive packets for this pool */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(accel->pool), 0);
-
/* Allow remaining Rx packets to get flushed out of the
* Rx FIFO before we drop the netdev for the ring.
*/
@@ -9886,25 +9935,6 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
}
clear_bit(accel->pool, adapter->fwd_bitmask);
- limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
- adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
-
- /* go back to full RSS if we're done with our VMQs */
- if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
- int rss = min_t(int, ixgbe_max_rss_indices(adapter),
- num_online_cpus());
-
- adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
- adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
- adapter->ring_feature[RING_F_RSS].limit = rss;
- }
-
- ixgbe_setup_tc(pdev, adapter->hw_tcs);
- netdev_dbg(pdev, "pool %i:%i queues %i:%i\n",
- accel->pool, adapter->num_rx_pools,
- accel->rx_base_queue,
- accel->rx_base_queue +
- adapter->num_rx_queues_per_pool);
kfree(accel);
}
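The reworked ixgbe_fwd_add() above only grows the VMDq allocation when every bit in fwd_bitmask is taken, and then grows it in blocks rather than one pool at a time: per its comment, it prefers layouts that still leave 4 queues per pool, then 2, then 1. Pulled out as a standalone helper, the reservation math mirrors the hunk exactly (the helper itself is a hypothetical refactor for readability):

/* How many extra pools to reserve once the current ones are all used.
 * Thresholds come straight from ixgbe_fwd_add(): stay under 32 total
 * pools while fewer than 16 Rx pools exist (4-queue layout), then
 * under 64/32 (2-queue layout), then fill out the remaining pools
 * with 1 queue each.
 */
static u16 ixgbe_fwd_reserve_pools(u16 used_pools, u16 num_rx_pools)
{
	if (used_pools < 32 && num_rx_pools < 16)
		return min_t(u16, 32 - used_pools, 16 - num_rx_pools);
	if (num_rx_pools < 32)
		return min_t(u16, 64 - used_pools, 32 - num_rx_pools);
	return 64 - used_pools;
}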
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 008aa073a679..bfc4171cd3f9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -266,7 +266,7 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
#endif
/* Disable VMDq flag so device will be set in VM mode */
- if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
+ if (bitmap_weight(adapter->fwd_bitmask, adapter->num_rx_pools) == 1) {
adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
rss = min_t(int, ixgbe_max_rss_indices(adapter),
@@ -312,7 +312,8 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
* other values out of range.
*/
num_tc = adapter->hw_tcs;
- num_rx_pools = adapter->num_rx_pools;
+ num_rx_pools = bitmap_weight(adapter->fwd_bitmask,
+ adapter->num_rx_pools);
limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC :
(num_tc > 1) ? IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC;
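With RING_F_VMDQ.limit now adjusted lazily rather than on every macvlan add/del, the SR-IOV paths count live pools straight from fwd_bitmask instead of trusting the limit. Bit 0 of fwd_bitmask is the PF's default pool (note that ixgbe_defrag_macvlan_pools() above only clears bits 1..63), so "only the default pool remains" is exactly a bitmap weight of 1. A hypothetical helper naming the test used in both hunks:

static bool ixgbe_default_pool_only(struct ixgbe_adapter *adapter)
{
	return bitmap_weight(adapter->fwd_bitmask,
			     adapter->num_rx_pools) == 1;
}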
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index c67e1b54c614..733ff53cc601 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -47,6 +47,7 @@
struct net_device;
struct nfp_app;
+#define NFP_FL_STATS_CTX_DONT_CARE cpu_to_be32(0xffffffff)
#define NFP_FL_STATS_ENTRY_RS BIT(20)
#define NFP_FL_STATS_ELEM_RS 4
#define NFP_FL_REPEATED_HASH_MAX BIT(17)
@@ -189,9 +190,11 @@ struct nfp_fl_payload {
spinlock_t lock; /* lock stats */
struct nfp_fl_stats stats;
__be32 nfp_tun_ipv4_addr;
+ struct net_device *ingress_dev;
char *unmasked_data;
char *mask_data;
char *action_data;
+ bool ingress_offload;
};
struct nfp_fl_stats_frame {
@@ -216,12 +219,14 @@ int nfp_flower_compile_action(struct tc_cls_flower_offload *flow,
struct nfp_fl_payload *nfp_flow);
int nfp_compile_flow_metadata(struct nfp_app *app,
struct tc_cls_flower_offload *flow,
- struct nfp_fl_payload *nfp_flow);
+ struct nfp_fl_payload *nfp_flow,
+ struct net_device *netdev);
int nfp_modify_flow_metadata(struct nfp_app *app,
struct nfp_fl_payload *nfp_flow);
struct nfp_fl_payload *
-nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
+nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
+ struct net_device *netdev, __be32 host_ctx);
struct nfp_fl_payload *
nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index db977cf8e933..21668aa435e8 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -99,14 +99,18 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
/* Must be called with either RTNL or rcu_read_lock */
struct nfp_fl_payload *
-nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie)
+nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
+ struct net_device *netdev, __be32 host_ctx)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *flower_entry;
hash_for_each_possible_rcu(priv->flow_table, flower_entry, link,
tc_flower_cookie)
- if (flower_entry->tc_flower_cookie == tc_flower_cookie)
+ if (flower_entry->tc_flower_cookie == tc_flower_cookie &&
+ (!netdev || flower_entry->ingress_dev == netdev) &&
+ (host_ctx == NFP_FL_STATS_CTX_DONT_CARE ||
+ flower_entry->meta.host_ctx_id == host_ctx))
return flower_entry;
return NULL;
@@ -121,13 +125,11 @@ nfp_flower_update_stats(struct nfp_app *app, struct nfp_fl_stats_frame *stats)
flower_cookie = be64_to_cpu(stats->stats_cookie);
rcu_read_lock();
- nfp_flow = nfp_flower_search_fl_table(app, flower_cookie);
+ nfp_flow = nfp_flower_search_fl_table(app, flower_cookie, NULL,
+ stats->stats_con_id);
if (!nfp_flow)
goto exit_rcu_unlock;
- if (nfp_flow->meta.host_ctx_id != stats->stats_con_id)
- goto exit_rcu_unlock;
-
spin_lock(&nfp_flow->lock);
nfp_flow->stats.pkts += be32_to_cpu(stats->pkt_count);
nfp_flow->stats.bytes += be64_to_cpu(stats->byte_count);
@@ -317,7 +319,8 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
int nfp_compile_flow_metadata(struct nfp_app *app,
struct tc_cls_flower_offload *flow,
- struct nfp_fl_payload *nfp_flow)
+ struct nfp_fl_payload *nfp_flow,
+ struct net_device *netdev)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *check_entry;
@@ -348,7 +351,8 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
nfp_flow->stats.bytes = 0;
nfp_flow->stats.used = jiffies;
- check_entry = nfp_flower_search_fl_table(app, flow->cookie);
+ check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev,
+ NFP_FL_STATS_CTX_DONT_CARE);
if (check_entry) {
if (nfp_release_stats_entry(app, stats_cxt))
return -EINVAL;
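A table hit now has to satisfy three conditions, two of which can be wildcarded: stats updates pass netdev == NULL and match on the firmware's host context id, while add/remove lookups pass the ingress netdev and NFP_FL_STATS_CTX_DONT_CARE. Factored out of the loop above, the per-entry predicate reads (hypothetical helper, logic copied verbatim from the hunk):

static bool nfp_fl_entry_matches(const struct nfp_fl_payload *entry,
				 unsigned long tc_flower_cookie,
				 struct net_device *netdev, __be32 host_ctx)
{
	return entry->tc_flower_cookie == tc_flower_cookie &&
	       (!netdev || entry->ingress_dev == netdev) &&
	       (host_ctx == NFP_FL_STATS_CTX_DONT_CARE ||
		entry->meta.host_ctx_id == host_ctx);
}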
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 114d2ab02a38..70ec9d821b91 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -345,7 +345,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
}
static struct nfp_fl_payload *
-nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
+nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
{
struct nfp_fl_payload *flow_pay;
@@ -371,6 +371,8 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
flow_pay->meta.flags = 0;
spin_lock_init(&flow_pay->lock);
+ flow_pay->ingress_offload = !egress;
+
return flow_pay;
err_free_mask:
@@ -402,8 +404,20 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *flow_pay;
struct nfp_fl_key_ls *key_layer;
+ struct net_device *ingr_dev;
int err;
+ ingr_dev = egress ? NULL : netdev;
+ flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
+ NFP_FL_STATS_CTX_DONT_CARE);
+ if (flow_pay) {
+ /* Ignore as duplicate if it has been added by different cb. */
+ if (flow_pay->ingress_offload && egress)
+ return 0;
+ else
+ return -EOPNOTSUPP;
+ }
+
key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
if (!key_layer)
return -ENOMEM;
@@ -413,12 +427,14 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (err)
goto err_free_key_ls;
- flow_pay = nfp_flower_allocate_new(key_layer);
+ flow_pay = nfp_flower_allocate_new(key_layer, egress);
if (!flow_pay) {
err = -ENOMEM;
goto err_free_key_ls;
}
+ flow_pay->ingress_dev = egress ? NULL : netdev;
+
err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
tun_type);
if (err)
@@ -428,7 +444,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (err)
goto err_destroy_flow;
- err = nfp_compile_flow_metadata(app, flow, flow_pay);
+ err = nfp_compile_flow_metadata(app, flow, flow_pay,
+ flow_pay->ingress_dev);
if (err)
goto err_destroy_flow;
@@ -462,6 +479,7 @@ err_free_key_ls:
* @app: Pointer to the APP handle
* @netdev: netdev structure.
* @flow: TC flower classifier offload structure
+ * @egress: Netdev is the egress dev.
*
* Removes a flow from the repeated hash structure and clears the
* action payload.
@@ -470,15 +488,18 @@ err_free_key_ls:
*/
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flow)
+ struct tc_cls_flower_offload *flow, bool egress)
{
struct nfp_port *port = nfp_port_from_netdev(netdev);
struct nfp_fl_payload *nfp_flow;
+ struct net_device *ingr_dev;
int err;
- nfp_flow = nfp_flower_search_fl_table(app, flow->cookie);
+ ingr_dev = egress ? NULL : netdev;
+ nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
+ NFP_FL_STATS_CTX_DONT_CARE);
if (!nfp_flow)
- return -ENOENT;
+ return egress ? 0 : -ENOENT;
err = nfp_modify_flow_metadata(app, nfp_flow);
if (err)
@@ -505,7 +526,9 @@ err_free_flow:
/**
* nfp_flower_get_stats() - Populates flow stats obtained from hardware.
* @app: Pointer to the APP handle
+ * @netdev: Netdev structure.
* @flow: TC flower classifier offload structure
+ * @egress: Netdev is the egress dev.
*
* Populates a flow statistics structure which corresponds to a
* specific flow.
@@ -513,14 +536,21 @@ err_free_flow:
* Return: negative value on error, 0 if stats populated successfully.
*/
static int
-nfp_flower_get_stats(struct nfp_app *app, struct tc_cls_flower_offload *flow)
+nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
+ struct tc_cls_flower_offload *flow, bool egress)
{
struct nfp_fl_payload *nfp_flow;
+ struct net_device *ingr_dev;
- nfp_flow = nfp_flower_search_fl_table(app, flow->cookie);
+ ingr_dev = egress ? NULL : netdev;
+ nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
+ NFP_FL_STATS_CTX_DONT_CARE);
if (!nfp_flow)
return -EINVAL;
+ if (nfp_flow->ingress_offload && egress)
+ return 0;
+
spin_lock_bh(&nfp_flow->lock);
tcf_exts_stats_update(flow->exts, nfp_flow->stats.bytes,
nfp_flow->stats.pkts, nfp_flow->stats.used);
@@ -543,9 +573,9 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
case TC_CLSFLOWER_REPLACE:
return nfp_flower_add_offload(app, netdev, flower, egress);
case TC_CLSFLOWER_DESTROY:
- return nfp_flower_del_offload(app, netdev, flower);
+ return nfp_flower_del_offload(app, netdev, flower, egress);
case TC_CLSFLOWER_STATS:
- return nfp_flower_get_stats(app, flower);
+ return nfp_flower_get_stats(app, netdev, flower, egress);
}
return -EOPNOTSUPP;
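A single TC flow can now reach the driver twice, once through the ingress block on the representor and once through the egress path used for tunnel decap, so the REPLACE, DESTROY and STATS handlers above each treat an entry owned by the other path as benign rather than as an error. The REPLACE-time decision distills to the following (illustrative helper only):

static int nfp_fl_duplicate_policy(const struct nfp_fl_payload *existing,
				   bool egress)
{
	/* Already offloaded via the ingress cb: the egress delivery of
	 * the same flow is expected, swallow it quietly.
	 */
	if (existing->ingress_offload && egress)
		return 0;
	/* A second add on the same path is a real duplicate. */
	return -EOPNOTSUPP;
}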
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index c4b1f344b4da..0ade122805ad 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -486,6 +486,10 @@ static int nfp_pci_probe(struct pci_dev *pdev,
goto err_disable_msix;
}
+ err = nfp_resource_table_init(pf->cpp);
+ if (err)
+ goto err_cpp_free;
+
pf->hwinfo = nfp_hwinfo_read(pf->cpp);
dev_info(&pdev->dev, "Assembly: %s%s%s-%s CPLD: %s\n",
@@ -548,6 +552,7 @@ err_fw_unload:
vfree(pf->dumpspec);
err_hwinfo_free:
kfree(pf->hwinfo);
+err_cpp_free:
nfp_cpp_free(pf->cpp);
err_disable_msix:
destroy_workqueue(pf->wq);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
index ced62d112aa2..f44d0a857314 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
@@ -94,6 +94,8 @@ int nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask,
/* MAC Statistics Accumulator */
#define NFP_RESOURCE_MAC_STATISTICS "mac.stat"
+int nfp_resource_table_init(struct nfp_cpp *cpp);
+
struct nfp_resource *
nfp_resource_acquire(struct nfp_cpp *cpp, const char *name);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index cd678323bacb..a0e336bd1d85 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -1330,6 +1330,7 @@ struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev)
/* Finished with card initialization. */
dev_info(&pdev->dev,
"Netronome Flow Processor NFP4000/NFP6000 PCIe Card Probe\n");
+ pcie_print_link_status(pdev);
nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
if (!nfp) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
index c8f2c064cce3..4e19add1c539 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
@@ -295,6 +295,8 @@ void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex);
int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex);
int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex);
int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex);
+int nfp_cpp_mutex_reclaim(struct nfp_cpp *cpp, int target,
+ unsigned long long address);
/**
* nfp_cppcore_pcie_unit() - Get PCI Unit of a CPP handle
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
index cb28ac03e4ca..c88bf673cb76 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
@@ -59,6 +59,11 @@ static u32 nfp_mutex_unlocked(u16 interface)
return (u32)interface << 16 | 0x0000;
}
+static u32 nfp_mutex_owner(u32 val)
+{
+ return val >> 16;
+}
+
static bool nfp_mutex_is_locked(u32 val)
{
return (val & 0xffff) == 0x000f;
@@ -351,3 +356,43 @@ int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex)
return nfp_mutex_is_locked(tmp) ? -EBUSY : -EINVAL;
}
+
+/**
+ * nfp_cpp_mutex_reclaim() - Unlock mutex if held by local endpoint
+ * @cpp: NFP CPP handle
+ * @target: NFP CPP target ID (i.e. NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU)
+ * @address: Offset into the address space of the NFP CPP target ID
+ *
+ * Release lock if held by local system. Extreme care is advised, call only
+ * when no local lock users can exist.
+ *
+ * Return: 0 if the lock was OK, 1 if locked by us, -errno on invalid mutex
+ */
+int nfp_cpp_mutex_reclaim(struct nfp_cpp *cpp, int target,
+ unsigned long long address)
+{
+ const u32 mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */
+ const u32 muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */
+ u16 interface = nfp_cpp_interface(cpp);
+ int err;
+ u32 tmp;
+
+ err = nfp_cpp_mutex_validate(interface, &target, address);
+ if (err)
+ return err;
+
+ /* Check lock */
+ err = nfp_cpp_readl(cpp, mur, address, &tmp);
+ if (err < 0)
+ return err;
+
+ if (nfp_mutex_is_unlocked(tmp) || nfp_mutex_owner(tmp) != interface)
+ return 0;
+
+ /* Bust the lock */
+ err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_unlocked(interface));
+ if (err < 0)
+ return err;
+
+ return 1;
+}
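The reclaim logic leans on the on-chip mutex word encoding that the surrounding helpers define: the holder's CPP interface ID lives in the upper 16 bits (extracted by the new nfp_mutex_owner()), and the lower 16 bits carry a magic, 0x000f when held and 0x0000 when free (see nfp_mutex_unlocked() and nfp_mutex_is_locked() above). For symmetry, the locked encoding already present in this file is presumably:

static u32 nfp_mutex_locked(u16 interface)
{
	return (u32)interface << 16 | 0x000f;
}

Reclaim is then just: read the word, and if it is held with our own interface ID in the top half, overwrite it with the unlocked pattern.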
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
index 7e14725055c7..2dd89dba9311 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
@@ -338,3 +338,62 @@ u64 nfp_resource_size(struct nfp_resource *res)
{
return res->size;
}
+
+/**
+ * nfp_resource_table_init() - Run initial checks on the resource table
+ * @cpp: NFP CPP handle
+ *
+ * Start-of-day init procedure for resource table. Must be called before
+ * any local resource table users may exist.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int nfp_resource_table_init(struct nfp_cpp *cpp)
+{
+ struct nfp_cpp_mutex *dev_mutex;
+ int i, err;
+
+ err = nfp_cpp_mutex_reclaim(cpp, NFP_RESOURCE_TBL_TARGET,
+ NFP_RESOURCE_TBL_BASE);
+ if (err < 0) {
+ nfp_err(cpp, "Error: failed to reclaim resource table mutex\n");
+ return err;
+ }
+ if (err)
+ nfp_warn(cpp, "Warning: busted main resource table mutex\n");
+
+ dev_mutex = nfp_cpp_mutex_alloc(cpp, NFP_RESOURCE_TBL_TARGET,
+ NFP_RESOURCE_TBL_BASE,
+ NFP_RESOURCE_TBL_KEY);
+ if (!dev_mutex)
+ return -ENOMEM;
+
+ if (nfp_cpp_mutex_lock(dev_mutex)) {
+ nfp_err(cpp, "Error: failed to claim resource table mutex\n");
+ nfp_cpp_mutex_free(dev_mutex);
+ return -EINVAL;
+ }
+
+ /* Resource 0 is the dev_mutex, start from 1 */
+ for (i = 1; i < NFP_RESOURCE_TBL_ENTRIES; i++) {
+ u64 addr = NFP_RESOURCE_TBL_BASE +
+ sizeof(struct nfp_resource_entry) * i;
+
+ err = nfp_cpp_mutex_reclaim(cpp, NFP_RESOURCE_TBL_TARGET, addr);
+ if (err < 0) {
+ nfp_err(cpp,
+ "Error: failed to reclaim resource %d mutex\n",
+ i);
+ goto err_unlock;
+ }
+ if (err)
+ nfp_warn(cpp, "Warning: busted resource %d mutex\n", i);
+ }
+
+ err = 0;
+err_unlock:
+ nfp_cpp_mutex_unlock(dev_mutex);
+ nfp_cpp_mutex_free(dev_mutex);
+
+ return err;
+}
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index c180b480f8ef..13e4c1eff353 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -217,7 +217,7 @@ static int kiss_esc_crc(unsigned char *s, unsigned char *d, unsigned short crc,
c = *s++;
else if (len > 1)
c = crc >> 8;
- else if (len > 0)
+ else
c = crc & 0xff;
len--;
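The mkiss change is a dead-condition cleanup rather than a behaviour change: kiss_esc_crc() walks its buffer inside a loop guarded by len > 0 and appends the two CRC bytes on the last two iterations, so when the final branch is reached len > 0 already holds and the old test could never be false. A paraphrase of the byte-source logic, assuming that loop structure:

/* Which byte goes out next: payload while more than the 2-byte CRC
 * trailer remains, then the high CRC byte, then the low one.  The
 * enclosing while (len > 0) guard makes len >= 1 in the last branch.
 */
static unsigned char kiss_next_byte(const unsigned char **s,
				    unsigned short crc, int len)
{
	if (len > 2)
		return *(*s)++;
	if (len > 1)
		return crc >> 8;
	return crc & 0xff;
}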
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 725f4b4afc6d..adde8fc45588 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -514,6 +514,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
const struct macvlan_dev *vlan = netdev_priv(dev);
const struct macvlan_port *port = vlan->port;
const struct macvlan_dev *dest;
+ void *accel_priv = NULL;
if (vlan->mode == MACVLAN_MODE_BRIDGE) {
const struct ethhdr *eth = (void *)skb->data;
@@ -533,9 +534,14 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
+ /* For packets that are non-multicast and not bridged we will pass
+ * the necessary information so that the lowerdev can distinguish
+ * the source of the packets via the accel_priv value.
+ */
+ accel_priv = vlan->accel_priv;
xmit_world:
skb->dev = vlan->lowerdev;
- return dev_queue_xmit(skb);
+ return dev_queue_xmit_accel(skb, accel_priv);
}
static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb)
@@ -552,19 +558,14 @@ static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, str
static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
+ struct macvlan_dev *vlan = netdev_priv(dev);
unsigned int len = skb->len;
int ret;
- struct macvlan_dev *vlan = netdev_priv(dev);
if (unlikely(netpoll_tx_running(dev)))
return macvlan_netpoll_send_skb(vlan, skb);
- if (vlan->fwd_priv) {
- skb->dev = vlan->lowerdev;
- ret = dev_queue_xmit_accel(skb, vlan->fwd_priv);
- } else {
- ret = macvlan_queue_xmit(skb, dev);
- }
+ ret = macvlan_queue_xmit(skb, dev);
if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
struct vlan_pcpu_stats *pcpu_stats;
@@ -613,26 +614,27 @@ static int macvlan_open(struct net_device *dev)
goto hash_add;
}
- if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) {
- vlan->fwd_priv =
- lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);
-
- /* If we get a NULL pointer back, or if we get an error
- * then we should just fall through to the non accelerated path
- */
- if (IS_ERR_OR_NULL(vlan->fwd_priv)) {
- vlan->fwd_priv = NULL;
- } else
- return 0;
- }
-
err = -EBUSY;
if (macvlan_addr_busy(vlan->port, dev->dev_addr))
goto out;
- err = dev_uc_add(lowerdev, dev->dev_addr);
- if (err < 0)
- goto out;
+ /* Attempt to populate accel_priv which is used to offload the L2
+ * forwarding requests for unicast packets.
+ */
+ if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD)
+ vlan->accel_priv =
+ lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);
+
+ * If the earlier attempt to offload failed, or accel_priv is not
+ * populated, we must add the unicast address to the lower device.
+ */
+ if (IS_ERR_OR_NULL(vlan->accel_priv)) {
+ vlan->accel_priv = NULL;
+ err = dev_uc_add(lowerdev, dev->dev_addr);
+ if (err < 0)
+ goto out;
+ }
+
if (dev->flags & IFF_ALLMULTI) {
err = dev_set_allmulti(lowerdev, 1);
if (err < 0)
@@ -653,13 +655,14 @@ clear_multi:
if (dev->flags & IFF_ALLMULTI)
dev_set_allmulti(lowerdev, -1);
del_unicast:
- dev_uc_del(lowerdev, dev->dev_addr);
-out:
- if (vlan->fwd_priv) {
+ if (vlan->accel_priv) {
lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev,
- vlan->fwd_priv);
- vlan->fwd_priv = NULL;
+ vlan->accel_priv);
+ vlan->accel_priv = NULL;
+ } else {
+ dev_uc_del(lowerdev, dev->dev_addr);
}
+out:
return err;
}
@@ -668,11 +671,10 @@ static int macvlan_stop(struct net_device *dev)
struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;
- if (vlan->fwd_priv) {
+ if (vlan->accel_priv) {
lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev,
- vlan->fwd_priv);
- vlan->fwd_priv = NULL;
- return 0;
+ vlan->accel_priv);
+ vlan->accel_priv = NULL;
}
dev_uc_unsync(lowerdev, dev);
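Taken together, the macvlan changes rename fwd_priv to accel_priv and move the offload decision out of the transmit fast path: macvlan_queue_xmit() attaches accel_priv only for non-multicast, non-bridged traffic (bridge-mode multicast jumps to xmit_world with it still NULL) and hands it to dev_queue_xmit_accel(), the existing core helper that passes it through to the lower device's queue selection. The open path becomes a clean hardware-first fallback; a condensed, hypothetical refactor of the hunk above:

static int macvlan_open_addr_setup(struct macvlan_dev *vlan,
				   struct net_device *dev,
				   struct net_device *lowerdev)
{
	/* Ask the lower device for a forwarding station first. */
	if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD)
		vlan->accel_priv =
			lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);

	if (!IS_ERR_OR_NULL(vlan->accel_priv))
		return 0;	/* hardware now filters our dest MAC */

	/* Fall back to a software unicast filter on the lower device. */
	vlan->accel_priv = NULL;
	return dev_uc_add(lowerdev, dev->dev_addr);
}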