author		Paolo Abeni <pabeni@redhat.com>		2019-03-20 11:02:06 +0100
committer	David S. Miller <davem@davemloft.net>	2019-03-20 11:18:55 -0700
commit		a350eccee5830d9a1f29e393a88dc05a15326d44 (patch)
tree		606fd11e76d457dbcfcb197f573808cb625bee3f /net
parent		b71b5837f8711dbc4bc0424cb5c75e5921be055c (diff)
download	linux-a350eccee5830d9a1f29e393a88dc05a15326d44.tar.bz2
net: remove 'fallback' argument from dev->ndo_select_queue()
After the previous patch, all callers of ndo_select_queue()
pass netdev_pick_tx as the 'fallback' argument. The only
exceptions are nested calls to ndo_select_queue(), which pass
down the 'fallback' available in the current scope - still
netdev_pick_tx.
We can drop that argument and replace each fallback() invocation
with a direct call to netdev_pick_tx(). This avoids an indirect
call per xmit packet in some scenarios (TCP SYN, UDP unconnected,
XDP generic, pktgen) for device drivers implementing this ndo.
It also cleans up the code a bit.
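As a rough illustration of what the change means for a driver, here is
a minimal sketch of a queue-selection callback after this series, using
a hypothetical 'foo' driver and an example priority policy (not taken
from the patch):

#include <linux/netdevice.h>
#include <linux/pkt_sched.h>

static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev)
{
	/* example policy: pin control-priority traffic to queue 0 */
	if (skb->priority == TC_PRIO_CONTROL)
		return 0;

	/* default selection, formerly reached through fallback() */
	return netdev_pick_tx(dev, skb, sb_dev);
}

Drivers that previously invoked fallback(dev, skb, sb_dev) now call
netdev_pick_tx() directly, with no change in behaviour.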
Tested with ixgbe and CONFIG_FCOE=m
With pktgen using queue xmit:
threads		vanilla		patched
		(kpps)		(kpps)
1		2334		2428
2		4166		4278
4		7895		8100
v1 -> v2:
- rebased after helper's name change
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
 net/core/dev.c          | 9 +++------
 net/mac80211/iface.c    | 6 ++----
 net/packet/af_packet.c  | 3 +--
 3 files changed, 6 insertions(+), 12 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 1a76b4fe9b97..357111431ec9 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3689,16 +3689,14 @@ get_cpus_map:
 }
 
 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
-		     struct net_device *sb_dev,
-		     select_queue_fallback_t fallback)
+		     struct net_device *sb_dev)
 {
 	return 0;
 }
 EXPORT_SYMBOL(dev_pick_tx_zero);
 
 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
-		       struct net_device *sb_dev,
-		       select_queue_fallback_t fallback)
+		       struct net_device *sb_dev)
 {
 	return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
 }
@@ -3748,8 +3746,7 @@ struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
 		const struct net_device_ops *ops = dev->netdev_ops;
 
 		if (ops->ndo_select_queue)
-			queue_index = ops->ndo_select_queue(dev, skb, sb_dev,
-							    netdev_pick_tx);
+			queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
 		else
 			queue_index = netdev_pick_tx(dev, skb, sb_dev);
 
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 4a6ff1482a9f..f0d97eba250b 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1133,8 +1133,7 @@ static void ieee80211_uninit(struct net_device *dev)
 
 static u16 ieee80211_netdev_select_queue(struct net_device *dev,
 					 struct sk_buff *skb,
-					 struct net_device *sb_dev,
-					 select_queue_fallback_t fallback)
+					 struct net_device *sb_dev)
 {
 	return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
 }
@@ -1179,8 +1178,7 @@ static const struct net_device_ops ieee80211_dataif_ops = {
 
 static u16 ieee80211_monitor_select_queue(struct net_device *dev,
 					  struct sk_buff *skb,
-					  struct net_device *sb_dev,
-					  select_queue_fallback_t fallback)
+					  struct net_device *sb_dev)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = sdata->local;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index a8809dc0e1ab..741953b42f44 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -287,8 +287,7 @@ static u16 packet_pick_tx_queue(struct sk_buff *skb)
 #endif
 	skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
 	if (ops->ndo_select_queue) {
-		queue_index = ops->ndo_select_queue(dev, skb, NULL,
-						    netdev_pick_tx);
+		queue_index = ops->ndo_select_queue(dev, skb, NULL);
 		queue_index = netdev_cap_txqueue(dev, queue_index);
 	} else {
 		queue_index = netdev_pick_tx(dev, skb, NULL);
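For context, the matching prototype change lives in
include/linux/netdevice.h and falls outside the 'net' diffstat shown
above; a paraphrased sketch of how the callback member reads after this
series (not a verbatim quote of the header):

struct net_device_ops {
	/* ... */
	/* the trailing 'select_queue_fallback_t fallback' argument is gone */
	u16	(*ndo_select_queue)(struct net_device *dev,
				    struct sk_buff *skb,
				    struct net_device *sb_dev);
	/* ... */
};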