author     Edward Cree <ecree@solarflare.com>  2020-09-03 22:34:15 +0100
committer  Jakub Kicinski <kuba@kernel.org>  2020-09-05 12:21:39 -0700
commit     1c0544d24927e4fad04f858216b8ea767a3bd123 (patch)
tree       d3a63922d57081bca0521f16daecc8372787d397 /drivers/net/ethernet/sfc/tx.c
parent     44a8c4f33c0073ca614db79f22e023811bdd0f3c (diff)
download   linux-1c0544d24927e4fad04f858216b8ea767a3bd123.tar.bz2
sfc: add and use efx_tx_send_pending in tx.c
Instead of using efx_tx_queue_partner(), which relies on the assumption that
tx_queues_per_channel is 2, efx_tx_send_pending() iterates over txqs with
efx_for_each_channel_tx_queue().

We unconditionally set tx_queue->xmit_pending (renamed from
xmit_more_available), then condition on xmit_more for the call to
efx_tx_send_pending(), which will clear xmit_pending. Thus, after an
xmit_more TX, the doorbell is un-rung and xmit_pending is true.

Signed-off-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
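For context, a minimal userspace sketch of the doorbell-batching pattern this
patch moves to: every enqueue marks its queue as pending, and the doorbell is
written for all of a channel's queues only once xmit_more is false. The types
and helpers below are simplified stand-ins (the real driver decides via
__netdev_tx_sent_queue() and rings the doorbell with efx_nic_push_buffers()),
not the sfc API itself.

/* Sketch only: simplified stand-ins for the sfc structures and helpers. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define TXQS_PER_CHANNEL 2   /* illustrative; the patch removes the driver's reliance on 2 */

struct efx_tx_queue {
	bool xmit_pending;       /* renamed from xmit_more_available in this patch */
};

struct efx_channel {
	struct efx_tx_queue txq[TXQS_PER_CHANNEL];
};

/* Stand-in for efx_nic_push_buffers(): ring the doorbell, clearing pending. */
static void push_buffers(struct efx_tx_queue *q)
{
	printf("doorbell for queue %p\n", (void *)q);
	q->xmit_pending = false;
}

/* Equivalent of efx_tx_send_pending(): flush every queue on the channel,
 * since xmit_more state is shared across all of a channel's queues.
 */
static void send_pending(struct efx_channel *channel)
{
	for (size_t i = 0; i < TXQS_PER_CHANNEL; i++)
		if (channel->txq[i].xmit_pending)
			push_buffers(&channel->txq[i]);
}

/* Equivalent of the enqueue path: mark pending unconditionally, ring the
 * doorbell only when xmit_more is false (the real code folds this decision
 * into __netdev_tx_sent_queue()).
 */
static void enqueue(struct efx_channel *channel, size_t qidx, bool xmit_more)
{
	channel->txq[qidx].xmit_pending = true;
	if (!xmit_more)
		send_pending(channel);
}

int main(void)
{
	struct efx_channel ch = { 0 };

	enqueue(&ch, 0, true);   /* doorbell deferred: xmit_pending stays set */
	enqueue(&ch, 1, false);  /* flushes both queues, clearing xmit_pending */
	return 0;
}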
Diffstat (limited to 'drivers/net/ethernet/sfc/tx.c')
-rw-r--r--  drivers/net/ethernet/sfc/tx.c  59
1 file changed, 31 insertions(+), 28 deletions(-)
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 727201d5eb24..c502d226371a 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -268,6 +268,19 @@ static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
}
#endif /* EFX_USE_PIO */
+/* Send any pending traffic for a channel. xmit_more is shared across all
+ * queues for a channel, so we must check all of them.
+ */
+static void efx_tx_send_pending(struct efx_channel *channel)
+{
+ struct efx_tx_queue *q;
+
+ efx_for_each_channel_tx_queue(q, channel) {
+ if (q->xmit_pending)
+ efx_nic_push_buffers(q);
+ }
+}
+
/*
* Add a socket buffer to a TX queue
*
@@ -336,21 +349,11 @@ netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb
efx_tx_maybe_stop_queue(tx_queue);
- /* Pass off to hardware */
- if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) {
- struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
-
- /* There could be packets left on the partner queue if
- * xmit_more was set. If we do not push those they
- * could be left for a long time and cause a netdev watchdog.
- */
- if (txq2->xmit_more_available)
- efx_nic_push_buffers(txq2);
+ tx_queue->xmit_pending = true;
- efx_nic_push_buffers(tx_queue);
- } else {
- tx_queue->xmit_more_available = xmit_more;
- }
+ /* Pass off to hardware */
+ if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more))
+ efx_tx_send_pending(tx_queue->channel);
if (segments) {
tx_queue->tso_bursts++;
@@ -371,14 +374,8 @@ err:
* on this queue or a partner queue then we need to push here to get the
* previous packets out.
*/
- if (!xmit_more) {
- struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
-
- if (txq2->xmit_more_available)
- efx_nic_push_buffers(txq2);
-
- efx_nic_push_buffers(tx_queue);
- }
+ if (!xmit_more)
+ efx_tx_send_pending(tx_queue->channel);
return NETDEV_TX_OK;
}
@@ -489,18 +486,24 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
- /* PTP "event" packet */
- if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
- unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
- return efx_ptp_tx(efx, skb);
- }
-
index = skb_get_queue_mapping(skb);
type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
if (index >= efx->n_tx_channels) {
index -= efx->n_tx_channels;
type |= EFX_TXQ_TYPE_HIGHPRI;
}
+
+ /* PTP "event" packet */
+ if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
+ unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
+ /* There may be existing transmits on the channel that are
+ * waiting for this packet to trigger the doorbell write.
+ * We need to send the packets at this point.
+ */
+ efx_tx_send_pending(efx_get_tx_channel(efx, index));
+ return efx_ptp_tx(efx, skb);
+ }
+
tx_queue = efx_get_tx_queue(efx, index, type);
return __efx_enqueue_skb(tx_queue, skb);