author     Ezequiel Garcia <ezequiel.garcia@free-electrons.com>    2014-05-19 13:59:54 -0300
committer  David S. Miller <davem@davemloft.net>                   2014-05-22 14:57:15 -0400
commit     e19d2dda90c5305a201282317aa492ce0337aa62 (patch)
tree       61a7187a57ec45a3f1245d4cfd3b7cff35b10081
parent     01ef26ca44fdfc04a611975f9abe34a04261f98e (diff)
download   linux-e19d2dda90c5305a201282317aa492ce0337aa62.tar.bz2
net: mvneta: Clean mvneta_tx() sk_buff handling
Rework mvneta_tx() so that the code that performs the final handling
before an sk_buff is transmitted runs only if the number of fragments
processed is positive.

This is preparation work for adding software TSO support.
Signed-off-by: Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
 drivers/net/ethernet/marvell/mvneta.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index ed7f924a41e2..85090291e210 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1584,7 +1584,6 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
 	u16 txq_id = skb_get_queue_mapping(skb);
 	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
 	struct mvneta_tx_desc *tx_desc;
-	struct netdev_queue *nq;
 	int frags = 0;
 	u32 tx_cmd;
 
@@ -1592,7 +1591,6 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
 		goto out;
 
 	frags = skb_shinfo(skb)->nr_frags + 1;
-	nq = netdev_get_tx_queue(dev, txq_id);
 
 	/* Get a descriptor for the first part of the packet */
 	tx_desc = mvneta_txq_next_desc_get(txq);
@@ -1635,15 +1633,16 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 
-	txq->count += frags;
-	mvneta_txq_pend_desc_add(pp, txq, frags);
-
-	if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
-		netif_tx_stop_queue(nq);
-
 out:
 	if (frags > 0) {
 		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
+
+		txq->count += frags;
+		mvneta_txq_pend_desc_add(pp, txq, frags);
+
+		if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
+			netif_tx_stop_queue(nq);
 
 		u64_stats_update_begin(&stats->syncp);
 		stats->tx_packets++;
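For readability, below is a minimal sketch of how the tail of mvneta_tx() reads
once this patch is applied, pieced together from the hunks above; lines outside
the diff context are elided, and the comments are added for illustration rather
than taken from the driver source.

out:
	if (frags > 0) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		/* Descriptor accounting and the hardware kick now happen
		 * only when at least one fragment was actually queued.
		 */
		txq->count += frags;
		mvneta_txq_pend_desc_add(pp, txq, frags);

		/* Stop the queue once it can no longer hold a worst-case
		 * (maximally fragmented) skb.
		 */
		if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
			netif_tx_stop_queue(nq);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		/* ... remaining per-CPU stats update continues as before ... */
	}

Note that the netdev_get_tx_queue() lookup also moves under the same test, so
when no fragments were processed (the error path) the function no longer
touches the TX queue bookkeeping at all.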