author     Willem de Bruijn <willemb@google.com>    2017-04-24 13:49:29 -0400
committer  David S. Miller <davem@davemloft.net>    2017-04-24 23:55:19 -0400
commit     7b0411ef4aa69c9256d6a2c289d0a2b320414633
tree       53dfadb8aa5536fb7029b405a1b854551a6d9502 /drivers
parent     ea7735d97ba9064c448664429e249991ccd8aa77
virtio-net: clean tx descriptors from rx napi
Amortize the cost of virtual interrupts by doing both rx and tx work
on reception of a receive interrupt if tx napi is enabled. With
VIRTIO_F_EVENT_IDX, this suppresses most explicit tx completion
interrupts for bidirectional workloads.
Signed-off-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/net/virtio_net.c   21
1 file changed, 21 insertions, 0 deletions
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4ec79e5d7a86..9dd978f34c1f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1075,12 +1075,33 @@ static void free_old_xmit_skbs(struct send_queue *sq)
 	u64_stats_update_end(&stats->tx_syncp);
 }
 
+static void virtnet_poll_cleantx(struct receive_queue *rq)
+{
+	struct virtnet_info *vi = rq->vq->vdev->priv;
+	unsigned int index = vq2rxq(rq->vq);
+	struct send_queue *sq = &vi->sq[index];
+	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
+
+	if (!sq->napi.weight)
+		return;
+
+	if (__netif_tx_trylock(txq)) {
+		free_old_xmit_skbs(sq);
+		__netif_tx_unlock(txq);
+	}
+
+	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+		netif_tx_wake_queue(txq);
+}
+
 static int virtnet_poll(struct napi_struct *napi, int budget)
 {
 	struct receive_queue *rq =
 		container_of(napi, struct receive_queue, napi);
 	unsigned int received;
 
+	virtnet_poll_cleantx(rq);
+
 	received = virtnet_receive(rq, budget);
 
 	/* Out of packets? */
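
For readers skimming the hunk above, the new function is repeated below with explanatory comments. This is an annotated sketch for exposition only, not a separate implementation: the authoritative code is the hunk itself, and it relies on helpers already present in virtio_net.c (vq2rxq, free_old_xmit_skbs).

/* Annotated copy of the function added by this patch (sketch only). */
static void virtnet_poll_cleantx(struct receive_queue *rq)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	unsigned int index = vq2rxq(rq->vq);	/* rx and tx queues are paired by index */
	struct send_queue *sq = &vi->sq[index];
	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);

	if (!sq->napi.weight)			/* only act when tx napi is enabled */
		return;

	if (__netif_tx_trylock(txq)) {		/* don't spin if xmit or tx napi holds the lock */
		free_old_xmit_skbs(sq);		/* reclaim completed tx descriptors */
		__netif_tx_unlock(txq);
	}

	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)	/* room for a worst-case skb again */
		netif_tx_wake_queue(txq);	/* let the stack resume transmitting */
}

The trylock means the rx poll path never spins on a tx lock that the transmit path or tx napi may already hold; skipping one cleaning pass is harmless because a later tx interrupt or rx poll will catch up. The 2 + MAX_SKB_FRAGS threshold appears to require enough free descriptors for a maximally fragmented skb before the stack's queue is woken.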