author     Stefano Garzarella <sgarzare@redhat.com>    2020-04-24 17:08:30 +0200
committer  David S. Miller <davem@davemloft.net>       2020-04-27 10:18:01 -0700
commit     a78d163978567adc2733465289293dad479d842a (patch)
tree       c62b88df77025e92bf501e3e622de84970b81958
parent     107bc0766b9feb5113074c753735a3f115c2141f (diff)
download   linux-a78d163978567adc2733465289293dad479d842a.tar.bz2
vsock/virtio: fix multiple packet delivery to monitoring devices
In virtio_transport.c, if the virtqueue is full, the packet being
transmitted is queued up again and sent in the next iteration.
This causes the same packet to be delivered multiple times to
monitoring devices.
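
For context, the transmit worker behaves roughly as in the hypothetical userspace model below (illustration only, not the kernel code; the packet struct and queue helpers are invented). Tap delivery happens before the attempt to add the packet to the virtqueue, so a packet that is requeued because the virtqueue was full is delivered to monitoring devices again on the next attempt:

/* Hypothetical model of the TX retry path (illustration only, not kernel code). */
#include <stdbool.h>
#include <stdio.h>

struct pkt {
        int id;
        int tap_deliveries;     /* how many times monitoring devices saw this packet */
};

/* Stands in for virtio_transport_deliver_tap_pkt() before the fix:
 * every call hands the packet to the monitoring devices.
 */
static void deliver_tap(struct pkt *p)
{
        p->tap_deliveries++;
}

/* Stands in for adding the packet to the virtqueue; fails when it is full. */
static bool try_virtqueue_add(int free_slots)
{
        return free_slots > 0;
}

int main(void)
{
        struct pkt p = { .id = 1, .tap_deliveries = 0 };
        int free_slots = 0;     /* first attempt: the virtqueue is full */

        for (int attempt = 0; attempt < 2; attempt++) {
                deliver_tap(&p);                /* capture before queueing */
                if (!try_virtqueue_add(free_slots)) {
                        free_slots = 1;         /* packet requeued; space frees up later */
                        continue;
                }
                break;
        }

        /* Prints 2: the same packet appears twice in the capture. */
        printf("pkt %d delivered to tap %d times\n", p.id, p.tap_deliveries);
        return 0;
}
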
We want to keep delivering packets to monitoring devices before they
are put in the virtqueue, so that replies cannot appear in the packet
capture before the transmitted packet.
This patch fixes the issue by adding a new flag (tap_delivered) to
struct virtio_vsock_pkt, used to check whether the packet has already
been delivered to monitoring devices.
In vhost/vsock.c we split packets, so we must set 'tap_delivered' back
to false when we re-queue the same virtio_vsock_pkt to handle the
remaining bytes.
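
Taken together, the flag makes the tap delivery helper a no-op the second time it sees the same packet, while the vhost split path clears it so the re-queued remainder is captured as a fresh delivery. A minimal sketch of that lifecycle (hypothetical names, plain userspace C rather than kernel code):

/* Hypothetical model of the tap_delivered lifecycle (illustration only). */
#include <stdbool.h>
#include <stdio.h>

struct pkt {
        bool tap_delivered;
        int tap_deliveries;
};

/* Models virtio_transport_deliver_tap_pkt() after the fix: delivery is
 * skipped if the packet was already handed to monitoring devices.
 */
static void deliver_tap_once(struct pkt *p)
{
        if (p->tap_delivered)
                return;
        p->tap_deliveries++;
        p->tap_delivered = true;
}

int main(void)
{
        struct pkt p = { .tap_delivered = false, .tap_deliveries = 0 };

        /* Virtqueue full: the TX loop calls the helper again for the same
         * packet, but the flag keeps the capture count at 1.
         */
        deliver_tap_once(&p);
        deliver_tap_once(&p);
        printf("after retry: %d tap delivery\n", p.tap_deliveries);

        /* vhost split path: only part of the payload was sent, so the same
         * virtio_vsock_pkt is re-queued for the remaining bytes and should
         * be captured again -- clear the flag, as the patch does.
         */
        p.tap_delivered = false;
        deliver_tap_once(&p);
        printf("after split: %d tap deliveries\n", p.tap_deliveries);

        return 0;
}
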
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/vhost/vsock.c                   | 6 ++++++
-rw-r--r--  include/linux/virtio_vsock.h            | 1 +
-rw-r--r--  net/vmw_vsock/virtio_transport_common.c | 4 ++++
3 files changed, 11 insertions(+), 0 deletions(-)
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 4f50dcb89ac8..31a98c74f678 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -196,6 +196,12 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
 		 * to send it with the next available buffer.
 		 */
 		if (pkt->off < pkt->len) {
+			/* We are queueing the same virtio_vsock_pkt to handle
+			 * the remaining bytes, and we want to deliver it
+			 * to monitoring devices in the next iteration.
+			 */
+			pkt->tap_delivered = false;
+
 			spin_lock_bh(&vsock->send_pkt_list_lock);
 			list_add(&pkt->list, &vsock->send_pkt_list);
 			spin_unlock_bh(&vsock->send_pkt_list_lock);
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index 71c81e0dc8f2..dc636b727179 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -48,6 +48,7 @@ struct virtio_vsock_pkt {
 	u32 len;
 	u32 off;
 	bool reply;
+	bool tap_delivered;
 };
 
 struct virtio_vsock_pkt_info {
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 709038a4783e..69efc891885f 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -157,7 +157,11 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
 
 void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt)
 {
+	if (pkt->tap_delivered)
+		return;
+
 	vsock_deliver_tap(virtio_transport_build_skb, pkt);
+	pkt->tap_delivered = true;
 }
 EXPORT_SYMBOL_GPL(virtio_transport_deliver_tap_pkt);
 