author    Michael S. Tsirkin <mst@redhat.com>  2015-12-17 12:20:39 +0200
committer Michael S. Tsirkin <mst@redhat.com>  2016-01-12 20:47:02 +0200
commit    788e5b3a5da24cc8d93ce2f7c6508181cd7d7fb6 (patch)
tree      7e54e74c3f7373a04780ad14cb157089c76abeb2 /drivers/virtio
parent    9e3f84ce416663c84a191cb3ead300fc1a4adadc (diff)
download  linux-788e5b3a5da24cc8d93ce2f7c6508181cd7d7fb6.tar.bz2
virtio_ring: use virt_store_mb
We need a full barrier after writing out the event index; using virt_store_mb there seems better than open-coding it. As usual, we need a wrapper to account for strong barriers.

It's tempting to use this in vhost as well; for that, we'll need a variant of smp_store_mb that works on __user pointers.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
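For context, the virtio_store_mb() wrapper this patch switches to is not part of the diff below; it is defined outside drivers/virtio, presumably alongside the other virtio_mb()-style helpers in include/linux/virtio_ring.h. A minimal sketch of what such a wrapper looks like, assuming the kernel's virt_store_mb(), WRITE_ONCE() and mb() primitives (the exact in-tree definition may differ):

/* Sketch of the wrapper described in the commit message: store the event
 * index, then enforce a full barrier before the following reads, picking
 * weak (virt_) or strong (mandatory) ordering as the transport requires. */
static inline void virtio_store_mb(bool weak_barriers,
				   __virtio16 *p, __virtio16 v)
{
	if (weak_barriers) {
		/* Device is coherent with the CPU: an SMP-style
		 * store-then-full-barrier is sufficient. */
		virt_store_mb(*p, v);
	} else {
		/* Device may sit behind non-coherent DMA: store the
		 * value, then issue a mandatory full barrier. */
		WRITE_ONCE(*p, v);
		mb();
	}
}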
Diffstat (limited to 'drivers/virtio')
-rw-r--r--  drivers/virtio/virtio_ring.c | 15
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index ee663c458b20..e12e385f7ac3 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -517,10 +517,10 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
 	/* If we expect an interrupt for the next entry, tell host
 	 * by writing event index and flush out the write before
 	 * the read in the next get_buf call. */
-	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
-		vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx);
-		virtio_mb(vq->weak_barriers);
-	}
+	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
+		virtio_store_mb(vq->weak_barriers,
+				&vring_used_event(&vq->vring),
+				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));
 
 #ifdef DEBUG
 	vq->last_add_time_valid = false;
@@ -653,8 +653,11 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
 	}
 	/* TODO: tune this threshold */
 	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
-	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs);
-	virtio_mb(vq->weak_barriers);
+
+	virtio_store_mb(vq->weak_barriers,
+			&vring_used_event(&vq->vring),
+			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));
+
 	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
 		END_USE(vq);
 		return false;