Diffstat (limited to 'drivers/vhost/net.c')
-rw-r--r--	drivers/vhost/net.c	92
1 file changed, 42 insertions(+), 50 deletions(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 969a85960e9f..831eb4fd197d 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -276,12 +276,12 @@ static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
* of used idx. Once lower device DMA done contiguously, we will signal KVM
* guest used idx.
*/
-static int vhost_zerocopy_signal_used(struct vhost_net *net,
- struct vhost_virtqueue *vq)
+static void vhost_zerocopy_signal_used(struct vhost_net *net,
+ struct vhost_virtqueue *vq)
{
struct vhost_net_virtqueue *nvq =
container_of(vq, struct vhost_net_virtqueue, vq);
- int i;
+ int i, add;
int j = 0;
for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
@@ -289,15 +289,17 @@ static int vhost_zerocopy_signal_used(struct vhost_net *net,
vhost_net_tx_err(net);
if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
- vhost_add_used_and_signal(vq->dev, vq,
- vq->heads[i].id, 0);
++j;
} else
break;
}
- if (j)
- nvq->done_idx = i;
- return j;
+ while (j) {
+ add = min(UIO_MAXIOV - nvq->done_idx, j);
+ vhost_add_used_and_signal_n(vq->dev, vq,
+ &vq->heads[nvq->done_idx], add);
+ nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV;
+ j -= add;
+ }
}
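
The rewrite above turns vhost_zerocopy_signal_used() from a per-head signaller into a batched one: the scan loop only counts contiguous DMA-done heads, and the tail loop flushes them through vhost_add_used_and_signal_n(), splitting each batch at the end of the heads[] array so every call covers a linear span. A minimal standalone sketch of the same wrap-aware batching (ring size and flush callback are illustrative stand-ins, not the vhost API):

#define RING_SIZE 1024			/* stand-in for UIO_MAXIOV */

/* Flush 'done' contiguous completions starting at *done_idx, splitting
 * each batch at the end of the ring so flush() always sees a linear
 * region -- the same min()/modulo pattern as the patch above.
 */
static void flush_done(unsigned int *done_idx, int done,
		       void (*flush)(unsigned int start, int n))
{
	while (done) {
		int add = RING_SIZE - *done_idx;

		if (add > done)
			add = done;
		flush(*done_idx, add);		/* linear span only */
		*done_idx = (*done_idx + add) % RING_SIZE;
		done -= add;
	}
}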
static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
@@ -306,6 +308,11 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
struct vhost_virtqueue *vq = ubufs->vq;
int cnt = atomic_read(&ubufs->kref.refcount);
+ /* set len to mark this desc buffers done DMA */
+ vq->heads[ubuf->desc].len = success ?
+ VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
+ vhost_net_ubuf_put(ubufs);
+
/*
* Trigger polling thread if guest stopped submitting new buffers:
* in this case, the refcount after decrement will eventually reach 1
@@ -316,10 +323,6 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
*/
if (cnt <= 2 || !(cnt % 16))
vhost_poll_queue(&vq->poll);
- /* set len to mark this desc buffers done DMA */
- vq->heads[ubuf->desc].len = success ?
- VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
- vhost_net_ubuf_put(ubufs);
}
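
The callback hunks reorder the completion bookkeeping: the head's len is stamped (DMA done or failed) and the ubuf reference dropped before the wakeup decision, so by the time the TX handler can be woken the completion state is already recorded. The cnt <= 2 || !(cnt % 16) test is a throttle: cnt is the refcount read before the put, roughly the number of in-flight zerocopy buffers plus one, so the handler is kicked when completions are nearly drained or once every 16 callbacks rather than on every DMA completion. A sketch of just that throttle (freestanding helper, not part of the patch):

#include <stdbool.h>

/* Wakeup throttle as used above: kick the TX handler only when the
 * in-flight completion count is nearly drained (<= 2) or once every
 * 16 callbacks, instead of on every single DMA completion.
 */
static bool should_wake_tx(int cnt)
{
	return cnt <= 2 || !(cnt % 16);
}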
/* Expects to be always run from workqueue - which acts as
@@ -360,6 +363,13 @@ static void handle_tx(struct vhost_net *net)
if (zcopy)
vhost_zerocopy_signal_used(net, vq);
+ /* If more outstanding DMAs, queue the work.
+ * Handle upend_idx wrap around
+ */
+ if (unlikely((nvq->upend_idx + vq->num - VHOST_MAX_PEND)
+ % UIO_MAXIOV == nvq->done_idx))
+ break;
+
head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
ARRAY_SIZE(vq->iov),
&out, &in,
@@ -369,17 +379,6 @@ static void handle_tx(struct vhost_net *net)
break;
/* Nothing new? Wait for eventfd to tell us they refilled. */
if (head == vq->num) {
- int num_pends;
-
- /* If more outstanding DMAs, queue the work.
- * Handle upend_idx wrap around
- */
- num_pends = likely(nvq->upend_idx >= nvq->done_idx) ?
- (nvq->upend_idx - nvq->done_idx) :
- (nvq->upend_idx + UIO_MAXIOV -
- nvq->done_idx);
- if (unlikely(num_pends > VHOST_MAX_PEND))
- break;
if (unlikely(vhost_enable_notify(&net->dev, vq))) {
vhost_disable_notify(&net->dev, vq);
continue;
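
These two hunks move the outstanding-DMA limit from the "ring empty" branch into the main loop: the removed code only computed num_pends when vhost_get_vq_desc() found no new descriptor, so a guest that kept submitting buffers could exceed the cap indefinitely, while the new check runs on every iteration and breaks out as soon as the wrap-adjusted distance between upend_idx and done_idx reaches the limit. The quantity being bounded is the pending count, which the removed code computed with an explicit wrap branch; the same value folds into one modular expression, as in this hypothetical helper (UIO_MAXIOV hard-coded for illustration):

/* Pending zerocopy heads: submitted (upend_idx) but not yet recycled
 * (done_idx), with wrap-around over the UIO_MAXIOV-sized heads array.
 * Equivalent to the removed num_pends computation, without the branch.
 */
static int tx_pending(unsigned int upend_idx, unsigned int done_idx)
{
	return (upend_idx + 1024 /* UIO_MAXIOV */ - done_idx) % 1024;
}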
@@ -402,43 +401,36 @@ static void handle_tx(struct vhost_net *net)
iov_length(nvq->hdr, s), hdr_size);
break;
}
- zcopy_used = zcopy && (len >= VHOST_GOODCOPY_LEN ||
- nvq->upend_idx != nvq->done_idx);
+
+ zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
+ && (nvq->upend_idx + 1) % UIO_MAXIOV !=
+ nvq->done_idx
+ && vhost_net_tx_select_zcopy(net);
/* use msg_control to pass vhost zerocopy ubuf info to skb */
if (zcopy_used) {
+ struct ubuf_info *ubuf;
+ ubuf = nvq->ubuf_info + nvq->upend_idx;
+
vq->heads[nvq->upend_idx].id = head;
- if (!vhost_net_tx_select_zcopy(net) ||
- len < VHOST_GOODCOPY_LEN) {
- /* copy don't need to wait for DMA done */
- vq->heads[nvq->upend_idx].len =
- VHOST_DMA_DONE_LEN;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- ubufs = NULL;
- } else {
- struct ubuf_info *ubuf;
- ubuf = nvq->ubuf_info + nvq->upend_idx;
-
- vq->heads[nvq->upend_idx].len =
- VHOST_DMA_IN_PROGRESS;
- ubuf->callback = vhost_zerocopy_callback;
- ubuf->ctx = nvq->ubufs;
- ubuf->desc = nvq->upend_idx;
- msg.msg_control = ubuf;
- msg.msg_controllen = sizeof(ubuf);
- ubufs = nvq->ubufs;
- kref_get(&ubufs->kref);
- }
+ vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
+ ubuf->callback = vhost_zerocopy_callback;
+ ubuf->ctx = nvq->ubufs;
+ ubuf->desc = nvq->upend_idx;
+ msg.msg_control = ubuf;
+ msg.msg_controllen = sizeof(ubuf);
+ ubufs = nvq->ubufs;
+ kref_get(&ubufs->kref);
nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
- } else
+ } else {
msg.msg_control = NULL;
+ ubufs = NULL;
+ }
/* TODO: Check specific error and bomb out unless ENOBUFS? */
err = sock->ops->sendmsg(NULL, sock, &msg, len);
if (unlikely(err < 0)) {
if (zcopy_used) {
- if (ubufs)
- vhost_net_ubuf_put(ubufs);
+ vhost_net_ubuf_put(ubufs);
nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
% UIO_MAXIOV;
}