author | Bijan Mottahedeh <bijan.mottahedeh@oracle.com> | 2020-04-03 13:51:33 -0700
committer | Jens Axboe <axboe@kernel.dk> | 2020-04-03 14:55:39 -0600
commit | 581f981034890dfd27be7e98946e8f0461f3967a (patch)
tree | 6025e4ec59cc494b03333ab11c73224c6f26624f /fs/io_uring.c
parent | c336e992cb1cb1db9ee608dfb30342ae781057ab (diff)
download | linux-581f981034890dfd27be7e98946e8f0461f3967a.tar.bz2
io_uring: process requests completed with -EAGAIN on poll list
A request that completes with an -EAGAIN result after it has been added
to the poll list will not be removed from that list in io_do_iopoll(),
because f_op->iopoll() will not succeed for that request.
Maintain a retryable local list similar to the done list, and explicitly
reissue requests with an -EAGAIN result.
Signed-off-by: Bijan Mottahedeh <bijan.mottahedeh@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r-- | fs/io_uring.c | 29
1 file changed, 26 insertions, 3 deletions
diff --git a/fs/io_uring.c b/fs/io_uring.c
index ce76157c2f95..78ae8e8ed5bf 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1744,11 +1744,24 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 	io_free_req_many(ctx, &rb);
 }
 
+static void io_iopoll_queue(struct list_head *again)
+{
+	struct io_kiocb *req;
+
+	do {
+		req = list_first_entry(again, struct io_kiocb, list);
+		list_del(&req->list);
+		refcount_inc(&req->refs);
+		io_queue_async_work(req);
+	} while (!list_empty(again));
+}
+
 static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 			long min)
 {
 	struct io_kiocb *req, *tmp;
 	LIST_HEAD(done);
+	LIST_HEAD(again);
 	bool spin;
 	int ret;
 
@@ -1763,9 +1776,9 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 		struct kiocb *kiocb = &req->rw.kiocb;
 
 		/*
-		 * Move completed entries to our local list. If we find a
-		 * request that requires polling, break out and complete
-		 * the done list first, if we have entries there.
+		 * Move completed and retryable entries to our local lists.
+		 * If we find a request that requires polling, break out
+		 * and complete those lists first, if we have entries there.
 		 */
 		if (req->flags & REQ_F_IOPOLL_COMPLETED) {
 			list_move_tail(&req->list, &done);
@@ -1774,6 +1787,13 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 		if (!list_empty(&done))
 			break;
 
+		if (req->result == -EAGAIN) {
+			list_move_tail(&req->list, &again);
+			continue;
+		}
+		if (!list_empty(&again))
+			break;
+
 		ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
 		if (ret < 0)
 			break;
@@ -1786,6 +1806,9 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 	if (!list_empty(&done))
 		io_iopoll_complete(ctx, nr_events, &done);
 
+	if (!list_empty(&again))
+		io_iopoll_queue(&again);
+
 	return ret;
 }
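
For readers skimming the patch, the following is a minimal userspace sketch of the pattern it applies in io_do_iopoll(): while reaping, entries that completed with -EAGAIN are moved to a separate local list instead of being left on the poll list, the done list is completed first, and the -EAGAIN entries are then explicitly reissued. The struct request type and the push()/pop() helpers are invented for this illustration; they are not the kernel's io_kiocb or list_head API.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct request {
	int id;
	int result;		/* 0 = completed, -EAGAIN = needs reissue */
	struct request *next;
};

/* Pop the head of a singly linked list, or return NULL if it is empty. */
static struct request *pop(struct request **list)
{
	struct request *req = *list;

	if (req)
		*list = req->next;
	return req;
}

/* Push a request onto the head of a list. */
static void push(struct request **list, struct request *req)
{
	req->next = *list;
	*list = req;
}

int main(void)
{
	struct request reqs[] = {
		{ .id = 1, .result = 0 },
		{ .id = 2, .result = -EAGAIN },
		{ .id = 3, .result = 0 },
	};
	struct request *poll_list = NULL, *done = NULL, *again = NULL;
	struct request *req;
	size_t i;

	/* Build the "poll list" of in-flight requests. */
	for (i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++)
		push(&poll_list, &reqs[i]);

	/*
	 * Reap: completed entries go to the local done list, -EAGAIN
	 * entries go to the local again list, so neither is stranded
	 * on the poll list.
	 */
	while ((req = pop(&poll_list)) != NULL) {
		if (req->result == -EAGAIN)
			push(&again, req);
		else
			push(&done, req);
	}

	/* Complete the done list first... */
	while ((req = pop(&done)) != NULL)
		printf("request %d completed\n", req->id);

	/* ...then explicitly reissue everything that hit -EAGAIN. */
	while ((req = pop(&again)) != NULL)
		printf("request %d reissued\n", req->id);

	return 0;
}

Note that the actual patch also takes an extra reference on each retried request (refcount_inc()) before handing it to io_queue_async_work(); the sketch above omits reference counting entirely.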