commit 906c6caaf586180261ea581915e1cf8bc466bd69
tree 362bec352f429aa6711795fa058fd1479b5f0cb8
parent 0756a8691017518ceeca4c083e7a359107186498
author Pavel Begunkov <asml.silence@gmail.com> 2021-08-15 10:40:26 +0100
committer Jens Axboe <axboe@kernel.dk> 2021-08-23 13:10:43 -0600
io_uring: optimise io_prep_linked_timeout()
Linked timeout handling during issue is heavy: it adds extra
instructions and forces us to save the next linked timeout before
io_issue_sqe().
Following the same reasoning as in the refcounting patches, a request
can't be freed by the time it returns from io_issue_sqe(), so we don't
need to do io_prep_linked_timeout() in advance; it can be delayed to
colder paths, optimising the generic path.
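Roughly, the control flow changes as in the toy userspace sketch
below. This is not the kernel code: struct req, prep() and
issue_inline() are illustrative stand-ins for struct io_kiocb,
io_prep_linked_timeout() and io_issue_sqe().

#include <stdbool.h>
#include <stdio.h>

struct req { bool has_ltimeout; };

/* Stand-in for io_prep_linked_timeout(): flag juggling plus a look at
 * the linked request. */
static bool prep(struct req *r)
{
        puts("prep linked timeout");
        return r->has_ltimeout;
}

/* Stand-in for io_issue_sqe(); returns true on inline completion. */
static bool issue_inline(struct req *r)
{
        (void)r;
        return true;
}

/* Old shape: prep is paid up front on every issue. */
static void queue_sqe_old(struct req *r)
{
        bool ltimeout = prep(r);        /* before issue, always */

        issue_inline(r);
        if (ltimeout)
                puts("queue linked timeout");
}

/* New shape: the generic (inline completion) path skips prep entirely. */
static void queue_sqe_new(struct req *r)
{
        if (issue_inline(r))
                return;                 /* hot path: no timeout work */
        if (prep(r))                    /* colder path only */
                puts("queue linked timeout");
}

int main(void)
{
        struct req r = { .has_ltimeout = true };

        puts("old:"); queue_sqe_old(&r);
        puts("new:"); queue_sqe_new(&r);
        return 0;
}

The point is simply that the unconditional prep work moves off the
path taken by inline completions.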
It should also save quite a lot for requests with linked timeouts that
complete inline, avoiding the timeout spinlocking, hrtimer_start(),
hrtimer_try_to_cancel() and so on.
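For a sense of what that round trip costs, here is a userspace
analogue (a sketch only; POSIX timers stand in for hrtimers, the
librt calls themselves are real). Build with: cc timer.c -lrt.

#include <signal.h>
#include <stdio.h>
#include <time.h>

/* Arm a timer and immediately tear it down again: roughly the
 * hrtimer_start() + hrtimer_try_to_cancel() round trip the old code
 * paid for every linked-timeout request that completed inline. */
int main(void)
{
        timer_t t;
        struct sigevent sev = { .sigev_notify = SIGEV_NONE };
        struct itimerspec its = { .it_value = { .tv_sec = 1 } };

        timer_create(CLOCK_MONOTONIC, &sev, &t);
        timer_settime(t, 0, &its, NULL);        /* ~ hrtimer_start() */
        /* ... the request completes inline here ... */
        timer_delete(t);                        /* ~ hrtimer_try_to_cancel() */
        puts("armed and cancelled a timer for nothing");
        return 0;
}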
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/19bfc9a0d26c5c5f1e359f7650afe807ca8ef879.1628981736.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
 fs/io_uring.c | 25 ++++++++++++++++++++++---
 1 file changed, 22 insertions(+), 3 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 8eea3a1e8c21..d78a05ecbf68 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1306,8 +1306,16 @@ static void io_req_track_inflight(struct io_kiocb *req)
 	}
 }
 
+static inline void io_unprep_linked_timeout(struct io_kiocb *req)
+{
+	req->flags &= ~REQ_F_LINK_TIMEOUT;
+}
+
 static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
 {
+	if (WARN_ON_ONCE(!req->link))
+		return NULL;
+
 	req->flags &= ~REQ_F_ARM_LTIMEOUT;
 	req->flags |= REQ_F_LINK_TIMEOUT;
 
@@ -1932,6 +1940,7 @@ static bool io_disarm_next(struct io_kiocb *req)
 	if (req->flags & REQ_F_ARM_LTIMEOUT) {
 		struct io_kiocb *link = req->link;
 
+		req->flags &= ~REQ_F_ARM_LTIMEOUT;
 		if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
 			io_remove_next_linked(req);
 			io_cqring_fill_event(link->ctx, link->user_data,
@@ -6485,7 +6494,7 @@ static void io_queue_linked_timeout(struct io_kiocb *req)
 static void __io_queue_sqe(struct io_kiocb *req)
 	__must_hold(&req->ctx->uring_lock)
 {
-	struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
+	struct io_kiocb *linked_timeout;
 	int ret;
 
 issue_sqe:
@@ -6503,10 +6512,19 @@ issue_sqe:
 			state->compl_reqs[state->compl_nr++] = req;
 			if (state->compl_nr == ARRAY_SIZE(state->compl_reqs))
 				io_submit_flush_completions(ctx);
+			return;
 		}
+
+		linked_timeout = io_prep_linked_timeout(req);
+		if (linked_timeout)
+			io_queue_linked_timeout(linked_timeout);
 	} else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
+		linked_timeout = io_prep_linked_timeout(req);
+
 		switch (io_arm_poll_handler(req)) {
 		case IO_APOLL_READY:
+			if (linked_timeout)
+				io_unprep_linked_timeout(req);
 			goto issue_sqe;
 		case IO_APOLL_ABORTED:
 			/*
@@ -6516,11 +6534,12 @@ issue_sqe:
 			io_queue_async_work(req);
 			break;
 		}
+
+		if (linked_timeout)
+			io_queue_linked_timeout(linked_timeout);
 	} else {
 		io_req_complete_failed(req, ret);
 	}
-	if (linked_timeout)
-		io_queue_linked_timeout(linked_timeout);
 }
 
 static inline void io_queue_sqe(struct io_kiocb *req)