author		Jens Axboe <axboe@kernel.dk>	2022-12-17 13:42:24 -0700
committer	Jens Axboe <axboe@kernel.dk>	2022-12-17 20:35:54 -0700
commit		35d90f95cfa773b7e3b1f57ba15ce06a470f354c (patch)
tree		ee753101bc7a1ef45e98c6bc4cf9ff1d3536f97c /io_uring
parent		6434ec0186b80c734aa7a2acf95f75f5c6dd943b (diff)
io_uring: include task_work run after scheduling in wait for events
It's quite possible that we got woken up because task_work was queued, and we
need to process this task_work to generate the events waited for. If we return
to the wait loop without running task_work, we'll end up adding the task to the
waitqueue again, only to call io_cqring_wait_schedule() again, which will run
the task_work. This is less efficient than it could be, as it requires adding
to the cq_wait queue again. It also triggers the wakeup path for completions,
as cq_wait is now non-empty with the task itself, and it'll require another
lock grab and deletion to remove ourselves from the waitqueue.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
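To see the saved round trip concretely, here is a minimal standalone model of
the loop. It is illustrative only: the helpers are stand-ins for the kernel
functions, not the real io_uring code, and it simply counts how many times the
task would be (re-)added to the waitqueue before and after this change.

	#include <stdbool.h>
	#include <stdio.h>

	static int pending_tw;	/* a queued task_work item */
	static int events;	/* completions generated so far */

	static void run_task_work(void)
	{
		/* processing the queued task_work generates the event */
		if (pending_tw) {
			pending_tw = 0;
			events++;
		}
	}

	/* stand-in for io_cqring_wait_schedule(): wake, maybe run task_work */
	static int wait_schedule(bool run_tw_after_wake)
	{
		if (run_tw_after_wake)
			run_task_work();	/* post-patch behaviour */
		return 1;			/* tell the caller to retry */
	}

	static int waitqueue_adds(bool post_patch)
	{
		int adds = 0, ret;

		pending_tw = 1;
		events = 0;
		do {
			adds++;		/* models prepare_to_wait_exclusive() */
			ret = wait_schedule(post_patch);
			if (events >= 1)	/* min_events satisfied */
				break;
			/* pre-patch: task_work only runs on the next pass */
			run_task_work();
		} while (ret > 0);
		return adds;
	}

	int main(void)
	{
		printf("waitqueue adds, pre-patch:  %d\n", waitqueue_adds(false));
		printf("waitqueue adds, post-patch: %d\n", waitqueue_adds(true));
		return 0;
	}

The pre-patch run takes two trips through the loop (two waitqueue adds, plus
the extra wakeup and lock traffic the message describes); the post-patch run
takes one.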
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/io_uring.c	11
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 16a323a9ff70..ff2bbac1a10f 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2481,7 +2481,14 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 	}
 	if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS))
 		return -ETIME;
-	return 1;
+
+	/*
+	 * Run task_work after scheduling. If we got woken because of
+	 * task_work being processed, run it now rather than let the caller
+	 * do another wait loop.
+	 */
+	ret = io_run_task_work_sig(ctx);
+	return ret < 0 ? ret : 1;
 }
 
 /*
@@ -2546,6 +2553,8 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 		prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
 						TASK_INTERRUPTIBLE);
 		ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
+		if (__io_cqring_events_user(ctx) >= min_events)
+			break;
 		cond_resched();
 	} while (ret > 0);
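
Putting the two hunks together, the post-patch wait loop condenses to roughly
the following; this is a simplified reading of the code above, with the
surrounding setup and teardown of io_cqring_wait() omitted.

	do {
		/* (re-)add ourselves to the cq_wait waitqueue */
		prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
						TASK_INTERRUPTIBLE);

		/*
		 * Sleep until woken or timed out, then run task_work
		 * before returning: a negative value means abort with
		 * that error, 1 means the wait may need to continue.
		 */
		ret = io_cqring_wait_schedule(ctx, &iowq, timeout);

		/*
		 * The task_work run inside the call above may already
		 * have posted enough completions; if so, break out
		 * without re-queueing on cq_wait.
		 */
		if (__io_cqring_events_user(ctx) >= min_events)
			break;
		cond_resched();
	} while (ret > 0);	/* a negative ret exits with the error */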