From 6971253f078766543c716db708ba2c787826690d Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Fri, 2 Dec 2022 17:47:23 +0000
Subject: io_uring: revise completion_lock locking

io_kill_timeouts() doesn't post any events but queues everything to
task_work. Locking there is needed to protect linked request traversal;
we should grab completion_lock directly instead of using the
io_cq_[un]lock helpers. The same goes for __io_req_find_next_prep().

Signed-off-by: Pavel Begunkov
Link: https://lore.kernel.org/r/88e75d481a65dc295cb59722bb1cf76402d1c06b.1670002973.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
---
 io_uring/io_uring.c | 16 ++++++++++++++--
 io_uring/io_uring.h | 11 -----------
 io_uring/timeout.c  |  8 ++++++--
 3 files changed, 20 insertions(+), 15 deletions(-)

(limited to 'io_uring')

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index b521186efa5c..698c54f951ea 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -597,6 +597,18 @@ static inline void __io_cq_unlock(struct io_ring_ctx *ctx)
 	spin_unlock(&ctx->completion_lock);
 }

+static inline void io_cq_lock(struct io_ring_ctx *ctx)
+	__acquires(ctx->completion_lock)
+{
+	spin_lock(&ctx->completion_lock);
+}
+
+static inline void io_cq_unlock(struct io_ring_ctx *ctx)
+	__releases(ctx->completion_lock)
+{
+	spin_unlock(&ctx->completion_lock);
+}
+
 /* keep it inlined for io_submit_flush_completions() */
 static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
 	__releases(ctx->completion_lock)
@@ -1074,9 +1086,9 @@ static void __io_req_find_next_prep(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;

-	io_cq_lock(ctx);
+	spin_lock(&ctx->completion_lock);
 	io_disarm_next(req);
-	io_cq_unlock_post(ctx);
+	spin_unlock(&ctx->completion_lock);
 }

 static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 1b2f0b2cc888..c117e029c8dc 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -87,17 +87,6 @@ static inline void io_req_task_work_add(struct io_kiocb *req)
 #define io_for_each_link(pos, head) \
 	for (pos = (head); pos; pos = pos->link)

-static inline void io_cq_lock(struct io_ring_ctx *ctx)
-	__acquires(ctx->completion_lock)
-{
-	spin_lock(&ctx->completion_lock);
-}
-
-static inline void io_cq_unlock(struct io_ring_ctx *ctx)
-{
-	spin_unlock(&ctx->completion_lock);
-}
-
 void io_cq_unlock_post(struct io_ring_ctx *ctx);

 static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
index 4c6a5666541c..eae005b2d1d2 100644
--- a/io_uring/timeout.c
+++ b/io_uring/timeout.c
@@ -624,7 +624,11 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
 	struct io_timeout *timeout, *tmp;
 	int canceled = 0;

-	io_cq_lock(ctx);
+	/*
+	 * completion_lock is needed for io_match_task(). Take it before
+	 * timeout_lock first to keep locking ordering.
+	 */
+	spin_lock(&ctx->completion_lock);
 	spin_lock_irq(&ctx->timeout_lock);
 	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
 		struct io_kiocb *req = cmd_to_io_kiocb(timeout);
@@ -634,6 +638,6 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
 		canceled++;
 	}
 	spin_unlock_irq(&ctx->timeout_lock);
-	io_cq_unlock_post(ctx);
+	spin_unlock(&ctx->completion_lock);
 	return canceled != 0;
 }
--
cgit v1.2.3
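
For readers outside the kernel tree, here is a minimal userspace sketch (not part of the commit) of the lock-ordering rule documented by the new comment in io_kill_timeouts(). The pthread mutexes named completion_lock and timeout_lock below are stand-ins for the kernel spinlocks, and kill_timeouts() is a hypothetical illustration only; the point is simply that every path needing both locks acquires completion_lock first, so concurrent paths cannot deadlock against one another.

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for ctx->completion_lock and ctx->timeout_lock. */
static pthread_mutex_t completion_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t timeout_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Mirrors the shape of io_kill_timeouts(): take completion_lock first
 * (it protects the request-matching walk), then timeout_lock, cancel
 * whatever matches, and release in the reverse order.
 */
static int kill_timeouts(void)
{
	int canceled = 0;

	pthread_mutex_lock(&completion_lock);	/* outer lock, always taken first */
	pthread_mutex_lock(&timeout_lock);	/* inner lock */

	/* ... walk the timeout list and cancel matching entries here ... */
	canceled++;

	pthread_mutex_unlock(&timeout_lock);
	pthread_mutex_unlock(&completion_lock);
	return canceled;
}

int main(void)
{
	printf("canceled %d timeout(s)\n", kill_timeouts());
	return 0;
}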