summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJens Axboe <axboe@kernel.dk>2019-11-12 08:15:53 -0700
committerJens Axboe <axboe@kernel.dk>2019-11-12 12:26:34 -0700
commit7c9e7f0fe0d8abf856a957c150c48778e75154c1 (patch)
treeaeeb068ae9c6a2d240a2a0ad86a48af7582fcdf3
parent960e432dfa5927892a9b170d14de874597b84849 (diff)
downloadlinux-7c9e7f0fe0d8abf856a957c150c48778e75154c1.tar.bz2
io_uring: fix potential deadlock in io_poll_wake()
We attempt to run the poll completion inline, but we're using trylock to do so. This avoids a deadlock since we're grabbing the locks in reverse order: at this point, we already hold the poll wq lock and we're trying to grab the completion lock, while the normal rules are the reverse of that order. IO completion for a timeout link will need to grab the completion lock, but that's not safe from this context. Put the completion under the completion_lock in io_poll_wake(), and mark the request as entering the completion with the completion_lock already held. Fixes: 2665abfd757f ("io_uring: add support for linked SQE timeouts") Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--fs/io_uring.c15
1 file changed, 12 insertions, 3 deletions
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 3c573f0578a8..247e5e1137a3 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -341,6 +341,7 @@ struct io_kiocb {
#define REQ_F_ISREG 2048 /* regular file */
#define REQ_F_MUST_PUNT 4096 /* must be punted even for NONBLOCK */
#define REQ_F_INFLIGHT 8192 /* on inflight list */
+#define REQ_F_COMP_LOCKED 16384 /* completion under lock */
u64 user_data;
u32 result;
u32 sequence;
@@ -931,14 +932,15 @@ static void io_free_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
*/
if (req->flags & REQ_F_FAIL_LINK) {
io_fail_links(req);
- } else if (req->flags & REQ_F_LINK_TIMEOUT) {
+ } else if ((req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_COMP_LOCKED)) ==
+ REQ_F_LINK_TIMEOUT) {
struct io_ring_ctx *ctx = req->ctx;
unsigned long flags;
/*
* If this is a timeout link, we could be racing with the
* timeout timer. Grab the completion lock for this case to
- * protection against that.
+ * protect against that.
*/
spin_lock_irqsave(&ctx->completion_lock, flags);
io_req_link_next(req, nxt);
@@ -2064,13 +2066,20 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
list_del_init(&poll->wait.entry);
+ /*
+ * Run completion inline if we can. We're using trylock here because
+ * we are violating the completion_lock -> poll wq lock ordering.
+ * If we have a link timeout we're going to need the completion_lock
+ * for finalizing the request, mark us as having grabbed that already.
+ */
if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
list_del(&req->list);
io_poll_complete(req, mask);
+ req->flags |= REQ_F_COMP_LOCKED;
+ io_put_req(req);
spin_unlock_irqrestore(&ctx->completion_lock, flags);
io_cqring_ev_posted(ctx);
- io_put_req(req);
} else {
io_queue_async_work(req);
}