author | Pavel Begunkov <asml.silence@gmail.com> | 2021-03-12 16:25:55 +0000
committer | Jens Axboe <axboe@kernel.dk> | 2021-03-12 09:42:56 -0700
commit | 58f99373834151e1ca7edc49bc5578d9d40db099 (patch)
tree | cd211fc27afd2ab9be6ce2cded3b03147bdbd8f3 /fs
parent | 521d6a737a31c08dbab204a95cd4fb5bee725f0f (diff)
download | linux-58f99373834151e1ca7edc49bc5578d9d40db099.tar.bz2
io_uring: fix OP_ASYNC_CANCEL across tasks
IORING_OP_ASYNC_CANCEL tries io-wq cancellation only for current task.
If it fails, go over tctx_list and try it for every single tctx.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
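
For context, here is a minimal userspace sketch (not part of this patch) of driving IORING_OP_ASYNC_CANCEL through liburing: a read is parked on an empty pipe and then cancelled by matching user_data. The pipe trick, the user_data values, and the queue depth are illustrative assumptions, not anything the commit prescribes.

```c
/*
 * Hypothetical sketch, not from the commit: park a read on an empty
 * pipe, then cancel it via IORING_OP_ASYNC_CANCEL. The cancel CQE
 * reports 0 on success, -ENOENT if nothing matched, or -EALREADY if
 * the target request was already executing.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char buf[64];
	int fds[2];

	if (pipe(fds) || io_uring_queue_init(8, &ring, 0))
		return 1;

	/* read from the empty pipe; this request blocks until cancelled */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fds[0], buf, sizeof(buf), 0);
	sqe->user_data = 1;

	/* cancel the read by matching its user_data */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_cancel(sqe, (void *)1, 0);
	sqe->user_data = 2;

	io_uring_submit(&ring);

	/* expect two CQEs: the cancelled read (-ECANCELED) and the cancel result */
	for (int i = 0; i < 2; i++) {
		if (io_uring_wait_cqe(&ring, &cqe))
			break;
		printf("user_data=%llu res=%d (%s)\n",
		       (unsigned long long)cqe->user_data, cqe->res,
		       cqe->res < 0 ? strerror(-cqe->res) : "ok");
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}
```

Before this patch, the cancellation lookup only consulted the submitting task's io-wq; with it, the slow path walks ctx->tctx_list so a request queued from another task sharing the ring can still be found and cancelled.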
Diffstat (limited to 'fs')
-rw-r--r-- | fs/io_uring.c | 41
1 file changed, 40 insertions(+), 1 deletion(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 70286b393c0e..a4bce17af506 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5667,8 +5667,47 @@ static int io_async_cancel_prep(struct io_kiocb *req,
 static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
+	u64 sqe_addr = req->cancel.addr;
+	struct io_tctx_node *node;
+	int ret;
 
-	io_async_find_and_cancel(ctx, req, req->cancel.addr, 0);
+	/* tasks should wait for their io-wq threads, so safe w/o sync */
+	ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
+	spin_lock_irq(&ctx->completion_lock);
+	if (ret != -ENOENT)
+		goto done;
+	ret = io_timeout_cancel(ctx, sqe_addr);
+	if (ret != -ENOENT)
+		goto done;
+	ret = io_poll_cancel(ctx, sqe_addr);
+	if (ret != -ENOENT)
+		goto done;
+	spin_unlock_irq(&ctx->completion_lock);
+
+	/* slow path, try all io-wq's */
+	io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
+	ret = -ENOENT;
+	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
+		struct io_uring_task *tctx = node->task->io_uring;
+
+		if (!tctx || !tctx->io_wq)
+			continue;
+		ret = io_async_cancel_one(tctx, req->cancel.addr, ctx);
+		if (ret != -ENOENT)
+			break;
+	}
+	io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
+
+	spin_lock_irq(&ctx->completion_lock);
+done:
+	io_cqring_fill_event(req, ret);
+	io_commit_cqring(ctx);
+	spin_unlock_irq(&ctx->completion_lock);
+	io_cqring_ev_posted(ctx);
+
+	if (ret < 0)
+		req_set_fail_links(req);
+	io_put_req(req);
 	return 0;
 }