From 583863ed918136412ddf14de2e12534f17cfdc6f Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Sun, 17 May 2020 09:20:00 -0600
Subject: io_uring: initialize ctx->sqo_wait earlier

Ensure that ctx->sqo_wait is initialized as soon as the ctx is allocated,
instead of deferring it to the offload setup. This fixes a syzbot reported
lockdep complaint, which is really due to trying to wake_up on an
uninitialized wait queue:

RSP: 002b:00007fffb1fb9aa8 EFLAGS: 00000246 ORIG_RAX: 00000000000001a9
RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 0000000000441319
RDX: 0000000000000001 RSI: 0000000020000140 RDI: 000000000000047b
RBP: 0000000000010475 R08: 0000000000000001 R09: 00000000004002c8
R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000402260
R13: 00000000004022f0 R14: 0000000000000000 R15: 0000000000000000
INFO: trying to register non-static key.
the code is fine but needs lockdep annotation.
turning off the locking correctness validator.
CPU: 1 PID: 7090 Comm: syz-executor222 Not tainted 5.7.0-rc1-next-20200415-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
 __dump_stack lib/dump_stack.c:77 [inline]
 dump_stack+0x188/0x20d lib/dump_stack.c:118
 assign_lock_key kernel/locking/lockdep.c:913 [inline]
 register_lock_class+0x1664/0x1760 kernel/locking/lockdep.c:1225
 __lock_acquire+0x104/0x4c50 kernel/locking/lockdep.c:4234
 lock_acquire+0x1f2/0x8f0 kernel/locking/lockdep.c:4934
 __raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:110 [inline]
 _raw_spin_lock_irqsave+0x8c/0xbf kernel/locking/spinlock.c:159
 __wake_up_common_lock+0xb4/0x130 kernel/sched/wait.c:122
 io_cqring_ev_posted+0xa5/0x1e0 fs/io_uring.c:1160
 io_poll_remove_all fs/io_uring.c:4357 [inline]
 io_ring_ctx_wait_and_kill+0x2bc/0x5a0 fs/io_uring.c:7305
 io_uring_create fs/io_uring.c:7843 [inline]
 io_uring_setup+0x115e/0x22b0 fs/io_uring.c:7870
 do_syscall_64+0xf6/0x7d0 arch/x86/entry/common.c:295
 entry_SYSCALL_64_after_hwframe+0x49/0xb3
RIP: 0033:0x441319
Code: e8 5c ae 02 00 48 83 c4 18 c3 0f 1f 80 00 00 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 0f 83 bb 0a fc ff c3 66 2e 0f 1f 84 00 00 00 00
RSP: 002b:00007fffb1fb9aa8 EFLAGS: 00000246 ORIG_RAX: 00000000000001a9

Reported-by: syzbot+8c91f5d054e998721c57@syzkaller.appspotmail.com
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'fs')

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 70ae7e840c85..79c90eb28c0d 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -924,6 +924,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 		goto err;
 
 	ctx->flags = p->flags;
+	init_waitqueue_head(&ctx->sqo_wait);
 	init_waitqueue_head(&ctx->cq_wait);
 	INIT_LIST_HEAD(&ctx->cq_overflow_list);
 	init_completion(&ctx->completions[0]);
@@ -6837,7 +6838,6 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
 {
 	int ret;
 
-	init_waitqueue_head(&ctx->sqo_wait);
 	mmgrab(current->mm);
 	ctx->sqo_mm = current->mm;
 
--
cgit v1.2.3
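The ordering rule behind this one-liner is general: any wait queue that an
error or teardown path might wake has to be initialized when the object is
allocated, not in a later setup stage that may never be reached. Below is a
minimal kernel-style sketch of that pattern; the names my_ctx, ctx_alloc()
and ctx_teardown() are hypothetical stand-ins, not the real io_uring
structures.

#include <linux/slab.h>
#include <linux/wait.h>

struct my_ctx {
	wait_queue_head_t sqo_wait;	/* woken from teardown/error paths */
};

static struct my_ctx *ctx_alloc(void)
{
	struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return NULL;
	/* init at allocation time: teardown may run before full setup */
	init_waitqueue_head(&ctx->sqo_wait);
	return ctx;
}

static void ctx_teardown(struct my_ctx *ctx)
{
	/* safe even if setup never got past ctx_alloc() */
	wake_up(&ctx->sqo_wait);
	kfree(ctx);
}

Waking an uninitialized wait_queue_head_t is what produced the "non-static
key" lockdep splat above, since the spinlock embedded in the wait queue had
never had its lock class set up.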
From 650b548129b60b0d23508351800108196f4aa89f Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Sun, 17 May 2020 14:02:11 +0300
Subject: io_uring: don't prepare DRAIN reqs twice

If req->io is not NULL, it's already prepared. Don't do it again,
it's dangerous.

Signed-off-by: Pavel Begunkov
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

(limited to 'fs')

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 79c90eb28c0d..51be07390634 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5014,12 +5014,13 @@ static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (!req_need_defer(req) && list_empty_careful(&ctx->defer_list))
 		return 0;
 
-	if (!req->io && io_alloc_async_ctx(req))
-		return -EAGAIN;
-
-	ret = io_req_defer_prep(req, sqe);
-	if (ret < 0)
-		return ret;
+	if (!req->io) {
+		if (io_alloc_async_ctx(req))
+			return -EAGAIN;
+		ret = io_req_defer_prep(req, sqe);
+		if (ret < 0)
+			return ret;
+	}
 
 	spin_lock_irq(&ctx->completion_lock);
 	if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
--
cgit v1.2.3

From bd2ab18a1d6267446eae1b47dd839050452bdf7f Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Sun, 17 May 2020 14:02:12 +0300
Subject: io_uring: fix FORCE_ASYNC req preparation

As with other non-inlined requests, allocate req->io for FORCE_ASYNC reqs
so they can be prepared properly.

Signed-off-by: Pavel Begunkov
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

(limited to 'fs')

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 51be07390634..f18cd98be783 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5608,9 +5608,15 @@ fail_req:
 			io_double_put_req(req);
 		}
 	} else if (req->flags & REQ_F_FORCE_ASYNC) {
-		ret = io_req_defer_prep(req, sqe);
-		if (unlikely(ret < 0))
-			goto fail_req;
+		if (!req->io) {
+			ret = -EAGAIN;
+			if (io_alloc_async_ctx(req))
+				goto fail_req;
+			ret = io_req_defer_prep(req, sqe);
+			if (unlikely(ret < 0))
+				goto fail_req;
+		}
+
 		/*
 		 * Never try inline submit of IOSQE_ASYNC is set, go straight
 		 * to async execution.
--
cgit v1.2.3

From 948a7749454b1712f1b2f2429f9493eb3e4a89b0 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Sun, 17 May 2020 14:21:38 -0600
Subject: io_uring: remove dead check in io_splice()

We checked for 'force_nonblock' higher up, so it's definitely false
at this point. Kill the check, it's a remnant of when we tried to do
inline splice without always punting to async context.

Fixes: 2fb3e82284fc ("io_uring: punt splice async because of inode mutex")
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

(limited to 'fs')

diff --git a/fs/io_uring.c b/fs/io_uring.c
index f18cd98be783..ecfd7f054ef6 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2772,11 +2772,8 @@ static int io_splice(struct io_kiocb *req, bool force_nonblock)
 	poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
 	poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
 
-	if (sp->len) {
+	if (sp->len)
 		ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
-		if (force_nonblock && ret == -EAGAIN)
-			return -EAGAIN;
-	}
 
 	io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
 	req->flags &= ~REQ_F_NEED_CLEANUP;
--
cgit v1.2.3
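From userspace, the paths the two Begunkov fixes and this splice cleanup
touch are driven by ordinary SQE flags: IOSQE_ASYNC makes a request
FORCE_ASYNC, and IORING_OP_SPLICE is always punted to async context, which
is why the force_nonblock branch above was dead. A rough liburing sketch of
that usage follows; the file names and the 4096-byte length are assumptions
for illustration, it needs a 5.7+ kernel, and error handling is trimmed.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int in, out, pipefd[2], i;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;
	in = open("infile", O_RDONLY);			/* assumed input file */
	out = open("outfile", O_WRONLY | O_CREAT, 0644);
	if (in < 0 || out < 0 || pipe(pipefd) < 0)
		return 1;

	/* file -> pipe; IOSQE_ASYNC marks the request force-async */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_splice(sqe, in, 0, pipefd[1], -1, 4096, 0);
	sqe->flags |= IOSQE_IO_LINK | IOSQE_ASYNC;

	/* pipe -> file, linked so it runs after the first splice */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_splice(sqe, pipefd[0], -1, out, 0, 4096, 0);

	io_uring_submit(&ring);
	for (i = 0; i < 2; i++) {
		io_uring_wait_cqe(&ring, &cqe);
		printf("splice %d: res=%d\n", i, cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}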
From e3aabf9554fd04eb14cd44ae7583fc9d40edd250 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Mon, 18 May 2020 11:04:17 -0600
Subject: io_uring: cancel work if task_work_add() fails

We currently move it to the io_wqe_manager for execution, but we cannot
safely do so as we may lack some of the state to execute it out of
context. As we cancel work anyway when the ring/task exits, just mark
this request as canceled and io_async_task_func() will do the right
thing.

Fixes: aa96bf8a9ee3 ("io_uring: use io-wq manager as backup task if task is exiting")
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

(limited to 'fs')

diff --git a/fs/io_uring.c b/fs/io_uring.c
index ecfd7f054ef6..29aa53000def 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4135,12 +4135,14 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 	req->result = mask;
 	init_task_work(&req->task_work, func);
 	/*
-	 * If this fails, then the task is exiting. Punt to one of the io-wq
-	 * threads to ensure the work gets run, we can't always rely on exit
-	 * cancelation taking care of this.
+	 * If this fails, then the task is exiting. When a task exits, the
+	 * work gets canceled, so just cancel this request as well instead
+	 * of executing it. We can't safely execute it anyway, as we may not
+	 * have the needed state needed for it anyway.
 	 */
 	ret = task_work_add(tsk, &req->task_work, true);
 	if (unlikely(ret)) {
+		WRITE_ONCE(poll->canceled, true);
 		tsk = io_wq_get_task(req->ctx->io_wq);
 		task_work_add(tsk, &req->task_work, true);
 	}
--
cgit v1.2.3

From 4f4eeba87cc731b200bff9372d14a80f5996b277 Mon Sep 17 00:00:00 2001
From: Bijan Mottahedeh
Date: Tue, 19 May 2020 14:52:49 -0700
Subject: io_uring: don't use kiocb.private to store buf_index

kiocb.private is used in iomap_dio_rw() so store buf_index separately.

Signed-off-by: Bijan Mottahedeh

Move 'buf_index' to a hole in io_kiocb.

Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

(limited to 'fs')

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 29aa53000def..d43f7e98e07a 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -619,6 +619,8 @@ struct io_kiocb {
 	bool				needs_fixed_file;
 	u8				opcode;
 
+	u16				buf_index;
+
 	struct io_ring_ctx	*ctx;
 	struct list_head	list;
 	unsigned int		flags;
@@ -2101,9 +2103,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	req->rw.addr = READ_ONCE(sqe->addr);
 	req->rw.len = READ_ONCE(sqe->len);
-	/* we own ->private, reuse it for the buffer index / buffer ID */
-	req->rw.kiocb.private = (void *) (unsigned long)
-					READ_ONCE(sqe->buf_index);
+	req->buf_index = READ_ONCE(sqe->buf_index);
 	return 0;
 }
@@ -2146,7 +2146,7 @@ static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
 	struct io_ring_ctx *ctx = req->ctx;
 	size_t len = req->rw.len;
 	struct io_mapped_ubuf *imu;
-	unsigned index, buf_index;
+	u16 index, buf_index;
 	size_t offset;
 	u64 buf_addr;
 
@@ -2154,7 +2154,7 @@ static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
 	if (unlikely(!ctx->user_bufs))
 		return -EFAULT;
 
-	buf_index = (unsigned long) req->rw.kiocb.private;
+	buf_index = req->buf_index;
 	if (unlikely(buf_index >= ctx->nr_user_bufs))
 		return -EFAULT;
 
@@ -2270,10 +2270,10 @@ static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
 					bool needs_lock)
 {
 	struct io_buffer *kbuf;
-	int bgid;
+	u16 bgid;
 
 	kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
-	bgid = (int) (unsigned long) req->rw.kiocb.private;
+	bgid = req->buf_index;
 	kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
 	if (IS_ERR(kbuf))
 		return kbuf;
@@ -2364,7 +2364,7 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
 	}
 
 	/* buffer index only valid with fixed read/write, or buffer select */
-	if (req->rw.kiocb.private && !(req->flags & REQ_F_BUFFER_SELECT))
+	if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
 		return -EINVAL;
 
 	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
--
cgit v1.2.3
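For context, both consumers of that field originate in userspace: a
fixed-buffer read passes a registered buffer slot in sqe->buf_index, while
buffer select passes a provided-buffer group id in sqe->buf_group (the same
wire field). A rough liburing sketch of the two follows; the sizes and the
group id 7 are made up, error handling is omitted, and in real use the
provide-buffers request has to complete before the read that selects from
the group.

#include <stdlib.h>
#include <sys/uio.h>
#include <liburing.h>

static void prep_buffer_examples(struct io_uring *ring, int fd)
{
	struct io_uring_sqe *sqe;
	struct iovec iov;
	void *fixed_buf, *pool;

	/* fixed buffer: register slot 0, then read into it by index */
	posix_memalign(&fixed_buf, 4096, 4096);
	iov.iov_base = fixed_buf;
	iov.iov_len = 4096;
	io_uring_register_buffers(ring, &iov, 1);

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read_fixed(sqe, fd, fixed_buf, 4096, 0, 0 /* buf_index */);

	/* buffer select: provide 4 buffers as group 7 ... */
	posix_memalign(&pool, 4096, 4 * 4096);
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_provide_buffers(sqe, pool, 4096, 4, 7, 0);

	/* ... then let the kernel pick one of them for this read */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read(sqe, fd, NULL, 4096, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = 7;
}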
From b532576ed39efe3b351ae8320b2ab67a4c4c3719 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Tue, 19 May 2020 21:20:27 -0600
Subject: io_uring: don't add non-IO requests to iopoll pending list

We normally disable any commands that aren't specifically poll commands
for a ring that is setup for polling, but we do allow buffer provide and
remove commands to support buffer selection for polled IO. Once a request
is issued, we add it to the poll list to poll for completion. But we
should not do that for non-IO commands, as those requests complete inline
immediately and aren't pollable. If we do, we can leave requests on the
iopoll list after they are freed.

Fixes: ddf0322db79c ("io_uring: add IORING_OP_PROVIDE_BUFFERS")
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'fs')

diff --git a/fs/io_uring.c b/fs/io_uring.c
index d43f7e98e07a..f9f79ac5ac7b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5306,7 +5306,8 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	if (ret)
 		return ret;
 
-	if (ctx->flags & IORING_SETUP_IOPOLL) {
+	/* If the op doesn't have a file, we're not polling for it */
+	if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
 		const bool in_async = io_wq_current_is_worker();
 
 		if (req->result == -EAGAIN)
--
cgit v1.2.3
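The userspace combination this guards is a polled ring that also carries
non-IO requests. A rough liburing sketch of that mix follows; the device
path, buffer sizes and group id are assumptions for illustration and error
handling is trimmed. The provide-buffers request completes inline and has
no file to poll, while the O_DIRECT read is reaped through the iopoll loop.

#include <fcntl.h>
#include <stdlib.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct io_uring_params p = { .flags = IORING_SETUP_IOPOLL };
	void *pool;
	int fd;

	if (io_uring_queue_init_params(8, &ring, &p) < 0)
		return 1;
	fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);	/* assumed device */
	if (fd < 0)
		return 1;

	/* non-IO request on an IOPOLL ring: nothing to poll for */
	posix_memalign(&pool, 4096, 4 * 4096);
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_provide_buffers(sqe, pool, 4096, 4, 1, 0);
	io_uring_submit(&ring);
	io_uring_wait_cqe(&ring, &cqe);
	io_uring_cqe_seen(&ring, cqe);

	/* polled O_DIRECT read that selects one of the provided buffers */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, NULL, 4096, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = 1;
	io_uring_submit(&ring);
	io_uring_wait_cqe(&ring, &cqe);
	io_uring_cqe_seen(&ring, cqe);

	io_uring_queue_exit(&ring);
	return 0;
}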
From d4ae271dfaae2a5f41c015f2f20d62a1deeec734 Mon Sep 17 00:00:00 2001
From: Xiaoguang Wang
Date: Wed, 20 May 2020 21:24:35 +0800
Subject: io_uring: reset -EBUSY error when io sq thread is waken up

In io_sq_thread(), currently if we get an -EBUSY error and go to sleep,
we won't clear it again, so io_sq_thread() never gets a chance to submit
sqes again. The test program test.c below can reveal this bug:

int main(int argc, char *argv[])
{
	struct io_uring ring;
	int i, fd, ret;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct iovec *iovecs;
	void *buf;
	struct io_uring_params p;

	if (argc < 2) {
		printf("%s: file\n", argv[0]);
		return 1;
	}

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_SQPOLL;
	ret = io_uring_queue_init_params(4, &ring, &p);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %s\n", strerror(-ret));
		return 1;
	}

	fd = open(argv[1], O_RDONLY | O_DIRECT);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	iovecs = calloc(10, sizeof(struct iovec));
	for (i = 0; i < 10; i++) {
		if (posix_memalign(&buf, 4096, 4096))
			return 1;
		iovecs[i].iov_base = buf;
		iovecs[i].iov_len = 4096;
	}

	ret = io_uring_register_files(&ring, &fd, 1);
	if (ret < 0) {
		fprintf(stderr, "%s: register %d\n", __FUNCTION__, ret);
		return ret;
	}

	for (i = 0; i < 10; i++) {
		sqe = io_uring_get_sqe(&ring);
		if (!sqe)
			break;

		io_uring_prep_readv(sqe, 0, &iovecs[i], 1, 0);
		sqe->flags |= IOSQE_FIXED_FILE;

		ret = io_uring_submit(&ring);
		sleep(1);
		printf("submit %d\n", i);
	}

	for (i = 0; i < 10; i++) {
		io_uring_wait_cqe(&ring, &cqe);
		printf("receive: %d\n", i);
		if (cqe->res != 4096) {
			fprintf(stderr, "ret=%d, wanted 4096\n", cqe->res);
			ret = 1;
		}
		io_uring_cqe_seen(&ring, cqe);
	}

	close(fd);
	io_uring_queue_exit(&ring);
	return 0;
}

sudo ./test testfile

The above command hangs on the tenth request. To fix this bug, reset the
variable 'ret' to zero when the io sq_thread is woken up.

Suggested-by: Jens Axboe
Signed-off-by: Xiaoguang Wang
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'fs')

diff --git a/fs/io_uring.c b/fs/io_uring.c
index f9f79ac5ac7b..bb25e3997d41 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6032,6 +6032,7 @@ static int io_sq_thread(void *data)
 				finish_wait(&ctx->sqo_wait, &wait);
 
 				ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
+				ret = 0;
 				continue;
 			}
 			finish_wait(&ctx->sqo_wait, &wait);
--
cgit v1.2.3