From d195a66e367b3d24fdd3c3565f37ab7c6882b9d2 Mon Sep 17 00:00:00 2001 From: Brian Gianforcaro Date: Fri, 13 Dec 2019 03:09:50 -0800 Subject: io_uring: fix stale comment and a few typos - Fix a few typos found while reading the code. - Fix stale io_get_sqring comment referencing s->sqe, the 's' parameter was renamed to 'req', but the comment still holds. Signed-off-by: Brian Gianforcaro Signed-off-by: Jens Axboe --- fs/io-wq.c | 2 +- fs/io_uring.c | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/io-wq.c b/fs/io-wq.c index 90c4978781fb..11e80b7252a8 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -948,7 +948,7 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe, /* * Now check if a free (going busy) or busy worker has the work * currently running. If we find it there, we'll return CANCEL_RUNNING - * as an indication that we attempte to signal cancellation. The + * as an indication that we attempt to signal cancellation. The * completion will run normally in this case. */ rcu_read_lock(); diff --git a/fs/io_uring.c b/fs/io_uring.c index 9b1833fedc5c..04cff3870b3b 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1178,7 +1178,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, } /* - * Poll for a mininum of 'min' events. Note that if min == 0 we consider that a + * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a * non-spinning poll check - we'll still enter the driver poll loop, but only * as a non-spinning completion check. */ @@ -2573,7 +2573,7 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer) /* * Adjust the reqs sequence before the current one because it - * will consume a slot in the cq_ring and the the cq_tail + * will consume a slot in the cq_ring and the cq_tail * pointer will be increased, otherwise other timeout reqs may * return in advance without waiting for enough wait_nr. */ @@ -3430,7 +3430,7 @@ static void io_commit_sqring(struct io_ring_ctx *ctx) } /* - * Fetch an sqe, if one is available. Note that s->sqe will point to memory + * Fetch an sqe, if one is available. Note that req->sqe will point to memory * that is mapped by userspace. This means that care needs to be taken to * ensure that reads are stable, as we cannot rely on userspace always * being a good citizen. If members of the sqe are validated and then later @@ -3694,7 +3694,7 @@ static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush) struct io_ring_ctx *ctx = iowq->ctx; /* - * Wake up if we have enough events, or if a timeout occured since we + * Wake up if we have enough events, or if a timeout occurred since we * started waiting. For timeouts, we always want to return to userspace, * regardless of event count. */ -- cgit v1.2.3 From 0b416c3e1345fd696db4c422643468d844410877 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sun, 15 Dec 2019 10:57:46 -0700 Subject: io_uring: fix sporadic -EFAULT from IORING_OP_RECVMSG MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If we have to punt the recvmsg to async context, we copy all the context. But since the iovec used can be either on-stack (if small) or dynamically allocated, if it's on-stack, then we need to ensure we reset the iov pointer. If we don't, then we're reusing old stack data, and that can lead to -EFAULTs if things get overwritten. Ensure we retain the right pointers for the iov, and free it as well if we end up having to go beyond UIO_FASTIOV number of vectors. 
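The pointer hazard can be sketched in isolation. The snippet below is illustrative only and uses hypothetical names (struct msg_ctx, FAST_IOV, copy_msg_ctx) rather than the io_uring structures touched by the diff that follows; it shows why a copied context whose iov pointer referenced the source's inline array has to be re-pointed at its own inline array, and why only an array allocated beyond the inline capacity may be freed.

#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

#define FAST_IOV 8

struct msg_ctx {
	struct iovec fast_iov[FAST_IOV];	/* inline (possibly on-stack) storage */
	struct iovec *iov;			/* == fast_iov, or a heap allocation */
};

static void copy_msg_ctx(struct msg_ctx *dst, const struct msg_ctx *src)
{
	memcpy(dst, src, sizeof(*dst));
	/*
	 * After the copy, dst->iov still points into the *source* context.
	 * If the source used its inline array (which may live on a stack
	 * frame that is about to go away), re-point dst at its own copy;
	 * otherwise later use reads stale stack data.
	 */
	if (src->iov == src->fast_iov)
		dst->iov = dst->fast_iov;
}

static void free_msg_ctx(struct msg_ctx *ctx)
{
	/* only an array allocated beyond FAST_IOV entries was dynamically allocated */
	if (ctx->iov != ctx->fast_iov)
		free(ctx->iov);
}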
Fixes: 03b1230ca12a ("io_uring: ensure async punted sendmsg/recvmsg requests copy data") Reported-by: 李通洲 Signed-off-by: Jens Axboe --- fs/io_uring.c | 40 ++++++++++++++++++++++++++-------------- 1 file changed, 26 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index 04cff3870b3b..0e01cdc8a120 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2041,6 +2041,7 @@ static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, struct io_kiocb **nxt, bool force_nonblock) { #if defined(CONFIG_NET) + struct io_async_msghdr *kmsg = NULL; struct socket *sock; int ret; @@ -2051,7 +2052,6 @@ static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (sock) { struct io_async_ctx io, *copy; struct sockaddr_storage addr; - struct msghdr *kmsg; unsigned flags; flags = READ_ONCE(sqe->msg_flags); @@ -2061,17 +2061,21 @@ static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, flags |= MSG_DONTWAIT; if (req->io) { - kmsg = &req->io->msg.msg; - kmsg->msg_name = &addr; + kmsg = &req->io->msg; + kmsg->msg.msg_name = &addr; + /* if iov is set, it's allocated already */ + if (!kmsg->iov) + kmsg->iov = kmsg->fast_iov; + kmsg->msg.msg_iter.iov = kmsg->iov; } else { - kmsg = &io.msg.msg; - kmsg->msg_name = &addr; + kmsg = &io.msg; + kmsg->msg.msg_name = &addr; ret = io_sendmsg_prep(req, &io); if (ret) goto out; } - ret = __sys_sendmsg_sock(sock, kmsg, flags); + ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags); if (force_nonblock && ret == -EAGAIN) { copy = kmalloc(sizeof(*copy), GFP_KERNEL); if (!copy) { @@ -2082,13 +2086,15 @@ static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, req->io = copy; memcpy(&req->io->sqe, req->sqe, sizeof(*req->sqe)); req->sqe = &req->io->sqe; - return ret; + return -EAGAIN; } if (ret == -ERESTARTSYS) ret = -EINTR; } out: + if (kmsg && kmsg->iov != kmsg->fast_iov) + kfree(kmsg->iov); io_cqring_add_event(req, ret); if (ret < 0) req_set_fail_links(req); @@ -2120,6 +2126,7 @@ static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, struct io_kiocb **nxt, bool force_nonblock) { #if defined(CONFIG_NET) + struct io_async_msghdr *kmsg = NULL; struct socket *sock; int ret; @@ -2131,7 +2138,6 @@ static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, struct user_msghdr __user *msg; struct io_async_ctx io, *copy; struct sockaddr_storage addr; - struct msghdr *kmsg; unsigned flags; flags = READ_ONCE(sqe->msg_flags); @@ -2143,17 +2149,21 @@ static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, msg = (struct user_msghdr __user *) (unsigned long) READ_ONCE(sqe->addr); if (req->io) { - kmsg = &req->io->msg.msg; - kmsg->msg_name = &addr; + kmsg = &req->io->msg; + kmsg->msg.msg_name = &addr; + /* if iov is set, it's allocated already */ + if (!kmsg->iov) + kmsg->iov = kmsg->fast_iov; + kmsg->msg.msg_iter.iov = kmsg->iov; } else { - kmsg = &io.msg.msg; - kmsg->msg_name = &addr; + kmsg = &io.msg; + kmsg->msg.msg_name = &addr; ret = io_recvmsg_prep(req, &io); if (ret) goto out; } - ret = __sys_recvmsg_sock(sock, kmsg, msg, io.msg.uaddr, flags); + ret = __sys_recvmsg_sock(sock, &kmsg->msg, msg, kmsg->uaddr, flags); if (force_nonblock && ret == -EAGAIN) { copy = kmalloc(sizeof(*copy), GFP_KERNEL); if (!copy) { @@ -2164,13 +2174,15 @@ static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, req->io = copy; memcpy(&req->io->sqe, req->sqe, sizeof(*req->sqe)); req->sqe = &req->io->sqe; - return ret; + return 
-EAGAIN; } if (ret == -ERESTARTSYS) ret = -EINTR; } out: + if (kmsg && kmsg->iov != kmsg->fast_iov) + kfree(kmsg->iov); io_cqring_add_event(req, ret); if (ret < 0) req_set_fail_links(req); -- cgit v1.2.3 From 525b305d61ede489ce2118b000a5dabd6d869dac Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 17 Dec 2019 14:13:37 -0700 Subject: io-wq: re-add io_wq_current_is_worker() This reverts commit 8cdda87a4414, we now have several use csaes for this helper. Reinstate it. Signed-off-by: Jens Axboe --- fs/io-wq.h | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/io-wq.h b/fs/io-wq.h index fb993b2bd0ef..3f5e356de980 100644 --- a/fs/io-wq.h +++ b/fs/io-wq.h @@ -120,6 +120,10 @@ static inline void io_wq_worker_sleeping(struct task_struct *tsk) static inline void io_wq_worker_running(struct task_struct *tsk) { } -#endif /* CONFIG_IO_WQ */ +#endif -#endif /* INTERNAL_IO_WQ_H */ +static inline bool io_wq_current_is_worker(void) +{ + return in_task() && (current->flags & PF_IO_WORKER); +} +#endif -- cgit v1.2.3 From b7bb4f7da0a1a92f142697f1c9ce335e7a44f4b1 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sun, 15 Dec 2019 22:13:43 -0700 Subject: io_uring: fix pre-prepped issue with force_nonblock == true Some of these code paths assume that any force_nonblock == true issue is not prepped, but that's not true if we did prep as part of link setup earlier. Check if we already have an async context allocate before setting up a new one. Cleanup the async context setup in general, we have a lot of duplicated code there. Fixes: 03b1230ca12a ("io_uring: ensure async punted sendmsg/recvmsg requests copy data") Fixes: f67676d160c6 ("io_uring: ensure async punted read/write requests copy iovec") Signed-off-by: Jens Axboe --- fs/io_uring.c | 175 ++++++++++++++++++++++++++++++++-------------------------- 1 file changed, 98 insertions(+), 77 deletions(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index 0e01cdc8a120..582c7c19bdd7 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1701,7 +1701,7 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb, return ret; } -static void io_req_map_io(struct io_kiocb *req, ssize_t io_size, +static void io_req_map_rw(struct io_kiocb *req, ssize_t io_size, struct iovec *iovec, struct iovec *fast_iov, struct iov_iter *iter) { @@ -1715,19 +1715,39 @@ static void io_req_map_io(struct io_kiocb *req, ssize_t io_size, } } -static int io_setup_async_io(struct io_kiocb *req, ssize_t io_size, - struct iovec *iovec, struct iovec *fast_iov, - struct iov_iter *iter) +static int io_alloc_async_ctx(struct io_kiocb *req) { req->io = kmalloc(sizeof(*req->io), GFP_KERNEL); if (req->io) { - io_req_map_io(req, io_size, iovec, fast_iov, iter); memcpy(&req->io->sqe, req->sqe, sizeof(req->io->sqe)); req->sqe = &req->io->sqe; return 0; } - return -ENOMEM; + return 1; +} + +static void io_rw_async(struct io_wq_work **workptr) +{ + struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work); + struct iovec *iov = NULL; + + if (req->io->rw.iov != req->io->rw.fast_iov) + iov = req->io->rw.iov; + io_wq_submit_work(workptr); + kfree(iov); +} + +static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size, + struct iovec *iovec, struct iovec *fast_iov, + struct iov_iter *iter) +{ + if (!req->io && io_alloc_async_ctx(req)) + return -ENOMEM; + + io_req_map_rw(req, io_size, iovec, fast_iov, iter); + req->work.func = io_rw_async; + return 0; } static int io_read_prep(struct io_kiocb *req, struct iovec **iovec, @@ 
-1806,7 +1826,7 @@ static int io_read(struct io_kiocb *req, struct io_kiocb **nxt, kiocb_done(kiocb, ret2, nxt, req->in_async); } else { copy_iov: - ret = io_setup_async_io(req, io_size, iovec, + ret = io_setup_async_rw(req, io_size, iovec, inline_vecs, &iter); if (ret) goto out_free; @@ -1814,7 +1834,8 @@ copy_iov: } } out_free: - kfree(iovec); + if (!io_wq_current_is_worker()) + kfree(iovec); return ret; } @@ -1900,7 +1921,7 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt, kiocb_done(kiocb, ret2, nxt, req->in_async); } else { copy_iov: - ret = io_setup_async_io(req, io_size, iovec, + ret = io_setup_async_rw(req, io_size, iovec, inline_vecs, &iter); if (ret) goto out_free; @@ -1908,7 +1929,8 @@ copy_iov: } } out_free: - kfree(iovec); + if (!io_wq_current_is_worker()) + kfree(iovec); return ret; } @@ -2021,6 +2043,19 @@ static int io_sync_file_range(struct io_kiocb *req, return 0; } +#if defined(CONFIG_NET) +static void io_sendrecv_async(struct io_wq_work **workptr) +{ + struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work); + struct iovec *iov = NULL; + + if (req->io->rw.iov != req->io->rw.fast_iov) + iov = req->io->msg.iov; + io_wq_submit_work(workptr); + kfree(iov); +} +#endif + static int io_sendmsg_prep(struct io_kiocb *req, struct io_async_ctx *io) { #if defined(CONFIG_NET) @@ -2050,7 +2085,7 @@ static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, sock = sock_from_file(req->file, &ret); if (sock) { - struct io_async_ctx io, *copy; + struct io_async_ctx io; struct sockaddr_storage addr; unsigned flags; @@ -2077,15 +2112,12 @@ static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags); if (force_nonblock && ret == -EAGAIN) { - copy = kmalloc(sizeof(*copy), GFP_KERNEL); - if (!copy) { - ret = -ENOMEM; - goto out; - } - memcpy(©->msg, &io.msg, sizeof(copy->msg)); - req->io = copy; - memcpy(&req->io->sqe, req->sqe, sizeof(*req->sqe)); - req->sqe = &req->io->sqe; + if (req->io) + return -EAGAIN; + if (io_alloc_async_ctx(req)) + return -ENOMEM; + memcpy(&req->io->msg, &io.msg, sizeof(io.msg)); + req->work.func = io_sendrecv_async; return -EAGAIN; } if (ret == -ERESTARTSYS) @@ -2093,7 +2125,7 @@ static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, } out: - if (kmsg && kmsg->iov != kmsg->fast_iov) + if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov) kfree(kmsg->iov); io_cqring_add_event(req, ret); if (ret < 0) @@ -2136,7 +2168,7 @@ static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, sock = sock_from_file(req->file, &ret); if (sock) { struct user_msghdr __user *msg; - struct io_async_ctx io, *copy; + struct io_async_ctx io; struct sockaddr_storage addr; unsigned flags; @@ -2165,15 +2197,12 @@ static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, ret = __sys_recvmsg_sock(sock, &kmsg->msg, msg, kmsg->uaddr, flags); if (force_nonblock && ret == -EAGAIN) { - copy = kmalloc(sizeof(*copy), GFP_KERNEL); - if (!copy) { - ret = -ENOMEM; - goto out; - } - memcpy(copy, &io, sizeof(*copy)); - req->io = copy; - memcpy(&req->io->sqe, req->sqe, sizeof(*req->sqe)); - req->sqe = &req->io->sqe; + if (req->io) + return -EAGAIN; + if (io_alloc_async_ctx(req)) + return -ENOMEM; + memcpy(&req->io->msg, &io.msg, sizeof(io.msg)); + req->work.func = io_sendrecv_async; return -EAGAIN; } if (ret == -ERESTARTSYS) @@ -2181,7 +2210,7 @@ static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, } 
out: - if (kmsg && kmsg->iov != kmsg->fast_iov) + if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov) kfree(kmsg->iov); io_cqring_add_event(req, ret); if (ret < 0) @@ -2272,15 +2301,13 @@ static int io_connect(struct io_kiocb *req, const struct io_uring_sqe *sqe, ret = __sys_connect_file(req->file, &io->connect.address, addr_len, file_flags); if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) { - io = kmalloc(sizeof(*io), GFP_KERNEL); - if (!io) { + if (req->io) + return -EAGAIN; + if (io_alloc_async_ctx(req)) { ret = -ENOMEM; goto out; } - memcpy(&io->connect, &__io.connect, sizeof(io->connect)); - req->io = io; - memcpy(&io->sqe, req->sqe, sizeof(*req->sqe)); - req->sqe = &io->sqe; + memcpy(&req->io->connect, &__io.connect, sizeof(__io.connect)); return -EAGAIN; } if (ret == -ERESTARTSYS) @@ -2511,7 +2538,6 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (!poll->file) return -EBADF; - req->io = NULL; INIT_IO_WORK(&req->work, io_poll_complete_work); events = READ_ONCE(sqe->poll_events); poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP; @@ -2692,7 +2718,6 @@ static int io_timeout_prep(struct io_kiocb *req, struct io_async_ctx *io, data->mode = HRTIMER_MODE_REL; hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode); - req->io = io; return 0; } @@ -2701,22 +2726,16 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe) unsigned count; struct io_ring_ctx *ctx = req->ctx; struct io_timeout_data *data; - struct io_async_ctx *io; struct list_head *entry; unsigned span = 0; + int ret; - io = req->io; - if (!io) { - int ret; - - io = kmalloc(sizeof(*io), GFP_KERNEL); - if (!io) + if (!req->io) { + if (io_alloc_async_ctx(req)) return -ENOMEM; - ret = io_timeout_prep(req, io, false); - if (ret) { - kfree(io); + ret = io_timeout_prep(req, req->io, false); + if (ret) return ret; - } } data = &req->io->timeout; @@ -2858,23 +2877,35 @@ static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe, return 0; } -static int io_req_defer_prep(struct io_kiocb *req, struct io_async_ctx *io) +static int io_req_defer_prep(struct io_kiocb *req) { struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; + struct io_async_ctx *io = req->io; struct iov_iter iter; ssize_t ret; - memcpy(&io->sqe, req->sqe, sizeof(io->sqe)); - req->sqe = &io->sqe; - switch (io->sqe.opcode) { case IORING_OP_READV: case IORING_OP_READ_FIXED: + /* ensure prep does right import */ + req->io = NULL; ret = io_read_prep(req, &iovec, &iter, true); + req->io = io; + if (ret < 0) + break; + io_req_map_rw(req, ret, iovec, inline_vecs, &iter); + ret = 0; break; case IORING_OP_WRITEV: case IORING_OP_WRITE_FIXED: + /* ensure prep does right import */ + req->io = NULL; ret = io_write_prep(req, &iovec, &iter, true); + req->io = io; + if (ret < 0) + break; + io_req_map_rw(req, ret, iovec, inline_vecs, &iter); + ret = 0; break; case IORING_OP_SENDMSG: ret = io_sendmsg_prep(req, io); @@ -2886,41 +2917,34 @@ static int io_req_defer_prep(struct io_kiocb *req, struct io_async_ctx *io) ret = io_connect_prep(req, io); break; case IORING_OP_TIMEOUT: - return io_timeout_prep(req, io, false); + ret = io_timeout_prep(req, io, false); + break; case IORING_OP_LINK_TIMEOUT: - return io_timeout_prep(req, io, true); + ret = io_timeout_prep(req, io, true); + break; default: - req->io = io; - return 0; + ret = 0; + break; } - if (ret < 0) - return ret; - - req->io = io; - io_req_map_io(req, ret, iovec, inline_vecs, &iter); - return 0; + return 
ret; } static int io_req_defer(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; - struct io_async_ctx *io; int ret; /* Still need defer if there is pending req in defer list. */ if (!req_need_defer(req) && list_empty(&ctx->defer_list)) return 0; - io = kmalloc(sizeof(*io), GFP_KERNEL); - if (!io) + if (io_alloc_async_ctx(req)) return -EAGAIN; - ret = io_req_defer_prep(req, io); - if (ret < 0) { - kfree(io); + ret = io_req_defer_prep(req); + if (ret < 0) return ret; - } spin_lock_irq(&ctx->completion_lock); if (!req_need_defer(req) && list_empty(&ctx->defer_list)) { @@ -3366,7 +3390,6 @@ err_req: */ if (*link) { struct io_kiocb *prev = *link; - struct io_async_ctx *io; if (req->sqe->flags & IOSQE_IO_DRAIN) (*link)->flags |= REQ_F_DRAIN_LINK | REQ_F_IO_DRAIN; @@ -3374,15 +3397,13 @@ err_req: if (req->sqe->flags & IOSQE_IO_HARDLINK) req->flags |= REQ_F_HARDLINK; - io = kmalloc(sizeof(*io), GFP_KERNEL); - if (!io) { + if (io_alloc_async_ctx(req)) { ret = -EAGAIN; goto err_req; } - ret = io_req_defer_prep(req, io); + ret = io_req_defer_prep(req); if (ret) { - kfree(io); /* fail even hard links since we don't submit */ prev->flags |= REQ_F_FAIL_LINK; goto err_req; -- cgit v1.2.3 From fc4df999e24fc3006441acd4ce6250e6a76ac851 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 10 Dec 2019 14:38:45 -0700 Subject: io_uring: remove 'sqe' parameter to the OP helpers that take it We pass in req->sqe for all of them, no need to pass it in as the request is always passed in. This is a necessary prep patch to be able to cleanup/fix the request prep path. Signed-off-by: Jens Axboe --- fs/io_uring.c | 80 ++++++++++++++++++++++++++++++++--------------------------- 1 file changed, 44 insertions(+), 36 deletions(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index 582c7c19bdd7..0298dd0abac0 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1949,8 +1949,9 @@ static int io_nop(struct io_kiocb *req) return 0; } -static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe) +static int io_prep_fsync(struct io_kiocb *req) { + const struct io_uring_sqe *sqe = req->sqe; struct io_ring_ctx *ctx = req->ctx; if (!req->file) @@ -1964,9 +1965,10 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe) return 0; } -static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe, - struct io_kiocb **nxt, bool force_nonblock) +static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt, + bool force_nonblock) { + const struct io_uring_sqe *sqe = req->sqe; loff_t sqe_off = READ_ONCE(sqe->off); loff_t sqe_len = READ_ONCE(sqe->len); loff_t end = sqe_off + sqe_len; @@ -1977,7 +1979,7 @@ static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (unlikely(fsync_flags & ~IORING_FSYNC_DATASYNC)) return -EINVAL; - ret = io_prep_fsync(req, sqe); + ret = io_prep_fsync(req); if (ret) return ret; @@ -1996,8 +1998,9 @@ static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe, return 0; } -static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe) +static int io_prep_sfr(struct io_kiocb *req) { + const struct io_uring_sqe *sqe = req->sqe; struct io_ring_ctx *ctx = req->ctx; int ret = 0; @@ -2012,17 +2015,16 @@ static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe) return ret; } -static int io_sync_file_range(struct io_kiocb *req, - const struct io_uring_sqe *sqe, - struct io_kiocb **nxt, +static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt, bool force_nonblock) 
{ + const struct io_uring_sqe *sqe = req->sqe; loff_t sqe_off; loff_t sqe_len; unsigned flags; int ret; - ret = io_prep_sfr(req, sqe); + ret = io_prep_sfr(req); if (ret) return ret; @@ -2072,10 +2074,11 @@ static int io_sendmsg_prep(struct io_kiocb *req, struct io_async_ctx *io) #endif } -static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, - struct io_kiocb **nxt, bool force_nonblock) +static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt, + bool force_nonblock) { #if defined(CONFIG_NET) + const struct io_uring_sqe *sqe = req->sqe; struct io_async_msghdr *kmsg = NULL; struct socket *sock; int ret; @@ -2154,10 +2157,11 @@ static int io_recvmsg_prep(struct io_kiocb *req, struct io_async_ctx *io) #endif } -static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, - struct io_kiocb **nxt, bool force_nonblock) +static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt, + bool force_nonblock) { #if defined(CONFIG_NET) + const struct io_uring_sqe *sqe = req->sqe; struct io_async_msghdr *kmsg = NULL; struct socket *sock; int ret; @@ -2222,10 +2226,11 @@ out: #endif } -static int io_accept(struct io_kiocb *req, const struct io_uring_sqe *sqe, - struct io_kiocb **nxt, bool force_nonblock) +static int io_accept(struct io_kiocb *req, struct io_kiocb **nxt, + bool force_nonblock) { #if defined(CONFIG_NET) + const struct io_uring_sqe *sqe = req->sqe; struct sockaddr __user *addr; int __user *addr_len; unsigned file_flags; @@ -2273,10 +2278,11 @@ static int io_connect_prep(struct io_kiocb *req, struct io_async_ctx *io) #endif } -static int io_connect(struct io_kiocb *req, const struct io_uring_sqe *sqe, - struct io_kiocb **nxt, bool force_nonblock) +static int io_connect(struct io_kiocb *req, struct io_kiocb **nxt, + bool force_nonblock) { #if defined(CONFIG_NET) + const struct io_uring_sqe *sqe = req->sqe; struct io_async_ctx __io, *io; unsigned file_flags; int addr_len, ret; @@ -2374,8 +2380,9 @@ static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr) * Find a running poll command that matches one specified in sqe->addr, * and remove it if found. 
*/ -static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe) +static int io_poll_remove(struct io_kiocb *req) { + const struct io_uring_sqe *sqe = req->sqe; struct io_ring_ctx *ctx = req->ctx; int ret; @@ -2521,9 +2528,9 @@ static void io_poll_req_insert(struct io_kiocb *req) hlist_add_head(&req->hash_node, list); } -static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe, - struct io_kiocb **nxt) +static int io_poll_add(struct io_kiocb *req, struct io_kiocb **nxt) { + const struct io_uring_sqe *sqe = req->sqe; struct io_poll_iocb *poll = &req->poll; struct io_ring_ctx *ctx = req->ctx; struct io_poll_table ipt; @@ -2660,9 +2667,9 @@ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data) /* * Remove or update an existing timeout command */ -static int io_timeout_remove(struct io_kiocb *req, - const struct io_uring_sqe *sqe) +static int io_timeout_remove(struct io_kiocb *req) { + const struct io_uring_sqe *sqe = req->sqe; struct io_ring_ctx *ctx = req->ctx; unsigned flags; int ret; @@ -2721,8 +2728,9 @@ static int io_timeout_prep(struct io_kiocb *req, struct io_async_ctx *io, return 0; } -static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe) +static int io_timeout(struct io_kiocb *req) { + const struct io_uring_sqe *sqe = req->sqe; unsigned count; struct io_ring_ctx *ctx = req->ctx; struct io_timeout_data *data; @@ -2862,9 +2870,9 @@ done: io_put_req_find_next(req, nxt); } -static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe, - struct io_kiocb **nxt) +static int io_async_cancel(struct io_kiocb *req, struct io_kiocb **nxt) { + const struct io_uring_sqe *sqe = req->sqe; struct io_ring_ctx *ctx = req->ctx; if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) @@ -2987,37 +2995,37 @@ static int io_issue_sqe(struct io_kiocb *req, struct io_kiocb **nxt, ret = io_write(req, nxt, force_nonblock); break; case IORING_OP_FSYNC: - ret = io_fsync(req, req->sqe, nxt, force_nonblock); + ret = io_fsync(req, nxt, force_nonblock); break; case IORING_OP_POLL_ADD: - ret = io_poll_add(req, req->sqe, nxt); + ret = io_poll_add(req, nxt); break; case IORING_OP_POLL_REMOVE: - ret = io_poll_remove(req, req->sqe); + ret = io_poll_remove(req); break; case IORING_OP_SYNC_FILE_RANGE: - ret = io_sync_file_range(req, req->sqe, nxt, force_nonblock); + ret = io_sync_file_range(req, nxt, force_nonblock); break; case IORING_OP_SENDMSG: - ret = io_sendmsg(req, req->sqe, nxt, force_nonblock); + ret = io_sendmsg(req, nxt, force_nonblock); break; case IORING_OP_RECVMSG: - ret = io_recvmsg(req, req->sqe, nxt, force_nonblock); + ret = io_recvmsg(req, nxt, force_nonblock); break; case IORING_OP_TIMEOUT: - ret = io_timeout(req, req->sqe); + ret = io_timeout(req); break; case IORING_OP_TIMEOUT_REMOVE: - ret = io_timeout_remove(req, req->sqe); + ret = io_timeout_remove(req); break; case IORING_OP_ACCEPT: - ret = io_accept(req, req->sqe, nxt, force_nonblock); + ret = io_accept(req, nxt, force_nonblock); break; case IORING_OP_CONNECT: - ret = io_connect(req, req->sqe, nxt, force_nonblock); + ret = io_connect(req, nxt, force_nonblock); break; case IORING_OP_ASYNC_CANCEL: - ret = io_async_cancel(req, req->sqe, nxt); + ret = io_async_cancel(req, nxt); break; default: ret = -EINVAL; -- cgit v1.2.3 From 8ed8d3c3bc32bf5b442c9f54013b4a47d5cae740 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 16 Dec 2019 11:55:28 -0700 Subject: io_uring: any deferred command must have stable sqe data We're currently not retaining sqe data for accept, 
fsync, and sync_file_range. None of these commands need data outside of what is directly provided, hence it can't go stale when the request is deferred. However, it can get reused, if an application reuses SQE entries. Ensure that we retain the information we need and only read the sqe contents once, off the submission path. Most of this is just moving code into a prep and finish function. Signed-off-by: Jens Axboe --- fs/io_uring.c | 221 +++++++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 172 insertions(+), 49 deletions(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index 0298dd0abac0..67e1758bc937 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -304,6 +304,20 @@ struct io_timeout_data { u32 seq_offset; }; +struct io_accept { + struct file *file; + struct sockaddr __user *addr; + int __user *addr_len; + int flags; +}; + +struct io_sync { + struct file *file; + loff_t len; + loff_t off; + int flags; +}; + struct io_async_connect { struct sockaddr_storage address; }; @@ -343,6 +357,8 @@ struct io_kiocb { struct file *file; struct kiocb rw; struct io_poll_iocb poll; + struct io_accept accept; + struct io_sync sync; }; const struct io_uring_sqe *sqe; @@ -378,6 +394,7 @@ struct io_kiocb { #define REQ_F_INFLIGHT 16384 /* on inflight list */ #define REQ_F_COMP_LOCKED 32768 /* completion under lock */ #define REQ_F_HARDLINK 65536 /* doesn't sever on completion < 0 */ +#define REQ_F_PREPPED 131072 /* request already opcode prepared */ u64 user_data; u32 result; u32 sequence; @@ -1954,6 +1971,8 @@ static int io_prep_fsync(struct io_kiocb *req) const struct io_uring_sqe *sqe = req->sqe; struct io_ring_ctx *ctx = req->ctx; + if (req->flags & REQ_F_PREPPED) + return 0; if (!req->file) return -EBADF; @@ -1962,39 +1981,70 @@ static int io_prep_fsync(struct io_kiocb *req) if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index)) return -EINVAL; + req->sync.flags = READ_ONCE(sqe->fsync_flags); + if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC)) + return -EINVAL; + + req->sync.off = READ_ONCE(sqe->off); + req->sync.len = READ_ONCE(sqe->len); + req->flags |= REQ_F_PREPPED; return 0; } +static bool io_req_cancelled(struct io_kiocb *req) +{ + if (req->work.flags & IO_WQ_WORK_CANCEL) { + req_set_fail_links(req); + io_cqring_add_event(req, -ECANCELED); + io_put_req(req); + return true; + } + + return false; +} + +static void io_fsync_finish(struct io_wq_work **workptr) +{ + struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work); + loff_t end = req->sync.off + req->sync.len; + struct io_kiocb *nxt = NULL; + int ret; + + if (io_req_cancelled(req)) + return; + + ret = vfs_fsync_range(req->rw.ki_filp, req->sync.off, + end > 0 ? 
end : LLONG_MAX, + req->sync.flags & IORING_FSYNC_DATASYNC); + if (ret < 0) + req_set_fail_links(req); + io_cqring_add_event(req, ret); + io_put_req_find_next(req, &nxt); + if (nxt) + *workptr = &nxt->work; +} + static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt, bool force_nonblock) { - const struct io_uring_sqe *sqe = req->sqe; - loff_t sqe_off = READ_ONCE(sqe->off); - loff_t sqe_len = READ_ONCE(sqe->len); - loff_t end = sqe_off + sqe_len; - unsigned fsync_flags; + struct io_wq_work *work, *old_work; int ret; - fsync_flags = READ_ONCE(sqe->fsync_flags); - if (unlikely(fsync_flags & ~IORING_FSYNC_DATASYNC)) - return -EINVAL; - ret = io_prep_fsync(req); if (ret) return ret; /* fsync always requires a blocking context */ - if (force_nonblock) + if (force_nonblock) { + io_put_req(req); + req->work.func = io_fsync_finish; return -EAGAIN; + } - ret = vfs_fsync_range(req->rw.ki_filp, sqe_off, - end > 0 ? end : LLONG_MAX, - fsync_flags & IORING_FSYNC_DATASYNC); - - if (ret < 0) - req_set_fail_links(req); - io_cqring_add_event(req, ret); - io_put_req_find_next(req, nxt); + work = old_work = &req->work; + io_fsync_finish(&work); + if (work && work != old_work) + *nxt = container_of(work, struct io_kiocb, work); return 0; } @@ -2002,8 +2052,9 @@ static int io_prep_sfr(struct io_kiocb *req) { const struct io_uring_sqe *sqe = req->sqe; struct io_ring_ctx *ctx = req->ctx; - int ret = 0; + if (req->flags & REQ_F_PREPPED) + return 0; if (!req->file) return -EBADF; @@ -2012,16 +2063,36 @@ static int io_prep_sfr(struct io_kiocb *req) if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index)) return -EINVAL; - return ret; + req->sync.off = READ_ONCE(sqe->off); + req->sync.len = READ_ONCE(sqe->len); + req->sync.flags = READ_ONCE(sqe->sync_range_flags); + req->flags |= REQ_F_PREPPED; + return 0; +} + +static void io_sync_file_range_finish(struct io_wq_work **workptr) +{ + struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work); + struct io_kiocb *nxt = NULL; + int ret; + + if (io_req_cancelled(req)) + return; + + ret = sync_file_range(req->rw.ki_filp, req->sync.off, req->sync.len, + req->sync.flags); + if (ret < 0) + req_set_fail_links(req); + io_cqring_add_event(req, ret); + io_put_req_find_next(req, &nxt); + if (nxt) + *workptr = &nxt->work; } static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt, bool force_nonblock) { - const struct io_uring_sqe *sqe = req->sqe; - loff_t sqe_off; - loff_t sqe_len; - unsigned flags; + struct io_wq_work *work, *old_work; int ret; ret = io_prep_sfr(req); @@ -2029,19 +2100,16 @@ static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt, return ret; /* sync_file_range always requires a blocking context */ - if (force_nonblock) + if (force_nonblock) { + io_put_req(req); + req->work.func = io_sync_file_range_finish; return -EAGAIN; + } - sqe_off = READ_ONCE(sqe->off); - sqe_len = READ_ONCE(sqe->len); - flags = READ_ONCE(sqe->sync_range_flags); - - ret = sync_file_range(req->rw.ki_filp, sqe_off, sqe_len, flags); - - if (ret < 0) - req_set_fail_links(req); - io_cqring_add_event(req, ret); - io_put_req_find_next(req, nxt); + work = old_work = &req->work; + io_sync_file_range_finish(&work); + if (work && work != old_work) + *nxt = container_of(work, struct io_kiocb, work); return 0; } @@ -2226,31 +2294,44 @@ out: #endif } -static int io_accept(struct io_kiocb *req, struct io_kiocb **nxt, - bool force_nonblock) +static int io_accept_prep(struct io_kiocb *req) { #if defined(CONFIG_NET) const struct io_uring_sqe *sqe = 
req->sqe; - struct sockaddr __user *addr; - int __user *addr_len; - unsigned file_flags; - int flags, ret; + struct io_accept *accept = &req->accept; + + if (req->flags & REQ_F_PREPPED) + return 0; if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) return -EINVAL; if (sqe->ioprio || sqe->len || sqe->buf_index) return -EINVAL; - addr = (struct sockaddr __user *) (unsigned long) READ_ONCE(sqe->addr); - addr_len = (int __user *) (unsigned long) READ_ONCE(sqe->addr2); - flags = READ_ONCE(sqe->accept_flags); - file_flags = force_nonblock ? O_NONBLOCK : 0; + accept->addr = (struct sockaddr __user *) + (unsigned long) READ_ONCE(sqe->addr); + accept->addr_len = (int __user *) (unsigned long) READ_ONCE(sqe->addr2); + accept->flags = READ_ONCE(sqe->accept_flags); + req->flags |= REQ_F_PREPPED; + return 0; +#else + return -EOPNOTSUPP; +#endif +} - ret = __sys_accept4_file(req->file, file_flags, addr, addr_len, flags); - if (ret == -EAGAIN && force_nonblock) { - req->work.flags |= IO_WQ_WORK_NEEDS_FILES; +#if defined(CONFIG_NET) +static int __io_accept(struct io_kiocb *req, struct io_kiocb **nxt, + bool force_nonblock) +{ + struct io_accept *accept = &req->accept; + unsigned file_flags; + int ret; + + file_flags = force_nonblock ? O_NONBLOCK : 0; + ret = __sys_accept4_file(req->file, file_flags, accept->addr, + accept->addr_len, accept->flags); + if (ret == -EAGAIN && force_nonblock) return -EAGAIN; - } if (ret == -ERESTARTSYS) ret = -EINTR; if (ret < 0) @@ -2258,6 +2339,39 @@ static int io_accept(struct io_kiocb *req, struct io_kiocb **nxt, io_cqring_add_event(req, ret); io_put_req_find_next(req, nxt); return 0; +} + +static void io_accept_finish(struct io_wq_work **workptr) +{ + struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work); + struct io_kiocb *nxt = NULL; + + if (io_req_cancelled(req)) + return; + __io_accept(req, &nxt, false); + if (nxt) + *workptr = &nxt->work; +} +#endif + +static int io_accept(struct io_kiocb *req, struct io_kiocb **nxt, + bool force_nonblock) +{ +#if defined(CONFIG_NET) + int ret; + + ret = io_accept_prep(req); + if (ret) + return ret; + + ret = __io_accept(req, nxt, force_nonblock); + if (ret == -EAGAIN && force_nonblock) { + req->work.func = io_accept_finish; + req->work.flags |= IO_WQ_WORK_NEEDS_FILES; + io_put_req(req); + return -EAGAIN; + } + return 0; #else return -EOPNOTSUPP; #endif @@ -2915,6 +3029,12 @@ static int io_req_defer_prep(struct io_kiocb *req) io_req_map_rw(req, ret, iovec, inline_vecs, &iter); ret = 0; break; + case IORING_OP_FSYNC: + ret = io_prep_fsync(req); + break; + case IORING_OP_SYNC_FILE_RANGE: + ret = io_prep_sfr(req); + break; case IORING_OP_SENDMSG: ret = io_sendmsg_prep(req, io); break; @@ -2930,6 +3050,9 @@ static int io_req_defer_prep(struct io_kiocb *req) case IORING_OP_LINK_TIMEOUT: ret = io_timeout_prep(req, io, true); break; + case IORING_OP_ACCEPT: + ret = io_accept_prep(req); + break; default: ret = 0; break; -- cgit v1.2.3 From ffbb8d6b76910d4f3a2bafeaf68c419011e98d05 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Tue, 17 Dec 2019 20:57:05 +0300 Subject: io_uring: make HARDLINK imply LINK The rules are as follows, if IOSQE_IO_HARDLINK is specified, then it's a link and there is no need to set IOSQE_IO_LINK separately, though it could be there. Add proper check and ensure that IOSQE_IO_HARDLINK implies IOSQE_IO_LINK. 
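As a hedged userspace illustration (this assumes liburing and a kernel that understands IOSQE_IO_HARDLINK; nothing below is part of the patch), two NOPs can now be chained by setting only IOSQE_IO_HARDLINK on the first SQE, without also setting IOSQE_IO_LINK:

#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int i;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* first request: hard-linked to the next one, no IOSQE_IO_LINK needed */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	sqe->flags |= IOSQE_IO_HARDLINK;
	sqe->user_data = 1;

	/* second request: end of the chain */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	sqe->user_data = 2;

	io_uring_submit(&ring);

	for (i = 0; i < 2; i++) {
		if (io_uring_wait_cqe(&ring, &cqe) == 0) {
			printf("user_data=%llu res=%d\n",
			       (unsigned long long) cqe->user_data, cqe->res);
			io_uring_cqe_seen(&ring, cqe);
		}
	}
	io_uring_queue_exit(&ring);
	return 0;
}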
Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index 67e1758bc937..b476bd304045 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3698,7 +3698,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, * If previous wasn't linked and we have a linked command, * that's the end of the chain. Submit the previous link. */ - if (!(sqe_flags & IOSQE_IO_LINK) && link) { + if (!(sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) && link) { io_queue_link_head(link); link = NULL; } -- cgit v1.2.3 From 0969e783e3a8913f79df27286501a6c21e961524 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 17 Dec 2019 18:40:57 -0700 Subject: io_uring: make IORING_POLL_ADD and IORING_POLL_REMOVE deferrable If we defer these commands as part of a link, we have to make sure that the SQE data has been read upfront. Integrate the poll add/remove into the prep handling to make it safe for SQE reuse. Signed-off-by: Jens Axboe --- fs/io_uring.c | 68 +++++++++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 54 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index b476bd304045..b0411406c50a 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -289,7 +289,10 @@ struct io_ring_ctx { */ struct io_poll_iocb { struct file *file; - struct wait_queue_head *head; + union { + struct wait_queue_head *head; + u64 addr; + }; __poll_t events; bool done; bool canceled; @@ -2490,24 +2493,40 @@ static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr) return -ENOENT; } +static int io_poll_remove_prep(struct io_kiocb *req) +{ + const struct io_uring_sqe *sqe = req->sqe; + + if (req->flags & REQ_F_PREPPED) + return 0; + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; + if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index || + sqe->poll_events) + return -EINVAL; + + req->poll.addr = READ_ONCE(sqe->addr); + req->flags |= REQ_F_PREPPED; + return 0; +} + /* * Find a running poll command that matches one specified in sqe->addr, * and remove it if found. 
*/ static int io_poll_remove(struct io_kiocb *req) { - const struct io_uring_sqe *sqe = req->sqe; struct io_ring_ctx *ctx = req->ctx; + u64 addr; int ret; - if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) - return -EINVAL; - if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index || - sqe->poll_events) - return -EINVAL; + ret = io_poll_remove_prep(req); + if (ret) + return ret; + addr = req->poll.addr; spin_lock_irq(&ctx->completion_lock); - ret = io_poll_cancel(ctx, READ_ONCE(sqe->addr)); + ret = io_poll_cancel(ctx, addr); spin_unlock_irq(&ctx->completion_lock); io_cqring_add_event(req, ret); @@ -2642,16 +2661,14 @@ static void io_poll_req_insert(struct io_kiocb *req) hlist_add_head(&req->hash_node, list); } -static int io_poll_add(struct io_kiocb *req, struct io_kiocb **nxt) +static int io_poll_add_prep(struct io_kiocb *req) { const struct io_uring_sqe *sqe = req->sqe; struct io_poll_iocb *poll = &req->poll; - struct io_ring_ctx *ctx = req->ctx; - struct io_poll_table ipt; - bool cancel = false; - __poll_t mask; u16 events; + if (req->flags & REQ_F_PREPPED) + return 0; if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index) @@ -2659,9 +2676,26 @@ static int io_poll_add(struct io_kiocb *req, struct io_kiocb **nxt) if (!poll->file) return -EBADF; - INIT_IO_WORK(&req->work, io_poll_complete_work); + req->flags |= REQ_F_PREPPED; events = READ_ONCE(sqe->poll_events); poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP; + return 0; +} + +static int io_poll_add(struct io_kiocb *req, struct io_kiocb **nxt) +{ + struct io_poll_iocb *poll = &req->poll; + struct io_ring_ctx *ctx = req->ctx; + struct io_poll_table ipt; + bool cancel = false; + __poll_t mask; + int ret; + + ret = io_poll_add_prep(req); + if (ret) + return ret; + + INIT_IO_WORK(&req->work, io_poll_complete_work); INIT_HLIST_NODE(&req->hash_node); poll->head = NULL; @@ -3029,6 +3063,12 @@ static int io_req_defer_prep(struct io_kiocb *req) io_req_map_rw(req, ret, iovec, inline_vecs, &iter); ret = 0; break; + case IORING_OP_POLL_ADD: + ret = io_poll_add_prep(req); + break; + case IORING_OP_POLL_REMOVE: + ret = io_poll_remove_prep(req); + break; case IORING_OP_FSYNC: ret = io_prep_fsync(req); break; -- cgit v1.2.3 From fbf23849b1724d3ea362e346d0877a8d87978fe6 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 17 Dec 2019 18:45:56 -0700 Subject: io_uring: make IORING_OP_CANCEL_ASYNC deferrable If we defer this command as part of a link, we have to make sure that the SQE data has been read upfront. Integrate the async cancel op into the prep handling to make it safe for SQE reuse. 
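The conversion has the same shape as the other prep patches in this series. A generic sketch with made-up names (struct my_req, MY_F_PREPPED, stand_in_cancel; not the real io_uring types): validate and copy everything out of the shared SQE exactly once, mark the request prepped, and let the issue path use only the private copy.

#include <errno.h>

struct my_sqe {				/* lives in memory userspace can rewrite */
	unsigned long long addr;
	unsigned int op_flags;
};

struct my_req {
	const struct my_sqe *sqe;
	unsigned int flags;
	unsigned long long addr;	/* private, stable copy of sqe->addr */
};

#define MY_F_PREPPED 1u

static int stand_in_cancel(unsigned long long addr) { (void) addr; return 0; }

static int my_cancel_prep(struct my_req *req)
{
	if (req->flags & MY_F_PREPPED)
		return 0;			/* already done, e.g. when deferred as part of a link */
	if (req->sqe->op_flags)
		return -EINVAL;			/* reject unsupported fields up front */
	req->addr = req->sqe->addr;		/* read the shared SQE exactly once */
	req->flags |= MY_F_PREPPED;
	return 0;
}

static int my_cancel(struct my_req *req)
{
	int ret = my_cancel_prep(req);

	if (ret)
		return ret;
	/* past this point only req->addr is used; the SQE slot may be reused */
	return stand_in_cancel(req->addr);
}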
Signed-off-by: Jens Axboe --- fs/io_uring.c | 32 ++++++++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index b0411406c50a..1d6a5083f37f 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -321,6 +321,11 @@ struct io_sync { int flags; }; +struct io_cancel { + struct file *file; + u64 addr; +}; + struct io_async_connect { struct sockaddr_storage address; }; @@ -362,6 +367,7 @@ struct io_kiocb { struct io_poll_iocb poll; struct io_accept accept; struct io_sync sync; + struct io_cancel cancel; }; const struct io_uring_sqe *sqe; @@ -3018,18 +3024,33 @@ done: io_put_req_find_next(req, nxt); } -static int io_async_cancel(struct io_kiocb *req, struct io_kiocb **nxt) +static int io_async_cancel_prep(struct io_kiocb *req) { const struct io_uring_sqe *sqe = req->sqe; - struct io_ring_ctx *ctx = req->ctx; - if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) + if (req->flags & REQ_F_PREPPED) + return 0; + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; if (sqe->flags || sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags) return -EINVAL; - io_async_find_and_cancel(ctx, req, READ_ONCE(sqe->addr), nxt, 0); + req->flags |= REQ_F_PREPPED; + req->cancel.addr = READ_ONCE(sqe->addr); + return 0; +} + +static int io_async_cancel(struct io_kiocb *req, struct io_kiocb **nxt) +{ + struct io_ring_ctx *ctx = req->ctx; + int ret; + + ret = io_async_cancel_prep(req); + if (ret) + return ret; + + io_async_find_and_cancel(ctx, req, req->cancel.addr, nxt, 0); return 0; } @@ -3087,6 +3108,9 @@ static int io_req_defer_prep(struct io_kiocb *req) case IORING_OP_TIMEOUT: ret = io_timeout_prep(req, io, false); break; + case IORING_OP_ASYNC_CANCEL: + ret = io_async_cancel_prep(req); + break; case IORING_OP_LINK_TIMEOUT: ret = io_timeout_prep(req, io, true); break; -- cgit v1.2.3 From b29472ee7b53784f44011069fad15e539fd25bcf Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 17 Dec 2019 18:50:29 -0700 Subject: io_uring: make IORING_OP_TIMEOUT_REMOVE deferrable If we defer this command as part of a link, we have to make sure that the SQE data has been read upfront. Integrate the timeout remove op into the prep handling to make it safe for SQE reuse. 
Signed-off-by: Jens Axboe --- fs/io_uring.c | 44 ++++++++++++++++++++++++++++++++++---------- 1 file changed, 34 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index 1d6a5083f37f..9d4f8274ee1e 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -326,6 +326,12 @@ struct io_cancel { u64 addr; }; +struct io_timeout { + struct file *file; + u64 addr; + int flags; +}; + struct io_async_connect { struct sockaddr_storage address; }; @@ -368,6 +374,7 @@ struct io_kiocb { struct io_accept accept; struct io_sync sync; struct io_cancel cancel; + struct io_timeout timeout; }; const struct io_uring_sqe *sqe; @@ -2818,26 +2825,40 @@ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data) return 0; } +static int io_timeout_remove_prep(struct io_kiocb *req) +{ + const struct io_uring_sqe *sqe = req->sqe; + + if (req->flags & REQ_F_PREPPED) + return 0; + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; + if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len) + return -EINVAL; + + req->timeout.addr = READ_ONCE(sqe->addr); + req->timeout.flags = READ_ONCE(sqe->timeout_flags); + if (req->timeout.flags) + return -EINVAL; + + req->flags |= REQ_F_PREPPED; + return 0; +} + /* * Remove or update an existing timeout command */ static int io_timeout_remove(struct io_kiocb *req) { - const struct io_uring_sqe *sqe = req->sqe; struct io_ring_ctx *ctx = req->ctx; - unsigned flags; int ret; - if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) - return -EINVAL; - if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len) - return -EINVAL; - flags = READ_ONCE(sqe->timeout_flags); - if (flags) - return -EINVAL; + ret = io_timeout_remove_prep(req); + if (ret) + return ret; spin_lock_irq(&ctx->completion_lock); - ret = io_timeout_cancel(ctx, READ_ONCE(sqe->addr)); + ret = io_timeout_cancel(ctx, req->timeout.addr); io_cqring_fill_event(req, ret); io_commit_cqring(ctx); @@ -3108,6 +3129,9 @@ static int io_req_defer_prep(struct io_kiocb *req) case IORING_OP_TIMEOUT: ret = io_timeout_prep(req, io, false); break; + case IORING_OP_TIMEOUT_REMOVE: + ret = io_timeout_remove_prep(req); + break; case IORING_OP_ASYNC_CANCEL: ret = io_async_cancel_prep(req); break; -- cgit v1.2.3 From d625c6ee4975000140c57da7e1ff244efefde274 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 17 Dec 2019 19:53:05 -0700 Subject: io_uring: read opcode and user_data from SQE exactly once If we defer a request, we can't be reading the opcode again. Ensure that the user_data and opcode fields are stable. For the user_data we already have a place for it, for the opcode we can fill a one byte hold and store that as well. For both of them, assign them when we originally read the SQE in io_get_sqring(). Any code that uses sqe->opcode or sqe->user_data is switched to req->opcode and req->user_data. 
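A compact sketch of the idea with hypothetical types (struct shared_sqe, struct fetched_req; READ_ONCE is approximated here with a volatile access): the shared ring memory is loaded once at fetch time and everything downstream consumes only the private copies.

#define READ_ONCE(x) (*(const volatile __typeof__(x) *) &(x))

struct shared_sqe {			/* mapped into userspace, may change under us */
	unsigned char opcode;
	unsigned long long user_data;
};

struct fetched_req {
	const struct shared_sqe *sqe;
	unsigned char opcode;		/* private, stable copies */
	unsigned long long user_data;
};

static void fetch_req(struct fetched_req *req, const struct shared_sqe *sqe)
{
	req->sqe = sqe;
	/* one load each; later code never dereferences sqe->opcode/user_data again */
	req->opcode = READ_ONCE(sqe->opcode);
	req->user_data = READ_ONCE(sqe->user_data);
}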
Signed-off-by: Jens Axboe --- fs/io_uring.c | 45 ++++++++++++++++++++------------------------- 1 file changed, 20 insertions(+), 25 deletions(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index 9d4f8274ee1e..e0fc195d0d2d 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -384,6 +384,7 @@ struct io_kiocb { bool has_user; bool in_async; bool needs_fixed_file; + u8 opcode; struct io_ring_ctx *ctx; union { @@ -597,12 +598,10 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx) } } -static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe) +static inline bool io_req_needs_user(struct io_kiocb *req) { - u8 opcode = READ_ONCE(sqe->opcode); - - return !(opcode == IORING_OP_READ_FIXED || - opcode == IORING_OP_WRITE_FIXED); + return !(req->opcode == IORING_OP_READ_FIXED || + req->opcode == IORING_OP_WRITE_FIXED); } static inline bool io_prep_async_work(struct io_kiocb *req, @@ -611,7 +610,7 @@ static inline bool io_prep_async_work(struct io_kiocb *req, bool do_hashed = false; if (req->sqe) { - switch (req->sqe->opcode) { + switch (req->opcode) { case IORING_OP_WRITEV: case IORING_OP_WRITE_FIXED: /* only regular files should be hashed for writes */ @@ -634,7 +633,7 @@ static inline bool io_prep_async_work(struct io_kiocb *req, req->work.flags |= IO_WQ_WORK_UNBOUND; break; } - if (io_sqe_needs_user(req->sqe)) + if (io_req_needs_user(req)) req->work.flags |= IO_WQ_WORK_NEEDS_USER; } @@ -1005,7 +1004,7 @@ static void io_fail_links(struct io_kiocb *req) trace_io_uring_fail_link(req, link); if ((req->flags & REQ_F_LINK_TIMEOUT) && - link->sqe->opcode == IORING_OP_LINK_TIMEOUT) { + link->opcode == IORING_OP_LINK_TIMEOUT) { io_link_cancel_timeout(link); } else { io_cqring_fill_event(link, -ECANCELED); @@ -1648,7 +1647,7 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req, * for that purpose and instead let the caller pass in the read/write * flag. 
*/ - opcode = READ_ONCE(sqe->opcode); + opcode = req->opcode; if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) { *iovec = NULL; return io_import_fixed(req->ctx, rw, sqe, iter); @@ -3082,7 +3081,7 @@ static int io_req_defer_prep(struct io_kiocb *req) struct iov_iter iter; ssize_t ret; - switch (io->sqe.opcode) { + switch (req->opcode) { case IORING_OP_READV: case IORING_OP_READ_FIXED: /* ensure prep does right import */ @@ -3181,11 +3180,10 @@ __attribute__((nonnull)) static int io_issue_sqe(struct io_kiocb *req, struct io_kiocb **nxt, bool force_nonblock) { - int ret, opcode; struct io_ring_ctx *ctx = req->ctx; + int ret; - opcode = READ_ONCE(req->sqe->opcode); - switch (opcode) { + switch (req->opcode) { case IORING_OP_NOP: ret = io_nop(req); break; @@ -3322,11 +3320,9 @@ static bool io_req_op_valid(int op) return op >= IORING_OP_NOP && op < IORING_OP_LAST; } -static int io_op_needs_file(const struct io_uring_sqe *sqe) +static int io_req_needs_file(struct io_kiocb *req) { - int op = READ_ONCE(sqe->opcode); - - switch (op) { + switch (req->opcode) { case IORING_OP_NOP: case IORING_OP_POLL_REMOVE: case IORING_OP_TIMEOUT: @@ -3335,7 +3331,7 @@ static int io_op_needs_file(const struct io_uring_sqe *sqe) case IORING_OP_LINK_TIMEOUT: return 0; default: - if (io_req_op_valid(op)) + if (io_req_op_valid(req->opcode)) return 1; return -EINVAL; } @@ -3362,7 +3358,7 @@ static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req) if (flags & IOSQE_IO_DRAIN) req->flags |= REQ_F_IO_DRAIN; - ret = io_op_needs_file(req->sqe); + ret = io_req_needs_file(req); if (ret <= 0) return ret; @@ -3482,7 +3478,7 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req) nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, link_list); - if (!nxt || nxt->sqe->opcode != IORING_OP_LINK_TIMEOUT) + if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT) return NULL; req->flags |= REQ_F_LINK_TIMEOUT; @@ -3584,8 +3580,6 @@ static bool io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state, struct io_ring_ctx *ctx = req->ctx; int ret; - req->user_data = req->sqe->user_data; - /* enforce forwards compatibility on users */ if (unlikely(req->sqe->flags & ~SQE_VALID_FLAGS)) { ret = -EINVAL; @@ -3717,6 +3711,8 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req) */ req->sequence = ctx->cached_sq_head; req->sqe = &ctx->sq_sqes[head]; + req->opcode = READ_ONCE(req->sqe->opcode); + req->user_data = READ_ONCE(req->sqe->user_data); ctx->cached_sq_head++; return true; } @@ -3762,7 +3758,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, break; } - if (io_sqe_needs_user(req->sqe) && !*mm) { + if (io_req_needs_user(req) && !*mm) { mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm); if (!mm_fault) { use_mm(ctx->sqo_mm); @@ -3778,8 +3774,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, req->has_user = *mm != NULL; req->in_async = async; req->needs_fixed_file = async; - trace_io_uring_submit_sqe(ctx, req->sqe->user_data, - true, async); + trace_io_uring_submit_sqe(ctx, req->user_data, true, async); if (!io_submit_sqe(req, statep, &link)) break; /* -- cgit v1.2.3 From e781573e2fb1b75acdba61dcb9bcbfc16f288442 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 17 Dec 2019 19:45:06 -0700 Subject: io_uring: warn about unhandled opcode Now that we have all the opcodes handled in terms of command prep and SQE reuse, add a printk_once() to warn about any potentially new and unhandled ones. 
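A minimal kernel-context sketch of that guard, with a hypothetical handler (only printk_once() and the error code come from real kernel headers):

#include <linux/errno.h>
#include <linux/printk.h>

static int demo_prep(int opcode)
{
	switch (opcode) {
	case 0:		/* opcodes this sketch knows how to prep */
	case 1:
		return 0;
	default:
		/* complain once per boot about an opcode prep doesn't handle */
		printk_once(KERN_WARNING "demo: unhandled opcode %d\n", opcode);
		return -EINVAL;
	}
}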
Signed-off-by: Jens Axboe --- fs/io_uring.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index e0fc195d0d2d..1d4e7332ccae 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3079,9 +3079,11 @@ static int io_req_defer_prep(struct io_kiocb *req) struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; struct io_async_ctx *io = req->io; struct iov_iter iter; - ssize_t ret; + ssize_t ret = 0; switch (req->opcode) { + case IORING_OP_NOP: + break; case IORING_OP_READV: case IORING_OP_READ_FIXED: /* ensure prep does right import */ @@ -3141,7 +3143,9 @@ static int io_req_defer_prep(struct io_kiocb *req) ret = io_accept_prep(req); break; default: - ret = 0; + printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n", + req->opcode); + ret = -EINVAL; break; } -- cgit v1.2.3 From 7c504e65206a4379ff38fe41d21b32b6c2c3e53e Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 18 Dec 2019 19:53:45 +0300 Subject: io_uring: don't wait when under-submitting There is no reliable way to submit and wait in a single syscall, as io_submit_sqes() may under-consume sqes (in case of an early error). Then it will wait for not-yet-submitted requests, deadlocking the user in most cases. Don't wait/poll if can't submit all sqes Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- fs/io_uring.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index 1d4e7332ccae..81e7fe6dee18 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -5135,6 +5135,9 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, submitted = io_submit_sqes(ctx, to_submit, f.file, fd, &cur_mm, false); mutex_unlock(&ctx->uring_lock); + + if (submitted != to_submit) + goto out; } if (flags & IORING_ENTER_GETEVENTS) { unsigned nr_events = 0; @@ -5148,6 +5151,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, } } +out: percpu_ref_put(&ctx->refs); out_fput: fdput(f); -- cgit v1.2.3 From fd6c2e4c063d64511657ad0031a1677b6a914859 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 18 Dec 2019 12:19:41 -0700 Subject: io_uring: io_wq_submit_work() should not touch req->rw I've been chasing a weird and obscure crash that was userspace stack corruption, and finally narrowed it down to a bit flip that made a stack address invalid. io_wq_submit_work() unconditionally flips the req->rw.ki_flags IOCB_NOWAIT bit, but since it's a generic work handler, this isn't valid. Normal read/write operations own that part of the request, on other types it could be something else. Move the IOCB_NOWAIT clear to the read/write handlers where it belongs. 
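The hazard is writing through an inactive union member. A standalone sketch with invented types (struct request below is not io_uring's struct io_kiocb):

struct rw_state { unsigned int ki_flags; };
struct accept_state { int *addr_len; };		/* overlaps ki_flags in the union */

struct request {
	int opcode;
	union {
		struct rw_state rw;		/* valid only for read/write opcodes */
		struct accept_state accept;	/* valid only for accept */
	};
};

#define OP_READ		0
#define OP_ACCEPT	1
#define NOWAIT_FLAG	0x1u

/* BAD: a generic handler clearing a bit inside whatever occupies the union */
static void generic_work_bad(struct request *req)
{
	req->rw.ki_flags &= ~NOWAIT_FLAG;	/* for OP_ACCEPT this flips a bit of a pointer */
}

/* GOOD: only the opcode that owns the member touches it */
static void read_work_good(struct request *req, int force_nonblock)
{
	if (req->opcode == OP_READ && !force_nonblock)
		req->rw.ki_flags &= ~NOWAIT_FLAG;
}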
Signed-off-by: Jens Axboe --- fs/io_uring.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index 81e7fe6dee18..6f084e3cf835 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1817,6 +1817,10 @@ static int io_read(struct io_kiocb *req, struct io_kiocb **nxt, return ret; } + /* Ensure we clear previously set non-block flag */ + if (!force_nonblock) + req->rw.ki_flags &= ~IOCB_NOWAIT; + file = req->file; io_size = ret; if (req->flags & REQ_F_LINK) @@ -1906,6 +1910,10 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt, return ret; } + /* Ensure we clear previously set non-block flag */ + if (!force_nonblock) + req->rw.ki_flags &= ~IOCB_NOWAIT; + file = kiocb->ki_filp; io_size = ret; if (req->flags & REQ_F_LINK) @@ -3274,9 +3282,6 @@ static void io_wq_submit_work(struct io_wq_work **workptr) struct io_kiocb *nxt = NULL; int ret = 0; - /* Ensure we clear previously set non-block flag */ - req->rw.ki_flags &= ~IOCB_NOWAIT; - if (work->flags & IO_WQ_WORK_CANCEL) ret = -ECANCELED; -- cgit v1.2.3