From 0f2122045b946241a9e549c2a76cea54fa58a7ff Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Sun, 13 Sep 2020 13:09:39 -0600
Subject: io_uring: don't rely on weak ->files references

Grab actual references to the files_struct. To avoid circular reference
issues due to this, we add a per-task note that keeps track of what
io_uring contexts a task has used. When the task execs or exits its
assigned files, we cancel requests based on this tracking.

With that, we can grab proper references to the files table, and no
longer need to rely on stashing away ring_fd and ring_file to check
if the ring_fd may have been closed.

Cc: stable@vger.kernel.org # v5.5+
Reviewed-by: Pavel Begunkov
Signed-off-by: Jens Axboe
---
 include/linux/io_uring.h | 53 ++++++++++++++++++++++++++++++++++++++++++++++
 include/linux/sched.h    |  5 +++++
 2 files changed, 58 insertions(+)
 create mode 100644 include/linux/io_uring.h

(limited to 'include')

diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
new file mode 100644
index 000000000000..c09135a1ef13
--- /dev/null
+++ b/include/linux/io_uring.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_IO_URING_H
+#define _LINUX_IO_URING_H
+
+#include <linux/sched.h>
+#include <linux/xarray.h>
+#include <linux/percpu-refcount.h>
+
+struct io_uring_task {
+	/* submission side */
+	struct xarray		xa;
+	struct wait_queue_head	wait;
+	struct file		*last;
+	atomic_long_t		req_issue;
+
+	/* completion side */
+	bool			in_idle ____cacheline_aligned_in_smp;
+	atomic_long_t		req_complete;
+};
+
+#if defined(CONFIG_IO_URING)
+void __io_uring_task_cancel(void);
+void __io_uring_files_cancel(struct files_struct *files);
+void __io_uring_free(struct task_struct *tsk);
+
+static inline void io_uring_task_cancel(void)
+{
+	if (current->io_uring && !xa_empty(&current->io_uring->xa))
+		__io_uring_task_cancel();
+}
+static inline void io_uring_files_cancel(struct files_struct *files)
+{
+	if (current->io_uring && !xa_empty(&current->io_uring->xa))
+		__io_uring_files_cancel(files);
+}
+static inline void io_uring_free(struct task_struct *tsk)
+{
+	if (tsk->io_uring)
+		__io_uring_free(tsk);
+}
+#else
+static inline void io_uring_task_cancel(void)
+{
+}
+static inline void io_uring_files_cancel(struct files_struct *files)
+{
+}
+static inline void io_uring_free(struct task_struct *tsk)
+{
+}
+#endif
+
+#endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index afe01e232935..8bf2295ebee4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -63,6 +63,7 @@ struct sighand_struct;
 struct signal_struct;
 struct task_delay_info;
 struct task_group;
+struct io_uring_task;
 
 /*
  * Task state bitmask. NOTE! These bits are also
@@ -935,6 +936,10 @@ struct task_struct {
 	/* Open file information: */
 	struct files_struct		*files;
 
+#ifdef CONFIG_IO_URING
+	struct io_uring_task		*io_uring;
+#endif
+
 	/* Namespaces: */
 	struct nsproxy			*nsproxy;
-- cgit v1.2.3
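The listing above is limited to include/; the complete patch also hooks
these helpers into the exec, exit, and task-teardown paths. A rough
sketch of the intended call sites follows, with placement inferred from
the commit message rather than from hunks shown here:

/*
 * Sketch only -- call-site placement is an assumption based on the
 * commit message; the hunks for these files are not shown above.
 */

/* fs/exec.c: cancel requests before the task switches identity */
io_uring_task_cancel();

/* kernel/exit.c: cancel requests against the files being torn down */
io_uring_files_cancel(current->files);

/* kernel/fork.c, free_task(): drop the per-task tracking state */
io_uring_free(tsk);

From a3ec60054082ca2c2145ba487f9fc4de904e2b03 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Fri, 18 Sep 2020 20:41:00 -0600
Subject: io_uring: move io_uring_get_socket() into io_uring.h

Now we have an io_uring kernel header, move this definition out of fs.h
and into io_uring.h where it belongs.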
Signed-off-by: Jens Axboe
---
 include/linux/fs.h       | 9 ---------
 include/linux/io_uring.h | 5 +++++
 net/unix/scm.c           | 1 +
 3 files changed, 6 insertions(+), 9 deletions(-)

(limited to 'include')

diff --git a/include/linux/fs.h b/include/linux/fs.h
index 7519ae003a08..1c4068428461 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -3514,15 +3514,6 @@ extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len,
 extern int generic_fadvise(struct file *file, loff_t offset, loff_t len,
 			   int advice);
 
-#if defined(CONFIG_IO_URING)
-extern struct sock *io_uring_get_socket(struct file *file);
-#else
-static inline struct sock *io_uring_get_socket(struct file *file)
-{
-	return NULL;
-}
-#endif
-
 int vfs_ioc_setflags_prepare(struct inode *inode, unsigned int oldflags,
 			     unsigned int flags);
 
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index c09135a1ef13..96315cfaf6d1 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -19,6 +19,7 @@ struct io_uring_task {
 };
 
 #if defined(CONFIG_IO_URING)
+struct sock *io_uring_get_socket(struct file *file);
 void __io_uring_task_cancel(void);
 void __io_uring_files_cancel(struct files_struct *files);
 void __io_uring_free(struct task_struct *tsk);
@@ -39,6 +40,10 @@ static inline void io_uring_free(struct task_struct *tsk)
 		__io_uring_free(tsk);
 }
 #else
+static inline struct sock *io_uring_get_socket(struct file *file)
+{
+	return NULL;
+}
 static inline void io_uring_task_cancel(void)
 {
 }
diff --git a/net/unix/scm.c b/net/unix/scm.c
index 8c40f2b32392..052ae709ce28 100644
--- a/net/unix/scm.c
+++ b/net/unix/scm.c
@@ -8,6 +8,7 @@
 #include <net/af_unix.h>
 #include <net/scm.h>
 #include <linux/init.h>
+#include <linux/io_uring.h>
 
 #include "scm.h"
-- cgit v1.2.3

From 9d4a75efa200a31deabe9ba1c941aef697e6bb30 Mon Sep 17 00:00:00 2001
From: Stefano Garzarella
Date: Thu, 27 Aug 2020 16:58:29 +0200
Subject: io_uring: use an enumeration for io_uring_register(2) opcodes

The enumeration allows us to keep track of the last io_uring_register(2)
opcode available. Behaviour and opcode names don't change.
Signed-off-by: Stefano Garzarella
Reviewed-by: Kees Cook
Signed-off-by: Jens Axboe
---
 include/uapi/linux/io_uring.h | 27 ++++++++++++++++-----------
 1 file changed, 16 insertions(+), 11 deletions(-)

(limited to 'include')

diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index d65fde732518..5f12ae6a415c 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -255,17 +255,22 @@ struct io_uring_params {
 /*
  * io_uring_register(2) opcodes and arguments
  */
-#define IORING_REGISTER_BUFFERS		0
-#define IORING_UNREGISTER_BUFFERS	1
-#define IORING_REGISTER_FILES		2
-#define IORING_UNREGISTER_FILES		3
-#define IORING_REGISTER_EVENTFD		4
-#define IORING_UNREGISTER_EVENTFD	5
-#define IORING_REGISTER_FILES_UPDATE	6
-#define IORING_REGISTER_EVENTFD_ASYNC	7
-#define IORING_REGISTER_PROBE		8
-#define IORING_REGISTER_PERSONALITY	9
-#define IORING_UNREGISTER_PERSONALITY	10
+enum {
+	IORING_REGISTER_BUFFERS			= 0,
+	IORING_UNREGISTER_BUFFERS		= 1,
+	IORING_REGISTER_FILES			= 2,
+	IORING_UNREGISTER_FILES			= 3,
+	IORING_REGISTER_EVENTFD			= 4,
+	IORING_UNREGISTER_EVENTFD		= 5,
+	IORING_REGISTER_FILES_UPDATE		= 6,
+	IORING_REGISTER_EVENTFD_ASYNC		= 7,
+	IORING_REGISTER_PROBE			= 8,
+	IORING_REGISTER_PERSONALITY		= 9,
+	IORING_UNREGISTER_PERSONALITY		= 10,
+
+	/* this goes last */
+	IORING_REGISTER_LAST
+};
 
 struct io_uring_files_update {
 	__u32 offset;
-- cgit v1.2.3
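One payoff of the sentinel value: opcode validation collapses to a
range check that stays correct as new opcodes are added. A minimal
sketch (the helper name is hypothetical):

/* Hypothetical helper: any opcode at or past the sentinel is unknown. */
static bool io_register_op_valid(unsigned int opcode)
{
	return opcode < IORING_REGISTER_LAST;
}

From 21b55dbc0653018b8cd4513c37cbca303b0f0d50 Mon Sep 17 00:00:00 2001
From: Stefano Garzarella
Date: Thu, 27 Aug 2020 16:58:30 +0200
Subject: io_uring: add IORING_REGISTER_RESTRICTIONS opcode

The new io_uring_register(2) IORING_REGISTER_RESTRICTIONS opcode
permanently installs a feature allowlist on an io_ring_ctx. The
io_ring_ctx can then be passed to untrusted code with the knowledge
that only operations present in the allowlist can be executed.

The allowlist approach ensures that new features added to io_uring do
not accidentally become available when an existing application is
launched on a newer kernel version.

Currently it is possible to restrict sqe opcodes, sqe flags, and
register opcodes.

IORING_REGISTER_RESTRICTIONS can only be made once. Afterwards it is
not possible to change restrictions anymore. This prevents untrusted
code from removing restrictions.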
Suggested-by: Stefan Hajnoczi
Signed-off-by: Stefano Garzarella
Reviewed-by: Kees Cook
Signed-off-by: Jens Axboe
---
 fs/io_uring.c                 | 124 +++++++++++++++++++++++++++++++++++++++-
 include/uapi/linux/io_uring.h |  31 +++++++++++
 2 files changed, 154 insertions(+), 1 deletion(-)

(limited to 'include')

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 05ec385a6094..c4855cecc8f3 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -99,6 +99,8 @@
 #define IORING_MAX_FILES_TABLE	(1U << IORING_FILE_TABLE_SHIFT)
 #define IORING_FILE_TABLE_MASK	(IORING_MAX_FILES_TABLE - 1)
 #define IORING_MAX_FIXED_FILES	(64 * IORING_MAX_FILES_TABLE)
+#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
+				 IORING_REGISTER_LAST + IORING_OP_LAST)
 
 struct io_uring {
 	u32 head ____cacheline_aligned_in_smp;
@@ -220,6 +222,13 @@ struct io_buffer {
 	__u16 bid;
 };
 
+struct io_restriction {
+	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
+	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
+	u8 sqe_flags_allowed;
+	u8 sqe_flags_required;
+};
+
 struct io_ring_ctx {
 	struct {
 		struct percpu_ref	refs;
@@ -232,6 +241,7 @@ struct io_ring_ctx {
 		unsigned int		cq_overflow_flushed: 1;
 		unsigned int		drain_next: 1;
 		unsigned int		eventfd_async: 1;
+		unsigned int		restricted: 1;
 
 		/*
 		 * Ring buffer of indices into array of io_uring_sqe, which is
@@ -346,6 +356,7 @@ struct io_ring_ctx {
 	struct llist_head		file_put_llist;
 
 	struct work_struct		exit_work;
+	struct io_restriction		restrictions;
 };
 
 /*
@@ -6438,6 +6449,32 @@ static inline void io_consume_sqe(struct io_ring_ctx *ctx)
 	ctx->cached_sq_head++;
 }
 
+/*
+ * Check SQE restrictions (opcode and flags).
+ *
+ * Returns 'true' if SQE is allowed, 'false' otherwise.
+ */
+static inline bool io_check_restriction(struct io_ring_ctx *ctx,
+					struct io_kiocb *req,
+					unsigned int sqe_flags)
+{
+	if (!ctx->restricted)
+		return true;
+
+	if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
+		return false;
+
+	if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
+	    ctx->restrictions.sqe_flags_required)
+		return false;
+
+	if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
+			  ctx->restrictions.sqe_flags_required))
+		return false;
+
+	return true;
+}
+
 #define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK|	\
 				IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
 				IOSQE_BUFFER_SELECT)
@@ -6473,6 +6510,9 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
 		return -EINVAL;
 
+	if (unlikely(!io_check_restriction(ctx, req, sqe_flags)))
+		return -EACCES;
+
 	if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
 	    !io_op_defs[req->opcode].buffer_select)
 		return -EOPNOTSUPP;
@@ -9077,6 +9117,72 @@ static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
 	return -EINVAL;
 }
 
+static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
+				    unsigned int nr_args)
+{
+	struct io_uring_restriction *res;
+	size_t size;
+	int i, ret;
+
+	/* We allow only a single restrictions registration */
+	if (ctx->restricted)
+		return -EBUSY;
+
+	if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
+		return -EINVAL;
+
+	size = array_size(nr_args, sizeof(*res));
+	if (size == SIZE_MAX)
+		return -EOVERFLOW;
+
+	res = memdup_user(arg, size);
+	if (IS_ERR(res))
+		return PTR_ERR(res);
+
+	ret = 0;
+
+	for (i = 0; i < nr_args; i++) {
+		switch (res[i].opcode) {
+		case IORING_RESTRICTION_REGISTER_OP:
+			if (res[i].register_op >= IORING_REGISTER_LAST) {
+				ret = -EINVAL;
+				goto out;
+			}
+
+			__set_bit(res[i].register_op,
+				  ctx->restrictions.register_op);
+			break;
+		case IORING_RESTRICTION_SQE_OP:
+			if (res[i].sqe_op >= IORING_OP_LAST) {
+				ret = -EINVAL;
+				goto out;
+			}
+
+			__set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
+			break;
+		case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
+			ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
+			break;
+		case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
+			ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
+			break;
+		default:
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+out:
+	/* Reset all restrictions if an error happened */
+	if (ret != 0)
+		memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
+	else
+		ctx->restricted = 1;
+
+	kfree(res);
+	return ret;
+}
+
 static bool io_register_op_must_quiesce(int op)
 {
 	switch (op) {
@@ -9123,6 +9229,18 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 		if (ret) {
 			percpu_ref_resurrect(&ctx->refs);
 			ret = -EINTR;
+			goto out_quiesce;
+		}
+	}
+
+	if (ctx->restricted) {
+		if (opcode >= IORING_REGISTER_LAST) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		if (!test_bit(opcode, ctx->restrictions.register_op)) {
+			ret = -EACCES;
 			goto out;
 		}
 	}
@@ -9186,15 +9304,19 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 			break;
 		ret = io_unregister_personality(ctx, nr_args);
 		break;
+	case IORING_REGISTER_RESTRICTIONS:
+		ret = io_register_restrictions(ctx, arg, nr_args);
+		break;
 	default:
 		ret = -EINVAL;
 		break;
 	}
 
+out:
 	if (io_register_op_must_quiesce(opcode)) {
 		/* bring the ctx back to life */
 		percpu_ref_reinit(&ctx->refs);
-out:
+out_quiesce:
 		reinit_completion(&ctx->ref_comp);
 	}
 	return ret;
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 5f12ae6a415c..6e7f2e5e917b 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -267,6 +267,7 @@ enum {
 	IORING_REGISTER_PROBE			= 8,
 	IORING_REGISTER_PERSONALITY		= 9,
 	IORING_UNREGISTER_PERSONALITY		= 10,
+	IORING_REGISTER_RESTRICTIONS		= 11,
 
 	/* this goes last */
 	IORING_REGISTER_LAST
@@ -295,4 +296,34 @@ struct io_uring_probe {
 	struct io_uring_probe_op ops[0];
 };
 
+struct io_uring_restriction {
+	__u16 opcode;
+	union {
+		__u8 register_op; /* IORING_RESTRICTION_REGISTER_OP */
+		__u8 sqe_op;      /* IORING_RESTRICTION_SQE_OP */
+		__u8 sqe_flags;   /* IORING_RESTRICTION_SQE_FLAGS_* */
+	};
+	__u8 resv;
+	__u32 resv2[3];
+};
+
+/*
+ * io_uring_restriction->opcode values
+ */
+enum {
+	/* Allow an io_uring_register(2) opcode */
+	IORING_RESTRICTION_REGISTER_OP		= 0,
+
+	/* Allow an sqe opcode */
+	IORING_RESTRICTION_SQE_OP		= 1,
+
+	/* Allow sqe flags */
+	IORING_RESTRICTION_SQE_FLAGS_ALLOWED	= 2,
+
+	/* Require sqe flags (these flags must be set on each submission) */
+	IORING_RESTRICTION_SQE_FLAGS_REQUIRED	= 3,
+
+	IORING_RESTRICTION_LAST
+};
+
 #endif
-- cgit v1.2.3
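From userspace, registering an allowlist might look like the sketch
below. It assumes an io_uring_register() wrapper around the raw
syscall, and the opcode choices are purely illustrative:

#include <string.h>
#include <linux/io_uring.h>

/* Assumed wrapper around syscall(__NR_io_uring_register, ...) */
extern int io_uring_register(int fd, unsigned int opcode,
			     void *arg, unsigned int nr_args);

/* Allow only READV/WRITEV submissions plus buffer registration */
static int restrict_ring(int ring_fd)
{
	struct io_uring_restriction res[3];

	memset(res, 0, sizeof(res));
	res[0].opcode = IORING_RESTRICTION_SQE_OP;
	res[0].sqe_op = IORING_OP_READV;
	res[1].opcode = IORING_RESTRICTION_SQE_OP;
	res[1].sqe_op = IORING_OP_WRITEV;
	res[2].opcode = IORING_RESTRICTION_REGISTER_OP;
	res[2].register_op = IORING_REGISTER_BUFFERS;

	return io_uring_register(ring_fd, IORING_REGISTER_RESTRICTIONS,
				 res, 3);
}

From 7e84e1c7566a1df470a9e1f49d3db2ce311261a4 Mon Sep 17 00:00:00 2001
From: Stefano Garzarella
Date: Thu, 27 Aug 2020 16:58:31 +0200
Subject: io_uring: allow disabling rings during the creation

This patch adds a new IORING_SETUP_R_DISABLED flag to start the rings
disabled, allowing the user to register restrictions, buffers, and
files before SQE processing starts. When IORING_SETUP_R_DISABLED is
set, SQEs are not processed and the SQPOLL kthread is not started.

Restriction registration is allowed only while the rings are disabled,
to prevent concurrency issues while processing SQEs.

The rings can be enabled using the IORING_REGISTER_ENABLE_RINGS opcode
with io_uring_register(2).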
Suggested-by: Jens Axboe
Signed-off-by: Stefano Garzarella
Reviewed-by: Kees Cook
Signed-off-by: Jens Axboe
---
 fs/io_uring.c                 | 61 ++++++++++++++++++++++++++++++++++++-----
 include/uapi/linux/io_uring.h |  2 ++
 2 files changed, 56 insertions(+), 7 deletions(-)

(limited to 'include')

diff --git a/fs/io_uring.c b/fs/io_uring.c
index c4855cecc8f3..b8fdb10c23e3 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -227,6 +227,7 @@ struct io_restriction {
 	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
 	u8 sqe_flags_allowed;
 	u8 sqe_flags_required;
+	bool registered;
 };
 
 struct io_ring_ctx {
@@ -6910,6 +6911,14 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
 static void io_sq_thread_stop(struct io_ring_ctx *ctx)
 {
 	if (ctx->sqo_thread) {
+		/*
+		 * We may arrive here from the error branch in
+		 * io_sq_offload_create() where the kthread is created
+		 * without being woken up, thus wake it up now to make
+		 * sure the wait will complete.
+		 */
+		wake_up_process(ctx->sqo_thread);
+
 		wait_for_completion(&ctx->sq_thread_comp);
 		/*
 		 * The park is a bit of a work-around, without it we get
@@ -7581,8 +7590,8 @@ void __io_uring_free(struct task_struct *tsk)
 	tsk->io_uring = NULL;
 }
 
-static int io_sq_offload_start(struct io_ring_ctx *ctx,
-			       struct io_uring_params *p)
+static int io_sq_offload_create(struct io_ring_ctx *ctx,
+				struct io_uring_params *p)
 {
 	int ret;
 
@@ -7619,7 +7628,6 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
 		ret = io_uring_alloc_task_context(ctx->sqo_thread);
 		if (ret)
 			goto err;
-		wake_up_process(ctx->sqo_thread);
 	} else if (p->flags & IORING_SETUP_SQ_AFF) {
 		/* Can't have SQ_AFF without SQPOLL */
 		ret = -EINVAL;
@@ -7636,6 +7644,12 @@ err:
 	return ret;
 }
 
+static void io_sq_offload_start(struct io_ring_ctx *ctx)
+{
+	if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sqo_thread)
+		wake_up_process(ctx->sqo_thread);
+}
+
 static inline void __io_unaccount_mem(struct user_struct *user,
 				      unsigned long nr_pages)
 {
@@ -8633,6 +8647,10 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 	if (!percpu_ref_tryget(&ctx->refs))
 		goto out_fput;
 
+	ret = -EBADFD;
+	if (ctx->flags & IORING_SETUP_R_DISABLED)
+		goto out;
+
 	/*
 	 * For SQ polling, the thread will do all submissions and completions.
 	 * Just return the requested submit count, and wake the thread if
@@ -8975,10 +8993,13 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
 	if (ret)
 		goto err;
 
-	ret = io_sq_offload_start(ctx, p);
+	ret = io_sq_offload_create(ctx, p);
 	if (ret)
 		goto err;
 
+	if (!(p->flags & IORING_SETUP_R_DISABLED))
+		io_sq_offload_start(ctx);
+
 	memset(&p->sq_off, 0, sizeof(p->sq_off));
 	p->sq_off.head = offsetof(struct io_rings, sq.head);
 	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
@@ -9041,7 +9062,8 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
 
 	if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
 			IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
-			IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ))
+			IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
+			IORING_SETUP_R_DISABLED))
 		return -EINVAL;
 
 	return io_uring_create(entries, &p, params);
@@ -9124,8 +9146,12 @@ static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
 	size_t size;
 	int i, ret;
 
+	/* Restrictions allowed only if rings started disabled */
+	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
+		return -EBADFD;
+
 	/* We allow only a single restrictions registration */
-	if (ctx->restricted)
+	if (ctx->restrictions.registered)
 		return -EBUSY;
 
 	if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
@@ -9177,12 +9203,27 @@ out:
 	/* Reset all restrictions if an error happened */
 	if (ret != 0)
 		memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
 	else
-		ctx->restricted = 1;
+		ctx->restrictions.registered = true;
 
 	kfree(res);
 	return ret;
 }
 
+static int io_register_enable_rings(struct io_ring_ctx *ctx)
+{
+	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
+		return -EBADFD;
+
+	if (ctx->restrictions.registered)
+		ctx->restricted = 1;
+
+	ctx->flags &= ~IORING_SETUP_R_DISABLED;
+
+	io_sq_offload_start(ctx);
+
+	return 0;
+}
+
 static bool io_register_op_must_quiesce(int op)
 {
 	switch (op) {
@@ -9304,6 +9345,12 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 		break;
 		ret = io_unregister_personality(ctx, nr_args);
 		break;
+	case IORING_REGISTER_ENABLE_RINGS:
+		ret = -EINVAL;
+		if (arg || nr_args)
+			break;
+		ret = io_register_enable_rings(ctx);
+		break;
 	case IORING_REGISTER_RESTRICTIONS:
 		ret = io_register_restrictions(ctx, arg, nr_args);
 		break;
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 6e7f2e5e917b..a0c85e0e9016 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -95,6 +95,7 @@ enum {
 #define IORING_SETUP_CQSIZE	(1U << 3)	/* app defines CQ size */
 #define IORING_SETUP_CLAMP	(1U << 4)	/* clamp SQ/CQ ring sizes */
 #define IORING_SETUP_ATTACH_WQ	(1U << 5)	/* attach to existing wq */
+#define IORING_SETUP_R_DISABLED	(1U << 6)	/* start with ring disabled */
 
 enum {
 	IORING_OP_NOP,
@@ -268,6 +269,7 @@ enum {
 	IORING_REGISTER_PERSONALITY		= 9,
 	IORING_UNREGISTER_PERSONALITY		= 10,
 	IORING_REGISTER_RESTRICTIONS		= 11,
+	IORING_REGISTER_ENABLE_RINGS		= 12,
 
 	/* this goes last */
 	IORING_REGISTER_LAST
-- cgit v1.2.3
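Taken together with the previous commit, the intended userspace flow
looks roughly like this (raw syscall wrappers are assumed, and error
handling is trimmed):

#include <string.h>
#include <linux/io_uring.h>

/* Assumed wrappers around the raw syscalls */
extern int io_uring_setup(unsigned int entries, struct io_uring_params *p);
extern int io_uring_register(int fd, unsigned int opcode,
			     void *arg, unsigned int nr_args);

static int setup_restricted_ring(void)
{
	struct io_uring_params p;
	int fd, ret;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_R_DISABLED;	/* ring starts disabled */

	fd = io_uring_setup(64, &p);
	if (fd < 0)
		return fd;

	/* ... IORING_REGISTER_RESTRICTIONS, buffers, files go here ... */

	/* enable the ring; io_uring_enter() fails with -EBADFD until now */
	ret = io_uring_register(fd, IORING_REGISTER_ENABLE_RINGS, NULL, 0);
	return ret < 0 ? ret : fd;
}

From ce71bfea207b4d7c21d36f24ec37618ffcea1da8 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Mon, 31 Aug 2020 12:08:10 -0600
Subject: fs: align IOCB_* flags with RWF_* flags

We have a set of flags that are shared between the two and inherited
in kiocb_set_rw_flags(), but we check and set these individually.
Reorder the IOCB flags so that the bottom part of the space is synced
with the RWF flag space, and then we can do them all in one mask and
set operation.

The only exception is RWF_SYNC, which needs to mark IOCB_SYNC and
IOCB_DSYNC. Do that one separately.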
This shaves 15 bytes of text from kiocb_set_rw_flags() for me.

Suggested-by: Matthew Wilcox (Oracle)
Signed-off-by: Jens Axboe
---
 include/linux/fs.h | 37 +++++++++++++++++++------------------
 1 file changed, 19 insertions(+), 18 deletions(-)

(limited to 'include')

diff --git a/include/linux/fs.h b/include/linux/fs.h
index 1c4068428461..4ebc14dee421 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -310,17 +310,20 @@ enum rw_hint {
 	WRITE_LIFE_EXTREME	= RWH_WRITE_LIFE_EXTREME,
 };
 
-#define IOCB_EVENTFD		(1 << 0)
-#define IOCB_APPEND		(1 << 1)
-#define IOCB_DIRECT		(1 << 2)
-#define IOCB_HIPRI		(1 << 3)
-#define IOCB_DSYNC		(1 << 4)
-#define IOCB_SYNC		(1 << 5)
-#define IOCB_WRITE		(1 << 6)
-#define IOCB_NOWAIT		(1 << 7)
+/* Match RWF_* bits to IOCB bits */
+#define IOCB_HIPRI		(__force int) RWF_HIPRI
+#define IOCB_DSYNC		(__force int) RWF_DSYNC
+#define IOCB_SYNC		(__force int) RWF_SYNC
+#define IOCB_NOWAIT		(__force int) RWF_NOWAIT
+#define IOCB_APPEND		(__force int) RWF_APPEND
+
+/* non-RWF related bits - start at 16 */
+#define IOCB_EVENTFD		(1 << 16)
+#define IOCB_DIRECT		(1 << 17)
+#define IOCB_WRITE		(1 << 18)
 /* iocb->ki_waitq is valid */
-#define IOCB_WAITQ		(1 << 8)
-#define IOCB_NOIO		(1 << 9)
+#define IOCB_WAITQ		(1 << 19)
+#define IOCB_NOIO		(1 << 20)
 
 struct kiocb {
 	struct file *ki_filp;
@@ -3317,6 +3320,9 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
 {
 	int kiocb_flags = 0;
 
+	/* make sure there's no overlap between RWF and private IOCB flags */
+	BUILD_BUG_ON((__force int) RWF_SUPPORTED & IOCB_EVENTFD);
+
 	if (!flags)
 		return 0;
 	if (unlikely(flags & ~RWF_SUPPORTED))
@@ -3325,16 +3331,11 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
 	if (flags & RWF_NOWAIT) {
 		if (!(ki->ki_filp->f_mode & FMODE_NOWAIT))
 			return -EOPNOTSUPP;
-		kiocb_flags |= IOCB_NOWAIT | IOCB_NOIO;
+		kiocb_flags |= IOCB_NOIO;
 	}
-	if (flags & RWF_HIPRI)
-		kiocb_flags |= IOCB_HIPRI;
-	if (flags & RWF_DSYNC)
-		kiocb_flags |= IOCB_DSYNC;
+	kiocb_flags |= (__force int) (flags & RWF_SUPPORTED);
 	if (flags & RWF_SYNC)
-		kiocb_flags |= (IOCB_DSYNC | IOCB_SYNC);
-	if (flags & RWF_APPEND)
-		kiocb_flags |= IOCB_APPEND;
+		kiocb_flags |= IOCB_DSYNC;
 
 	ki->ki_flags |= kiocb_flags;
 	return 0;
-- cgit v1.2.3
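Since the shared RWF_* values now map straight through to IOCB bits, a
userspace example that exercises this path via pwritev2(2); the helper
itself is illustrative, not part of the patch:

#define _GNU_SOURCE
#include <sys/uio.h>
#include <fcntl.h>

/* Illustrative: RWF_DSYNC | RWF_NOWAIT now copy to IOCB bits in one mask. */
static ssize_t write_dsync_nowait(int fd, const void *buf, size_t len,
				  off_t off)
{
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };

	return pwritev2(fd, &iov, 1, off, RWF_DSYNC | RWF_NOWAIT);
}

From 90554200724d5b280439dc361fe7ee92fe459ea7 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Thu, 3 Sep 2020 12:12:41 -0600
Subject: io_uring: provide IORING_ENTER_SQ_WAIT for SQPOLL SQ ring waits

When using SQPOLL, applications can run into the issue of running out
of SQ ring entries because the thread hasn't consumed them yet. The
only options for dealing with that are checking later, or busy-waiting
on the condition. Provide IORING_ENTER_SQ_WAIT if applications want to
wait on this condition.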
Signed-off-by: Jens Axboe
---
 fs/io_uring.c                 | 40 +++++++++++++++++++++++++++++++++++---
 include/uapi/linux/io_uring.h |  1 +
 2 files changed, 38 insertions(+), 3 deletions(-)

(limited to 'include')

diff --git a/fs/io_uring.c b/fs/io_uring.c
index a924ab1cf15b..3ee6ee1785d2 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -301,6 +301,7 @@ struct io_ring_ctx {
 
 	struct io_sq_data	*sq_data;	/* if using sq thread polling */
 
+	struct wait_queue_head	sqo_sq_wait;
 	struct wait_queue_entry	sqo_wait_entry;
 	struct list_head	sqd_list;
 
@@ -1072,6 +1073,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 		goto err;
 
 	ctx->flags = p->flags;
+	init_waitqueue_head(&ctx->sqo_sq_wait);
 	INIT_LIST_HEAD(&ctx->sqd_list);
 	init_waitqueue_head(&ctx->cq_wait);
 	INIT_LIST_HEAD(&ctx->cq_overflow_list);
@@ -1324,6 +1326,13 @@ static void io_commit_cqring(struct io_ring_ctx *ctx)
 		__io_queue_deferred(ctx);
 }
 
+static inline bool io_sqring_full(struct io_ring_ctx *ctx)
+{
+	struct io_rings *r = ctx->rings;
+
+	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == r->sq_ring_entries;
+}
+
 static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
 {
 	struct io_rings *rings = ctx->rings;
@@ -6736,6 +6745,10 @@ again:
 	if (likely(!percpu_ref_is_dying(&ctx->refs)))
 		ret = io_submit_sqes(ctx, to_submit);
 	mutex_unlock(&ctx->uring_lock);
+
+	if (!io_sqring_full(ctx) && wq_has_sleeper(&ctx->sqo_sq_wait))
+		wake_up(&ctx->sqo_sq_wait);
+
 	return SQT_DID_WORK;
 }
 
@@ -8231,8 +8244,7 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
 	 * io_commit_cqring
 	 */
 	smp_rmb();
-	if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
-	    ctx->rings->sq_ring_entries)
+	if (!io_sqring_full(ctx))
 		mask |= EPOLLOUT | EPOLLWRNORM;
 	if (io_cqring_events(ctx, false))
 		mask |= EPOLLIN | EPOLLRDNORM;
@@ -8801,6 +8813,25 @@ static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
 
 #endif /* !CONFIG_MMU */
 
+static void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
+{
+	DEFINE_WAIT(wait);
+
+	do {
+		if (!io_sqring_full(ctx))
+			break;
+
+		prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
+
+		if (!io_sqring_full(ctx))
+			break;
+
+		schedule();
+	} while (!signal_pending(current));
+
+	finish_wait(&ctx->sqo_sq_wait, &wait);
+}
+
 SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 		u32, min_complete, u32, flags, const sigset_t __user *, sig,
 		size_t, sigsz)
@@ -8812,7 +8843,8 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 
 	io_run_task_work();
 
-	if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP))
+	if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
+		      IORING_ENTER_SQ_WAIT))
 		return -EINVAL;
 
 	f = fdget(fd);
@@ -8843,6 +8875,8 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 			io_cqring_overflow_flush(ctx, false, NULL, NULL);
 		if (flags & IORING_ENTER_SQ_WAKEUP)
 			wake_up(&ctx->sq_data->wait);
+		if (flags & IORING_ENTER_SQ_WAIT)
+			io_sqpoll_wait_sq(ctx);
 		submitted = to_submit;
 	} else if (to_submit) {
 		ret = io_uring_add_task_file(f.file);
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index a0c85e0e9016..98d8e06dea22 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -225,6 +225,7 @@ struct io_cqring_offsets {
  */
#define IORING_ENTER_GETEVENTS	(1U << 0)
 #define IORING_ENTER_SQ_WAKEUP	(1U << 1)
+#define IORING_ENTER_SQ_WAIT	(1U << 2)
 
 /*
  * Passed in for io_uring_setup(2). Copied back with updated info on success
-- cgit v1.2.3
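From the application side, the new flag turns the busy-check into a
blocking wait. A sketch, assuming a raw io_uring_enter(2) syscall
wrapper (the helper name is hypothetical):

#include <errno.h>
#include <linux/io_uring.h>

/* Assumed wrapper around syscall(__NR_io_uring_enter, ...) */
extern int io_uring_enter(int fd, unsigned int to_submit,
			  unsigned int min_complete, unsigned int flags,
			  void *sig);

/* Block until the SQPOLL thread has freed up SQ ring space. */
static int wait_for_sq_space(int ring_fd)
{
	int ret;

	do {
		ret = io_uring_enter(ring_fd, 0, 0, IORING_ENTER_SQ_WAIT, NULL);
	} while (ret < 0 && errno == EINTR);

	return ret;
}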