Diffstat (limited to 'io_uring/msg_ring.c')
-rw-r--r--	io_uring/msg_ring.c	164
1 file changed, 117 insertions(+), 47 deletions(-)
diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
index afb543aab9f6..2d3cd945a531 100644
--- a/io_uring/msg_ring.c
+++ b/io_uring/msg_ring.c
@@ -15,6 +15,8 @@
 struct io_msg {
 	struct file *file;
+	struct file *src_file;
+	struct callback_head tw;
 	u64 user_data;
 	u32 len;
 	u32 cmd;
@@ -23,6 +25,34 @@ struct io_msg {
 	u32 flags;
 };
+void io_msg_ring_cleanup(struct io_kiocb *req)
+{
+	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+
+	if (WARN_ON_ONCE(!msg->src_file))
+		return;
+
+	fput(msg->src_file);
+	msg->src_file = NULL;
+}
+
+static void io_msg_tw_complete(struct callback_head *head)
+{
+	struct io_msg *msg = container_of(head, struct io_msg, tw);
+	struct io_kiocb *req = cmd_to_io_kiocb(msg);
+	struct io_ring_ctx *target_ctx = req->file->private_data;
+	int ret = 0;
+
+	if (current->flags & PF_EXITING)
+		ret = -EOWNERDEAD;
+	else if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+		ret = -EOVERFLOW;
+
+	if (ret < 0)
+		req_set_fail(req);
+	io_req_queue_tw_complete(req, ret);
+}
+
 static int io_msg_ring_data(struct io_kiocb *req)
 {
 	struct io_ring_ctx *target_ctx = req->file->private_data;
@@ -31,23 +61,29 @@ static int io_msg_ring_data(struct io_kiocb *req)
 	if (msg->src_fd || msg->dst_fd || msg->flags)
 		return -EINVAL;
+	if (target_ctx->task_complete && current != target_ctx->submitter_task) {
+		init_task_work(&msg->tw, io_msg_tw_complete);
+		if (task_work_add(target_ctx->submitter_task, &msg->tw,
+				   TWA_SIGNAL_NO_IPI))
+			return -EOWNERDEAD;
+
+		atomic_or(IORING_SQ_TASKRUN, &target_ctx->rings->sq_flags);
+		return IOU_ISSUE_SKIP_COMPLETE;
+	}
+
 	if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
 		return 0;
 	return -EOVERFLOW;
 }
-static void io_double_unlock_ctx(struct io_ring_ctx *ctx,
-				 struct io_ring_ctx *octx,
+static void io_double_unlock_ctx(struct io_ring_ctx *octx,
 				 unsigned int issue_flags)
 {
-	if (issue_flags & IO_URING_F_UNLOCKED)
-		mutex_unlock(&ctx->uring_lock);
 	mutex_unlock(&octx->uring_lock);
 }
-static int io_double_lock_ctx(struct io_ring_ctx *ctx,
-			      struct io_ring_ctx *octx,
+static int io_double_lock_ctx(struct io_ring_ctx *octx,
			      unsigned int issue_flags)
 {
 	/*
@@ -60,56 +96,49 @@ static int io_double_lock_ctx(struct io_ring_ctx *ctx,
 			return -EAGAIN;
 		return 0;
 	}
-
-	/* Always grab smallest value ctx first. We know ctx != octx. */
-	if (ctx < octx) {
-		mutex_lock(&ctx->uring_lock);
-		mutex_lock(&octx->uring_lock);
-	} else {
-		mutex_lock(&octx->uring_lock);
-		mutex_lock(&ctx->uring_lock);
-	}
-
+	mutex_lock(&octx->uring_lock);
 	return 0;
 }
-static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
+static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_ring_ctx *target_ctx = req->file->private_data;
 	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
 	struct io_ring_ctx *ctx = req->ctx;
+	struct file *file = NULL;
 	unsigned long file_ptr;
-	struct file *src_file;
-	int ret;
-
-	if (target_ctx == ctx)
-		return -EINVAL;
-
-	ret = io_double_lock_ctx(ctx, target_ctx, issue_flags);
-	if (unlikely(ret))
-		return ret;
-
-	ret = -EBADF;
-	if (unlikely(msg->src_fd >= ctx->nr_user_files))
-		goto out_unlock;
+	int idx = msg->src_fd;
+
+	io_ring_submit_lock(ctx, issue_flags);
+	if (likely(idx < ctx->nr_user_files)) {
+		idx = array_index_nospec(idx, ctx->nr_user_files);
+		file_ptr = io_fixed_file_slot(&ctx->file_table, idx)->file_ptr;
+		file = (struct file *) (file_ptr & FFS_MASK);
+		if (file)
+			get_file(file);
+	}
+	io_ring_submit_unlock(ctx, issue_flags);
+	return file;
+}
-	msg->src_fd = array_index_nospec(msg->src_fd, ctx->nr_user_files);
-	file_ptr = io_fixed_file_slot(&ctx->file_table, msg->src_fd)->file_ptr;
-	if (!file_ptr)
-		goto out_unlock;
+static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flags)
+{
+	struct io_ring_ctx *target_ctx = req->file->private_data;
+	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+	struct file *src_file = msg->src_file;
+	int ret;
-	src_file = (struct file *) (file_ptr & FFS_MASK);
-	get_file(src_file);
+	if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
+		return -EAGAIN;
 	ret = __io_fixed_fd_install(target_ctx, src_file, msg->dst_fd);
-	if (ret < 0) {
-		fput(src_file);
+	if (ret < 0)
 		goto out_unlock;
-	}
+
+	msg->src_file = NULL;
+	req->flags &= ~REQ_F_NEED_CLEANUP;
 	if (msg->flags & IORING_MSG_RING_CQE_SKIP)
 		goto out_unlock;
-
 	/*
 	 * If this fails, the target still received the file descriptor but
 	 * wasn't notified of the fact. This means that if this request
@@ -119,10 +148,51 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
 	if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
 		ret = -EOVERFLOW;
 out_unlock:
-	io_double_unlock_ctx(ctx, target_ctx, issue_flags);
+	io_double_unlock_ctx(target_ctx, issue_flags);
 	return ret;
 }
+static void io_msg_tw_fd_complete(struct callback_head *head)
+{
+	struct io_msg *msg = container_of(head, struct io_msg, tw);
+	struct io_kiocb *req = cmd_to_io_kiocb(msg);
+	int ret = -EOWNERDEAD;
+
+	if (!(current->flags & PF_EXITING))
+		ret = io_msg_install_complete(req, IO_URING_F_UNLOCKED);
+	if (ret < 0)
+		req_set_fail(req);
+	io_req_queue_tw_complete(req, ret);
+}
+
+static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
+{
+	struct io_ring_ctx *target_ctx = req->file->private_data;
+	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+	struct io_ring_ctx *ctx = req->ctx;
+	struct file *src_file = msg->src_file;
+
+	if (target_ctx == ctx)
+		return -EINVAL;
+	if (!src_file) {
+		src_file = io_msg_grab_file(req, issue_flags);
+		if (!src_file)
+			return -EBADF;
+		msg->src_file = src_file;
+		req->flags |= REQ_F_NEED_CLEANUP;
+	}
+
+	if (target_ctx->task_complete && current != target_ctx->submitter_task) {
+		init_task_work(&msg->tw, io_msg_tw_fd_complete);
+		if (task_work_add(target_ctx->submitter_task, &msg->tw,
+				   TWA_SIGNAL))
+			return -EOWNERDEAD;
+
+		return IOU_ISSUE_SKIP_COMPLETE;
+	}
+	return io_msg_install_complete(req, issue_flags);
+}
+
 int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
@@ -130,6 +200,7 @@ int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (unlikely(sqe->buf_index || sqe->personality))
 		return -EINVAL;
+	msg->src_file = NULL;
 	msg->user_data = READ_ONCE(sqe->off);
 	msg->len = READ_ONCE(sqe->len);
 	msg->cmd = READ_ONCE(sqe->addr);
@@ -164,12 +235,11 @@ int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
 	}
 done:
-	if (ret < 0)
+	if (ret < 0) {
+		if (ret == -EAGAIN || ret == IOU_ISSUE_SKIP_COMPLETE)
+			return ret;
 		req_set_fail(req);
+	}
 	io_req_set_res(req, ret, 0);
-	/* put file to avoid an attempt to IOPOLL the req */
-	if (!(req->flags & REQ_F_FIXED_FILE))
-		io_put_file(req->file);
-	req->file = NULL;
 	return IOU_OK;
 }
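
For context, the sketch below is not part of the patch: it shows the userspace side of the IORING_OP_MSG_RING flow that io_msg_ring_data() serves, posting a CQE from one ring into another. It assumes liburing (io_uring_prep_msg_ring and friends) and omits error handling for brevity. When the receiving ring is created with IORING_SETUP_DEFER_TASKRUN/IORING_SETUP_SINGLE_ISSUER and the sender runs in a different task, the CQE is now delivered through the target submitter's task_work, as added above.

#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring src, dst;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;

	/* two independent rings; dst is the message target */
	io_uring_queue_init(8, &src, 0);
	io_uring_queue_init(8, &dst, 0);

	/* SQE on src: post a CQE carrying len/user_data into dst */
	sqe = io_uring_get_sqe(&src);
	io_uring_prep_msg_ring(sqe, dst.ring_fd, 0x10 /* len */, 0x1234 /* user_data */, 0);
	io_uring_submit(&src);

	/* the message shows up as a CQE on the destination ring */
	io_uring_wait_cqe(&dst, &cqe);
	printf("dst CQE: user_data=0x%llx res=%d\n",
	       (unsigned long long) cqe->user_data, cqe->res);
	io_uring_cqe_seen(&dst, cqe);

	io_uring_queue_exit(&src);
	io_uring_queue_exit(&dst);
	return 0;
}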