author    Pavel Begunkov <asml.silence@gmail.com>  2020-07-23 20:25:21 +0300
committer Jens Axboe <axboe@kernel.dk>             2020-07-24 13:00:46 -0600
commit    f56040b81999871973d21f334b4657957422c90e (patch)
tree      8dae496b24ca65e22a0922d54b0f061f713904aa /fs/io_uring.c
parent    ae34817bd93e373a03203a4c6892735c430a14e1 (diff)
download  linux-f56040b81999871973d21f334b4657957422c90e.tar.bz2
io_uring: deduplicate io_grab_files() calls
Move io_req_init_async() into io_grab_files(); it's safer this way. Note that io_queue_async_work() does *init_async(), so it's valid to move it out of the __io_queue_sqe() punt path. Also, add a helper around io_grab_files().

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
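For context, this is the shape of the change; the helper below is taken verbatim from the diff that follows, so nothing here is new code:

static inline int io_prep_work_files(struct io_kiocb *req)
{
	/* Only opcodes flagged with ->file_table need the files grabbed. */
	if (!io_op_defs[req->opcode].file_table)
		return 0;
	/* io_grab_files() now calls io_req_init_async() itself. */
	return io_grab_files(req);
}

Both call sites, io_req_defer_prep() and the punt path in __io_queue_sqe(), collapse to a plain "ret = io_prep_work_files(req); if (unlikely(ret)) ..." followed by a return or a goto err, as the hunks below show.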
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--  fs/io_uring.c  32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 38e4c3902963..c7e8e9a1b27b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -912,7 +912,7 @@ static void io_queue_linked_timeout(struct io_kiocb *req);
 static int __io_sqe_files_update(struct io_ring_ctx *ctx,
 				 struct io_uring_files_update *ip,
 				 unsigned nr_args);
-static int io_grab_files(struct io_kiocb *req);
+static int io_prep_work_files(struct io_kiocb *req);
 static void io_complete_rw_common(struct kiocb *kiocb, long res,
 				  struct io_comp_state *cs);
 static void __io_clean_op(struct io_kiocb *req);
@@ -5294,13 +5294,9 @@ static int io_req_defer_prep(struct io_kiocb *req,
 	if (io_alloc_async_ctx(req))
 		return -EAGAIN;
-
-	if (io_op_defs[req->opcode].file_table) {
-		io_req_init_async(req);
-		ret = io_grab_files(req);
-		if (unlikely(ret))
-			return ret;
-	}
+	ret = io_prep_work_files(req);
+	if (unlikely(ret))
+		return ret;
 
 	switch (req->opcode) {
 	case IORING_OP_NOP:
@@ -5851,6 +5847,8 @@ static int io_grab_files(struct io_kiocb *req)
 	int ret = -EBADF;
 	struct io_ring_ctx *ctx = req->ctx;
 
+	io_req_init_async(req);
+
 	if (req->work.files || (req->flags & REQ_F_NO_FILE_TABLE))
 		return 0;
 	if (!ctx->ring_file)
@@ -5876,6 +5874,13 @@ static int io_grab_files(struct io_kiocb *req)
 	return ret;
 }
 
+static inline int io_prep_work_files(struct io_kiocb *req)
+{
+	if (!io_op_defs[req->opcode].file_table)
+		return 0;
+	return io_grab_files(req);
+}
+
 static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
 {
 	struct io_timeout_data *data = container_of(timer,
@@ -5987,14 +5992,9 @@ again:
 			goto exit;
 		}
 punt:
-		io_req_init_async(req);
-
-		if (io_op_defs[req->opcode].file_table) {
-			ret = io_grab_files(req);
-			if (ret)
-				goto err;
-		}
-
+		ret = io_prep_work_files(req);
+		if (unlikely(ret))
+			goto err;
 		/*
 		 * Queued up for async execution, worker will release
 		 * submit reference when the iocb is actually submitted.
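The reason the unconditional io_req_init_async() inside io_grab_files() is safe on every path is that the init is a guarded no-op once the request's work item has been set up. A rough sketch of that pre-existing helper, not part of this patch, assuming the REQ_F_WORK_INITIALIZED guard used in io_uring.c around this time:

static inline void io_req_init_async(struct io_kiocb *req)
{
	/* Second and later calls (e.g. from an earlier punt) do nothing. */
	if (req->flags & REQ_F_WORK_INITIALIZED)
		return;

	memset(&req->work, 0, sizeof(req->work));
	req->flags |= REQ_F_WORK_INITIALIZED;
}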