author     Pavel Begunkov <asml.silence@gmail.com>   2020-09-30 22:57:35 +0300
committer  Jens Axboe <axboe@kernel.dk>              2020-09-30 20:38:46 -0600
commit     2d199895d231c0a1af3a49d1f0da777499f352c8 (patch)
tree       9be90dabddde38ef7d06c0c640280f1a314a423a /fs/io_uring.c
parent     5b09e37e27a878eb50f0eb96fbce8419e932a7d5 (diff)
download   linux-2d199895d231c0a1af3a49d1f0da777499f352c8.tar.bz2
io_uring: remove F_NEED_CLEANUP check in *prep()
REQ_F_NEED_CLEANUP is set only by the io_*_prep() handlers, and they are guaranteed to be called only once, so nothing could have set the flag beforehand. Kill the REQ_F_NEED_CLEANUP check in these *prep() handlers.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
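
For readers skimming the change, the sketch below (plain userspace C, not kernel code; every identifier such as fake_req, fake_prep and FAKE_NEED_CLEANUP is invented for illustration) shows the pattern the commit message relies on: the prep handler is the only place that sets the cleanup flag and it runs exactly once per request, so a check for the flag at the top of prep can never fire.

#include <stdio.h>
#include <stdlib.h>

#define FAKE_NEED_CLEANUP	(1U << 0)

struct fake_req {
	unsigned int flags;
	void *async_data;
};

/* Runs exactly once per request, mirroring the io_*_prep() contract. */
static int fake_prep(struct fake_req *req)
{
	/*
	 * Before this patch the kernel also returned early here when the
	 * cleanup flag was already set; since prep is the only code that
	 * sets it and prep runs once, that branch could never be taken.
	 */
	if (!req->async_data)
		return 0;

	/* Pretend we imported an iovec and now own per-request memory. */
	req->flags |= FAKE_NEED_CLEANUP;
	return 0;
}

static void fake_cleanup(struct fake_req *req)
{
	if (req->flags & FAKE_NEED_CLEANUP) {
		free(req->async_data);
		req->async_data = NULL;
		req->flags &= ~FAKE_NEED_CLEANUP;
	}
}

int main(void)
{
	struct fake_req req = { .flags = 0, .async_data = malloc(16) };

	fake_prep(&req);
	printf("needs cleanup: %s\n",
	       req.flags & FAKE_NEED_CLEANUP ? "yes" : "no");
	fake_cleanup(&req);
	return 0;
}
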
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--   fs/io_uring.c   18
1 file changed, 2 insertions(+), 16 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 513b3a59af37..c0248dc3cdf5 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -3157,7 +3157,7 @@ static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
return -EBADF;
/* either don't need iovec imported or already have it */
- if (!req->async_data || req->flags & REQ_F_NEED_CLEANUP)
+ if (!req->async_data)
return 0;
return io_rw_prep_async(req, READ, force_nonblock);
}
@@ -3381,7 +3381,7 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
return -EBADF;
/* either don't need iovec imported or already have it */
- if (!req->async_data || req->flags & REQ_F_NEED_CLEANUP)
+ if (!req->async_data)
return 0;
return io_rw_prep_async(req, WRITE, force_nonblock);
}
@@ -3482,8 +3482,6 @@ static int __io_splice_prep(struct io_kiocb *req,
unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
int ret;
- if (req->flags & REQ_F_NEED_CLEANUP)
- return 0;
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
@@ -3693,8 +3691,6 @@ static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
return -EINVAL;
- if (req->flags & REQ_F_NEED_CLEANUP)
- return 0;
mode = READ_ONCE(sqe->len);
flags = READ_ONCE(sqe->open_flags);
req->open.how = build_open_how(flags, mode);
@@ -3709,8 +3705,6 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
return -EINVAL;
- if (req->flags & REQ_F_NEED_CLEANUP)
- return 0;
how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
len = READ_ONCE(sqe->len);
if (len < OPEN_HOW_SIZE_VER0)
@@ -4218,10 +4212,6 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
return 0;
- /* iovec is already imported */
- if (req->flags & REQ_F_NEED_CLEANUP)
- return 0;
-
ret = io_sendmsg_copy_hdr(req, async_msg);
if (!ret)
req->flags |= REQ_F_NEED_CLEANUP;
@@ -4448,10 +4438,6 @@ static int io_recvmsg_prep(struct io_kiocb *req,
if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
return 0;
- /* iovec is already imported */
- if (req->flags & REQ_F_NEED_CLEANUP)
- return 0;
-
ret = io_recvmsg_copy_hdr(req, async_msg);
if (!ret)
req->flags |= REQ_F_NEED_CLEANUP;