author     Pavel Begunkov <asml.silence@gmail.com>  2020-09-30 22:57:54 +0300
committer  Jens Axboe <axboe@kernel.dk>  2020-09-30 20:38:46 -0600
commit     73debe68b300ca24fdbbe9b6c3787d3e239deb3d (patch)
tree       3084a8805bb57076abadbd440f7afd37cf5749ae /fs/io_uring.c
parent     a88fc400212fc1d8aa9ca4979f898fd04ca3aab5 (diff)
io_uring: remove nonblock arg from io_{rw}_prep()
All io_*_prep() functions including io_{read,write}_prep() are called only
during submission where @force_nonblock is always true. Don't keep
propagating it and instead remove the @force_nonblock argument from prep()
altogether.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
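For readers skimming the diff below, this is the usual "fold a call-site-constant
flag into the callee" cleanup: since @force_nonblock is true at every prep-time
caller, the only use of the flag (!force_nonblock) collapses to false. A minimal
standalone C sketch of that transformation follows; it is not kernel code, and
prep_before()/prep_after() are made-up names used purely for illustration.

/*
 * Standalone illustration (not from io_uring.c): a boolean parameter that
 * every caller passes as true can be dropped, with its only use folded
 * into a constant.
 */
#include <stdbool.h>
#include <stdio.h>

/* Before: every call site passes nonblock == true, so !nonblock is always
 * false inside the helper. */
static int prep_before(const char *buf, bool nonblock)
{
	return printf("import(%s, needs_copy=%d)\n", buf, !nonblock);
}

/* After: the parameter is gone and the constant is inlined, mirroring how
 * io_rw_prep_async() now passes a literal false to __io_import_iovec(). */
static int prep_after(const char *buf)
{
	return printf("import(%s, needs_copy=%d)\n", buf, false);
}

int main(void)
{
	prep_before("sqe", true);	/* the flag was always true here */
	prep_after("sqe");		/* identical behaviour, simpler API */
	return 0;
}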
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--  fs/io_uring.c  23
1 file changed, 10 insertions, 13 deletions
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 32053ad84d1a..b0844296ad90 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -3122,14 +3122,13 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
return 0;
}
-static inline int io_rw_prep_async(struct io_kiocb *req, int rw,
- bool force_nonblock)
+static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
{
struct io_async_rw *iorw = req->async_data;
struct iovec *iov = iorw->fast_iov;
ssize_t ret;
- ret = __io_import_iovec(rw, req, &iov, &iorw->iter, !force_nonblock);
+ ret = __io_import_iovec(rw, req, &iov, &iorw->iter, false);
if (unlikely(ret < 0))
return ret;
@@ -3140,8 +3139,7 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw,
return 0;
}
-static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- bool force_nonblock)
+static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
ssize_t ret;
@@ -3155,7 +3153,7 @@ static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
/* either don't need iovec imported or already have it */
if (!req->async_data)
return 0;
- return io_rw_prep_async(req, READ, force_nonblock);
+ return io_rw_prep_async(req, READ);
}
/*
@@ -3367,8 +3365,7 @@ out_free:
return ret;
}
-static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- bool force_nonblock)
+static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
ssize_t ret;
@@ -3382,7 +3379,7 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
/* either don't need iovec imported or already have it */
if (!req->async_data)
return 0;
- return io_rw_prep_async(req, WRITE, force_nonblock);
+ return io_rw_prep_async(req, WRITE);
}
static int io_write(struct io_kiocb *req, bool force_nonblock,
@@ -5615,12 +5612,12 @@ static int io_req_defer_prep(struct io_kiocb *req,
case IORING_OP_READV:
case IORING_OP_READ_FIXED:
case IORING_OP_READ:
- ret = io_read_prep(req, sqe, true);
+ ret = io_read_prep(req, sqe);
break;
case IORING_OP_WRITEV:
case IORING_OP_WRITE_FIXED:
case IORING_OP_WRITE:
- ret = io_write_prep(req, sqe, true);
+ ret = io_write_prep(req, sqe);
break;
case IORING_OP_POLL_ADD:
ret = io_poll_add_prep(req, sqe);
@@ -5851,7 +5848,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
case IORING_OP_READ_FIXED:
case IORING_OP_READ:
if (sqe) {
- ret = io_read_prep(req, sqe, force_nonblock);
+ ret = io_read_prep(req, sqe);
if (ret < 0)
break;
}
@@ -5861,7 +5858,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
case IORING_OP_WRITE_FIXED:
case IORING_OP_WRITE:
if (sqe) {
- ret = io_write_prep(req, sqe, force_nonblock);
+ ret = io_write_prep(req, sqe);
if (ret < 0)
break;
}