author     Pavel Begunkov <asml.silence@gmail.com>   2021-02-04 13:52:01 +0000
committer  Jens Axboe <axboe@kernel.dk>              2021-02-04 08:05:46 -0700
commit     6bf985dc50dd882a95fffa9c7eef0d1416f512e6 (patch)
tree       395df8a66dc5eed0f58e4d7095b3ad37bff15dc7 /fs
parent     1a2cc0ce8d18c9e5592733cb6381e9ff5c23d916 (diff)
download   linux-6bf985dc50dd882a95fffa9c7eef0d1416f512e6.tar.bz2
io_uring: let io_setup_async_rw take care of iovec
Now we hand ownership of the iovec over to io_setup_async_rw(), so it either
sets up the request's async context correctly or frees the iovec itself on
error. That makes life a bit easier at the call sites.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
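To illustrate the ownership rule the patch adopts, here is a minimal user-space
sketch. It is not the kernel code: fake_req, async_ctx, setup_async_rw() and
resubmit_prep() are made-up stand-ins. The point is the contract: on success the
helper attaches the iovec to the request's async data, on allocation failure it
frees the iovec itself, so the call site needs no error-path cleanup.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct async_ctx {
	void *free_iov;			/* buffer to release on teardown */
};

struct fake_req {
	struct async_ctx *async_data;	/* stands in for req->async_data */
};

/*
 * Takes ownership of @iov: on success it is attached to the request's
 * async context, on allocation failure it is freed here, so callers
 * never need an error-path free().
 */
static int setup_async_rw(struct fake_req *req, void *iov)
{
	if (!req->async_data) {
		req->async_data = malloc(sizeof(*req->async_data));
		if (!req->async_data) {
			free(iov);		/* callee cleans up on error */
			return -ENOMEM;
		}
		req->async_data->free_iov = iov;
	}
	return 0;
}

/* The call site mirrors the reworked io_resubmit_prep(): no cleanup branch. */
static int resubmit_prep(struct fake_req *req, void *iov)
{
	return !setup_async_rw(req, iov);
}

int main(void)
{
	struct fake_req req = { 0 };
	void *iov = malloc(64);

	if (!iov)
		return 1;
	printf("prep %s\n", resubmit_prep(&req, iov) ? "succeeded" : "failed");
	if (req.async_data) {
		free(req.async_data->free_iov);
		free(req.async_data);
	}
	return 0;
}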
Diffstat (limited to 'fs')
-rw-r--r--   fs/io_uring.c   24
1 file changed, 9 insertions, 15 deletions
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 1d1fa1f77332..f8492d62b6a1 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2721,11 +2721,7 @@ static bool io_resubmit_prep(struct io_kiocb *req)
 	ret = io_import_iovec(rw, req, &iovec, &iter, false);
 	if (ret < 0)
 		return false;
-	ret = io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
-	if (!ret)
-		return true;
-	kfree(iovec);
-	return false;
+	return !io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
 }
 #endif
@@ -3366,8 +3362,10 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
 	if (!force && !io_op_defs[req->opcode].needs_async_data)
 		return 0;
 	if (!req->async_data) {
-		if (__io_alloc_async_data(req))
+		if (__io_alloc_async_data(req)) {
+			kfree(iovec);
 			return -ENOMEM;
+		}
 
 		io_req_map_rw(req, iovec, fast_iov, iter);
 	}
@@ -3528,9 +3526,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 	/* If the file doesn't support async, just async punt */
 	if (force_nonblock && !io_file_supports_async(req->file, READ)) {
 		ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
-		if (!ret)
-			return -EAGAIN;
-		goto out_free;
+		return ret ?: -EAGAIN;
 	}
 
 	ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
@@ -3565,10 +3561,9 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 	}
 
 	ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
-	if (ret2) {
-		ret = ret2;
-		goto out_free;
-	}
+	if (ret2)
+		return ret2;
+
 	rw = req->async_data;
 	/* it's copied and will be cleaned with ->io */
 	iovec = NULL;
@@ -3703,8 +3698,7 @@ copy_iov:
 		/* some cases will consume bytes even on error returns */
 		iov_iter_revert(iter, io_size - iov_iter_count(iter));
 		ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
-		if (!ret)
-			return -EAGAIN;
+		return ret ?: -EAGAIN;
 	}
 out_free:
 	/* it's reportedly faster than delegating the null check to kfree() */