path: root/fs/io_uring.c
author		Jens Axboe <axboe@kernel.dk>	2020-04-28 13:15:06 -0600
committer	Jens Axboe <axboe@kernel.dk>	2020-04-30 22:24:22 -0600
commit		af197f50ac53fff1241598c73ca606754a3bb808 (patch)
tree		81768c80c5b085d1494d6e9e4680c30c6f83e3e4 /fs/io_uring.c
parent		5b0bbee4732cbd58aa98213d4c11a366356bba3d (diff)
download	linux-af197f50ac53fff1241598c73ca606754a3bb808.tar.bz2
io_uring: enable poll retry for any file with ->read_iter / ->write_iter
We can have files like eventfd where it's perfectly fine to do poll based
retry on them, but right now io_file_supports_async() doesn't take that
into account.

Pass in the data direction and check the f_op instead of just always
needing an async worker.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
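A quick userspace model of the check this commit adds may help when reading
the diff below. The model_* names here are invented stand-ins for
illustration only; FMODE_NOWAIT, ->read_iter and ->write_iter are the real
kernel symbols the patch tests, but nothing in this sketch is kernel code:

/*
 * Userspace model of the new io_file_supports_async() tail. All
 * model_* names are hypothetical; only the shape of the check is real.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MODEL_FMODE_NOWAIT	(1u << 0)	/* stands in for FMODE_NOWAIT */

enum { MODEL_READ, MODEL_WRITE };		/* stands in for READ/WRITE */

struct model_file_ops {
	int (*read_iter)(void);		/* non-NULL: file provides ->read_iter */
	int (*write_iter)(void);	/* non-NULL: file provides ->write_iter */
};

struct model_file {
	unsigned int f_mode;
	const struct model_file_ops *f_op;
};

/* Mirrors the tail the patch adds: NOWAIT-capable and has the right hook. */
static bool model_supports_async(const struct model_file *file, int rw)
{
	/* (the S_ISxxx special cases of the real function are elided) */
	if (!(file->f_mode & MODEL_FMODE_NOWAIT))
		return false;
	if (rw == MODEL_READ)
		return file->f_op->read_iter != NULL;
	return file->f_op->write_iter != NULL;
}

static int dummy_iter(void) { return 0; }

int main(void)
{
	/* eventfd-like file: NOWAIT plus iter hooks -> poll retry is fine */
	const struct model_file_ops iter_ops = { dummy_iter, dummy_iter };
	struct model_file evfd = { MODEL_FMODE_NOWAIT, &iter_ops };

	/* file without FMODE_NOWAIT -> still has to go to the async worker */
	const struct model_file_ops plain_ops = { NULL, NULL };
	struct model_file plain = { 0, &plain_ops };

	printf("eventfd-like read: %d\n", model_supports_async(&evfd, MODEL_READ));
	printf("plain write:       %d\n", model_supports_async(&plain, MODEL_WRITE));
	return 0;
}

When the check passes, a -EAGAIN result can be retried via poll rather than
being punted to an async worker, which is the point of the commit.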
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--	fs/io_uring.c	20	+++++++++++++-------
1 file changed, 13 insertions(+), 7 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 084dfade5cda..516a59db73ca 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2038,7 +2038,7 @@ static struct file *__io_file_get(struct io_submit_state *state, int fd)
  * any file. For now, just ensure that anything potentially problematic is done
  * inline.
  */
-static bool io_file_supports_async(struct file *file)
+static bool io_file_supports_async(struct file *file, int rw)
 {
 	umode_t mode = file_inode(file)->i_mode;
 
@@ -2047,7 +2047,13 @@ static bool io_file_supports_async(struct file *file)
 	if (S_ISREG(mode) && file->f_op != &io_uring_fops)
 		return true;
 
-	return false;
+	if (!(file->f_mode & FMODE_NOWAIT))
+		return false;
+
+	if (rw == READ)
+		return file->f_op->read_iter != NULL;
+
+	return file->f_op->write_iter != NULL;
 }
 
 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
@@ -2575,7 +2581,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock)
 	 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
 	 * we know to async punt it even if it was opened O_NONBLOCK
 	 */
-	if (force_nonblock && !io_file_supports_async(req->file))
+	if (force_nonblock && !io_file_supports_async(req->file, READ))
 		goto copy_iov;
 
 	iov_count = iov_iter_count(&iter);
@@ -2666,7 +2672,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock)
 	 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
 	 * we know to async punt it even if it was opened O_NONBLOCK
 	 */
-	if (force_nonblock && !io_file_supports_async(req->file))
+	if (force_nonblock && !io_file_supports_async(req->file, WRITE))
 		goto copy_iov;
 
 	/* file path doesn't support NOWAIT for non-direct_IO */
@@ -2760,11 +2766,11 @@ static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	return 0;
 }
 
-static bool io_splice_punt(struct file *file)
+static bool io_splice_punt(struct file *file, int rw)
 {
 	if (get_pipe_info(file))
 		return false;
-	if (!io_file_supports_async(file))
+	if (!io_file_supports_async(file, rw))
 		return true;
 	return !(file->f_flags & O_NONBLOCK);
 }
@@ -2779,7 +2785,7 @@ static int io_splice(struct io_kiocb *req, bool force_nonblock)
 	long ret;
 
 	if (force_nonblock) {
-		if (io_splice_punt(in) || io_splice_punt(out))
+		if (io_splice_punt(in, READ) || io_splice_punt(out, WRITE))
 			return -EAGAIN;
 		flags |= SPLICE_F_NONBLOCK;
 	}
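For the splice path, the patch feeds the same direction-aware check into the
punt decision: the read side is checked with READ, the write side with WRITE.
A sketch of that logic, continuing the hypothetical userspace model above
(is_pipe and nonblock stand in for get_pipe_info() and f_flags & O_NONBLOCK,
which the model doesn't carry):

/* Mirrors io_splice_punt() after the patch: don't punt a pipe; punt a
 * non-pipe that can't do async I/O in this direction; otherwise punt
 * only if the file wasn't opened nonblocking. */
static bool model_splice_punt(const struct model_file *file, int rw,
			      bool is_pipe, bool nonblock)
{
	if (is_pipe)
		return false;
	if (!model_supports_async(file, rw))
		return true;
	return !nonblock;
}

On the force_nonblock path, io_splice() then returns -EAGAIN when either end
punts, deferring the whole splice to the async worker; otherwise it proceeds
with SPLICE_F_NONBLOCK set.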