commit f43de1f88841d59f27f761219b6550bd6ce3dcc1
tree   f78022cb0f70f79154cee0ba1438476c0434adf5
parent 91ef75a7db0d0855284b78d60d3fcec5c353ec5a
Author:     Pavel Begunkov <asml.silence@gmail.com>
AuthorDate: 2022-06-15 11:23:03 +0100
Commit:     Jens Axboe <axboe@kernel.dk>
CommitDate: 2022-06-15 05:06:09 -0600
io_uring: unite fill_cqe and the 32B version
We want just one function that will handle both normal CQEs and 32B CQEs.
Combine __io_fill_cqe_req() and __io_fill_cqe_req32(). It's still not
entirely correct yet, but it saves us from cases where we'd fill a CQE of
the wrong size.

Fixes: 76c68fbf1a1f9 ("io_uring: enable CQE32")
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/8085c5b2f74141520f60decd45334f87e389b718.1655287457.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
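For context: with IORING_SETUP_CQE32 each completion entry is 32 bytes rather
than 16, carrying two extra 64-bit values (the extra1/extra2 handled below).
A rough sketch of the uapi layout, per include/uapi/linux/io_uring.h around
this kernel version:

struct io_uring_cqe {
        __u64   user_data;      /* sqe->user_data value, passed back */
        __s32   res;            /* result code for this event */
        __u32   flags;

        /*
         * With IORING_SETUP_CQE32 the CQE is 32 bytes; these two extra
         * words carry the values the kernel takes from req->extra1/extra2.
         */
        __u64   big_cqe[];
};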
Diffstat (limited to 'fs/io_uring.c')
 fs/io_uring.c | 61 ++++++++++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 42 insertions(+), 19 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 654c2f897497..eb858cf92af9 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2469,21 +2469,48 @@ static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
 {
         struct io_uring_cqe *cqe;
 
-        trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
-                                req->cqe.res, req->cqe.flags, 0, 0);
+        if (!(ctx->flags & IORING_SETUP_CQE32)) {
+                trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
+                                        req->cqe.res, req->cqe.flags, 0, 0);
 
-        /*
-         * If we can't get a cq entry, userspace overflowed the
-         * submission (by quite a lot). Increment the overflow count in
-         * the ring.
-         */
-        cqe = io_get_cqe(ctx);
-        if (likely(cqe)) {
-                memcpy(cqe, &req->cqe, sizeof(*cqe));
-                return true;
+                /*
+                 * If we can't get a cq entry, userspace overflowed the
+                 * submission (by quite a lot). Increment the overflow count in
+                 * the ring.
+                 */
+                cqe = io_get_cqe(ctx);
+                if (likely(cqe)) {
+                        memcpy(cqe, &req->cqe, sizeof(*cqe));
+                        return true;
+                }
+
+                return io_cqring_event_overflow(ctx, req->cqe.user_data,
+                                                req->cqe.res, req->cqe.flags,
+                                                0, 0);
+        } else {
+                u64 extra1 = req->extra1;
+                u64 extra2 = req->extra2;
+
+                trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
+                                        req->cqe.res, req->cqe.flags, extra1, extra2);
+
+                /*
+                 * If we can't get a cq entry, userspace overflowed the
+                 * submission (by quite a lot). Increment the overflow count in
+                 * the ring.
+                 */
+                cqe = io_get_cqe(ctx);
+                if (likely(cqe)) {
+                        memcpy(cqe, &req->cqe, sizeof(struct io_uring_cqe));
+                        WRITE_ONCE(cqe->big_cqe[0], extra1);
+                        WRITE_ONCE(cqe->big_cqe[1], extra2);
+                        return true;
+                }
+
+                return io_cqring_event_overflow(ctx, req->cqe.user_data,
+                                                req->cqe.res, req->cqe.flags,
+                                                extra1, extra2);
         }
-        return io_cqring_event_overflow(ctx, req->cqe.user_data,
-                                        req->cqe.res, req->cqe.flags, 0, 0);
 }
 
 static inline bool __io_fill_cqe32_req(struct io_ring_ctx *ctx,
@@ -3175,12 +3202,8 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
                 struct io_kiocb *req = container_of(node, struct io_kiocb,
                                                     comp_list);
 
-                if (!(req->flags & REQ_F_CQE_SKIP)) {
-                        if (!(ctx->flags & IORING_SETUP_CQE32))
-                                __io_fill_cqe_req(ctx, req);
-                        else
-                                __io_fill_cqe32_req(ctx, req);
-                }
+                if (!(req->flags & REQ_F_CQE_SKIP))
+                        __io_fill_cqe_req(ctx, req);
         }
 
         io_commit_cqring(ctx);
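As a userspace illustration (not part of this patch), here is a minimal
sketch of consuming 32-byte CQEs, assuming liburing built with CQE32 support
(2.2 or later). The NOP request is only a placeholder; for NOP both big_cqe
words will simply read back as zero, since only opcodes that produce extra
completion data fill them.

#include <liburing.h>
#include <stdio.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_params p = { .flags = IORING_SETUP_CQE32 };
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;

        /* Ask for a ring whose CQ entries are 32 bytes each. */
        if (io_uring_queue_init_params(8, &ring, &p) < 0)
                return 1;

        sqe = io_uring_get_sqe(&ring);
        if (!sqe)
                return 1;
        io_uring_prep_nop(sqe);        /* placeholder request */
        io_uring_submit(&ring);

        if (!io_uring_wait_cqe(&ring, &cqe)) {
                /* With CQE32, each entry carries two extra u64s. */
                printf("res=%d extra1=%llu extra2=%llu\n", cqe->res,
                       (unsigned long long)cqe->big_cqe[0],
                       (unsigned long long)cqe->big_cqe[1]);
                io_uring_cqe_seen(&ring, cqe);
        }
        io_uring_queue_exit(&ring);
        return 0;
}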