author     Linus Torvalds <torvalds@linux-foundation.org>  2023-01-27 16:15:06 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-01-27 16:15:06 -0800
commit     f851453bf19554a42eb480b65436b9500c3cf392 (patch)
tree       45f05ae256a5b4e467e6d6db13ba030132c87edd /io_uring/io_uring.c
parent     28cca23da7240df597240a492a7a7d4ce990026b (diff)
parent     ef5c600adb1d985513d2b612cc90403a148ff287 (diff)
Merge tag 'io_uring-6.2-2023-01-27' of git://git.kernel.dk/linux
Pull io_uring fixes from Jens Axboe:
 "Two small fixes for this release:

  - Sanitize how async prep is done for drain requests, so we ensure
    that it always gets done (Dylan)

  - A ring provided buffer recycling fix for multishot receive (me)"

* tag 'io_uring-6.2-2023-01-27' of git://git.kernel.dk/linux:
  io_uring: always prep_async for drain requests
  io_uring/net: cache provided buffer group value for multishot receives
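For orientation, here is a condensed sketch of the resulting fallback submission flow, assuming only what the io_uring.c hunks below show: io_req_prep_async() now runs unconditionally before the drain_active check, so drain requests always get their async prep. The function name queue_fallback_sketch is hypothetical, and the REQ_F_FAIL branch and locking context of the real io_queue_sqe_fallback() are omitted.

	/* Illustrative sketch only -- simplified from the io_queue_sqe_fallback()
	 * hunk below; not the verbatim kernel code. */
	static void queue_fallback_sketch(struct io_kiocb *req)
	{
		int ret = io_req_prep_async(req);	/* prep happens unconditionally now */

		if (unlikely(ret)) {
			io_req_defer_failed(req, ret);	/* fail the request on prep error */
			return;
		}

		if (unlikely(req->ctx->drain_active))
			io_drain_req(req);		/* drain path no longer preps itself */
		else
			io_queue_iowq(req, NULL);	/* normal async punt */
	}

The ordering change above is the point of the drain fix: async prep can no longer be skipped on the drain path.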
Diffstat (limited to 'io_uring/io_uring.c')
-rw-r--r--  io_uring/io_uring.c  | 18 ++++++++----------
1 file changed, 8 insertions(+), 10 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 0a4efada9b3c..db623b3185c8 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1765,17 +1765,12 @@ queue:
 	}
 	spin_unlock(&ctx->completion_lock);
 
-	ret = io_req_prep_async(req);
-	if (ret) {
-fail:
-		io_req_defer_failed(req, ret);
-		return;
-	}
 	io_prep_async_link(req);
 	de = kmalloc(sizeof(*de), GFP_KERNEL);
 	if (!de) {
 		ret = -ENOMEM;
-		goto fail;
+		io_req_defer_failed(req, ret);
+		return;
 	}
 
 	spin_lock(&ctx->completion_lock);
@@ -2048,13 +2043,16 @@ static void io_queue_sqe_fallback(struct io_kiocb *req)
 		req->flags &= ~REQ_F_HARDLINK;
 		req->flags |= REQ_F_LINK;
 		io_req_defer_failed(req, req->cqe.res);
-	} else if (unlikely(req->ctx->drain_active)) {
-		io_drain_req(req);
 	} else {
 		int ret = io_req_prep_async(req);
 
-		if (unlikely(ret))
+		if (unlikely(ret)) {
 			io_req_defer_failed(req, ret);
+			return;
+		}
+
+		if (unlikely(req->ctx->drain_active))
+			io_drain_req(req);
 		else
 			io_queue_iowq(req, NULL);
 	}
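A consequence worth noting from the first hunk: with async prep moved into the caller, io_drain_req() loses its local fail: label, and its only remaining failure path is the defer-entry allocation. Roughly, and with the surrounding steps elided, that path now reads as below; per the hunk's trailing context, the real function re-takes ctx->completion_lock right after this.

	/* Simplified view of the remaining io_drain_req() error path after
	 * this merge; locking and the defer-list handling are omitted. */
	de = kmalloc(sizeof(*de), GFP_KERNEL);
	if (!de) {
		ret = -ENOMEM;
		io_req_defer_failed(req, ret);
		return;
	}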