author     Olivier Langlois <olivier@trillion01.com>   2021-05-31 02:36:37 -0400
committer  Jens Axboe <axboe@kernel.dk>                2021-06-16 06:41:46 -0600
commit     236daeae3616b1c62ce1a9f8a348d576ec9e22d9
tree       666aad4cdb19e880da11cd59a1905dafec4e49d7 /fs/io_uring.c
parent     2335f6f5ddf2f4621395fac5fa4b53d075828cc1
io_uring: Add to traces the req pointer when available
The req pointer uniquely identifies a specific request. Having it in traces can provide valuable insights that are not possible to have if the calling process is reusing the same user_data value.

Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Olivier Langlois <olivier@trillion01.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
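Every updated call site below passes req as an additional argument (and io_submit_sqe() additionally passes req->flags), so the tracepoint definitions in include/trace/events/io_uring.h have to grow matching parameters. That header is outside this diffstat; the block below is a minimal sketch of what the updated io_uring_submit_sqe event could look like, assuming the conventional TRACE_EVENT layout, with field types and the TP_printk format chosen for illustration. The io_uring_task_run and io_uring_poll_arm events would gain the req parameter in the same way.

/*
 * Sketch of the companion change in include/trace/events/io_uring.h
 * (not shown in this diffstat); parameter order mirrors the call site
 * in io_submit_sqe() below, field types and format are illustrative.
 */
TRACE_EVENT(io_uring_submit_sqe,

	TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data, u32 flags,
		 bool force_nonblock, bool sq_thread),

	TP_ARGS(ctx, req, opcode, user_data, flags, force_nonblock, sq_thread),

	TP_STRUCT__entry(
		__field(void *,	ctx)
		__field(void *,	req)	/* new: uniquely identifies the request */
		__field(u8,	opcode)
		__field(u64,	user_data)
		__field(u32,	flags)
		__field(bool,	force_nonblock)
		__field(bool,	sq_thread)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->req		= req;
		__entry->opcode		= opcode;
		__entry->user_data	= user_data;
		__entry->flags		= flags;
		__entry->force_nonblock	= force_nonblock;
		__entry->sq_thread	= sq_thread;
	),

	TP_printk("ring %p, req %p, op %d, data 0x%llx, flags %u, "
		  "non block %d, sq_thread %d", __entry->ctx, __entry->req,
		  __entry->opcode, (unsigned long long)__entry->user_data,
		  __entry->flags, __entry->force_nonblock, __entry->sq_thread)
);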
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--  fs/io_uring.c  11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 16156a655d8b..d916eb2cef09 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5073,7 +5073,7 @@ static void io_async_task_func(struct callback_head *cb)
 	struct async_poll *apoll = req->apoll;
 	struct io_ring_ctx *ctx = req->ctx;
 
-	trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
+	trace_io_uring_task_run(req->ctx, req, req->opcode, req->user_data);
 
 	if (io_poll_rewait(req, &apoll->poll)) {
 		spin_unlock_irq(&ctx->completion_lock);
@@ -5206,8 +5206,8 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
 		return false;
 	}
 	spin_unlock_irq(&ctx->completion_lock);
-	trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
-					apoll->poll.events);
+	trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data,
+					mask, apoll->poll.events);
 	return true;
 }
 
@@ -6604,8 +6604,9 @@ fail_req:
 		goto fail_req;
 
 	/* don't need @sqe from now on */
-	trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
-				true, ctx->flags & IORING_SETUP_SQPOLL);
+	trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data,
+				req->flags, true,
+				ctx->flags & IORING_SETUP_SQPOLL);
 	/*
 	 * If we already have a head request, queue this one for async