summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
author	Jens Axboe <axboe@kernel.dk>	2019-11-03 06:52:50 -0700
committer	Jens Axboe <axboe@kernel.dk>	2019-11-04 07:07:52 -0700
commit	51c3ff62cac635ae9d75f875ce5b7bdafc97abd5 (patch)
tree	3e123b88c1a17500eabdf647cb40e863eda9c069
parent	364b05fd06e87e53dc03396f73afeac48d8e0998 (diff)
download	linux-51c3ff62cac635ae9d75f875ce5b7bdafc97abd5.tar.bz2
io_uring: add completion trace event
We currently don't have a completion event trace, so add one. And to be better able to match up submissions and completions, add user_data to the submission trace as well. Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--	fs/io_uring.c	6
-rw-r--r--	include/trace/events/io_uring.h	54
2 files changed, 49 insertions(+), 11 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index a520c4262d85..7813bc7d5b61 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -592,6 +592,8 @@ static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
{
struct io_uring_cqe *cqe;
+ trace_io_uring_complete(ctx, ki_user_data, res);
+
/*
* If we can't get a cq entry, userspace overflowed the
* submission (by quite a lot). Increment the overflow count in
@@ -2733,7 +2735,7 @@ out:
s.has_user = *mm != NULL;
s.in_async = true;
s.needs_fixed_file = true;
- trace_io_uring_submit_sqe(ctx, true, true);
+ trace_io_uring_submit_sqe(ctx, s.sqe->user_data, true, true);
io_submit_sqe(ctx, &s, statep, &link);
submitted++;
}
@@ -2913,7 +2915,7 @@ out:
s.needs_fixed_file = false;
s.ring_fd = ring_fd;
submit++;
- trace_io_uring_submit_sqe(ctx, true, false);
+ trace_io_uring_submit_sqe(ctx, s.sqe->user_data, true, false);
io_submit_sqe(ctx, &s, statep, &link);
}
diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h
index 8f21d8bf20fd..72a4d0174b02 100644
--- a/include/trace/events/io_uring.h
+++ b/include/trace/events/io_uring.h
@@ -285,9 +285,42 @@ TRACE_EVENT(io_uring_fail_link,
);
/**
+ * io_uring_complete - called when completing an SQE
+ *
+ * @ctx: pointer to a ring context structure
+ * @user_data: user data associated with the request
+ * @res: result of the request
+ *
+ */
+TRACE_EVENT(io_uring_complete,
+
+ TP_PROTO(void *ctx, u64 user_data, long res),
+
+ TP_ARGS(ctx, user_data, res),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( u64, user_data )
+ __field( long, res )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->user_data = user_data;
+ __entry->res = res;
+ ),
+
+ TP_printk("ring %p, user_data 0x%llx, result %ld",
+ __entry->ctx, (unsigned long long)__entry->user_data,
+ __entry->res)
+);
+
+
+/**
* io_uring_submit_sqe - called before submitting one SQE
*
- * @ctx: pointer to a ring context structure
+ * @ctx: pointer to a ring context structure
+ * @user_data: user data associated with the request
* @force_nonblock: whether a context blocking or not
* @sq_thread: true if sq_thread has submitted this SQE
*
@@ -296,24 +329,27 @@ TRACE_EVENT(io_uring_fail_link,
*/
TRACE_EVENT(io_uring_submit_sqe,
- TP_PROTO(void *ctx, bool force_nonblock, bool sq_thread),
+ TP_PROTO(void *ctx, u64 user_data, bool force_nonblock, bool sq_thread),
- TP_ARGS(ctx, force_nonblock, sq_thread),
+ TP_ARGS(ctx, user_data, force_nonblock, sq_thread),
TP_STRUCT__entry (
- __field( void *, ctx )
+ __field( void *, ctx )
+ __field( u64, user_data )
__field( bool, force_nonblock )
- __field( bool, sq_thread )
+ __field( bool, sq_thread )
),
TP_fast_assign(
- __entry->ctx = ctx;
+ __entry->ctx = ctx;
+ __entry->user_data = user_data;
__entry->force_nonblock = force_nonblock;
- __entry->sq_thread = sq_thread;
+ __entry->sq_thread = sq_thread;
),
- TP_printk("ring %p, non block %d, sq_thread %d",
- __entry->ctx, __entry->force_nonblock, __entry->sq_thread)
+ TP_printk("ring %p, user data 0x%llx, non block %d, sq_thread %d",
+ __entry->ctx, (unsigned long long) __entry->user_data,
+ __entry->force_nonblock, __entry->sq_thread)
);
#endif /* _TRACE_IO_URING_H */