author     Maxime Ripard <maxime@cerno.tech>    2022-10-26 15:46:59 +0200
committer  Stephen Boyd <sboyd@kernel.org>      2022-12-07 13:54:09 -0800
commit     49e62e0d96baf7236615e4ec2878d8db229de9c2 (patch)
tree       875f2ea0c077b1d71105c8e61a17d8cd6d1b9286
parent     ef13f8b64728c4b4d28639bbcf30fe1314b18482 (diff)
download   linux-49e62e0d96baf7236615e4ec2878d8db229de9c2.tar.bz2
clk: Add trace events for rate requests
It is currently fairly difficult to follow which clk_rate_requests are issued, and how they have been modified once done. Indeed, there are multiple paths that can be taken, some functions are recursive and will just forward the request to their parent, etc.

Adding a lot of debug prints is not very convenient, so let's add trace events for the clock requests: one before they are submitted and one after they are returned. That way we can simply toggle the tracing on without modifying the kernel code, and without affecting performance or the kernel logs too much.

Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Signed-off-by: Maxime Ripard <maxime@cerno.tech>
Link: https://lore.kernel.org/r/20221018-clk-rate-request-tracing-v2-2-5170b363c413@cerno.tech
Signed-off-by: Stephen Boyd <sboyd@kernel.org>
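[Editor's note] As a usage sketch (not part of the patch): the new events fire on the ordinary consumer paths, since clk_round_rate() and clk_set_rate() both end up in the round-rate helpers instrumented below. A driver negotiating a rate, like the hypothetical one here, would therefore emit a clk_rate_request_start/clk_rate_request_done pair for each request walked up the clock tree. The device, the clock lookup and the 148.5 MHz rate are made up for illustration.

	#include <linux/clk.h>

	/* Hypothetical consumer: round a rate, then set it (both paths are traced). */
	static int example_pick_rate(struct device *dev)
	{
		struct clk *clk;
		long rounded;

		clk = devm_clk_get(dev, NULL);
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		/* emits clk_rate_request_start/_done while the request is rounded */
		rounded = clk_round_rate(clk, 148500000);
		if (rounded <= 0)
			return rounded < 0 ? rounded : -EINVAL;

		/* clk_set_rate() rounds again internally, so this is traced as well */
		return clk_set_rate(clk, rounded);
	}

With the events compiled in, they can be toggled at runtime through the usual tracefs event controls under events/clk/, which is the "toggle the tracing on without modifying the kernel code" point made above.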
-rw-r--r--  drivers/clk/clk.c           | 31
-rw-r--r--  include/trace/events/clk.h  | 43
2 files changed, 74 insertions(+), 0 deletions(-)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 042e32acce0b..f0288499866a 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -603,10 +603,15 @@ int clk_mux_determine_rate_flags(struct clk_hw *hw,
}
clk_core_forward_rate_req(core, req, parent, &parent_req, req->rate);
+
+ trace_clk_rate_request_start(&parent_req);
+
ret = clk_core_round_rate_nolock(parent, &parent_req);
if (ret)
return ret;
+ trace_clk_rate_request_done(&parent_req);
+
best = parent_req.rate;
} else if (parent) {
best = clk_core_get_rate_nolock(parent);
@@ -630,10 +635,15 @@ int clk_mux_determine_rate_flags(struct clk_hw *hw,
struct clk_rate_request parent_req;
clk_core_forward_rate_req(core, req, parent, &parent_req, req->rate);
+
+ trace_clk_rate_request_start(&parent_req);
+
ret = clk_core_round_rate_nolock(parent, &parent_req);
if (ret)
continue;
+ trace_clk_rate_request_done(&parent_req);
+
parent_rate = parent_req.rate;
} else {
parent_rate = clk_core_get_rate_nolock(parent);
@@ -1547,10 +1557,15 @@ static int clk_core_round_rate_nolock(struct clk_core *core,
struct clk_rate_request parent_req;
clk_core_forward_rate_req(core, req, core->parent, &parent_req, req->rate);
+
+ trace_clk_rate_request_start(&parent_req);
+
ret = clk_core_round_rate_nolock(core->parent, &parent_req);
if (ret)
return ret;
+ trace_clk_rate_request_done(&parent_req);
+
req->best_parent_rate = parent_req.rate;
req->rate = parent_req.rate;
@@ -1601,10 +1616,14 @@ unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
clk_core_init_rate_req(hw->core, &req, rate);
+ trace_clk_rate_request_start(&req);
+
ret = clk_core_round_rate_nolock(hw->core, &req);
if (ret)
return 0;
+ trace_clk_rate_request_done(&req);
+
return req.rate;
}
EXPORT_SYMBOL_GPL(clk_hw_round_rate);
@@ -1633,8 +1652,12 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
clk_core_init_rate_req(clk->core, &req, rate);
+ trace_clk_rate_request_start(&req);
+
ret = clk_core_round_rate_nolock(clk->core, &req);
+ trace_clk_rate_request_done(&req);
+
if (clk->exclusive_count)
clk_core_rate_protect(clk->core);
@@ -2126,10 +2149,14 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *core,
clk_core_init_rate_req(core, &req, rate);
+ trace_clk_rate_request_start(&req);
+
ret = clk_core_determine_round_nolock(core, &req);
if (ret < 0)
return NULL;
+ trace_clk_rate_request_done(&req);
+
best_parent_rate = req.best_parent_rate;
new_rate = req.rate;
parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
@@ -2325,8 +2352,12 @@ static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
clk_core_init_rate_req(core, &req, req_rate);
+ trace_clk_rate_request_start(&req);
+
ret = clk_core_round_rate_nolock(core, &req);
+ trace_clk_rate_request_done(&req);
+
/* restore the protection */
clk_core_rate_restore_protect(core, cnt);
diff --git a/include/trace/events/clk.h b/include/trace/events/clk.h
index e19edc63ee95..daed3c7a48c1 100644
--- a/include/trace/events/clk.h
+++ b/include/trace/events/clk.h
@@ -264,6 +264,49 @@ DEFINE_EVENT(clk_duty_cycle, clk_set_duty_cycle_complete,
TP_ARGS(core, duty)
);
+DECLARE_EVENT_CLASS(clk_rate_request,
+
+ TP_PROTO(struct clk_rate_request *req),
+
+ TP_ARGS(req),
+
+ TP_STRUCT__entry(
+ __string( name, req->core ? req->core->name : "none")
+ __string( pname, req->best_parent_hw ? clk_hw_get_name(req->best_parent_hw) : "none" )
+ __field(unsigned long, min )
+ __field(unsigned long, max )
+ __field(unsigned long, prate )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, req->core ? req->core->name : "none");
+ __assign_str(pname, req->best_parent_hw ? clk_hw_get_name(req->best_parent_hw) : "none");
+ __entry->min = req->min_rate;
+ __entry->max = req->max_rate;
+ __entry->prate = req->best_parent_rate;
+ ),
+
+ TP_printk("%s min %lu max %lu, parent %s (%lu)", __get_str(name),
+ (unsigned long)__entry->min,
+ (unsigned long)__entry->max,
+ __get_str(pname),
+ (unsigned long)__entry->prate)
+);
+
+DEFINE_EVENT(clk_rate_request, clk_rate_request_start,
+
+ TP_PROTO(struct clk_rate_request *req),
+
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(clk_rate_request, clk_rate_request_done,
+
+ TP_PROTO(struct clk_rate_request *req),
+
+ TP_ARGS(req)
+);
+
#endif /* _TRACE_CLK_H */
/* This part must be outside protection */