author		Roy Ben Shlomo <roy.benshlomo@gmail.com>	2019-09-20 20:12:53 +0300
committer	Arnaldo Carvalho de Melo <acme@redhat.com>	2019-09-20 16:05:20 -0300
commit		9f014e3a66bc936412b6614304a4e6c70c70230e (patch)
tree		8ec5ca44c2bbf6800732b65c7dc0a88a129e44c0 /kernel
parent		6ef81c55a2b6584cb642917f5fdf3632ef44b670 (diff)
download	linux-9f014e3a66bc936412b6614304a4e6c70c70230e.tar.bz2
perf/core: Fix several typos in comments
Fix typos in a few functions' documentation comments.

Signed-off-by: Roy Ben Shlomo <royb@sentinelone.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: royb@sentinelone.com
Link: http://lore.kernel.org/lkml/20190920171254.31373-1-royb@sentinelone.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/events/core.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4f08b17d6426..275eae05af20 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2239,7 +2239,7 @@ static void __perf_event_disable(struct perf_event *event,
*
* If event->ctx is a cloned context, callers must make sure that
* every task struct that event->ctx->task could possibly point to
- * remains valid. This condition is satisifed when called through
+ * remains valid. This condition is satisfied when called through
* perf_event_for_each_child or perf_event_for_each because they
* hold the top-level event's child_mutex, so any descendant that
* goes to exit will block in perf_event_exit_event().
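For context on the invariant this comment describes: a condensed sketch of perf_event_for_each_child, paraphrased from kernel/events/core.c of this era (not part of this patch, and details may differ between versions), shows how holding the top-level event's child_mutex keeps every descendant from exiting mid-iteration:

static void perf_event_for_each_child(struct perf_event *event,
				      void (*func)(struct perf_event *))
{
	struct perf_event *child;

	WARN_ON_ONCE(event->ctx->parent_ctx);

	/*
	 * While child_mutex is held, any descendant heading into
	 * perf_event_exit_event() blocks, so each child's ctx->task
	 * stays valid for the whole walk.
	 */
	mutex_lock(&event->child_mutex);
	func(event);
	list_for_each_entry(child, &event->child_list, child_list)
		func(child);
	mutex_unlock(&event->child_mutex);
}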
@@ -6054,7 +6054,7 @@ static void perf_sample_regs_intr(struct perf_regs *regs_intr,
* Get remaining task size from user stack pointer.
*
* It'd be better to take stack vma map and limit this more
- * precisly, but there's no way to get it safely under interrupt,
+ * precisely, but there's no way to get it safely under interrupt,
* so using TASK_SIZE as limit.
*/
static u64 perf_ustack_task_size(struct pt_regs *regs)
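The helper's body (just below this hunk in core.c) is what actually applies the TASK_SIZE limit the comment mentions; roughly, per the kernel source of this period:

static u64 perf_ustack_task_size(struct pt_regs *regs)
{
	unsigned long addr = perf_user_stack_pointer(regs);

	/* No usable user stack pointer in this regs snapshot. */
	if (!addr || addr >= TASK_SIZE)
		return 0;

	/*
	 * Without the stack VMA bounds available in interrupt context,
	 * TASK_SIZE is the safe upper limit for the dump.
	 */
	return TASK_SIZE - addr;
}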
@@ -6616,7 +6616,7 @@ void perf_prepare_sample(struct perf_event_header *header,
if (sample_type & PERF_SAMPLE_STACK_USER) {
/*
- * Either we need PERF_SAMPLE_STACK_USER bit to be allways
+ * Either we need PERF_SAMPLE_STACK_USER bit to be always
* processed as the last one or have additional check added
* in case new sample type is added, because we could eat
* up the rest of the sample size.
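The "eat up the rest of the sample size" concern is resolved a few lines further down (outside this hunk) by clamping the dump so the u16 header size cannot overflow; a simplified sketch, paraphrasing perf_sample_ustack_size from the same file:

static u64 perf_sample_ustack_size(u64 stack_size, u16 header_size,
				   struct pt_regs *regs)
{
	u64 task_size;

	if (!regs)
		return 0;

	/* Never dump more than the task has, or than a u16 can describe. */
	task_size = min((u64)USHRT_MAX, perf_ustack_task_size(regs));
	stack_size = min(stack_size, task_size);

	/* Account for the u64 'size' and 'dyn_size' fields around the dump. */
	header_size += 2 * sizeof(u64);

	/*
	 * If adding the dump would overflow the u16 sample size, shrink
	 * the dump to fit; this is why PERF_SAMPLE_STACK_USER must be
	 * processed last (or re-checked when new sample types are added).
	 */
	if ((u16)(header_size + stack_size) < header_size) {
		stack_size = USHRT_MAX - header_size - sizeof(u64);
		stack_size = round_up(stack_size, sizeof(u64));
	}

	return stack_size;
}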