author     Dennis Zhou <dennisz@fb.com>              2017-06-19 19:28:32 -0400
committer  Tejun Heo <tj@kernel.org>                 2017-06-20 15:31:43 -0400
commit     df95e795a722892a9e0603ce4b9b62fab9f02967 (patch)
tree       f26b6c6c78874be2b866d516c42da69a67acc07c /mm
parent     30a5b5367ef9d5c9055414e12ec2f02d9de2e70f (diff)
percpu: add tracepoint support for percpu memory
Add support for tracepoints to the following events: chunk allocation,
chunk free, area allocation, area free, and area allocation failure.
This should let us replay percpu memory requests and evaluate
corresponding decisions.

Signed-off-by: Dennis Zhou <dennisz@fb.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
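The tracepoint definitions themselves live in include/trace/events/percpu.h, which mm/percpu.c pulls in with CREATE_TRACE_POINTS defined (see the mm/percpu.c hunk below); the header's contents fall outside this mm-limited diffstat. As a minimal sketch, assuming the kernel's standard TRACE_EVENT() boilerplate, the chunk-creation event could be declared roughly like this (the merged header may differ in detail):

#undef TRACE_SYSTEM
#define TRACE_SYSTEM percpu

#if !defined(_TRACE_PERCPU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_PERCPU_H

#include <linux/tracepoint.h>

/* fires from pcpu_create_chunk() and pcpu_setup_first_chunk() below */
TRACE_EVENT(percpu_create_chunk,

	TP_PROTO(void *base_addr),

	TP_ARGS(base_addr),

	TP_STRUCT__entry(
		__field(void *, base_addr)
	),

	TP_fast_assign(
		__entry->base_addr = base_addr;
	),

	TP_printk("base_addr=%p", __entry->base_addr)
);

#endif /* _TRACE_PERCPU_H */

/* must stay outside the include guard for define_trace.h to work */
#include <trace/define_trace.h>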
Diffstat (limited to 'mm')
-rw-r--r--  mm/percpu-km.c   2
-rw-r--r--  mm/percpu-vm.c   2
-rw-r--r--  mm/percpu.c     12
3 files changed, 16 insertions(+), 0 deletions(-)
diff --git a/mm/percpu-km.c b/mm/percpu-km.c
index 3bbfa0c9d069..2b79e43c626f 100644
--- a/mm/percpu-km.c
+++ b/mm/percpu-km.c
@@ -73,6 +73,7 @@ static struct pcpu_chunk *pcpu_create_chunk(void)
 	spin_unlock_irq(&pcpu_lock);
 
 	pcpu_stats_chunk_alloc();
+	trace_percpu_create_chunk(chunk->base_addr);
 
 	return chunk;
 }
@@ -82,6 +83,7 @@ static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
 	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
 
 	pcpu_stats_chunk_dealloc();
+	trace_percpu_destroy_chunk(chunk->base_addr);
 
 	if (chunk && chunk->data)
 		__free_pages(chunk->data, order_base_2(nr_pages));
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 5915a224da52..7ad9d94bf547 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -345,6 +345,7 @@ static struct pcpu_chunk *pcpu_create_chunk(void)
 	chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];
 
 	pcpu_stats_chunk_alloc();
+	trace_percpu_create_chunk(chunk->base_addr);
 
 	return chunk;
 }
@@ -352,6 +353,7 @@ static struct pcpu_chunk *pcpu_create_chunk(void)
 static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
 {
 	pcpu_stats_chunk_dealloc();
+	trace_percpu_destroy_chunk(chunk->base_addr);
 
 	if (chunk && chunk->data)
 		pcpu_free_vm_areas(chunk->data, pcpu_nr_groups);
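Both chunk backends (percpu-km above, percpu-vm here) hook pcpu_create_chunk()/pcpu_destroy_chunk() with the same single base_addr argument. Since the create and destroy events share a signature, the header could equally factor them through DECLARE_EVENT_CLASS()/DEFINE_EVENT() instead of two full TRACE_EVENT()s; a purely illustrative sketch of that alternative:

DECLARE_EVENT_CLASS(percpu_chunk,

	TP_PROTO(void *base_addr),

	TP_ARGS(base_addr),

	TP_STRUCT__entry(
		__field(void *, base_addr)
	),

	TP_fast_assign(
		__entry->base_addr = base_addr;
	),

	TP_printk("base_addr=%p", __entry->base_addr)
);

/* one event per lifecycle transition, both sharing the class above */
DEFINE_EVENT(percpu_chunk, percpu_create_chunk,
	TP_PROTO(void *base_addr),
	TP_ARGS(base_addr)
);

DEFINE_EVENT(percpu_chunk, percpu_destroy_chunk,
	TP_PROTO(void *base_addr),
	TP_ARGS(base_addr)
);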
diff --git a/mm/percpu.c b/mm/percpu.c
index 44a1cadf74a7..a5bc3634d2a9 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -76,6 +76,9 @@
 #include <asm/tlbflush.h>
 #include <asm/io.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/percpu.h>
+
 #include "percpu-internal.h"
 
 #define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
@@ -1015,11 +1018,17 @@ area_found:
 	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
 	kmemleak_alloc_percpu(ptr, size, gfp);
+
+	trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
+				  chunk->base_addr, off, ptr);
+
 	return ptr;
 
 fail_unlock:
 	spin_unlock_irqrestore(&pcpu_lock, flags);
 fail:
+	trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
+
 	if (!is_atomic && warn_limit) {
 		pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
 			size, align, is_atomic, err);
@@ -1269,6 +1278,8 @@ void free_percpu(void __percpu *ptr)
 		}
 	}
 
+	trace_percpu_free_percpu(chunk->base_addr, off, ptr);
+
 	spin_unlock_irqrestore(&pcpu_lock, flags);
 }
 EXPORT_SYMBOL_GPL(free_percpu);
@@ -1719,6 +1730,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	pcpu_chunk_relocate(pcpu_first_chunk, -1);
 
 	pcpu_stats_chunk_alloc();
+	trace_percpu_create_chunk(base_addr);
 
 	/* we're done */
 	pcpu_base_addr = base_addr;
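For reference, the area-allocation event must accept exactly the arguments passed at the call site in pcpu_alloc() above. A sketch of a declaration matching that signature, assuming the same TRACE_EVENT() style as earlier (field layout and format string are illustrative, not necessarily the merged ones):

TRACE_EVENT(percpu_alloc_percpu,

	TP_PROTO(bool reserved, bool is_atomic, size_t size,
		 size_t align, void *base_addr, int off,
		 void __percpu *ptr),

	TP_ARGS(reserved, is_atomic, size, align, base_addr, off, ptr),

	TP_STRUCT__entry(
		__field(bool,			reserved)
		__field(bool,			is_atomic)
		__field(size_t,			size)
		__field(size_t,			align)
		__field(void *,			base_addr)
		__field(int,			off)
		__field(void __percpu *,	ptr)
	),

	TP_fast_assign(
		__entry->reserved	= reserved;
		__entry->is_atomic	= is_atomic;
		__entry->size		= size;
		__entry->align		= align;
		__entry->base_addr	= base_addr;
		__entry->off		= off;
		__entry->ptr		= ptr;
	),

	TP_printk("reserved=%d is_atomic=%d size=%zu align=%zu base_addr=%p off=%d ptr=%p",
		  __entry->reserved, __entry->is_atomic, __entry->size,
		  __entry->align, __entry->base_addr, __entry->off,
		  __entry->ptr)
);

Once built in, these events can be toggled at runtime like any other tracepoint group (e.g. under the events/percpu/ directory in tracefs), which is what enables the replay workflow described in the commit message.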