author     Anshuman Khandual <anshuman.khandual@arm.com>  2022-03-24 18:10:01 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-03-24 19:06:45 -0700
commit     4cc79b3303f224a920f3aff21f3d231749d73384
tree       65d441557ed1cd72d8436dd9342304f24ad1d4e6 /mm/rmap.c
parent     283fd6fe0528ae2fe0222a35ecfdfcbbaeee8159
mm/migration: add trace events for base page and HugeTLB migrations
This adds two trace events for base page and HugeTLB page migrations. These events closely follow the implementation details, i.e. the setting and removal of PTE migration entries, which are essential operations for migration. The new CREATE_TRACE_POINTS in <mm/rmap.c> covers both the <events/migrate.h> and <events/tlb.h> based trace events. Hence drop the redundant CREATE_TRACE_POINTS from the other places where it could otherwise have conflicted during the build.

Link: https://lkml.kernel.org/r/1643368182-9588-3-git-send-email-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Reported-by: kernel test robot <lkp@intel.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
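[Editor's note] The event definitions themselves are added outside mm/rmap.c and are not visible in this diff. As a rough sketch only -- the field names, format string, and header layout below are assumptions, not the definition from this series -- a TRACE_EVENT() matching the trace_set_migration_pte() call sites in the hunks below could look like:

	/* illustrative sketch of include/trace/events/migrate.h contents */
	#undef TRACE_SYSTEM
	#define TRACE_SYSTEM migrate

	#if !defined(_TRACE_MIGRATE_H) || defined(TRACE_HEADER_MULTI_READ)
	#define _TRACE_MIGRATE_H

	#include <linux/tracepoint.h>

	TRACE_EVENT(set_migration_pte,

		/* address being remapped, raw PTE value, folio order */
		TP_PROTO(unsigned long addr, unsigned long pte, int order),

		TP_ARGS(addr, pte, order),

		TP_STRUCT__entry(
			__field(unsigned long, addr)
			__field(unsigned long, pte)
			__field(int, order)
		),

		TP_fast_assign(
			__entry->addr = addr;
			__entry->pte = pte;
			__entry->order = order;
		),

		TP_printk("addr=%lx, pte=%lx, order=%d",
			  __entry->addr, __entry->pte, __entry->order)
	);

	#endif /* _TRACE_MIGRATE_H */

	/* This part must be outside the include guard. */
	#include <trace/define_trace.h>

Once wired up, such an event would be toggled like any other tracepoint, e.g. by writing 1 to events/migrate/set_migration_pte/enable under /sys/kernel/tracing (the group name assumes TRACE_SYSTEM is migrate).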
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--	mm/rmap.c	6
1 file changed, 6 insertions(+), 0 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index ee1f10df984d..bfcc8e3d412f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -76,7 +76,9 @@
 
 #include <asm/tlbflush.h>
 
+#define CREATE_TRACE_POINTS
 #include <trace/events/tlb.h>
+#include <trace/events/migrate.h>
 
 #include "internal.h"
 
@@ -1849,6 +1851,8 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 			if (pte_swp_uffd_wp(pteval))
 				swp_pte = pte_swp_mkuffd_wp(swp_pte);
 			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
+			trace_set_migration_pte(pvmw.address, pte_val(swp_pte),
+						compound_order(&folio->page));
 			/*
 			 * No need to invalidate here it will synchronize on
 			 * against the special swap migration pte.
@@ -1917,6 +1921,8 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 			if (pte_uffd_wp(pteval))
 				swp_pte = pte_swp_mkuffd_wp(swp_pte);
 			set_pte_at(mm, address, pvmw.pte, swp_pte);
+			trace_set_migration_pte(address, pte_val(swp_pte),
+						compound_order(&folio->page));
 			/*
 			 * No need to invalidate here it will synchronize on
 			 * against the special swap migration pte.