author     Peter Zijlstra <peterz@infradead.org>  2020-10-09 11:09:27 +0200
committer  Peter Zijlstra <peterz@infradead.org>  2020-10-29 11:00:39 +0100
commit     51b646b2d9f84d6ff6300e3c1d09f2be4329a424
tree       a8c9b6bded7c1358f87cbc0f582eb6c653791818 /kernel/events
parent     995f088efebe1eba0282a6ffa12411b37f8990c2
perf,mm: Handle non-page-table-aligned hugetlbfs
A limited number of architectures support hugetlbfs sizes that do not
align with the page tables (ARM64, Power, Sparc64). Add support for
this to the generic perf_get_page_size() implementation, and also
allow an architecture to override this implementation.
The latter is only needed when an architecture uses
non-page-table-aligned huge pages in its kernel map.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
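
The trick the generic implementation relies on: hugetlbfs memory is always
allocated as a compound page, so even when an architecture maps it with
entries smaller than the page itself, pte_page()/pmd_page()/pud_page() still
lead back to the compound head, whose order records the true allocation
size. A minimal sketch of that lookup; hugetlb_entry_size() is a
hypothetical name for illustration, not a helper from the patch:

	/* Sketch only: hugetlb_entry_size() is a hypothetical name. */
	static u64 hugetlb_entry_size(struct page *page)
	{
		/*
		 * Any constituent page of a hugetlbfs compound page leads
		 * back to the compound head; page_size() then returns the
		 * real allocation size, independent of how the mapping is
		 * fragmented in the page tables.
		 */
		if (PageHuge(page))
			return page_size(compound_head(page));

		return PAGE_SIZE;	/* ordinary base page */
	}

This is why the walk below only needs page_size(compound_head(page)) at
each page-table level.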
Diffstat (limited to 'kernel/events')
-rw-r--r-- | kernel/events/core.c | 39 |
1 file changed, 33 insertions(+), 6 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7f655d19b8c4..b458ed3dc81b 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7011,10 +7011,18 @@ static u64 perf_virt_to_phys(u64 virt)
 
 #ifdef CONFIG_MMU
 /*
- * Return the MMU page size of a given virtual address
+ * Return the MMU page size of a given virtual address.
+ *
+ * This generic implementation handles page-table aligned huge pages, as well
+ * as non-page-table aligned hugetlbfs compound pages.
+ *
+ * If an architecture supports and uses non-page-table aligned pages in its
+ * kernel mapping it will need to provide its own implementation of this
+ * function.
  */
-static u64 __perf_get_page_size(struct mm_struct *mm, unsigned long addr)
+__weak u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr)
 {
+	struct page *page;
 	pgd_t *pgd;
 	p4d_t *p4d;
 	pud_t *pud;
@@ -7036,15 +7044,27 @@ static u64 __perf_get_page_size(struct mm_struct *mm, unsigned long addr)
 	if (!pud_present(*pud))
 		return 0;
 
-	if (pud_leaf(*pud))
+	if (pud_leaf(*pud)) {
+#ifdef pud_page
+		page = pud_page(*pud);
+		if (PageHuge(page))
+			return page_size(compound_head(page));
+#endif
 		return 1ULL << PUD_SHIFT;
+	}
 
 	pmd = pmd_offset(pud, addr);
 	if (!pmd_present(*pmd))
 		return 0;
 
-	if (pmd_leaf(*pmd))
+	if (pmd_leaf(*pmd)) {
+#ifdef pmd_page
+		page = pmd_page(*pmd);
+		if (PageHuge(page))
+			return page_size(compound_head(page));
+#endif
 		return 1ULL << PMD_SHIFT;
+	}
 
 	pte = pte_offset_map(pmd, addr);
 	if (!pte_present(*pte)) {
@@ -7052,13 +7072,20 @@ static u64 __perf_get_page_size(struct mm_struct *mm, unsigned long addr)
 		return 0;
 	}
 
+	page = pte_page(*pte);
+	if (PageHuge(page)) {
+		u64 size = page_size(compound_head(page));
+		pte_unmap(pte);
+		return size;
+	}
+
 	pte_unmap(pte);
 	return PAGE_SIZE;
 }
 
 #else
 
-static u64 __perf_get_page_size(struct mm_struct *mm, unsigned long addr)
+static u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr)
 {
 	return 0;
 }
@@ -7089,7 +7116,7 @@ static u64 perf_get_page_size(unsigned long addr)
 		mm = &init_mm;
 	}
 
-	size = __perf_get_page_size(mm, addr);
+	size = arch_perf_get_page_size(mm, addr);
 
 	local_irq_restore(flags);
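
Because arch_perf_get_page_size() is declared __weak, an architecture whose
kernel map uses huge pages the generic walk cannot classify can supply a
strong definition of its own. A hedged sketch of the shape such an override
could take; my_kernel_mapping_size() is invented for illustration and stands
in for whatever arch-private state describes the kernel's huge mappings:

	/* Hypothetical arch override (sketch, not from this commit). */
	u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr)
	{
		/*
		 * Kernel map: the page tables may not reflect the real
		 * mapping size, so consult arch-private state instead.
		 */
		if (mm == &init_mm)
			return my_kernel_mapping_size(addr);

		/*
		 * For user mappings the generic compound-page walk above
		 * would suffice; a real override would duplicate it here.
		 * Returning 0 tells the caller the size is unknown.
		 */
		return 0;
	}

The result ultimately feeds the PERF_SAMPLE_DATA_PAGE_SIZE sample type added
in the same series, which reports the page size of sampled data addresses to
user space.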