Message-ID: <160396870073.397.5253000962129174657.tip-bot2@tip-bot2>
Date: Thu, 29 Oct 2020 10:51:40 -0000
From: "tip-bot2 for Peter Zijlstra" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: "Peter Zijlstra (Intel)" <peterz@...radead.org>,
x86 <x86@...nel.org>, LKML <linux-kernel@...r.kernel.org>
Subject: [tip: perf/core] perf,mm: Handle non-page-table-aligned hugetlbfs
The following commit has been merged into the perf/core branch of tip:
Commit-ID: 51b646b2d9f84d6ff6300e3c1d09f2be4329a424
Gitweb: https://git.kernel.org/tip/51b646b2d9f84d6ff6300e3c1d09f2be4329a424
Author: Peter Zijlstra <peterz@...radead.org>
AuthorDate: Fri, 09 Oct 2020 11:09:27 +02:00
Committer: Peter Zijlstra <peterz@...radead.org>
CommitterDate: Thu, 29 Oct 2020 11:00:39 +01:00
perf,mm: Handle non-page-table-aligned hugetlbfs
A limited number of architectures support hugetlbfs sizes that do not
align with the page-tables (ARM64, Power, Sparc64). Add support for
this to the generic perf_get_page_size() implementation, and also
allow an architecture to override this implementation.
The latter is only needed when an architecture uses non-page-table-aligned
huge pages in its kernel map.
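[ Since the generic implementation below is declared __weak, an
  architecture overrides it simply by providing a strong definition of
  the same symbol, which takes precedence at link time.  A minimal
  sketch follows; it is an illustration only, not part of this patch,
  and the trivial PAGE_SIZE body is an assumption: ]

#include <linux/mm.h>
#include <linux/perf_event.h>

/*
 * Hypothetical arch override, e.g. somewhere under arch/.  This strong
 * definition replaces the __weak generic arch_perf_get_page_size().
 */
u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr)
{
	/*
	 * An arch-specific lookup would go here.  It must tolerate being
	 * called with IRQs disabled, since the generic
	 * perf_get_page_size() wrapper disables them around this call.
	 *
	 * Returning PAGE_SIZE is the trivial fallback; returning 0 means
	 * the address is not mapped.
	 */
	return PAGE_SIZE;
}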
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
include/linux/perf_event.h | 4 ++++-
kernel/events/core.c | 39 +++++++++++++++++++++++++++++++------
2 files changed, 37 insertions(+), 6 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index e533b03..0defb52 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1590,4 +1590,8 @@ extern void __weak arch_perf_update_userpage(struct perf_event *event,
struct perf_event_mmap_page *userpg,
u64 now);
+#ifdef CONFIG_MMU
+extern __weak u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr);
+#endif
+
#endif /* _LINUX_PERF_EVENT_H */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7f655d1..b458ed3 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7011,10 +7011,18 @@ static u64 perf_virt_to_phys(u64 virt)
#ifdef CONFIG_MMU
/*
- * Return the MMU page size of a given virtual address
+ * Return the MMU page size of a given virtual address.
+ *
+ * This generic implementation handles page-table aligned huge pages, as well
+ * as non-page-table aligned hugetlbfs compound pages.
+ *
+ * If an architecture supports and uses non-page-table aligned pages in its
+ * kernel mapping it will need to provide its own implementation of this
+ * function.
*/
-static u64 __perf_get_page_size(struct mm_struct *mm, unsigned long addr)
+__weak u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr)
{
+ struct page *page;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
@@ -7036,15 +7044,27 @@ static u64 __perf_get_page_size(struct mm_struct *mm, unsigned long addr)
if (!pud_present(*pud))
return 0;
- if (pud_leaf(*pud))
+ if (pud_leaf(*pud)) {
+#ifdef pud_page
+ page = pud_page(*pud);
+ if (PageHuge(page))
+ return page_size(compound_head(page));
+#endif
return 1ULL << PUD_SHIFT;
+ }
pmd = pmd_offset(pud, addr);
if (!pmd_present(*pmd))
return 0;
- if (pmd_leaf(*pmd))
+ if (pmd_leaf(*pmd)) {
+#ifdef pmd_page
+ page = pmd_page(*pmd);
+ if (PageHuge(page))
+ return page_size(compound_head(page));
+#endif
return 1ULL << PMD_SHIFT;
+ }
pte = pte_offset_map(pmd, addr);
if (!pte_present(*pte)) {
@@ -7052,13 +7072,20 @@ static u64 __perf_get_page_size(struct mm_struct *mm, unsigned long addr)
return 0;
}
+ page = pte_page(*pte);
+ if (PageHuge(page)) {
+ u64 size = page_size(compound_head(page));
+ pte_unmap(pte);
+ return size;
+ }
+
pte_unmap(pte);
return PAGE_SIZE;
}
#else
-static u64 __perf_get_page_size(struct mm_struct *mm, unsigned long addr)
+static u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr)
{
return 0;
}
@@ -7089,7 +7116,7 @@ static u64 perf_get_page_size(unsigned long addr)
mm = &init_mm;
}
- size = __perf_get_page_size(mm, addr);
+ size = arch_perf_get_page_size(mm, addr);
local_irq_restore(flags);