Date:   Fri, 24 Dec 2021 12:16:38 +0530
From:   Anshuman Khandual <anshuman.khandual@....com>
To:     linux-mm@...ck.org
Cc:     Anshuman Khandual <anshuman.khandual@....com>,
        Steven Rostedt <rostedt@...dmis.org>,
        Ingo Molnar <mingo@...hat.com>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Zi Yan <ziy@...dia.com>,
        Naoya Horiguchi <n-horiguchi@...jp.nec.com>,
        John Hubbard <jhubbard@...dia.com>,
        linux-kernel@...r.kernel.org
Subject: [RFC] mm/migration: Add trace events for THP migrations

This adds two trace events for PMD-based THP migration without split. These
events closely follow the implementation, i.e. the setting and removal of PMD
migration entries, which are the essential operations in THP migration.

Cc: Steven Rostedt <rostedt@...dmis.org>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>
Cc: Zi Yan <ziy@...dia.com>
Cc: Naoya Horiguchi <n-horiguchi@...jp.nec.com>
Cc: John Hubbard <jhubbard@...dia.com>
Cc: linux-mm@...ck.org
Cc: linux-kernel@...r.kernel.org
Signed-off-by: Anshuman Khandual <anshuman.khandual@....com>
---
This applies on v5.16-rc6
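
For reference, here is a minimal userspace sketch (not part of the patch)
that enables both new events and tails the trace pipe. It assumes tracefs
is mounted at /sys/kernel/tracing and that the events are grouped under the
existing "thp" trace system defined in include/trace/events/thp.h:

#include <stdio.h>
#include <stdlib.h>

/* Write a string to a tracefs control file, bailing out on failure. */
static void write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		exit(EXIT_FAILURE);
	}
	fputs(val, f);
	fclose(f);
}

int main(void)
{
	char line[4096];
	FILE *pipe;

	/* Enable the two THP migration events added by this patch. */
	write_str("/sys/kernel/tracing/events/thp/set_migration_pmd/enable", "1");
	write_str("/sys/kernel/tracing/events/thp/remove_migration_pmd/enable", "1");

	/* Block on trace_pipe and echo events as THP migrations happen. */
	pipe = fopen("/sys/kernel/tracing/trace_pipe", "r");
	if (!pipe) {
		perror("trace_pipe");
		return EXIT_FAILURE;
	}
	while (fgets(line, sizeof(line), pipe))
		fputs(line, stdout);
	fclose(pipe);
	return 0;
}

Exercising THP migration (e.g. memory compaction or move_pages() on a
THP-backed mapping) should then produce matching set_migration_pmd /
remove_migration_pmd lines in the output.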

 include/trace/events/thp.h | 61 ++++++++++++++++++++++++++++++++++++++
 mm/huge_memory.c           |  5 ++++
 2 files changed, 66 insertions(+)

diff --git a/include/trace/events/thp.h b/include/trace/events/thp.h
index d7fbbe551841..be5aeb783af1 100644
--- a/include/trace/events/thp.h
+++ b/include/trace/events/thp.h
@@ -83,6 +83,67 @@ TRACE_EVENT(hugepage_splitting,
 		      __entry->addr, __entry->pte)
 );
 
+TRACE_EVENT(set_migration_pmd,
+
+	TP_PROTO(struct mm_struct *mm, unsigned long address,
+		 pmd_t *pmdp, struct page *page, unsigned long pmdval),
+
+	TP_ARGS(mm, address, pmdp, page, pmdval),
+
+	TP_STRUCT__entry(
+		__field(struct mm_struct *, mm)
+		__field(unsigned long, address)
+		__field(pmd_t *, pmdp)
+		__field(struct page *, page)
+		__field(unsigned long, pmdval)
+	),
+
+	TP_fast_assign(
+		__entry->mm = mm;
+		__entry->address = address;
+		__entry->pmdp = pmdp;
+		__entry->page = page;
+		__entry->pmdval = pmdval;
+	),
+
+	TP_printk("mm=%p, address=%lx, pmdp=%p, page=%p, pmdval=%lx",
+		__entry->mm,
+		__entry->address,
+		__entry->pmdp,
+		__entry->page,
+		__entry->pmdval)
+);
+
+TRACE_EVENT(remove_migration_pmd,
+
+	TP_PROTO(struct mm_struct *mm, unsigned long address,
+		 pmd_t *pmdp, struct page *page, unsigned long pmdval),
+
+	TP_ARGS(mm, address, pmdp, page, pmdval),
+
+	TP_STRUCT__entry(
+		__field(struct mm_struct *, mm)
+		__field(unsigned long, address)
+		__field(pmd_t *, pmdp)
+		__field(struct page *, page)
+		__field(unsigned long, pmdval)
+	),
+
+	TP_fast_assign(
+		__entry->mm = mm;
+		__entry->address = address;
+		__entry->pmdp = pmdp;
+		__entry->page = page;
+		__entry->pmdval = pmdval;
+	),
+
+	TP_printk("mm=%p, address=%lx, pmdp=%p, page=%p, pmdval=%lx",
+		__entry->mm,
+		__entry->address,
+		__entry->pmdp,
+		__entry->page,
+		__entry->pmdval)
+);
 #endif /* _TRACE_THP_H */
 
 /* This part must be outside protection */
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e5483347291c..611de486e095 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -39,6 +39,9 @@
 #include <asm/pgalloc.h>
 #include "internal.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/thp.h>
+
 /*
  * By default, transparent hugepage support is disabled in order to avoid
  * risking an increased memory footprint for applications that are not
@@ -3173,6 +3176,7 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 	set_pmd_at(mm, address, pvmw->pmd, pmdswp);
 	page_remove_rmap(page, true);
 	put_page(page);
+	trace_set_migration_pmd(mm, address, pvmw->pmd, page, pmd_val(pmdswp));
 }
 
 void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
@@ -3206,5 +3210,6 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
 	if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new))
 		mlock_vma_page(new);
 	update_mmu_cache_pmd(vma, address, pvmw->pmd);
+	trace_remove_migration_pmd(mm, address, pvmw->pmd, new, pmd_val(pmde));
 }
 #endif
-- 
2.20.1
