Date: Fri, 22 Mar 2024 19:11:14 +0530
From: Raghavendra K T <raghavendra.kt@....com>
To: <linux-kernel@...r.kernel.org>, <linux-mm@...ck.org>
CC: Ingo Molnar <mingo@...hat.com>, Peter Zijlstra <peterz@...radead.org>,
	"Mel Gorman" <mgorman@...e.de>, Andrew Morton <akpm@...ux-foundation.org>,
	"David Hildenbrand" <david@...hat.com>, <rppt@...nel.org>, Juri Lelli
	<juri.lelli@...hat.com>, Vincent Guittot <vincent.guittot@...aro.org>,
	Bharata B Rao <bharata@....com>, Johannes Weiner <jweiner@...com>, "kernel
 test robot" <oliver.sang@...el.com>, Raghavendra K T
	<raghavendra.kt@....com>, Mike Kravetz <mike.kravetz@...cle.com>, Muchun Song
	<muchun.song@...ux.dev>
Subject: [RFC PATCH 3 1/1] sched/numa: Convert 256MB VMA scan limit notion

Currently, the VMA scanning that introduces PROT_NONE faults to track
a task's page access pattern is limited to 256MB. This limit works
well for 4K pages. However, for VMAs backed by hugepages, there is an
opportunity to scale this up.

One idea is to convert the 256MB scanning notion into a budget of 64K
4K-sized PTEs. Thus, when a 2MB huge page is scanned, we account for
only one PMD scan.
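
To make the arithmetic concrete, here is a small illustrative userspace
snippet (not part of the patch; numbers assume 4K base pages and 2MB THPs):

  /* Illustration only: the new scan-budget notion in numbers. */
  #include <stdio.h>

  int main(void)
  {
  	long budget_ptes  = (256L << 20) >> 12;	/* 256MB / 4KB = 65536 entries */
  	long thp_old_cost = (2L << 20) >> 12;	/* 2MB THP = 512 base-page PTEs */
  	long thp_new_cost = 1;			/* now accounted as one PMD scan */

  	printf("budget %ld entries, THP cost %ld -> %ld\n",
  	       budget_ptes, thp_old_cost, thp_new_cost);
  	return 0;
  }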

However, in some cases (mostly microbenchmarks) CPUs could end up
spending more time on migrations than is optimal.

Benchmarks with hugepages/THP enabled, such as hashjoin, have shown
good benefit.

TODO:
 - Introduce rate-limiting logic similar to the one in the CXL case.
 - Tune the scan rate to dynamically adapt to the rate of migrations.

Inspired by Mel's suggestion [1]:
"Scan based on page table updates, not address ranges to mitigate
   problems with THP vs base page updates"

[1] Link: https://lore.kernel.org/lkml/20220128052851.17162-1-bharata@amd.com/T/#m38f6bf64f484eb98562f64ed02be86f2768d6fff

Suggested-by: Mel Gorman <mgorman@...hsingularity.net>
Signed-off-by: Raghavendra K T <raghavendra.kt@....com>
---
 include/linux/hugetlb.h |  3 +-
 include/linux/mm.h      | 16 +++++++-
 kernel/sched/fair.c     | 15 ++++---
 mm/hugetlb.c            |  9 +++++
 mm/mempolicy.c          | 11 +++++-
 mm/mprotect.c           | 87 +++++++++++++++++++++++++++++++++--------
 6 files changed, 115 insertions(+), 26 deletions(-)

Note: I think we can do better than passing a struct around just to
learn how much memory the current VMA scan covered.

Currently change_prot_numa() returns how many pages were successfully
scanned, but it does not tell us how much of the memory range was
covered by the scan.

Ideas??
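
For reference, a minimal userspace mock (illustration only; the mock
helper mirrors the mprotect.c hunk below and the VMA sizes are made up)
of how struct pte_info carries the huge-PMD count from the page table
walk back into the budget accounting:

  #include <stdio.h>

  #define PAGE_SHIFT	12
  #define HPAGE_PMD_NR	512L

  struct pte_info {
  	long nr_huge_pte;		/* huge PMD updates seen by the walk */
  };

  /* Mirrors effective_scanned_ptes(): each huge PMD is charged as one entry. */
  static long mock_effective_scanned_ptes(unsigned long start, unsigned long end,
  					struct pte_info *info)
  {
  	long ptes = (end - start) >> PAGE_SHIFT;

  	if (info && info->nr_huge_pte) {
  		ptes -= info->nr_huge_pte * HPAGE_PMD_NR;
  		ptes += info->nr_huge_pte;
  	}
  	return ptes;
  }

  int main(void)
  {
  	long ptes_to_scan = (256L << 20) >> PAGE_SHIFT;	/* 64K-entry budget */
  	/* pretend an 8MB VMA fully backed by 2MB THPs was just scanned */
  	struct pte_info info = { .nr_huge_pte = 4 };

  	ptes_to_scan -= mock_effective_scanned_ptes(0, 8UL << 20, &info);
  	printf("budget left: %ld entries\n", ptes_to_scan);
  	return 0;
  }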

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index c1ee640d87b1..eb6987148e44 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -278,7 +278,8 @@ int pud_huge(pud_t pud);
 long hugetlb_change_protection(struct vm_area_struct *vma,
 		unsigned long address, unsigned long end, pgprot_t newprot,
 		unsigned long cp_flags);
-
+long hugetlb_effective_scanned_ptes(struct vm_area_struct *vma, unsigned long start,
+		unsigned long end);
 bool is_hugetlb_entry_migration(pte_t pte);
 bool is_hugetlb_entry_hwpoisoned(pte_t pte);
 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f5a97dec5169..8c5490db007d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -549,6 +549,10 @@ struct vm_fault {
 					 */
 };
 
+struct pte_info {
+	long nr_huge_pte;
+};
+
 /*
  * These are the virtual MM functions - opening of an area, closing and
  * unmapping it (needed to keep files on disk up-to-date etc), pointer
@@ -2547,8 +2551,15 @@ static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma
 bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
 			     pte_t pte);
 extern long change_protection(struct mmu_gather *tlb,
+			      struct vm_area_struct *vma, unsigned long start,
+			      unsigned long end, unsigned long cp_flags);
+extern long change_protection_n(struct mmu_gather *tlb,
 			      struct vm_area_struct *vma, unsigned long start,
-			      unsigned long end, unsigned long cp_flags);
+			      unsigned long end, unsigned long cp_flags,
+			      struct pte_info *info);
+extern long effective_scanned_ptes(struct vm_area_struct *vma,
+				unsigned long start, unsigned long end,
+				struct pte_info *info);
 extern int mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
 	  struct vm_area_struct *vma, struct vm_area_struct **pprev,
 	  unsigned long start, unsigned long end, unsigned long newflags);
@@ -3535,6 +3546,9 @@ void vma_set_file(struct vm_area_struct *vma, struct file *file);
 #ifdef CONFIG_NUMA_BALANCING
 unsigned long change_prot_numa(struct vm_area_struct *vma,
 			unsigned long start, unsigned long end);
+unsigned long change_prot_numa_n(struct vm_area_struct *vma,
+			unsigned long start, unsigned long end,
+			struct pte_info *info);
 #endif
 
 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *,
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6a16129f9a5c..3646a0e14bd4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3200,8 +3200,9 @@ static void task_numa_work(struct callback_head *work)
 	struct vm_area_struct *vma;
 	unsigned long start, end;
 	unsigned long nr_pte_updates = 0;
-	long pages, virtpages;
+	long pages, virtpages, ptes_to_scan, e_scanned_ptes;
 	struct vma_iterator vmi;
+	struct pte_info info = {0};
 	bool vma_pids_skipped;
 	bool vma_pids_forced = false;
 
@@ -3248,6 +3249,8 @@ static void task_numa_work(struct callback_head *work)
 
 	pages = sysctl_numa_balancing_scan_size;
 	pages <<= 20 - PAGE_SHIFT; /* MB in pages */
+	/* Consider total number of PTEs to scan rather than sticking to 256MB */
+	ptes_to_scan = pages;
 	virtpages = pages * 8;	   /* Scan up to this much virtual space */
 	if (!pages)
 		return;
@@ -3366,7 +3369,7 @@ static void task_numa_work(struct callback_head *work)
 			start = max(start, vma->vm_start);
 			end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
 			end = min(end, vma->vm_end);
-			nr_pte_updates = change_prot_numa(vma, start, end);
+			nr_pte_updates = change_prot_numa_n(vma, start, end, &info);
 
 			/*
 			 * Try to scan sysctl_numa_balancing_size worth of
@@ -3376,12 +3379,14 @@ static void task_numa_work(struct callback_head *work)
 			 * PTEs, scan up to virtpages, to skip through those
 			 * areas faster.
 			 */
+			e_scanned_ptes = effective_scanned_ptes(vma, start, end, &info);
+
 			if (nr_pte_updates)
-				pages -= (end - start) >> PAGE_SHIFT;
-			virtpages -= (end - start) >> PAGE_SHIFT;
+				ptes_to_scan -= e_scanned_ptes;
 
+			virtpages -= e_scanned_ptes;
 			start = end;
-			if (pages <= 0 || virtpages <= 0)
+			if (ptes_to_scan <= 0 || virtpages <= 0)
 				goto out;
 
 			cond_resched();
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ed1581b670d4..a5bb13457398 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6996,6 +6996,15 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
 	return pages > 0 ? (pages << h->order) : pages;
 }
 
+long hugetlb_effective_scanned_ptes(struct vm_area_struct *vma, unsigned long start,
+		       unsigned long end)
+{
+	struct hstate *h = hstate_vma(vma);
+
+	return (end - start) >> (PAGE_SHIFT + h->order);
+}
+
+
 /* Return true if reservation was successful, false otherwise.  */
 bool hugetlb_reserve_pages(struct inode *inode,
 					long from, long to,
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 10a590ee1c89..103eca1858e7 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -631,8 +631,9 @@ static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
  * an architecture makes a different choice, it will need further
  * changes to the core.
  */
-unsigned long change_prot_numa(struct vm_area_struct *vma,
-			unsigned long addr, unsigned long end)
+unsigned long change_prot_numa_n(struct vm_area_struct *vma,
+			unsigned long addr, unsigned long end,
+			struct pte_info *info)
 {
 	struct mmu_gather tlb;
 	long nr_updated;
@@ -647,6 +648,12 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
 
 	return nr_updated;
 }
+
+unsigned long change_prot_numa(struct vm_area_struct *vma,
+			unsigned long addr, unsigned long end)
+{
+	return change_prot_numa_n(vma, addr, end, NULL);
+}
 #endif /* CONFIG_NUMA_BALANCING */
 
 static int queue_pages_test_walk(unsigned long start, unsigned long end,
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 81991102f785..8e43506705e0 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -352,9 +352,10 @@ pgtable_populate_needed(struct vm_area_struct *vma, unsigned long cp_flags)
 		err;							\
 	})
 
-static inline long change_pmd_range(struct mmu_gather *tlb,
+static inline long change_pmd_range_n(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, pud_t *pud, unsigned long addr,
-		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
+		unsigned long end, pgprot_t newprot, unsigned long cp_flags,
+		struct pte_info *info)
 {
 	pmd_t *pmd;
 	unsigned long next;
@@ -431,14 +432,25 @@ static inline long change_pmd_range(struct mmu_gather *tlb,
 	if (range.start)
 		mmu_notifier_invalidate_range_end(&range);
 
-	if (nr_huge_updates)
+	if (nr_huge_updates) {
 		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
+		if (info)
+			info->nr_huge_pte = nr_huge_updates;
+	}
 	return pages;
 }
 
-static inline long change_pud_range(struct mmu_gather *tlb,
-		struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr,
+static inline long change_pmd_range(struct mmu_gather *tlb,
+		struct vm_area_struct *vma, pud_t *pud, unsigned long addr,
 		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
+{
+	return change_pmd_range_n(tlb, vma, pud, addr, end, newprot, cp_flags, NULL);
+}
+
+static inline long change_pud_range_n(struct mmu_gather *tlb,
+		struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr,
+		unsigned long end, pgprot_t newprot, unsigned long cp_flags,
+		struct pte_info *info)
 {
 	pud_t *pud;
 	unsigned long next;
@@ -452,17 +464,26 @@ static inline long change_pud_range(struct mmu_gather *tlb,
 			return ret;
 		if (pud_none_or_clear_bad(pud))
 			continue;
-		pages += change_pmd_range(tlb, vma, pud, addr, next, newprot,
-					  cp_flags);
+		pages += change_pmd_range_n(tlb, vma, pud, addr, next, newprot,
+					  cp_flags, info);
 	} while (pud++, addr = next, addr != end);
 
 	return pages;
 }
 
-static inline long change_p4d_range(struct mmu_gather *tlb,
-		struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr,
+static inline long change_pud_range(struct mmu_gather *tlb,
+		struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr,
 		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
 {
+
+	return change_pud_range_n(tlb, vma, p4d, addr, end, newprot, cp_flags, NULL);
+}
+
+static inline long change_p4d_range_n(struct mmu_gather *tlb,
+		struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr,
+		unsigned long end, pgprot_t newprot, unsigned long cp_flags,
+		struct pte_info *info)
+{
 	p4d_t *p4d;
 	unsigned long next;
 	long pages = 0, ret;
@@ -475,16 +496,24 @@ static inline long change_p4d_range(struct mmu_gather *tlb,
 			return ret;
 		if (p4d_none_or_clear_bad(p4d))
 			continue;
-		pages += change_pud_range(tlb, vma, p4d, addr, next, newprot,
-					  cp_flags);
+		pages += change_pud_range_n(tlb, vma, p4d, addr, next, newprot,
+					  cp_flags, info);
 	} while (p4d++, addr = next, addr != end);
 
 	return pages;
 }
 
+static inline long change_p4d_range(struct mmu_gather *tlb,
+		struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr,
+		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
+{
+	return change_p4d_range_n(tlb, vma, pgd, addr, end, newprot, cp_flags, NULL);
+}
+
 static long change_protection_range(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long addr,
-		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
+		unsigned long end, pgprot_t newprot, unsigned long cp_flags,
+		struct pte_info *info)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgd;
@@ -503,8 +532,8 @@ static long change_protection_range(struct mmu_gather *tlb,
 		}
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		pages += change_p4d_range(tlb, vma, pgd, addr, next, newprot,
-					  cp_flags);
+		pages += change_p4d_range_n(tlb, vma, pgd, addr, next, newprot,
+					  cp_flags, info);
 	} while (pgd++, addr = next, addr != end);
 
 	tlb_end_vma(tlb, vma);
@@ -512,9 +541,10 @@ static long change_protection_range(struct mmu_gather *tlb,
 	return pages;
 }
 
-long change_protection(struct mmu_gather *tlb,
+long change_protection_n(struct mmu_gather *tlb,
 		       struct vm_area_struct *vma, unsigned long start,
-		       unsigned long end, unsigned long cp_flags)
+		       unsigned long end, unsigned long cp_flags,
+			struct pte_info *info)
 {
 	pgprot_t newprot = vma->vm_page_prot;
 	long pages;
@@ -538,11 +568,34 @@ long change_protection(struct mmu_gather *tlb,
 						  cp_flags);
 	else
 		pages = change_protection_range(tlb, vma, start, end, newprot,
-						cp_flags);
+						cp_flags, info);
 
 	return pages;
 }
 
+long change_protection(struct mmu_gather *tlb,
+		       struct vm_area_struct *vma, unsigned long start,
+		       unsigned long end, unsigned long cp_flags)
+{
+	return change_protection_n(tlb, vma, start, end, cp_flags, NULL);
+}
+
+long effective_scanned_ptes(struct vm_area_struct *vma, unsigned long start,
+		       unsigned long end, struct pte_info *info)
+{
+	long ptes = (end - start) >> PAGE_SHIFT;
+
+	if (is_vm_hugetlb_page(vma))
+		return hugetlb_effective_scanned_ptes(vma, start, end);
+
+	if (info && info->nr_huge_pte) {
+		ptes -= info->nr_huge_pte * HPAGE_PMD_NR;	/* base pages under huge PMDs */
+		ptes += info->nr_huge_pte;
+	}
+
+	return ptes;
+}
+
 static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
 			       unsigned long next, struct mm_walk *walk)
 {
-- 
2.34.1

