Message-ID: <20241201153818.2633616-7-raghavendra.kt@amd.com>
Date: Sun, 1 Dec 2024 15:38:14 +0000
From: Raghavendra K T <raghavendra.kt@....com>
To: <linux-mm@...ck.org>, <linux-kernel@...r.kernel.org>, <gourry@...rry.net>,
	<nehagholkar@...a.com>, <abhishekd@...a.com>, <david@...hat.com>,
	<ying.huang@...el.com>, <nphamcs@...il.com>, <akpm@...ux-foundation.org>,
	<hannes@...xchg.org>, <feng.tang@...el.com>, <kbusch@...a.com>,
	<bharata@....com>, <Hasan.Maruf@....com>, <sj@...nel.org>
CC: <willy@...radead.org>, <kirill.shutemov@...ux.intel.com>,
	<mgorman@...hsingularity.net>, <vbabka@...e.cz>, <hughd@...gle.com>,
	<rientjes@...gle.com>, <shy828301@...il.com>, <Liam.Howlett@...cle.com>,
	<peterz@...radead.org>, <mingo@...hat.com>, Raghavendra K T
	<raghavendra.kt@....com>
Subject: [RFC PATCH V0 06/10] mm: Add throttling of mm scanning using scan_size

Before this patch, scanning covered the entire virtual address space of
all the tasks. Now the scan size is shrunk or expanded based on the
number of useful pages found in the last scan.

This helps to quickly back off from unnecessary scanning and thus burn
less CPU.

Drawback: if a useful chunk is at the far end of the VMA space, its
scanning and migration are delayed.

Shrink/expand algorithm for scan_size (sketched in code below):
X : Number of useful pages found in the last scan.
Y : Number of useful pages found in the current scan.
Initial scan_size is 4GB.
 case 1: (X = 0, Y = 0)
  Halve scan_size
 case 2: (X = 0, Y > 0)
  Aggressively change to MAX (16GB)
 case 3: (X > 0, Y = 0)
  No change
 case 4: (X > 0, Y > 0)
  Double scan_size

scan_size is clamped between MIN (512MB) and MAX (16GB).
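
For illustration, a minimal standalone userspace sketch of the rule
above (not the kernel code; names are made up, and
SCAN_SIZE_CHANGE_SCALE is assumed to be 1):

#include <stdio.h>

#define MIN_SZ	(512UL << 20)	/* 512MB */
#define MAX_SZ	(16UL << 30)	/* 16GB  */
#define SCALE	1

/* x: useful pages in the last scan, y: useful pages in this scan */
static unsigned long update_scan_size(unsigned long sz,
				      unsigned long x, unsigned long y)
{
	if (!x && !y)
		sz >>= SCALE;		/* case 1: halve */
	else if (!x && y)
		sz = MAX_SZ;		/* case 2: jump to MAX */
	else if (x && y)
		sz <<= SCALE;		/* case 4: double */
					/* case 3: no change */
	if (sz < MIN_SZ)
		sz = MIN_SZ;
	if (sz > MAX_SZ)
		sz = MAX_SZ;
	return sz;
}

int main(void)
{
	unsigned long sz = 4UL << 30;	/* initial 4GB */
	int i;

	/* Idle scans shrink: 4GB -> 2GB -> 1GB -> 512MB (clamped) */
	for (i = 0; i < 4; i++) {
		printf("scan_size = %luMB\n", sz >> 20);
		sz = update_scan_size(sz, 0, 0);
	}
	return 0;
}

With SCALE = 1, three consecutive idle scans shrink the default 4GB
window to the 512MB floor, while a single scan that finds useful pages
after an idle one restores the full 16GB window at once.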
TBD: tune this based on real workloads.

Experiment:
============
abench microbenchmark:
- Allocates 8GB/32GB of memory on the CXL node.
- 64 threads are created, and each thread randomly accesses pages at 4KB
  granularity.
- 512 iterations.

SUT: AMD EPYC, 512 CPUs, 2 nodes, 256GB memory.

3 runs, command:  abench -m 2 -d 1 -i 512 -s <size>

The benchmark reports the time taken to complete the task; lower is
better. The expectation is that CXL node memory is promoted as fast as
possible.

Base case:    6.11-rc6 w/ numab mode = 2 (hot page promotion enabled).
Patched case: 6.11-rc6 w/ numab mode = 0 (NUMA balancing disabled);
we expect the daemon to do the page promotion.

Result:
========
         base                    patched
         time in sec  (%stdev)   time in sec  (%stdev)     %gain
 8GB     133.66       ( 0.38 )        113.77  ( 1.83 )     14.88
32GB     584.77       ( 0.19 )        542.79  ( 0.11 )      7.17
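
(Here %gain = (base - patched) / base * 100; e.g., for the 8GB case:
(133.66 - 113.77) / 133.66 * 100 ~= 14.88.)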

Overhead:
The time below is measured using patch 10. The actual overhead for the
patched case may be even lower.

               (scan + migration)  time in sec
Total memory   base kernel    patched kernel       %gain
 8GB            65.743          13.93              78.81
32GB           153.95          132.12              14.18

Signed-off-by: Raghavendra K T <raghavendra.kt@....com>
---
 mm/kmmscand.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 52 insertions(+), 3 deletions(-)

diff --git a/mm/kmmscand.c b/mm/kmmscand.c
index 589aed604cd6..2efef53f9402 100644
--- a/mm/kmmscand.c
+++ b/mm/kmmscand.c
@@ -28,6 +28,16 @@
 static struct task_struct *kmmscand_thread __read_mostly;
 static DEFINE_MUTEX(kmmscand_mutex);
 
+/*
+ * Total VMA size to cover during scan.
+ * Min: 512MB default: 4GB Max: 16GB
+ */
+#define KMMSCAND_SCAN_SIZE_MIN	(512 * 1024 * 1024UL)
+#define KMMSCAND_SCAN_SIZE_MAX	(16 * 1024 * 1024 * 1024UL)
+#define KMMSCAND_SCAN_SIZE	(4 * 1024 * 1024 * 1024UL)
+
+static unsigned long kmmscand_scan_size __read_mostly = KMMSCAND_SCAN_SIZE;
+
 /*
  * Scan period for each mm.
  * Min: 400ms default: 2sec Max: 5sec
@@ -74,6 +84,8 @@ struct kmmscand_mm_slot {
 	unsigned long next_scan;
 	/* Tracks how many useful pages obtained for migration in the last scan */
 	unsigned long scan_delta;
+	/* Determines how much VMA address space to cover per scan */
+	unsigned long scan_size;
 	long address;
 };
 
@@ -484,6 +496,7 @@ static void kmmscand_migrate_folio(void)
  */
 #define KMMSCAND_IGNORE_SCAN_THR	100
 
+#define SCAN_SIZE_CHANGE_SCALE	1
 /*
  * X : Number of useful pages in the last scan.
  * Y : Number of useful pages found in current scan.
@@ -497,11 +510,22 @@ static void kmmscand_migrate_folio(void)
  *		Increase scan_period by (2 << SCAN_PERIOD_CHANGE_SCALE).
  *	case 4: (X > 0, Y > 0)
  *		Decrease scan_period by SCAN_PERIOD_TUNE_PERCENT.
+ * Tuning scan_size:
+ * Initial scan_size is 4GB
+ *	case 1: (X = 0, Y = 0)
+ *		Shrink scan_size by a factor of (1 << SCAN_SIZE_CHANGE_SCALE).
+ *	case 2: (X = 0, Y > 0)
+ *		scan_size = KMMSCAND_SCAN_SIZE_MAX
+ *	case 3: (X > 0, Y = 0)
+ *		No change
+ *	case 4: (X > 0, Y > 0)
+ *		Grow scan_size by a factor of (1 << SCAN_SIZE_CHANGE_SCALE).
  */
 static inline void kmmscand_update_mmslot_info(struct kmmscand_mm_slot *mm_slot, unsigned long total)
 {
 	unsigned int scan_period;
 	unsigned long now;
+	unsigned long scan_size;
 	unsigned long old_scan_delta;
 
 	/* XXX: Hack to get rid of continuously failing/unmigrateable pages */
@@ -509,6 +533,7 @@ static inline void kmmscand_update_mmslot_info(struct kmmscand_mm_slot *mm_slot,
 		total = 0;
 
 	scan_period = mm_slot->scan_period;
+	scan_size = mm_slot->scan_size;
 
 	old_scan_delta = mm_slot->scan_delta;
 
@@ -522,30 +547,38 @@ static inline void kmmscand_update_mmslot_info(struct kmmscand_mm_slot *mm_slot,
 	if (!old_scan_delta && !total) {
 		scan_period = (100 + SCAN_PERIOD_TUNE_PERCENT) * scan_period;
 		scan_period /= 100;
+		scan_size = scan_size >> SCAN_SIZE_CHANGE_SCALE;
 	} else if (old_scan_delta && total) {
 		scan_period = (100 - SCAN_PERIOD_TUNE_PERCENT) * scan_period;
 		scan_period /= 100;
+		scan_size = scan_size << SCAN_SIZE_CHANGE_SCALE;
 	} else if (old_scan_delta && !total) {
 		scan_period = scan_period << SCAN_PERIOD_CHANGE_SCALE;
 	} else {
 		scan_period = scan_period >> SCAN_PERIOD_CHANGE_SCALE;
+		scan_size = KMMSCAND_SCAN_SIZE_MAX;
 	}
 
 	scan_period = clamp(scan_period, KMMSCAND_SCAN_PERIOD_MIN, KMMSCAND_SCAN_PERIOD_MAX);
+	scan_size = clamp(scan_size, KMMSCAND_SCAN_SIZE_MIN, KMMSCAND_SCAN_SIZE_MAX);
 
 	now = jiffies;
 	mm_slot->next_scan = now + msecs_to_jiffies(scan_period);
 	mm_slot->scan_period = scan_period;
+	mm_slot->scan_size = scan_size;
 	mm_slot->scan_delta = total;
 }
 
 static unsigned long kmmscand_scan_mm_slot(void)
 {
+	bool next_mm = false;
 	bool update_mmslot_info = false;
 
 	unsigned int mm_slot_scan_period;
 	unsigned long now;
 	unsigned long mm_slot_next_scan;
+	unsigned long mm_slot_scan_size;
+	unsigned long scanned_size = 0;
 	unsigned long address;
 	unsigned long folio_nr_access_s, folio_nr_access_e, total = 0;
 
@@ -572,6 +605,7 @@ static unsigned long kmmscand_scan_mm_slot(void)
 
 	mm_slot_next_scan = mm_slot->next_scan;
 	mm_slot_scan_period = mm_slot->scan_period;
+	mm_slot_scan_size = mm_slot->scan_size;
 	mm = slot->mm;
 
 	spin_unlock(&kmmscand_mm_lock);
@@ -579,8 +613,10 @@ static unsigned long kmmscand_scan_mm_slot(void)
 	if (unlikely(!mmap_read_trylock(mm)))
 		goto outerloop_mmap_lock;
 
-	if (unlikely(kmmscand_test_exit(mm)))
+	if (unlikely(kmmscand_test_exit(mm))) {
+		next_mm = true;
 		goto outerloop;
+	}
 
 	now = jiffies;
 	/*
@@ -598,8 +634,20 @@ static unsigned long kmmscand_scan_mm_slot(void)
 	for_each_vma(vmi, vma) {
 		/* Count the scanned pages here to decide exit */
 		kmmscand_walk_page_vma(vma);
-
+		scanned_size += vma->vm_end - vma->vm_start;
 		address = vma->vm_end;
+
+		if (scanned_size >= mm_slot_scan_size) {
+			folio_nr_access_e = atomic_long_read(&mm->nr_accessed);
+			total = folio_nr_access_e - folio_nr_access_s;
+			/* If we found accessed pages, ignore the current scan_size limit */
+			if (total > KMMSCAND_IGNORE_SCAN_THR) {
+				mm_slot_scan_size = KMMSCAND_SCAN_SIZE_MAX;
+				continue;
+			}
+			next_mm = true;
+			break;
+		}
 	}
 	folio_nr_access_e = atomic_long_read(&mm->nr_accessed);
 	total = folio_nr_access_e - folio_nr_access_s;
@@ -627,7 +675,7 @@ static unsigned long kmmscand_scan_mm_slot(void)
 	 * Release the current mm_slot if this mm is about to die, or
 	 * if we scanned all vmas of this mm.
 	 */
-	if (unlikely(kmmscand_test_exit(mm)) || !vma) {
+	if (unlikely(kmmscand_test_exit(mm)) || !vma || next_mm) {
 		/*
 		 * Make sure that if mm_users is reaching zero while
 		 * kmmscand runs here, kmmscand_exit will find
@@ -711,6 +759,7 @@ void __kmmscand_enter(struct mm_struct *mm)
 
 	kmmscand_slot->address = 0;
 	kmmscand_slot->scan_period = kmmscand_mm_scan_period_ms;
+	kmmscand_slot->scan_size = kmmscand_scan_size;
 	kmmscand_slot->next_scan = 0;
 	kmmscand_slot->scan_delta = 0;
 
-- 
2.39.3

