Message-ID: <20250624055617.1291159-8-raghavendra.kt@amd.com>
Date: Tue, 24 Jun 2025 05:56:11 +0000
From: Raghavendra K T <raghavendra.kt@....com>
To: <raghavendra.kt@....com>
CC: <AneeshKumar.KizhakeVeetil@....com>, <Hasan.Maruf@....com>,
<Michael.Day@....com>, <akpm@...ux-foundation.org>, <bharata@....com>,
<dave.hansen@...el.com>, <david@...hat.com>, <dongjoo.linux.dev@...il.com>,
<feng.tang@...el.com>, <gourry@...rry.net>, <hannes@...xchg.org>,
<honggyu.kim@...com>, <hughd@...gle.com>, <jhubbard@...dia.com>,
<jon.grimm@....com>, <k.shutemov@...il.com>, <kbusch@...a.com>,
<kmanaouil.dev@...il.com>, <leesuyeon0506@...il.com>, <leillc@...gle.com>,
<liam.howlett@...cle.com>, <linux-kernel@...r.kernel.org>,
<linux-mm@...ck.org>, <mgorman@...hsingularity.net>, <mingo@...hat.com>,
<nadav.amit@...il.com>, <nphamcs@...il.com>, <peterz@...radead.org>,
<riel@...riel.com>, <rientjes@...gle.com>, <rppt@...nel.org>,
<santosh.shukla@....com>, <shivankg@....com>, <shy828301@...il.com>,
<sj@...nel.org>, <vbabka@...e.cz>, <weixugc@...gle.com>,
<willy@...radead.org>, <ying.huang@...ux.alibaba.com>, <ziy@...dia.com>,
<Jonathan.Cameron@...wei.com>, <dave@...olabs.net>, <yuanchu@...gle.com>,
<kinseyho@...gle.com>, <hdanton@...a.com>
Subject: [RFC PATCH V2 07/13] mm: Add throttling of mm scanning using scan_size
Before this patch, scanning covered the entire virtual address space of
all the tasks. Now the scan size is shrunk or expanded based on the
number of useful pages found in the last scan.
This helps to quickly get out of unnecessary scanning and thus burn
less CPU.
Drawback: if a useful chunk is at the other end of the VMA space,
scanning and migration of that chunk are delayed.
Shrink/expand algorithm for scan_size:
X : Number of useful pages found in the last scan.
Y : Number of useful pages found in the current scan.
Initial scan_size is 1GB
case 1: (X = 0, Y = 0)
Decrease scan_size by a factor of 2
case 2: (X = 0, Y > 0)
Aggressively change to MAX (4GB)
case 3: (X > 0, Y = 0)
No change
case 4: (X > 0, Y > 0)
Increase scan_size by a factor of 2
Scan size is clamped between MIN (256MB) and MAX (4GB).
TBD: Tuning based on real workloads
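
To make the shrink/expand rules above concrete, here is a minimal user-space
model of the policy (an illustration only, not the kernel code; the constants
and the shift of 1 mirror this patch):

#include <stdio.h>

#define SCAN_SIZE_MIN		(256UL * 1024 * 1024)		/* 256MB */
#define SCAN_SIZE_MAX		(4UL * 1024 * 1024 * 1024)	/* 4GB */
#define SCAN_SIZE_CHANGE_SHIFT	1

static unsigned long clamp_size(unsigned long v)
{
	if (v < SCAN_SIZE_MIN)
		return SCAN_SIZE_MIN;
	if (v > SCAN_SIZE_MAX)
		return SCAN_SIZE_MAX;
	return v;
}

/* x = useful pages in the last scan, y = useful pages in the current scan */
static unsigned long update_scan_size(unsigned long scan_size,
				      unsigned long x, unsigned long y)
{
	if (!x && !y)			/* case 1: nothing useful, shrink */
		scan_size >>= SCAN_SIZE_CHANGE_SHIFT;
	else if (!x && y)		/* case 2: new activity, jump to MAX */
		scan_size = SCAN_SIZE_MAX;
	else if (x && y)		/* case 4: still useful, expand */
		scan_size <<= SCAN_SIZE_CHANGE_SHIFT;
	/* case 3 (x > 0, y = 0): no change */

	return clamp_size(scan_size);
}

int main(void)
{
	unsigned long scan_size = 1UL * 1024 * 1024 * 1024;	/* initial 1GB */

	/* Two empty scans shrink 1GB -> 512MB -> 256MB (floor). */
	scan_size = update_scan_size(scan_size, 0, 0);
	scan_size = update_scan_size(scan_size, 0, 0);
	printf("after idle scans: %luMB\n", scan_size >> 20);

	/* Useful pages show up again: jump straight back to MAX. */
	scan_size = update_scan_size(scan_size, 0, 1000);
	printf("after new activity: %luMB\n", scan_size >> 20);

	return 0;
}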
Signed-off-by: Raghavendra K T <raghavendra.kt@....com>
---
mm/kscand.c | 29 +++++++++++++++++++++++++++++
1 file changed, 29 insertions(+)
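
Note (not part of the patch): the hunks below add the per-slot scan_size and
carry it into kscand_scan_mm_slot(); the check that presumably stops the walk
once vma_scanned_size reaches it lies in unchanged context and is not visible
here. The drawback mentioned in the changelog comes from this per-pass budget:
once scan_size worth of VMA space has been covered, the remaining VMAs wait
for a later pass. A runnable user-space illustration of that effect follows
(the VMA sizes are made up and the names are not the kernel's):

#include <stdio.h>

int main(void)
{
	/* Pretend address-space layout: per-VMA sizes in bytes */
	unsigned long vma_sizes[] = {
		512UL << 20, 256UL << 20, 2048UL << 20, 128UL << 20
	};
	unsigned long scan_size = 1UL << 30;	/* 1GB budget for this pass */
	unsigned long scanned = 0;
	unsigned int i;

	for (i = 0; i < sizeof(vma_sizes) / sizeof(vma_sizes[0]); i++) {
		if (scanned >= scan_size) {
			printf("budget exhausted, VMA %u waits for the next pass\n", i);
			break;
		}
		scanned += vma_sizes[i];
		printf("scanned VMA %u, covered %luMB so far\n", i, scanned >> 20);
	}
	return 0;
}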
diff --git a/mm/kscand.c b/mm/kscand.c
index fca4b7b4a81f..26b40865d3e5 100644
--- a/mm/kscand.c
+++ b/mm/kscand.c
@@ -28,10 +28,15 @@
static struct task_struct *kscand_thread __read_mostly;
static DEFINE_MUTEX(kscand_mutex);
+
/*
* Total VMA size to cover during scan.
+ * Min: 256MB, default: 1GB, max: 4GB
*/
+#define KSCAND_SCAN_SIZE_MIN (256 * 1024 * 1024UL)
+#define KSCAND_SCAN_SIZE_MAX (4 * 1024 * 1024 * 1024UL)
#define KSCAND_SCAN_SIZE (1 * 1024 * 1024 * 1024UL)
+
static unsigned long kscand_scan_size __read_mostly = KSCAND_SCAN_SIZE;
/*
@@ -94,6 +99,8 @@ struct kscand_mm_slot {
unsigned long next_scan;
/* Tracks how many useful pages obtained for migration in the last scan */
unsigned long scan_delta;
+ /* Determines how much VMA address space to be covered in the scanning */
+ unsigned long scan_size;
long address;
bool is_scanned;
};
@@ -744,6 +751,8 @@ static void kmigrated_migrate_folio(void)
*/
#define KSCAND_IGNORE_SCAN_THR 256
+#define SCAN_SIZE_CHANGE_SHIFT 1
+
/* Maintains stability of scan_period by decaying last time accessed pages */
#define SCAN_DECAY_SHIFT 4
/*
@@ -759,14 +768,26 @@ static void kmigrated_migrate_folio(void)
* Increase scan_period by (2 << SCAN_PERIOD_CHANGE_SCALE).
* case 4: (X > 0, Y > 0)
* Decrease scan_period by SCAN_PERIOD_TUNE_PERCENT.
+ * Tuning scan_size:
+ * Initial scan_size is 1GB
+ * case 1: (X = 0, Y = 0)
+ * Decrease scan_size by a factor of (1 << SCAN_SIZE_CHANGE_SHIFT).
+ * case 2: (X = 0, Y > 0)
+ * scan_size = KSCAND_SCAN_SIZE_MAX
+ * case 3: (X > 0, Y = 0)
+ * No change
+ * case 4: (X > 0, Y > 0)
+ * Increase scan_size by a factor of (1 << SCAN_SIZE_CHANGE_SHIFT).
*/
static inline void kscand_update_mmslot_info(struct kscand_mm_slot *mm_slot,
unsigned long total)
{
unsigned int scan_period;
unsigned long now;
+ unsigned long scan_size;
unsigned long old_scan_delta;
+ scan_size = mm_slot->scan_size;
scan_period = mm_slot->scan_period;
old_scan_delta = mm_slot->scan_delta;
@@ -787,20 +808,25 @@ static inline void kscand_update_mmslot_info(struct kscand_mm_slot *mm_slot,
if (!old_scan_delta && !total) {
scan_period = (100 + SCAN_PERIOD_TUNE_PERCENT) * scan_period;
scan_period /= 100;
+ scan_size = scan_size >> SCAN_SIZE_CHANGE_SHIFT;
} else if (old_scan_delta && total) {
scan_period = (100 - SCAN_PERIOD_TUNE_PERCENT) * scan_period;
scan_period /= 100;
+ scan_size = scan_size << SCAN_SIZE_CHANGE_SHIFT;
} else if (old_scan_delta && !total) {
scan_period = scan_period << SCAN_PERIOD_CHANGE_SCALE;
} else {
scan_period = scan_period >> SCAN_PERIOD_CHANGE_SCALE;
+ scan_size = KSCAND_SCAN_SIZE_MAX;
}
scan_period = clamp(scan_period, KSCAND_SCAN_PERIOD_MIN, KSCAND_SCAN_PERIOD_MAX);
+ scan_size = clamp(scan_size, KSCAND_SCAN_SIZE_MIN, KSCAND_SCAN_SIZE_MAX);
now = jiffies;
mm_slot->next_scan = now + msecs_to_jiffies(scan_period);
mm_slot->scan_period = scan_period;
+ mm_slot->scan_size = scan_size;
mm_slot->scan_delta = total;
}
@@ -812,6 +838,7 @@ static unsigned long kscand_scan_mm_slot(void)
unsigned int mm_slot_scan_period;
unsigned long now;
unsigned long mm_slot_next_scan;
+ unsigned long mm_slot_scan_size;
unsigned long vma_scanned_size = 0;
unsigned long address;
unsigned long total = 0;
@@ -841,6 +868,7 @@ static unsigned long kscand_scan_mm_slot(void)
mm_slot->is_scanned = true;
mm_slot_next_scan = mm_slot->next_scan;
mm_slot_scan_period = mm_slot->scan_period;
+ mm_slot_scan_size = mm_slot->scan_size;
spin_unlock(&kscand_mm_lock);
if (unlikely(!mmap_read_trylock(mm)))
@@ -992,6 +1020,7 @@ void __kscand_enter(struct mm_struct *mm)
kscand_slot->address = 0;
kscand_slot->scan_period = kscand_mm_scan_period_ms;
+ kscand_slot->scan_size = kscand_scan_size;
kscand_slot->next_scan = 0;
kscand_slot->scan_delta = 0;
--
2.34.1