Message-Id: <20220803100543.1653550-1-xu.xin16@zte.com.cn>
Date: Wed, 3 Aug 2022 10:05:43 +0000
From: cgel.zte@...il.com
To: akpm@...ux-foundation.org
Cc: hughd@...gle.com, izik.eidus@...ellosystems.com,
willy@...radead.org, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, xu xin <xu.xin16@....com.cn>,
CGEL <cgel.zte@...il.com>
Subject: [RFC PATCH 3/4] ksm: let ksmd work automatically with memory threshold
From: xu xin <xu.xin16@....com.cn>
When memory is sufficient, merging pages to save memory is not really
needed, and it also increases CoW latency for user applications. So
introduce a memory threshold: when free memory is lower than the
threshold, ksmd is triggered to compare and merge pages. To avoid a
ping-pong effect around the threshold, ksmd keeps trying to merge pages
until free memory rises above (threshold + total_memory * 1/16).

Before free memory falls below the threshold, ksmd still scans pages at
a very low speed, only calculating their checksums rather than comparing
and merging pages.
          |
          |      ----(Threshold + total_memory/16)--------
          |                                    |
   ------Threshold------                       |
          |                                    |
          |_____ksmd tries to merge pages______|
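For example (illustrative numbers only), on a machine with 16 GiB of
RAM and the default 20% threshold:

    trigger point:    16 GiB * 20 / 100      = 3.2 GiB free
    untrigger point:  3.2 GiB + 16 GiB / 16  = 4.2 GiB free

so ksmd starts comparing and merging once free memory drops below about
3.2 GiB and keeps merging until free memory climbs back above about
4.2 GiB.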
We also add a new sysfs knob, auto_threshold_percent, so that users can
tune the threshold.
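For example, assuming the knob sits in the usual KSM sysfs directory,
the threshold can be raised to 30% with:

    echo 30 > /sys/kernel/mm/ksm/auto_threshold_percent
    cat /sys/kernel/mm/ksm/auto_threshold_percent
    30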
Signed-off-by: xu xin <xu.xin16@....com.cn>
Signed-off-by: CGEL <cgel.zte@...il.com>
---
mm/ksm.c | 95 ++++++++++++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 92 insertions(+), 3 deletions(-)
diff --git a/mm/ksm.c b/mm/ksm.c
index 8acc893e4d61..78819b56efec 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -294,6 +294,17 @@ static unsigned int ksm_max_scanning_factor = 32;
*/
static unsigned int scanning_factor = 1;
+/*
+ * Work in auto-mode.
+ * Default ksm_auto_threshold_percent is 20, meaning 20%. When free memory
+ * is lower than total memory * ksm_auto_threshold_percent/100, ksmd will
+ * be triggered to compare and merge pages.
+ */
+unsigned int ksm_auto_threshold_percent = 20;
+
+/* Work in auto-mode. Whether to trigger ksmd to compare and merge pages */
+static bool auto_triggered;
+
#ifdef CONFIG_NUMA
/* Zeroed when merging across nodes is not allowed */
static unsigned int ksm_merge_across_nodes = 1;
@@ -2446,11 +2457,46 @@ static void ksm_do_scan(unsigned int scan_npages)
rmap_item = scan_get_next_rmap_item(&page);
if (!rmap_item)
return;
- cmp_and_merge_page(page, rmap_item);
+ if ((ksm_run & KSM_RUN_AUTO) && !auto_triggered) {
+ /*
+ * This should happen only when ksm_run is KSM_RUN_AUTO
+ * and the free memory threshold has not been reached yet.
+ * The checksum is calculated here to reduce the time the
+ * rmap_item waits before being added to the unstable tree.
+ */
+ rmap_item->oldchecksum = calc_checksum(page);
+ } else
+ cmp_and_merge_page(page, rmap_item);
+
put_page(page);
}
}
+/* Work in auto mode: should auto_triggered be reset? */
+static bool should_untrigger_ksmd_to_merge(void)
+{
+ unsigned long total_ram_pages, free_pages;
+
+ total_ram_pages = totalram_pages();
+ free_pages = global_zone_page_state(NR_FREE_PAGES);
+
+ return free_pages > (total_ram_pages *
+ ksm_auto_threshold_percent / 100) +
+ (total_ram_pages >> 4);
+}
+
+/* Work in auto mode: should ksmd start to merge? */
+static bool should_trigger_ksmd_to_merge(void)
+{
+ unsigned long total_ram_pages, free_pages;
+
+ total_ram_pages = totalram_pages();
+ free_pages = global_zone_page_state(NR_FREE_PAGES);
+
+ return free_pages < (total_ram_pages *
+ ksm_auto_threshold_percent / 100);
+}
+
static int ksmd_should_run(void)
{
if (!list_empty(&ksm_mm_head.mm_list))
@@ -2469,8 +2515,18 @@ static int ksm_scan_thread(void *nothing)
mutex_lock(&ksm_thread_mutex);
wait_while_offlining();
if (ksmd_should_run()) {
- if (ksm_run & KSM_RUN_AUTO)
- ksm_do_scan(ksm_thread_pages_to_scan * scanning_factor);
+ if (ksm_run & KSM_RUN_AUTO) {
+ /*
+ * If free memory is not lower than the threshold, scan only
+ * 5 pages and just calculate their checksums without comparing
+ * and merging them. Otherwise, do real scanning and merging
+ * with the scanning-enhanced algorithm.
+ */
+ if (!auto_triggered)
+ ksm_do_scan(5);
+ else
+ ksm_do_scan(ksm_thread_pages_to_scan * scanning_factor);
+ }
else
ksm_do_scan(ksm_thread_pages_to_scan);
}
@@ -2491,6 +2547,11 @@ static int ksm_scan_thread(void *nothing)
reset_scanning_factor();
ksm_scan.new_ksmpages_of_this_scanning = 0;
+
+ if (should_trigger_ksmd_to_merge())
+ auto_triggered = true;
+ else if (should_untrigger_ksmd_to_merge())
+ auto_triggered = false;
}
try_to_freeze();
@@ -3035,6 +3096,32 @@ static ssize_t max_scanning_factor_store(struct kobject *kobj,
}
KSM_ATTR(max_scanning_factor);
+static ssize_t auto_threshold_percent_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%u\n", ksm_auto_threshold_percent);
+}
+
+static ssize_t auto_threshold_percent_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int value;
+ int err;
+
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return -EINVAL;
+
+ if (value > 100)
+ return -EINVAL;
+
+ ksm_auto_threshold_percent = value;
+
+ return count;
+}
+KSM_ATTR(auto_threshold_percent);
+
#ifdef CONFIG_NUMA
static ssize_t merge_across_nodes_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
@@ -3246,6 +3333,7 @@ static struct attribute *ksm_attrs[] = {
&pages_to_scan_attr.attr,
&run_attr.attr,
&max_scanning_factor_attr.attr,
+ &auto_threshold_percent_attr.attr,
&pages_shared_attr.attr,
&pages_sharing_attr.attr,
&pages_unshared_attr.attr,
@@ -3277,6 +3365,7 @@ static int __init ksm_init(void)
zero_checksum = calc_checksum(ZERO_PAGE(0));
/* Default to false for backwards compatibility */
ksm_use_zero_pages = false;
+ auto_triggered = false;
err = ksm_slab_init();
if (err)
--
2.25.1