Message-ID: <20240405053510.1948982-3-yosryahmed@google.com>
Date: Fri, 5 Apr 2024 05:35:07 +0000
From: Yosry Ahmed <yosryahmed@...gle.com>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Johannes Weiner <hannes@...xchg.org>, Nhat Pham <nphamcs@...il.com>,
Chengming Zhou <chengming.zhou@...ux.dev>, linux-mm@...ck.org, linux-kernel@...r.kernel.org,
Yosry Ahmed <yosryahmed@...gle.com>
Subject: [PATCH v2 2/5] mm: zswap: calculate limits only when updated
Currently, we calculate the zswap global limit, and potentially the
acceptance threshold, in pages on every zswap store. This is
unnecessary because the values rarely change.

Instead, precalculate them when the module parameters are updated,
which should be rare. Since we are adding custom handlers for setting
the percentages now, add proper validation that they are <= 100.
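
For context, the handlers below follow the stock module_param_cb()
pattern: a custom .set callback validates the percentage, delegates the
actual store to param_set_uint(), and then refreshes the cached page
counts. A minimal sketch of that pattern (identifiers here are
illustrative only, not the ones used in the patch):

static unsigned int cached_percent = 20;
static unsigned long cached_pages;

static int percent_param_set(const char *val, const struct kernel_param *kp)
{
        unsigned int n;
        int ret;

        /* Reject anything that is not a valid percentage. */
        ret = kstrtouint(val, 10, &n);
        if (ret || n > 100)
                return -EINVAL;

        /* Store the new percentage... */
        ret = param_set_uint(val, kp);
        if (!ret)
                /* ...and precompute the page count once, off the hot path. */
                WRITE_ONCE(cached_pages, totalram_pages() * n / 100);
        return ret;
}

static const struct kernel_param_ops percent_param_ops = {
        .set = percent_param_set,
        .get = param_get_uint,
};
module_param_cb(some_percent, &percent_param_ops, &cached_percent, 0644);

With the cached value in place, the store path only needs a READ_ONCE()
of the precomputed page count instead of redoing the multiplication and
division for every stored page.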
Suggested-by: Johannes Weiner <hannes@...xchg.org>
Signed-off-by: Yosry Ahmed <yosryahmed@...gle.com>
---
mm/zswap.c | 86 ++++++++++++++++++++++++++++++++++++++++++++----------
1 file changed, 70 insertions(+), 16 deletions(-)
diff --git a/mm/zswap.c b/mm/zswap.c
index 1cf3ab4b22e64..832e3f56232f0 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -116,12 +116,29 @@ module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
-module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
+static unsigned long zswap_max_pages;
+
+static int zswap_max_pool_param_set(const char *,
+ const struct kernel_param *);
+static const struct kernel_param_ops zswap_max_pool_param_ops = {
+ .set = zswap_max_pool_param_set,
+ .get = param_get_uint,
+};
+module_param_cb(max_pool_percent, &zswap_max_pool_param_ops,
+ &zswap_max_pool_percent, 0644);
/* The threshold for accepting new pages after the max_pool_percent was hit */
static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
-module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
- uint, 0644);
+unsigned long zswap_accept_thr_pages;
+
+static int zswap_accept_thr_param_set(const char *,
+ const struct kernel_param *);
+static const struct kernel_param_ops zswap_accept_thr_param_ops = {
+ .set = zswap_accept_thr_param_set,
+ .get = param_get_uint,
+};
+module_param_cb(accept_threshold_percent, &zswap_accept_thr_param_ops,
+ &zswap_accept_thr_percent, 0644);
/*
* Enable/disable handling same-value filled pages (enabled by default).
@@ -490,14 +507,16 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
return NULL;
}
-static unsigned long zswap_max_pages(void)
+static void zswap_update_max_pages(void)
{
- return totalram_pages() * zswap_max_pool_percent / 100;
+ WRITE_ONCE(zswap_max_pages,
+ totalram_pages() * zswap_max_pool_percent / 100);
}
-static unsigned long zswap_accept_thr_pages(void)
+static void zswap_update_accept_thr_pages(void)
{
- return zswap_max_pages() * zswap_accept_thr_percent / 100;
+ WRITE_ONCE(zswap_accept_thr_pages,
+ READ_ONCE(zswap_max_pages) * zswap_accept_thr_percent / 100);
}
unsigned long zswap_total_pages(void)
@@ -684,6 +703,43 @@ static int zswap_enabled_param_set(const char *val,
return ret;
}
+static int __zswap_percent_param_set(const char *val,
+ const struct kernel_param *kp)
+{
+ unsigned int n;
+ int ret;
+
+ ret = kstrtouint(val, 10, &n);
+ if (ret || n > 100)
+ return -EINVAL;
+
+ return param_set_uint(val, kp);
+}
+
+static int zswap_max_pool_param_set(const char *val,
+ const struct kernel_param *kp)
+{
+ int err = __zswap_percent_param_set(val, kp);
+
+ if (!err) {
+ zswap_update_max_pages();
+ zswap_update_accept_thr_pages();
+ }
+
+ return err;
+}
+
+static int zswap_accept_thr_param_set(const char *val,
+ const struct kernel_param *kp)
+{
+ int err = __zswap_percent_param_set(val, kp);
+
+ if (!err)
+ zswap_update_accept_thr_pages();
+
+ return err;
+}
+
/*********************************
* lru functions
**********************************/
@@ -1305,10 +1361,6 @@ static void shrink_worker(struct work_struct *w)
{
struct mem_cgroup *memcg;
int ret, failures = 0;
- unsigned long thr;
-
- /* Reclaim down to the accept threshold */
- thr = zswap_accept_thr_pages();
/* global reclaim will select cgroup in a round-robin fashion. */
do {
@@ -1358,7 +1410,8 @@ static void shrink_worker(struct work_struct *w)
break;
resched:
cond_resched();
- } while (zswap_total_pages() > thr);
+ /* Reclaim down to the accept threshold */
+ } while (zswap_total_pages() > READ_ONCE(zswap_accept_thr_pages));
}
static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
@@ -1424,16 +1477,14 @@ bool zswap_store(struct folio *folio)
/* Check global limits */
cur_pages = zswap_total_pages();
- max_pages = zswap_max_pages();
-
- if (cur_pages >= max_pages) {
+ if (cur_pages >= READ_ONCE(zswap_max_pages)) {
zswap_pool_limit_hit++;
zswap_pool_reached_full = true;
goto reject;
}
if (zswap_pool_reached_full) {
- if (cur_pages > zswap_accept_thr_pages())
+ if (cur_pages > READ_ONCE(zswap_accept_thr_pages))
goto reject;
else
zswap_pool_reached_full = false;
@@ -1734,6 +1785,9 @@ static int zswap_setup(void)
zswap_enabled = false;
}
+ zswap_update_max_pages();
+ zswap_update_accept_thr_pages();
+
if (zswap_debugfs_init())
pr_warn("debugfs initialization failed\n");
zswap_init_state = ZSWAP_INIT_SUCCEED;
--
2.44.0.478.gd926399ef9-goog