Message-ID: <20250407234223.1059191-6-nphamcs@gmail.com>
Date: Mon, 7 Apr 2025 16:42:06 -0700
From: Nhat Pham <nphamcs@...il.com>
To: linux-mm@...ck.org
Cc: akpm@...ux-foundation.org,
hannes@...xchg.org,
hughd@...gle.com,
yosry.ahmed@...ux.dev,
mhocko@...nel.org,
roman.gushchin@...ux.dev,
shakeel.butt@...ux.dev,
muchun.song@...ux.dev,
len.brown@...el.com,
chengming.zhou@...ux.dev,
kasong@...cent.com,
chrisl@...nel.org,
huang.ying.caritas@...il.com,
ryan.roberts@....com,
viro@...iv.linux.org.uk,
baohua@...nel.org,
osalvador@...e.de,
lorenzo.stoakes@...cle.com,
christophe.leroy@...roup.eu,
pavel@...nel.org,
kernel-team@...a.com,
linux-kernel@...r.kernel.org,
cgroups@...r.kernel.org,
linux-pm@...r.kernel.org
Subject: [RFC PATCH 05/14] zswap: unify zswap tree for virtualized swap
As with the swap cache, the zswap tree code, specifically its range
partitioning logic, can no longer be easily reused for the new virtual
swap space design. For now, use a single unified zswap tree in the new
implementation. As in the swap cache case, range partitioning is planned
as follow-up work.

Signed-off-by: Nhat Pham <nphamcs@...il.com>
---
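A quick note for reviewers, not part of the commit message: the userspace
sketch below models how a swap entry is routed to a (tree, index) pair under
the two configurations, which is the core of this change. The swp_entry_t bit
layout (SWP_TYPE_SHIFT), the tree counts and the plain struct tree are made-up
stand-ins for illustration only; the real code uses struct xarray and sizes
the per-swapfile tree arrays at swapon time based on nr_pages.

/* Hypothetical userspace model of the zswap tree lookup -- not kernel code. */
#include <stdio.h>

#define SWP_TYPE_SHIFT			58	/* assumed encoding for the sketch */
#define SWAP_ADDRESS_SPACE_SHIFT	14	/* one tree per 2^14 slots */
#define MAX_SWAPFILES			32
#define TREES_PER_SWAPFILE		64	/* fixed here; dynamic in the kernel */

typedef struct { unsigned long val; } swp_entry_t;

static unsigned int swp_type(swp_entry_t e)
{
	return e.val >> SWP_TYPE_SHIFT;
}

static unsigned long swp_offset(swp_entry_t e)
{
	return e.val & ((1UL << SWP_TYPE_SHIFT) - 1);
}

struct tree { int dummy; };	/* stand-in for struct xarray */

#ifdef CONFIG_VIRTUAL_SWAP
/* One global tree; the full virtual swap slot value is the index. */
static struct tree zswap_tree;

static struct tree *swap_zswap_tree(swp_entry_t swp)
{
	(void)swp;
	return &zswap_tree;
}
#define zswap_tree_index(entry)		((entry).val)
#else
/* Per-swapfile trees, each covering a fixed range of offsets. */
static struct tree zswap_trees[MAX_SWAPFILES][TREES_PER_SWAPFILE];

static struct tree *swap_zswap_tree(swp_entry_t swp)
{
	return &zswap_trees[swp_type(swp)]
			   [swp_offset(swp) >> SWAP_ADDRESS_SPACE_SHIFT];
}
#define zswap_tree_index(entry)		swp_offset(entry)
#endif

int main(void)
{
	/* type 2, offset 123456 under the assumed encoding */
	swp_entry_t swp = { .val = (2UL << SWP_TYPE_SHIFT) | 123456 };

	printf("tree=%p index=%lu\n",
	       (void *)swap_zswap_tree(swp), zswap_tree_index(swp));
	return 0;
}

Building with -DCONFIG_VIRTUAL_SWAP collapses every entry onto the single
global tree indexed by the full entry value; otherwise entries spread across
the per-swapfile, per-range trees indexed by offset, as in the current code.
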
mm/zswap.c | 38 ++++++++++++++++++++++++++++++++------
1 file changed, 32 insertions(+), 6 deletions(-)
diff --git a/mm/zswap.c b/mm/zswap.c
index 23365e76a3ce..c1327569ce80 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -203,8 +203,6 @@ struct zswap_entry {
struct list_head lru;
};
-static struct xarray *zswap_trees[MAX_SWAPFILES];
-static unsigned int nr_zswap_trees[MAX_SWAPFILES];
/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
@@ -231,12 +229,28 @@ static bool zswap_has_pool;
* helpers and fwd declarations
**********************************/
+#ifdef CONFIG_VIRTUAL_SWAP
+static DEFINE_XARRAY(zswap_tree);
+
+static inline struct xarray *swap_zswap_tree(swp_entry_t swp)
+{
+ return &zswap_tree;
+}
+
+#define zswap_tree_index(entry) entry.val
+#else
+static struct xarray *zswap_trees[MAX_SWAPFILES];
+static unsigned int nr_zswap_trees[MAX_SWAPFILES];
+
static inline struct xarray *swap_zswap_tree(swp_entry_t swp)
{
return &zswap_trees[swp_type(swp)][swp_offset(swp)
>> SWAP_ADDRESS_SPACE_SHIFT];
}
+#define zswap_tree_index(entry) swp_offset(entry)
+#endif
+
#define zswap_pool_debug(msg, p) \
pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name, \
zpool_get_type((p)->zpool))
@@ -1047,7 +1061,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
swp_entry_t swpentry)
{
struct xarray *tree;
- pgoff_t offset = swp_offset(swpentry);
+ pgoff_t offset = zswap_tree_index(swpentry);
struct folio *folio;
struct mempolicy *mpol;
bool folio_was_allocated;
@@ -1463,7 +1477,7 @@ static bool zswap_store_page(struct page *page,
goto compress_failed;
old = xa_store(swap_zswap_tree(page_swpentry),
- swp_offset(page_swpentry),
+ zswap_tree_index(page_swpentry),
entry, GFP_KERNEL);
if (xa_is_err(old)) {
int err = xa_err(old);
@@ -1612,7 +1626,7 @@ bool zswap_store(struct folio *folio)
bool zswap_load(struct folio *folio)
{
swp_entry_t swp = folio->swap;
- pgoff_t offset = swp_offset(swp);
+ pgoff_t offset = zswap_tree_index(swp);
bool swapcache = folio_test_swapcache(folio);
struct xarray *tree = swap_zswap_tree(swp);
struct zswap_entry *entry;
@@ -1670,7 +1684,7 @@ bool zswap_load(struct folio *folio)
void zswap_invalidate(swp_entry_t swp)
{
- pgoff_t offset = swp_offset(swp);
+ pgoff_t offset = zswap_tree_index(swp);
struct xarray *tree = swap_zswap_tree(swp);
struct zswap_entry *entry;
@@ -1682,6 +1696,16 @@ void zswap_invalidate(swp_entry_t swp)
zswap_entry_free(entry);
}
+#ifdef CONFIG_VIRTUAL_SWAP
+int zswap_swapon(int type, unsigned long nr_pages)
+{
+ return 0;
+}
+
+void zswap_swapoff(int type)
+{
+}
+#else
int zswap_swapon(int type, unsigned long nr_pages)
{
struct xarray *trees, *tree;
@@ -1718,6 +1742,8 @@ void zswap_swapoff(int type)
nr_zswap_trees[type] = 0;
zswap_trees[type] = NULL;
}
+#endif /* CONFIG_VIRTUAL_SWAP */
+
/*********************************
* debugfs functions
--
2.47.1