Message-ID: <20240405053510.1948982-5-yosryahmed@google.com>
Date: Fri, 5 Apr 2024 05:35:09 +0000
From: Yosry Ahmed <yosryahmed@...gle.com>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Johannes Weiner <hannes@...xchg.org>, Nhat Pham <nphamcs@...il.com>,
Chengming Zhou <chengming.zhou@...ux.dev>, linux-mm@...ck.org, linux-kernel@...r.kernel.org,
Yosry Ahmed <yosryahmed@...gle.com>
Subject: [PATCH v2 4/5] mm: zswap: move more same-filled pages checks outside
of zswap_store()
Currently, zswap_store() checks zswap_same_filled_pages_enabled, kmaps
the folio, then calls zswap_is_page_same_filled() to check the folio
contents. Move this logic into zswap_is_page_same_filled() as well (and
rename it to use 'folio' while we are at it).

This makes zswap_store() cleaner and keeps future changes to that logic
contained within the helper. Also rename the insert_entry label to
store_entry to match xa_store().

No functional change intended.
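
For illustration, a minimal userspace sketch of the same-filled scan that
the consolidated helper performs on the kmapped page; SKETCH_PAGE_SIZE and
sketch_is_same_filled() are hypothetical names used only for this sketch,
not identifiers from this series:

#include <stdbool.h>
#include <stddef.h>

/* Assumed 4K page size for the sketch; the kernel uses PAGE_SIZE. */
#define SKETCH_PAGE_SIZE 4096

/*
 * Return true if the page-sized buffer is a single repeated word and
 * report that word through *value.  This mirrors the scan loop in
 * zswap_is_folio_same_filled(), minus the kmap_local_folio() and
 * zswap_same_filled_pages_enabled handling done in the kernel helper.
 */
static bool sketch_is_same_filled(const unsigned long *page,
				  unsigned long *value)
{
	size_t last_pos = SKETCH_PAGE_SIZE / sizeof(*page) - 1;
	unsigned long val = page[0];
	size_t pos;

	/* Cheap early exit: the first and last words must already match. */
	if (val != page[last_pos])
		return false;

	for (pos = 1; pos < last_pos; pos++) {
		if (val != page[pos])
			return false;
	}

	*value = val;
	return true;
}
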
Signed-off-by: Yosry Ahmed <yosryahmed@...gle.com>
Reviewed-by: Nhat Pham <nphamcs@...il.com>
Reviewed-by: Chengming Zhou <chengming.zhou@...ux.dev>
Acked-by: Johannes Weiner <hannes@...xchg.org>
---
mm/zswap.c | 39 +++++++++++++++++++--------------------
1 file changed, 19 insertions(+), 20 deletions(-)
diff --git a/mm/zswap.c b/mm/zswap.c
index ab3cd43cdfc9d..13869d18c13bd 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1414,26 +1414,32 @@ static void shrink_worker(struct work_struct *w)
 	} while (zswap_total_pages() > READ_ONCE(zswap_accept_thr_pages));
 }
 
-static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
+static bool zswap_is_folio_same_filled(struct folio *folio, unsigned long *value)
 {
 	unsigned long *page;
 	unsigned long val;
 	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
+	bool ret = false;
 
-	page = (unsigned long *)ptr;
+	if (!zswap_same_filled_pages_enabled)
+		return false;
+
+	page = kmap_local_folio(folio, 0);
 	val = page[0];
 
 	if (val != page[last_pos])
-		return 0;
+		goto out;
 
 	for (pos = 1; pos < last_pos; pos++) {
 		if (val != page[pos])
-			return 0;
+			goto out;
 	}
 
 	*value = val;
-
-	return 1;
+	ret = true;
+out:
+	kunmap_local(page);
+	return ret;
 }
 
 static void zswap_fill_page(void *ptr, unsigned long value)
@@ -1466,6 +1472,7 @@ bool zswap_store(struct folio *folio)
 	struct zswap_entry *entry, *old;
 	struct obj_cgroup *objcg = NULL;
 	struct mem_cgroup *memcg = NULL;
+	unsigned long value;
 
 	VM_WARN_ON_ONCE(!folio_test_locked(folio));
 	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
@@ -1498,19 +1505,11 @@ bool zswap_store(struct folio *folio)
 		goto reject;
 	}
 
-	if (zswap_same_filled_pages_enabled) {
-		unsigned long value;
-		u8 *src;
-
-		src = kmap_local_folio(folio, 0);
-		if (zswap_is_page_same_filled(src, &value)) {
-			kunmap_local(src);
-			entry->length = 0;
-			entry->value = value;
-			atomic_inc(&zswap_same_filled_pages);
-			goto insert_entry;
-		}
-		kunmap_local(src);
+	if (zswap_is_folio_same_filled(folio, &value)) {
+		entry->length = 0;
+		entry->value = value;
+		atomic_inc(&zswap_same_filled_pages);
+		goto store_entry;
 	}
 
 	if (!zswap_non_same_filled_pages_enabled)
@@ -1533,7 +1532,7 @@ bool zswap_store(struct folio *folio)
 	if (!zswap_compress(folio, entry))
 		goto put_pool;
 
-insert_entry:
+store_entry:
 	entry->swpentry = swp;
 	entry->objcg = objcg;
 
--
2.44.0.478.gd926399ef9-goog