Message-Id: <20250318150614.6415-7-shikemeng@huaweicloud.com>
Date: Tue, 18 Mar 2025 23:06:12 +0800
From: Kemeng Shi <shikemeng@...weicloud.com>
To: akpm@...ux-foundation.org
Cc: tim.c.chen@...ux.intel.com,
ryncsn@...il.com,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: [PATCH v2 6/8] mm: swap: free each cluster individually in swap_entries_put_map_nr()

1. Factor out a general swap_entries_put_map() helper to drop entries
belonging to one cluster. If the entries are the last map, free them in
batch; otherwise put the entries with the cluster lock acquired and
released only once.
2. Iterate over the clusters in swap_entries_put_nr() and call
swap_entries_put_map() for each one, to leverage batch removal when the
entries are the last map of a cluster and to reduce lock acquire/release
in the fallback case (see the illustrative sketch after this list).
3. As swap_entries_put_nr() won't handle the SWAP_HAS_CACHE drop, rename
it to swap_entries_put_map_nr().
4. Now that we no longer drop each entry individually with
swap_entry_put(), the reason to do reclaim in free_swap_and_cache_nr()
is that swap_entries_put_map_nr() is a general routine to drop
references and the reclaim work should only be done in
free_swap_and_cache_nr(). Remove the stale comment accordingly.
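
To illustrate the per-cluster splitting described in point 2, here is a
minimal userspace sketch (not part of the patch). It assumes
SWAPFILE_CLUSTER is 512 and uses a hypothetical put_one_cluster() stub
in place of swap_entries_put_map(); the caller guarantees that each
call stays within one cluster:

#include <stdbool.h>
#include <stdio.h>

#define SWAPFILE_CLUSTER 512

/* Stand-in for swap_entries_put_map(): handles one cluster at most. */
static bool put_one_cluster(unsigned long offset, int nr)
{
	printf("put offsets [%lu, %lu) in cluster %lu\n",
	       offset, offset + nr, offset / SWAPFILE_CLUSTER);
	return false;	/* pretend no entry was left with only a cache ref */
}

/*
 * Mirrors the loop in swap_entries_put_map_nr(): chop [offset, offset + nr)
 * at cluster boundaries so each call only takes a single cluster's lock.
 */
static bool put_map_nr(unsigned long offset, int nr)
{
	int cluster_nr, cluster_rest;
	bool has_cache = false;

	/* Room left in the first, possibly partial, cluster. */
	cluster_rest = SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER;
	while (nr) {
		cluster_nr = nr < cluster_rest ? nr : cluster_rest;
		has_cache |= put_one_cluster(offset, cluster_nr);
		cluster_rest = SWAPFILE_CLUSTER;	/* later clusters are full */
		nr -= cluster_nr;
		offset += cluster_nr;
	}
	return has_cache;
}

int main(void)
{
	/*
	 * A range that starts 10 entries before a cluster boundary and
	 * spans two boundaries.
	 */
	put_map_nr(512 - 10, 10 + 512 + 30);
	return 0;
}

Running it prints one line per cluster chunk: offsets [502, 512) in
cluster 0, then [512, 1024) and [1024, 1054), so each
swap_entries_put_map() call would take a single cluster lock.
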
Signed-off-by: Kemeng Shi <shikemeng@...weicloud.com>
---
 mm/swapfile.c | 70 +++++++++++++++++++++++----------------------------
 1 file changed, 32 insertions(+), 38 deletions(-)

diff --git a/mm/swapfile.c b/mm/swapfile.c
index 134b061ef1ae..370509eb2f1f 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1455,25 +1455,10 @@ struct swap_info_struct *get_swap_device(swp_entry_t entry)
 	return NULL;
 }
 
-static unsigned char swap_entry_put(struct swap_info_struct *si,
-				    swp_entry_t entry)
-{
-	struct swap_cluster_info *ci;
-	unsigned long offset = swp_offset(entry);
-	unsigned char usage;
-
-	ci = lock_cluster(si, offset);
-	usage = swap_entry_put_locked(si, ci, entry, 1);
-	unlock_cluster(ci);
-
-	return usage;
-}
-
-static bool swap_entries_put_nr(struct swap_info_struct *si,
-				swp_entry_t entry, int nr)
+static bool swap_entries_put_map(struct swap_info_struct *si,
+				 swp_entry_t entry, int nr)
 {
 	unsigned long offset = swp_offset(entry);
-	unsigned int type = swp_type(entry);
 	struct swap_cluster_info *ci;
 	bool has_cache = false;
 	unsigned char count;
@@ -1484,14 +1469,10 @@ static bool swap_entries_put_nr(struct swap_info_struct *si,
 	count = swap_count(data_race(si->swap_map[offset]));
 	if (count != 1 && count != SWAP_MAP_SHMEM)
 		goto fallback;
-	/* cross into another cluster */
-	if (nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER)
-		goto fallback;
 
 	ci = lock_cluster(si, offset);
 	if (!swap_is_last_map(si, offset, nr, &has_cache)) {
-		unlock_cluster(ci);
-		goto fallback;
+		goto locked_fallback;
 	}
 	if (!has_cache)
 		swap_entries_free(si, ci, entry, nr);
@@ -1503,15 +1484,34 @@ static bool swap_entries_put_nr(struct swap_info_struct *si,
 	return has_cache;
 
 fallback:
-	for (i = 0; i < nr; i++) {
-		if (data_race(si->swap_map[offset + i])) {
-			count = swap_entry_put(si, swp_entry(type, offset + i));
-			if (count == SWAP_HAS_CACHE)
-				has_cache = true;
-		} else {
-			WARN_ON_ONCE(1);
-		}
+	ci = lock_cluster(si, offset);
+locked_fallback:
+	for (i = 0; i < nr; i++, entry.val++) {
+		count = swap_entry_put_locked(si, ci, entry, 1);
+		if (count == SWAP_HAS_CACHE)
+			has_cache = true;
+	}
+	unlock_cluster(ci);
+	return has_cache;
+
+}
+
+static bool swap_entries_put_map_nr(struct swap_info_struct *si,
+				    swp_entry_t entry, int nr)
+{
+	int cluster_nr, cluster_rest;
+	unsigned long offset = swp_offset(entry);
+	bool has_cache = false;
+
+	cluster_rest = SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER;
+	while (nr) {
+		cluster_nr = min(nr, cluster_rest);
+		has_cache |= swap_entries_put_map(si, entry, cluster_nr);
+		cluster_rest = SWAPFILE_CLUSTER;
+		nr -= cluster_nr;
+		entry.val += cluster_nr;
 	}
 
+
 	return has_cache;
 }
@@ -1798,7 +1798,7 @@ void free_swap_and_cache_nr(swp_entry_t entry, int nr)
 	/*
 	 * First free all entries in the range.
 	 */
-	any_only_cache = swap_entries_put_nr(si, entry, nr);
+	any_only_cache = swap_entries_put_map_nr(si, entry, nr);
 
 	/*
 	 * Short-circuit the below loop if none of the entries had their
@@ -1808,13 +1808,7 @@ void free_swap_and_cache_nr(swp_entry_t entry, int nr)
 		goto out;
 
 	/*
-	 * Now go back over the range trying to reclaim the swap cache. This is
-	 * more efficient for large folios because we will only try to reclaim
-	 * the swap once per folio in the common case. If we do
-	 * swap_entry_put() and __try_to_reclaim_swap() in the same loop, the
-	 * latter will get a reference and lock the folio for every individual
-	 * page but will only succeed once the swap slot for every subpage is
-	 * zero.
+	 * Now go back over the range trying to reclaim the swap cache.
 	 */
 	for (offset = start_offset; offset < end_offset; offset += nr) {
 		nr = 1;
--
2.30.0