Message-Id: <20240408183946.2991168-2-ryan.roberts@arm.com>
Date: Mon, 8 Apr 2024 19:39:40 +0100
From: Ryan Roberts <ryan.roberts@....com>
To: Andrew Morton <akpm@...ux-foundation.org>,
David Hildenbrand <david@...hat.com>,
Matthew Wilcox <willy@...radead.org>,
Huang Ying <ying.huang@...el.com>,
Gao Xiang <xiang@...nel.org>,
Yu Zhao <yuzhao@...gle.com>,
Yang Shi <shy828301@...il.com>,
Michal Hocko <mhocko@...e.com>,
Kefeng Wang <wangkefeng.wang@...wei.com>,
Barry Song <21cnbao@...il.com>,
Chris Li <chrisl@...nel.org>,
Lance Yang <ioworker0@...il.com>
Cc: Ryan Roberts <ryan.roberts@....com>,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: [PATCH v7 1/7] mm: swap: Remove CLUSTER_FLAG_HUGE from swap_cluster_info:flags
As preparation for supporting small-sized THP in the swap-out path,
without first needing to split to order-0, remove CLUSTER_FLAG_HUGE,
which, when present, always implies PMD-sized THP, which is the same as
the cluster size.
The only use of the flag was to determine whether a swap entry refers to
a single page or a PMD-sized THP in swap_page_trans_huge_swapped().
Instead of relying on the flag, we now pass in order, which originates
from the folio's order. This allows the logic to work for folios of any
order.
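To illustrate the shape of the new check, here is a minimal userspace
model (a sketch only: CLUSTER_SIZE, entry_range_swapped() and the plain
non-zero count test are stand-ins invented for this example, not kernel
APIs; the real code walks si->swap_map under the cluster lock, as in the
hunk below):

#include <stdbool.h>
#include <stdio.h>

#define CLUSTER_SIZE 512	/* stand-in for SWAPFILE_CLUSTER */

static unsigned char swap_map[CLUSTER_SIZE];

/* Mirrors the order-generalized swap_page_trans_huge_swapped() shape. */
static bool entry_range_swapped(unsigned long roffset, int order)
{
	unsigned int nr_pages = 1u << order;
	/* round down to the folio's natural alignment, as the patch does */
	unsigned long offset = roffset & ~((unsigned long)nr_pages - 1);
	unsigned int i;

	if (nr_pages == 1)
		return swap_map[roffset] != 0;

	for (i = 0; i < nr_pages; i++)
		if (swap_map[offset + i] != 0)
			return true;

	return false;
}

int main(void)
{
	swap_map[5] = 1;
	printf("%d\n", entry_range_swapped(5, 0)); /* 1: the entry itself */
	printf("%d\n", entry_range_swapped(6, 0)); /* 0: neighbour untouched */
	printf("%d\n", entry_range_swapped(6, 3)); /* 1: order-3 run covers 0..7 */
	return 0;
}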
The one snag is that one of the swap_page_trans_huge_swapped() call
sites does not have the folio. But it was only being called there to
shortcut a call to __try_to_reclaim_swap() in some cases.
__try_to_reclaim_swap() gets the folio and (via some other functions)
calls swap_page_trans_huge_swapped(). So I've removed the problematic
call site and believe the new logic should be functionally equivalent.
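For reference, the path that now always runs looks roughly like this
(my reading of mm/swapfile.c; folio_swapped() is the "other function"
in question):

  free_swap_and_cache()
    -> __try_to_reclaim_swap()        (takes folio ref + trylock)
         -> folio_free_swap()
              -> folio_swapped()
                   -> swap_page_trans_huge_swapped()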
That said, removing the fast path means that we will take a reference
and trylock a large folio much more often, which we would like to avoid.
The next patch will solve this.
Removing CLUSTER_FLAG_HUGE also means we can remove split_swap_cluster(),
which used to be called during folio splitting, since
split_swap_cluster()'s only job was to remove the flag.
Reviewed-by: "Huang, Ying" <ying.huang@...el.com>
Acked-by: Chris Li <chrisl@...nel.org>
Acked-by: David Hildenbrand <david@...hat.com>
Signed-off-by: Ryan Roberts <ryan.roberts@....com>
---
include/linux/swap.h | 10 ----------
mm/huge_memory.c | 3 ---
mm/swapfile.c | 47 ++++++++------------------------------------
3 files changed, 8 insertions(+), 52 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index a211a0383425..f6f78198f000 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -259,7 +259,6 @@ struct swap_cluster_info {
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
-#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */
/*
* We assign a cluster to each CPU, so each CPU can allocate swap entry from
@@ -590,15 +589,6 @@ static inline int add_swap_extent(struct swap_info_struct *sis,
}
#endif /* CONFIG_SWAP */
-#ifdef CONFIG_THP_SWAP
-extern int split_swap_cluster(swp_entry_t entry);
-#else
-static inline int split_swap_cluster(swp_entry_t entry)
-{
- return 0;
-}
-#endif
-
#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b106baec7260..5b875f0fc923 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2892,9 +2892,6 @@ static void __split_huge_page(struct page *page, struct list_head *list,
shmem_uncharge(folio->mapping->host, nr_dropped);
remap_page(folio, nr);
- if (folio_test_swapcache(folio))
- split_swap_cluster(folio->swap);
-
/*
* set page to its compound_head when split to non order-0 pages, so
* we can skip unlocking it below, since PG_locked is transferred to
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 5e6d2304a2a4..1ded6d1dcab4 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -343,18 +343,6 @@ static inline void cluster_set_null(struct swap_cluster_info *info)
info->data = 0;
}
-static inline bool cluster_is_huge(struct swap_cluster_info *info)
-{
- if (IS_ENABLED(CONFIG_THP_SWAP))
- return info->flags & CLUSTER_FLAG_HUGE;
- return false;
-}
-
-static inline void cluster_clear_huge(struct swap_cluster_info *info)
-{
- info->flags &= ~CLUSTER_FLAG_HUGE;
-}
-
static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
unsigned long offset)
{
@@ -1027,7 +1015,7 @@ static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
offset = idx * SWAPFILE_CLUSTER;
ci = lock_cluster(si, offset);
alloc_cluster(si, idx);
- cluster_set_count_flag(ci, SWAPFILE_CLUSTER, CLUSTER_FLAG_HUGE);
+ cluster_set_count(ci, SWAPFILE_CLUSTER);
memset(si->swap_map + offset, SWAP_HAS_CACHE, SWAPFILE_CLUSTER);
unlock_cluster(ci);
@@ -1365,7 +1353,6 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry)
ci = lock_cluster_or_swap_info(si, offset);
if (size == SWAPFILE_CLUSTER) {
- VM_BUG_ON(!cluster_is_huge(ci));
map = si->swap_map + offset;
for (i = 0; i < SWAPFILE_CLUSTER; i++) {
val = map[i];
@@ -1373,7 +1360,6 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry)
if (val == SWAP_HAS_CACHE)
free_entries++;
}
- cluster_clear_huge(ci);
if (free_entries == SWAPFILE_CLUSTER) {
unlock_cluster_or_swap_info(si, ci);
spin_lock(&si->lock);
@@ -1395,23 +1381,6 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry)
unlock_cluster_or_swap_info(si, ci);
}
-#ifdef CONFIG_THP_SWAP
-int split_swap_cluster(swp_entry_t entry)
-{
- struct swap_info_struct *si;
- struct swap_cluster_info *ci;
- unsigned long offset = swp_offset(entry);
-
- si = _swap_info_get(entry);
- if (!si)
- return -EBUSY;
- ci = lock_cluster(si, offset);
- cluster_clear_huge(ci);
- unlock_cluster(ci);
- return 0;
-}
-#endif
-
static int swp_entry_cmp(const void *ent1, const void *ent2)
{
const swp_entry_t *e1 = ent1, *e2 = ent2;
@@ -1519,22 +1488,23 @@ int swp_swapcount(swp_entry_t entry)
}
static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
- swp_entry_t entry)
+ swp_entry_t entry, int order)
{
struct swap_cluster_info *ci;
unsigned char *map = si->swap_map;
+ unsigned int nr_pages = 1 << order;
unsigned long roffset = swp_offset(entry);
- unsigned long offset = round_down(roffset, SWAPFILE_CLUSTER);
+ unsigned long offset = round_down(roffset, nr_pages);
int i;
bool ret = false;
ci = lock_cluster_or_swap_info(si, offset);
- if (!ci || !cluster_is_huge(ci)) {
+ if (!ci || nr_pages == 1) {
if (swap_count(map[roffset]))
ret = true;
goto unlock_out;
}
- for (i = 0; i < SWAPFILE_CLUSTER; i++) {
+ for (i = 0; i < nr_pages; i++) {
if (swap_count(map[offset + i])) {
ret = true;
break;
@@ -1556,7 +1526,7 @@ static bool folio_swapped(struct folio *folio)
if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!folio_test_large(folio)))
return swap_swapcount(si, entry) != 0;
- return swap_page_trans_huge_swapped(si, entry);
+ return swap_page_trans_huge_swapped(si, entry, folio_order(folio));
}
/**
@@ -1622,8 +1592,7 @@ int free_swap_and_cache(swp_entry_t entry)
}
count = __swap_entry_free(p, entry);
- if (count == SWAP_HAS_CACHE &&
- !swap_page_trans_huge_swapped(p, entry))
+ if (count == SWAP_HAS_CACHE)
__try_to_reclaim_swap(p, swp_offset(entry),
TTRS_UNMAPPED | TTRS_FULL);
put_swap_device(p);
--
2.25.1