Message-ID: <20251205231721.104505-9-mfo@igalia.com>
Date: Fri, 5 Dec 2025 20:17:20 -0300
From: Mauricio Faria de Oliveira <mfo@...lia.com>
To: Andrew Morton <akpm@...ux-foundation.org>,
David Hildenbrand <david@...nel.org>
Cc: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>,
Michal Hocko <mhocko@...e.com>,
Vlastimil Babka <vbabka@...e.cz>,
Oscar Salvador <osalvador@...e.de>,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
kernel-dev@...lia.com
Subject: [PATCH RFC 8/9] mm: call arch-specific swap hooks from generic swap hooks
Add generic swap hooks that simply forward to the arch-specific ones (e.g.,
arm64), and convert the generic mm code to call them instead of calling the
arch_*() helpers directly.
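Each wrapper is a one-line forwarder, so there is no functional change.
For illustration only (a sketch of the resulting call chain, not new
behavior), swap_writeout() now ends up doing:

  swap_writeout()
    -> hook_prepare_to_swap(folio)      /* new generic hook */
       -> arch_prepare_to_swap(folio)   /* e.g. arm64 memory tags; no-op stub elsewhere */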
Signed-off-by: Mauricio Faria de Oliveira <mfo@...lia.com>
---
 include/linux/pgtable.h | 20 ++++++++++++++++++++
 mm/memory.c             |  2 +-
 mm/page_io.c            |  2 +-
 mm/shmem.c              |  2 +-
 mm/swapfile.c           |  6 +++---
 5 files changed, 26 insertions(+), 6 deletions(-)
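(Note for reviewers, not part of the patch.) The arch_*() hooks that the new
wrappers forward to already have no-op fallbacks in include/linux/pgtable.h
when an architecture does not provide its own, roughly:

  #ifndef __HAVE_ARCH_PREPARE_TO_SWAP
  static inline int arch_prepare_to_swap(struct folio *folio)
  {
          return 0;       /* nothing to save before writeout */
  }
  #endif

  #ifndef __HAVE_ARCH_SWAP_RESTORE
  static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
  {
          /* nothing to restore after swapin */
  }
  #endif

so the generic hook_*() wrappers compile down to no-ops on such architectures.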
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 652f287c1ef6..23d30afe439e 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1220,6 +1220,26 @@ static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
 }
 #endif
 
+static inline int hook_prepare_to_swap(struct folio *folio)
+{
+	return arch_prepare_to_swap(folio);
+}
+
+static inline void hook_swap_invalidate_page(int type, pgoff_t offset)
+{
+	arch_swap_invalidate_page(type, offset);
+}
+
+static inline void hook_swap_invalidate_area(int type)
+{
+	arch_swap_invalidate_area(type);
+}
+
+static inline void hook_swap_restore(swp_entry_t entry, struct folio *folio)
+{
+	arch_swap_restore(entry, folio);
+}
+
 #ifndef __HAVE_ARCH_MOVE_PTE
 #define move_pte(pte, old_addr, new_addr) (pte)
 #endif
diff --git a/mm/memory.c b/mm/memory.c
index 2a55edc48a65..6a5a5bd3eeed 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4945,7 +4945,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	 * when reading from swap. This metadata may be indexed by swap entry
 	 * so this must be called before swap_free().
 	 */
-	arch_swap_restore(folio_swap(entry, folio), folio);
+	hook_swap_restore(folio_swap(entry, folio), folio);
 
 	/*
 	 * Remove the swap entry and conditionally try to free up the swapcache.
diff --git a/mm/page_io.c b/mm/page_io.c
index 3c342db77ce3..b62c53a86a59 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -248,7 +248,7 @@ int swap_writeout(struct folio *folio, struct swap_iocb **swap_plug)
 	 * Arch code may have to preserve more data than just the page
 	 * contents, e.g. memory tags.
 	 */
-	ret = arch_prepare_to_swap(folio);
+	ret = hook_prepare_to_swap(folio);
 	if (ret) {
 		folio_mark_dirty(folio);
 		goto out_unlock;
diff --git a/mm/shmem.c b/mm/shmem.c
index 3f194c9842a8..ef53c14156fb 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2395,7 +2395,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 	 * Some architectures may have to restore extra metadata to the
 	 * folio after reading from swap.
 	 */
-	arch_swap_restore(folio_swap(swap, folio), folio);
+	hook_swap_restore(folio_swap(swap, folio), folio);
 
 	if (shmem_should_replace_folio(folio, gfp)) {
 		error = shmem_replace_folio(&folio, gfp, info, index, vma);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 46d2008e4b99..e156f5cb6e9f 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1270,7 +1270,7 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
 	else
 		swap_slot_free_notify = NULL;
 	while (offset <= end) {
-		arch_swap_invalidate_page(si->type, offset);
+		hook_swap_invalidate_page(si->type, offset);
 		if (swap_slot_free_notify)
 			swap_slot_free_notify(si->bdev, offset);
 		offset++;
@@ -2196,7 +2196,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	 * when reading from swap. This metadata may be indexed by swap entry
 	 * so this must be called before swap_free().
 	 */
-	arch_swap_restore(folio_swap(entry, folio), folio);
+	hook_swap_restore(folio_swap(entry, folio), folio);
 
 	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
 	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
@@ -2931,7 +2931,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
 	p->cluster_info = NULL;
 	spin_unlock(&p->lock);
 	spin_unlock(&swap_lock);
-	arch_swap_invalidate_area(p->type);
+	hook_swap_invalidate_area(p->type);
 	zswap_swapoff(p->type);
 	mutex_unlock(&swapon_mutex);
 	kfree(p->global_cluster);
--
2.51.0