Message-Id: <20230823131350.114942-33-alexandru.elisei@arm.com>
Date: Wed, 23 Aug 2023 14:13:45 +0100
From: Alexandru Elisei <alexandru.elisei@....com>
To: catalin.marinas@....com, will@...nel.org, oliver.upton@...ux.dev,
maz@...nel.org, james.morse@....com, suzuki.poulose@....com,
yuzenghui@...wei.com, arnd@...db.de, akpm@...ux-foundation.org,
mingo@...hat.com, peterz@...radead.org, juri.lelli@...hat.com,
vincent.guittot@...aro.org, dietmar.eggemann@....com,
rostedt@...dmis.org, bsegall@...gle.com, mgorman@...e.de,
bristot@...hat.com, vschneid@...hat.com, mhiramat@...nel.org,
rppt@...nel.org, hughd@...gle.com
Cc: pcc@...gle.com, steven.price@....com, anshuman.khandual@....com,
vincenzo.frascino@....com, david@...hat.com, eugenis@...gle.com,
kcc@...gle.com, hyesoo.yu@...sung.com,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
kvmarm@...ts.linux.dev, linux-fsdevel@...r.kernel.org,
linux-arch@...r.kernel.org, linux-mm@...ck.org,
linux-trace-kernel@...r.kernel.org
Subject: [PATCH RFC 32/37] mm: Call arch_swap_prepare_to_restore() before arch_swap_restore()
arch_swap_restore() allows an architecture to restore metadata before the
page is swapped in, and it is called in atomic context (with the ptl held).
Introduce arch_swap_prepare_to_restore() to allow such architectures to
perform the preparatory work that needs to block (like memory allocations)
before the ptl is taken.
Signed-off-by: Alexandru Elisei <alexandru.elisei@....com>
---
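For illustration only (not part of this patch): an architecture that wants
to use the new hook would define __HAVE_ARCH_SWAP_PREPARE_TO_RESTORE in its
asm/pgtable.h and supply a definition that is allowed to sleep. A minimal
sketch, where metadata_storage_enabled() and arch_alloc_metadata_storage()
are hypothetical helpers standing in for whatever blocking work the
architecture actually needs:

#define __HAVE_ARCH_SWAP_PREPARE_TO_RESTORE
static inline int arch_swap_prepare_to_restore(swp_entry_t entry,
					       struct folio *folio)
{
	/*
	 * Runs before the ptl is taken, so blocking allocations are
	 * allowed here; arch_swap_restore() itself cannot sleep.
	 */
	if (!metadata_storage_enabled())
		return 0;

	/* Return 0 on success, -ENOMEM (or similar) on failure. */
	return arch_alloc_metadata_storage(entry, folio);
}

On failure the callers back out: do_swap_page() turns a non-zero return
into VM_FAULT_ERROR, while shmem_swapin_folio() and unuse_pte() propagate
the error directly.
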
include/linux/pgtable.h | 7 +++++++
mm/memory.c | 11 +++++++++++
mm/shmem.c | 4 ++++
mm/swapfile.c | 4 ++++
4 files changed, 26 insertions(+)
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 0119ffa2c0ab..0bce12f9eaab 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -816,6 +816,13 @@ static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
}
#endif
+#ifndef __HAVE_ARCH_SWAP_PREPARE_TO_RESTORE
+static inline int arch_swap_prepare_to_restore(swp_entry_t entry, struct folio *folio)
+{
+ return 0;
+}
+#endif
+
#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
#endif
diff --git a/mm/memory.c b/mm/memory.c
index 6c4a6151c7b2..5f7587109ac2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3724,6 +3724,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
swp_entry_t entry;
pte_t pte;
int locked;
+ int error;
vm_fault_t ret = 0;
void *shadow = NULL;
@@ -3892,6 +3893,16 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
folio_throttle_swaprate(folio, GFP_KERNEL);
+ /*
+ * Some architectures may need to perform certain operations in a
+ * preemptible context (like memory allocations) before
+ * arch_swap_restore() is called.
+ */
+ error = arch_swap_prepare_to_restore(entry, folio);
+ if (error) {
+ ret = VM_FAULT_ERROR;
+ goto out_page;
+ }
+
/*
* Back out if somebody else already faulted in this pte.
*/
diff --git a/mm/shmem.c b/mm/shmem.c
index 0b772ec34caa..4704be6a4e9b 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1796,6 +1796,10 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
}
folio_wait_writeback(folio);
+ error = arch_swap_prepare_to_restore(swap, folio);
+ if (error)
+ goto unlock;
+
/*
* Some architectures may have to restore extra metadata to the
* folio after reading from swap.
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 6d719ed5c616..387971e2c5f0 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1756,6 +1756,10 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
else if (unlikely(PTR_ERR(page) == -EHWPOISON))
hwposioned = true;
+ ret = arch_swap_prepare_to_restore(entry, folio);
+ if (ret)
+ return ret;
+
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
if (unlikely(!pte || !pte_same_as_swp(ptep_get(pte),
swp_entry_to_pte(entry)))) {
--
2.41.0