Message-Id: <20240418134435.6092-2-ioworker0@gmail.com>
Date: Thu, 18 Apr 2024 21:44:32 +0800
From: Lance Yang <ioworker0@...il.com>
To: akpm@...ux-foundation.org
Cc: ryan.roberts@....com,
david@...hat.com,
21cnbao@...il.com,
mhocko@...e.com,
fengwei.yin@...el.com,
zokeefe@...gle.com,
shy828301@...il.com,
xiehuan09@...il.com,
wangkefeng.wang@...wei.com,
songmuchun@...edance.com,
peterx@...hat.com,
minchan@...nel.org,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
Lance Yang <ioworker0@...il.com>
Subject: [PATCH v10 1/4] mm/madvise: introduce clear_young_dirty_ptes() batch helper

This commit introduces clear_young_dirty_ptes() to replace mkold_ptes().
By doing so, we can use the same function for both use cases
(madvise_pageout and madvise_free), and it also provides the flexibility
to clear only the dirty flag in the future if needed.

Suggested-by: Ryan Roberts <ryan.roberts@....com>
Acked-by: David Hildenbrand <david@...hat.com>
Reviewed-by: Ryan Roberts <ryan.roberts@....com>
Signed-off-by: Lance Yang <ioworker0@...il.com>
---
include/linux/mm_types.h | 9 +++++
include/linux/pgtable.h | 74 ++++++++++++++++++++++++----------------
mm/madvise.c | 3 +-
3 files changed, 55 insertions(+), 31 deletions(-)
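
As an illustration of the intended usage (not part of the patch), here is a
minimal sketch of how a caller might combine the new cydp_t flags; the
wrapper name example_batch_clear() and its want_clean parameter are
hypothetical, and the real callers are the madvise paths touched by this
series:

/* Illustrative sketch only -- not part of this patch. */
static void example_batch_clear(struct vm_area_struct *vma, unsigned long addr,
				pte_t *ptep, unsigned int nr, bool want_clean)
{
	/* Always age the PTEs; optionally also drop the dirty bit. */
	cydp_t flags = CYDP_CLEAR_YOUNG;

	if (want_clean)
		flags |= CYDP_CLEAR_DIRTY;

	/* Caller holds the PTL; all @nr PTEs map one folio within one PMD. */
	clear_young_dirty_ptes(vma, addr, ptep, nr, flags);
}
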
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index db0adf5721cc..24323c7d0bd4 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -1368,6 +1368,15 @@ enum fault_flag {
typedef unsigned int __bitwise zap_flags_t;
+/* Flags for clear_young_dirty_ptes(). */
+typedef int __bitwise cydp_t;
+
+/* Clear the access bit */
+#define CYDP_CLEAR_YOUNG ((__force cydp_t)BIT(0))
+
+/* Clear the dirty bit */
+#define CYDP_CLEAR_DIRTY ((__force cydp_t)BIT(1))
+
/*
* FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
* other. Here is what they mean, and how to use them:
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index e2f45e22a6d1..18019f037bae 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -361,36 +361,6 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
}
#endif
-#ifndef mkold_ptes
-/**
- * mkold_ptes - Mark PTEs that map consecutive pages of the same folio as old.
- * @vma: VMA the pages are mapped into.
- * @addr: Address the first page is mapped at.
- * @ptep: Page table pointer for the first entry.
- * @nr: Number of entries to mark old.
- *
- * May be overridden by the architecture; otherwise, implemented as a simple
- * loop over ptep_test_and_clear_young().
- *
- * Note that PTE bits in the PTE range besides the PFN can differ. For example,
- * some PTEs might be write-protected.
- *
- * Context: The caller holds the page table lock. The PTEs map consecutive
- * pages that belong to the same folio. The PTEs are all in the same PMD.
- */
-static inline void mkold_ptes(struct vm_area_struct *vma, unsigned long addr,
- pte_t *ptep, unsigned int nr)
-{
- for (;;) {
- ptep_test_and_clear_young(vma, addr, ptep);
- if (--nr == 0)
- break;
- ptep++;
- addr += PAGE_SIZE;
- }
-}
-#endif
-
#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
@@ -489,6 +459,50 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
}
#endif
+#ifndef clear_young_dirty_ptes
+/**
+ * clear_young_dirty_ptes - Mark PTEs that map consecutive pages of the
+ * same folio as old/clean.
+ * @vma: VMA the pages are mapped into.
+ * @addr: Address the first page is mapped at.
+ * @ptep: Page table pointer for the first entry.
+ * @nr: Number of entries to mark old/clean.
+ * @flags: Flags to modify the PTE batch semantics.
+ *
+ * May be overridden by the architecture; otherwise, implemented by
+ * get_and_clear/modify/set for each pte in the range.
+ *
+ * Note that PTE bits in the PTE range besides the PFN can differ. For example,
+ * some PTEs might be write-protected.
+ *
+ * Context: The caller holds the page table lock. The PTEs map consecutive
+ * pages that belong to the same folio. The PTEs are all in the same PMD.
+ */
+static inline void clear_young_dirty_ptes(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ unsigned int nr, cydp_t flags)
+{
+ pte_t pte;
+
+ for (;;) {
+ if (flags == CYDP_CLEAR_YOUNG)
+ ptep_test_and_clear_young(vma, addr, ptep);
+ else {
+ pte = ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ if (flags & CYDP_CLEAR_YOUNG)
+ pte = pte_mkold(pte);
+ if (flags & CYDP_CLEAR_DIRTY)
+ pte = pte_mkclean(pte);
+ set_pte_at(vma->vm_mm, addr, ptep, pte);
+ }
+ if (--nr == 0)
+ break;
+ ptep++;
+ addr += PAGE_SIZE;
+ }
+}
+#endif
+
static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
diff --git a/mm/madvise.c b/mm/madvise.c
index 4b869b682fd5..f5e3699e7b54 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -507,7 +507,8 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
continue;
if (!pageout && pte_young(ptent)) {
- mkold_ptes(vma, addr, pte, nr);
+ clear_young_dirty_ptes(vma, addr, pte, nr,
+ CYDP_CLEAR_YOUNG);
tlb_remove_tlb_entries(tlb, pte, nr, addr);
}
--
2.33.1