Message-Id: <20240408042437.10951-3-ioworker0@gmail.com>
Date: Mon,  8 Apr 2024 12:24:37 +0800
From: Lance Yang <ioworker0@...il.com>
To: akpm@...ux-foundation.org
Cc: ryan.roberts@....com,
	david@...hat.com,
	21cnbao@...il.com,
	mhocko@...e.com,
	fengwei.yin@...el.com,
	zokeefe@...gle.com,
	shy828301@...il.com,
	xiehuan09@...il.com,
	wangkefeng.wang@...wei.com,
	songmuchun@...edance.com,
	peterx@...hat.com,
	minchan@...nel.org,
	linux-mm@...ck.org,
	linux-kernel@...r.kernel.org,
	Lance Yang <ioworker0@...il.com>
Subject: [PATCH v5 2/2] mm/arm64: override mkold_clean_ptes() batch helper

The per-pte get_and_clear/modify/set approach would result in
unfolding/refolding for contpte mappings on arm64. So we need
to override mkold_clean_ptes() for arm64 to avoid it.
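
For reference (not part of this patch), the per-pte get_and_clear/modify/set
pattern described above looks roughly like the sketch below. This is
illustrative only: the function name and the exact shape of the generic
fallback introduced in patch 1/2 are assumptions here; only the standard
<linux/pgtable.h> accessors (ptep_get_and_clear(), pte_mkold(), pte_mkclean(),
set_pte_at()) are taken as given.

	/* Illustrative sketch, not the code added by this series. */
	static inline void per_pte_mkold_clean(struct mm_struct *mm,
					       unsigned long addr,
					       pte_t *ptep, unsigned int nr)
	{
		pte_t pte;

		for (; nr-- > 0; ptep++, addr += PAGE_SIZE) {
			/* read-and-clear unfolds a contpte block on arm64 */
			pte = ptep_get_and_clear(mm, addr, ptep);
			pte = pte_mkclean(pte_mkold(pte));
			/* writing the entry back may refold the block */
			set_pte_at(mm, addr, ptep, pte);
		}
	}

Doing this once per pte across a contpte range repeatedly unfolds and refolds
the mapping, which is what the arm64 override below avoids.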

Suggested-by: David Hildenbrand <david@...hat.com>
Suggested-by: Barry Song <21cnbao@...il.com>
Suggested-by: Ryan Roberts <ryan.roberts@....com>
Signed-off-by: Lance Yang <ioworker0@...il.com>
---
 arch/arm64/include/asm/pgtable.h | 55 ++++++++++++++++++++++++++++++++
 arch/arm64/mm/contpte.c          | 15 +++++++++
 2 files changed, 70 insertions(+)

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 9fd8613b2db2..395754638a9a 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -1223,6 +1223,34 @@ static inline void __wrprotect_ptes(struct mm_struct *mm, unsigned long address,
 		__ptep_set_wrprotect(mm, address, ptep);
 }
 
+static inline void ___ptep_mkold_clean(struct mm_struct *mm, unsigned long addr,
+				       pte_t *ptep, pte_t pte)
+{
+	pte_t old_pte;
+
+	do {
+		old_pte = pte;
+		pte = pte_mkclean(pte_mkold(pte));
+		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
+					       pte_val(old_pte), pte_val(pte));
+	} while (pte_val(pte) != pte_val(old_pte));
+}
+
+static inline void __ptep_mkold_clean(struct mm_struct *mm, unsigned long addr,
+				      pte_t *ptep)
+{
+	___ptep_mkold_clean(mm, addr, ptep, __ptep_get(ptep));
+}
+
+static inline void __mkold_clean_ptes(struct mm_struct *mm, unsigned long addr,
+				      pte_t *ptep, unsigned int nr)
+{
+	unsigned int i;
+
+	for (i = 0; i < nr; i++, addr += PAGE_SIZE, ptep++)
+		__ptep_mkold_clean(mm, addr, ptep);
+}
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define __HAVE_ARCH_PMDP_SET_WRPROTECT
 static inline void pmdp_set_wrprotect(struct mm_struct *mm,
@@ -1379,6 +1407,8 @@ extern void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
 extern int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
 				unsigned long addr, pte_t *ptep,
 				pte_t entry, int dirty);
+extern void contpte_mkold_clean_ptes(struct mm_struct *mm, unsigned long addr,
+				pte_t *ptep, unsigned int nr);
 
 static __always_inline void contpte_try_fold(struct mm_struct *mm,
 				unsigned long addr, pte_t *ptep, pte_t pte)
@@ -1603,6 +1633,30 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
 	return contpte_ptep_set_access_flags(vma, addr, ptep, entry, dirty);
 }
 
+#define mkold_clean_ptes mkold_clean_ptes
+static inline void mkold_clean_ptes(struct mm_struct *mm, unsigned long addr,
+				    pte_t *ptep, unsigned int nr)
+{
+	if (likely(nr == 1)) {
+		/*
+		 * Optimization: mkold_clean_ptes() can only be called for present
+		 * ptes so we only need to check contig bit as condition for unfold,
+		 * and we can remove the contig bit from the pte we read to avoid
+		 * re-reading. This speeds up madvise(MADV_FREE) which is sensitive
+		 * for order-0 folios. Equivalent to contpte_try_unfold().
+		 */
+		pte_t orig_pte = __ptep_get(ptep);
+
+		if (unlikely(pte_cont(orig_pte))) {
+			__contpte_try_unfold(mm, addr, ptep, orig_pte);
+			orig_pte = pte_mknoncont(orig_pte);
+		}
+		___ptep_mkold_clean(mm, addr, ptep, orig_pte);
+	} else {
+		contpte_mkold_clean_ptes(mm, addr, ptep, nr);
+	}
+}
+
 #else /* CONFIG_ARM64_CONTPTE */
 
 #define ptep_get				__ptep_get
@@ -1622,6 +1676,7 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
 #define wrprotect_ptes				__wrprotect_ptes
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 #define ptep_set_access_flags			__ptep_set_access_flags
+#define mkold_clean_ptes			__mkold_clean_ptes
 
 #endif /* CONFIG_ARM64_CONTPTE */
 
diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c
index 1b64b4c3f8bf..dbff9c5e9eff 100644
--- a/arch/arm64/mm/contpte.c
+++ b/arch/arm64/mm/contpte.c
@@ -361,6 +361,21 @@ void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
 }
 EXPORT_SYMBOL_GPL(contpte_wrprotect_ptes);
 
+void contpte_mkold_clean_ptes(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep, unsigned int nr)
+{
+	/*
+	 * If clearing the young and dirty bits for an entire contig range, we can
+	 * avoid unfolding. Just set old/clean and wait for the later mmu_gather
+	 * flush to invalidate the tlb. If it's a partial range though, we need to
+	 * unfold.
+	 */
+
+	contpte_try_unfold_partial(mm, addr, ptep, nr);
+	__mkold_clean_ptes(mm, addr, ptep, nr);
+}
+EXPORT_SYMBOL_GPL(contpte_mkold_clean_ptes);
+
 int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
 					unsigned long addr, pte_t *ptep,
 					pte_t entry, int dirty)
-- 
2.33.1

