Message-Id: <20230810090008.26122-1-yan.y.zhao@intel.com>
Date:   Thu, 10 Aug 2023 17:00:08 +0800
From:   Yan Zhao <yan.y.zhao@...el.com>
To:     linux-mm@...ck.org, linux-kernel@...r.kernel.org,
        kvm@...r.kernel.org
Cc:     pbonzini@...hat.com, seanjc@...gle.com, mike.kravetz@...cle.com,
        apopple@...dia.com, jgg@...dia.com, rppt@...nel.org,
        akpm@...ux-foundation.org, kevin.tian@...el.com, david@...hat.com,
        Yan Zhao <yan.y.zhao@...el.com>
Subject: [RFC PATCH v2 3/5] mm/mmu_notifier: introduce a new callback .numa_protect

The new .numa_protect callback is invoked when PROT_NONE has definitely been
set on a PTE or a huge PMD for NUMA migration purposes.
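
A minimal sketch of a call site (hypothetical; this patch only introduces
the callback, the actual callers are wired up separately):

	/*
	 * Invoked only after PROT_NONE has definitely been set on the
	 * PTEs/huge PMD covering [start, end) for NUMA migration.
	 */
	mmu_notifier_numa_protect(vma->vm_mm, start, end);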

With this callback, a subscriber of the mmu notifier (e.g. KVM) can unmap
only the NUMA-migration-protected pages in its handler, rather than
unmapping a wider range that also contains pages which are obviously not
NUMA-migratable.
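
For illustration, a subscriber could implement the hook roughly as below
(a hypothetical sketch; the names are illustrative and not taken from KVM):

	static void example_numa_protect(struct mmu_notifier *subscription,
					 struct mm_struct *mm,
					 unsigned long start,
					 unsigned long end)
	{
		/*
		 * Unmap only [start, end), i.e. exactly the pages that
		 * were just protected for NUMA migration, instead of a
		 * wider invalidation range.
		 */
	}

	static const struct mmu_notifier_ops example_mmu_notifier_ops = {
		.numa_protect = example_numa_protect,
	};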

Signed-off-by: Yan Zhao <yan.y.zhao@...el.com>
---
 include/linux/mmu_notifier.h | 15 +++++++++++++++
 mm/mmu_notifier.c            | 18 ++++++++++++++++++
 2 files changed, 33 insertions(+)

diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index a6dc829a4bce..a173db83b071 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -132,6 +132,10 @@ struct mmu_notifier_ops {
 			   unsigned long address,
 			   pte_t pte);
 
+	void (*numa_protect)(struct mmu_notifier *subscription,
+			     struct mm_struct *mm,
+			     unsigned long start,
+			     unsigned long end);
 	/*
 	 * invalidate_range_start() and invalidate_range_end() must be
 	 * paired and are called only when the mmap_lock and/or the
@@ -395,6 +399,9 @@ extern int __mmu_notifier_test_young(struct mm_struct *mm,
 				     unsigned long address);
 extern void __mmu_notifier_change_pte(struct mm_struct *mm,
 				      unsigned long address, pte_t pte);
+extern void __mmu_notifier_numa_protect(struct mm_struct *mm,
+					unsigned long start,
+					unsigned long end);
 extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
 extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
 				  bool only_end);
@@ -448,6 +455,14 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
 		__mmu_notifier_change_pte(mm, address, pte);
 }
 
+static inline void mmu_notifier_numa_protect(struct mm_struct *mm,
+					     unsigned long start,
+					     unsigned long end)
+{
+	if (mm_has_notifiers(mm))
+		__mmu_notifier_numa_protect(mm, start, end);
+}
+
 static inline void
 mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
 {
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 50c0dde1354f..fc96fbd46e1d 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -382,6 +382,24 @@ int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
 	return young;
 }
 
+void __mmu_notifier_numa_protect(struct mm_struct *mm,
+				 unsigned long start,
+				 unsigned long end)
+{
+	struct mmu_notifier *subscription;
+	int id;
+
+	id = srcu_read_lock(&srcu);
+	hlist_for_each_entry_rcu(subscription,
+				 &mm->notifier_subscriptions->list, hlist,
+				 srcu_read_lock_held(&srcu)) {
+		if (subscription->ops->numa_protect)
+			subscription->ops->numa_protect(subscription, mm, start,
+							end);
+	}
+	srcu_read_unlock(&srcu, id);
+}
+
 int __mmu_notifier_clear_young(struct mm_struct *mm,
 			       unsigned long start,
 			       unsigned long end)
-- 
2.17.1
