Message-ID: <20250820010415.699353-14-anthony.yznaga@oracle.com>
Date: Tue, 19 Aug 2025 18:04:06 -0700
From: Anthony Yznaga <anthony.yznaga@...cle.com>
To: linux-mm@...ck.org
Cc: akpm@...ux-foundation.org, andreyknvl@...il.com, arnd@...db.de,
        bp@...en8.de, brauner@...nel.org, bsegall@...gle.com, corbet@....net,
        dave.hansen@...ux.intel.com, david@...hat.com,
        dietmar.eggemann@....com, ebiederm@...ssion.com, hpa@...or.com,
        jakub.wartak@...lbox.org, jannh@...gle.com, juri.lelli@...hat.com,
        khalid@...nel.org, liam.howlett@...cle.com, linyongting@...edance.com,
        lorenzo.stoakes@...cle.com, luto@...nel.org, markhemm@...glemail.com,
        maz@...nel.org, mhiramat@...nel.org, mgorman@...e.de, mhocko@...e.com,
        mingo@...hat.com, muchun.song@...ux.dev, neilb@...e.de,
        osalvador@...e.de, pcc@...gle.com, peterz@...radead.org,
        pfalcato@...e.de, rostedt@...dmis.org, rppt@...nel.org,
        shakeel.butt@...ux.dev, surenb@...gle.com, tglx@...utronix.de,
        vasily.averin@...ux.dev, vbabka@...e.cz, vincent.guittot@...aro.org,
        viro@...iv.linux.org.uk, vschneid@...hat.com, willy@...radead.org,
        x86@...nel.org, xhao@...ux.alibaba.com, linux-doc@...r.kernel.org,
        linux-kernel@...r.kernel.org, linux-arch@...r.kernel.org
Subject: [PATCH v3 13/22] mm/mshare: prepare for page table sharing support

From: Khalid Aziz <khalid@...nel.org>

In preparation for enabling the handling of page faults in an mshare
region, provide a way to link an mshare shared page table into a
process page table or, if it is already linked, to find the actual vma
in the host mm in order to handle the fault. Also implement an
unmap_page_range vm_ops function for msharefs VMAs to unlink shared
page tables when a process exits or an mshare region is explicitly
unmapped.

Signed-off-by: Khalid Aziz <khalid@...nel.org>
Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
Signed-off-by: Anthony Yznaga <anthony.yznaga@...cle.com>
---
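Notes:

A rough sketch of how find_shared_vma() is expected to be used once
fault handling is wired up by a later patch. This is illustrative
only: the WARN_ON() added to handle_mm_fault() below remains a
placeholder until then, and the eventual call site may differ:

	/* in handle_mm_fault(), for a fault against an mshare VMA */
	if (unlikely(vma_is_mshare(vma))) {
		/* Redirect the fault to the host mm's vma and address. */
		ret = find_shared_vma(&vma, &address);
		if (ret)
			goto out;	/* VM_FAULT_NOPAGE or an error */
		if (!vma) {
			ret = VM_FAULT_SIGSEGV;
			goto out;
		}
		/*
		 * The host mm's mmap lock is now held for read and must
		 * be dropped once the fault has been handled against
		 * the host vma.
		 */
	}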
 include/linux/mm.h |   6 +++
 mm/memory.c        |   6 +++
 mm/mshare.c        | 111 +++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 123 insertions(+)
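
A worked example of the guest-to-host address translation performed by
find_shared_vma(), with made-up numbers and assuming x86-64, where
P4D_SIZE (and hence mshare_align and mshare_base) is 512 GB; earlier
patches in this series arrange for the host mm to map the region at
host_mm->mmap_base:

	guest->vm_start    = 0x7f8000000000	(mshare_align aligned)
	host_mm->mmap_base = 0x008000000000	(mshare_base)
	faulting *addrp    = 0x7f8000001000

	host_addr = *addrp - guest->vm_start + host_mm->mmap_base
	          = 0x7f8000001000 - 0x7f8000000000 + 0x008000000000
	          = 0x008000001000

Because both mappings are mshare_align (P4D_SIZE) aligned, sharing
happens one p4d entry at a time: find_shared_vma() copies a host p4d
entry (pointing at a PUD-level page table) into the guest page table,
bumping the ptdesc share count that mshare_vm_op_unshare_page_range()
later drops when unlinking.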

diff --git a/include/linux/mm.h b/include/linux/mm.h
index c8dfa5c6e7d4..3a8dddb5925a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1009,11 +1009,17 @@ static inline bool vma_is_anon_shmem(struct vm_area_struct *vma) { return false;
 int vma_is_stack_for_current(struct vm_area_struct *vma);
 
 #ifdef CONFIG_MSHARE
+vm_fault_t find_shared_vma(struct vm_area_struct **vma, unsigned long *addrp);
 static inline bool vma_is_mshare(const struct vm_area_struct *vma)
 {
 	return vma->vm_flags & VM_MSHARE;
 }
 #else
+static inline vm_fault_t find_shared_vma(struct vm_area_struct **vma, unsigned long *addrp)
+{
+	WARN_ON_ONCE(1);
+	return VM_FAULT_SIGBUS;
+}
 static inline bool vma_is_mshare(const struct vm_area_struct *vma)
 {
 	return false;
diff --git a/mm/memory.c b/mm/memory.c
index 4e3bb49b95e2..177eb53475cb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -6475,6 +6475,12 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 	if (ret)
 		goto out;
 
+	if (unlikely(vma_is_mshare(vma))) {
+		WARN_ON_ONCE(1);
+		ret = VM_FAULT_SIGBUS;
+		goto out;
+	}
+
 	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
 					    flags & FAULT_FLAG_INSTRUCTION,
 					    flags & FAULT_FLAG_REMOTE)) {
diff --git a/mm/mshare.c b/mm/mshare.c
index be7cae739225..f7b7904f0405 100644
--- a/mm/mshare.c
+++ b/mm/mshare.c
@@ -21,6 +21,8 @@
 #include <linux/falloc.h>
 #include <asm/tlbflush.h>
 
+#include <asm/tlb.h>
+
 const unsigned long mshare_align = P4D_SIZE;
 const unsigned long mshare_base = mshare_align;
 
@@ -50,6 +52,70 @@ static const struct mmu_notifier_ops mshare_mmu_ops = {
 	.arch_invalidate_secondary_tlbs = mshare_invalidate_tlbs,
 };
 
+static p4d_t *walk_to_p4d(struct mm_struct *mm, unsigned long addr)
+{
+	pgd_t *pgd;
+	p4d_t *p4d;
+
+	pgd = pgd_offset(mm, addr);
+	p4d = p4d_alloc(mm, pgd, addr);
+	if (!p4d)
+		return NULL;
+
+	return p4d;
+}
+
+/* If a vma is returned, the host mm's mmap lock is held for read; the caller must release it. */
+vm_fault_t
+find_shared_vma(struct vm_area_struct **vmap, unsigned long *addrp)
+{
+	struct vm_area_struct *vma, *guest = *vmap;
+	struct mshare_data *m_data = guest->vm_private_data;
+	struct mm_struct *host_mm = m_data->mm;
+	unsigned long host_addr;
+	p4d_t *p4d, *guest_p4d;
+
+	mmap_read_lock_nested(host_mm, SINGLE_DEPTH_NESTING);
+	host_addr = *addrp - guest->vm_start + host_mm->mmap_base;
+	p4d = walk_to_p4d(host_mm, host_addr);
+	guest_p4d = walk_to_p4d(guest->vm_mm, *addrp);
+	if (!p4d || !guest_p4d) {
+		mmap_read_unlock(host_mm);
+		return VM_FAULT_OOM;
+	}
+	if (!p4d_same(*guest_p4d, *p4d)) {
+		spinlock_t *guest_ptl = &guest->vm_mm->page_table_lock;
+
+		spin_lock(guest_ptl);
+		if (!p4d_same(*guest_p4d, *p4d)) {
+			pud_t *pud = p4d_pgtable(*p4d);
+
+			ptdesc_pud_pts_inc(virt_to_ptdesc(pud));
+			set_p4d(guest_p4d, *p4d);
+			spin_unlock(guest_ptl);
+			mmap_read_unlock(host_mm);
+			return VM_FAULT_NOPAGE;
+		}
+		spin_unlock(guest_ptl);
+	}
+
+	*addrp = host_addr;
+	vma = find_vma(host_mm, host_addr);
+
+	/* XXX: expand stack? */
+	if (vma && vma->vm_start > host_addr)
+		vma = NULL;
+
+	*vmap = vma;
+
+	/*
+	 * Release the host mm lock unless a matching vma was found.
+	 */
+	if (!vma)
+		mmap_read_unlock(host_mm);
+	return 0;
+}
+
 static int mshare_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
 {
 	return -EINVAL;
@@ -61,9 +127,54 @@ static int mshare_vm_op_mprotect(struct vm_area_struct *vma, unsigned long start
 	return -EINVAL;
 }
 
+/*
+ * Unlink any shared page tables in the range and ensure TLBs are flushed.
+ * Pages in the mshare region itself are not unmapped.
+ */
+static void mshare_vm_op_unshare_page_range(struct mmu_gather *tlb,
+				struct vm_area_struct *vma,
+				unsigned long addr, unsigned long end,
+				struct zap_details *details)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	spinlock_t *ptl = &mm->page_table_lock;
+	unsigned long sz = mshare_align;
+	pgd_t *pgd;
+	p4d_t *p4d;
+	pud_t *pud;
+
+	WARN_ON(!vma_is_mshare(vma));
+
+	tlb_start_vma(tlb, vma);
+
+	for (; addr < end; addr += sz) {
+		spin_lock(ptl);
+
+		pgd = pgd_offset(mm, addr);
+		if (!pgd_present(*pgd)) {
+			spin_unlock(ptl);
+			continue;
+		}
+		p4d = p4d_offset(pgd, addr);
+		if (!p4d_present(*p4d)) {
+			spin_unlock(ptl);
+			continue;
+		}
+		pud = p4d_pgtable(*p4d);
+		ptdesc_pud_pts_dec(virt_to_ptdesc(pud));
+
+		p4d_clear(p4d);
+		spin_unlock(ptl);
+		tlb_flush_p4d_range(tlb, addr, sz);
+	}
+
+	tlb_end_vma(tlb, vma);
+}
+
 static const struct vm_operations_struct msharefs_vm_ops = {
 	.may_split = mshare_vm_op_split,
 	.mprotect = mshare_vm_op_mprotect,
+	.unmap_page_range = mshare_vm_op_unshare_page_range,
 };
 
 /*
-- 
2.47.1

