[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250820010415.699353-15-anthony.yznaga@oracle.com>
Date: Tue, 19 Aug 2025 18:04:07 -0700
From: Anthony Yznaga <anthony.yznaga@...cle.com>
To: linux-mm@...ck.org
Cc: akpm@...ux-foundation.org, andreyknvl@...il.com, arnd@...db.de,
bp@...en8.de, brauner@...nel.org, bsegall@...gle.com, corbet@....net,
dave.hansen@...ux.intel.com, david@...hat.com,
dietmar.eggemann@....com, ebiederm@...ssion.com, hpa@...or.com,
jakub.wartak@...lbox.org, jannh@...gle.com, juri.lelli@...hat.com,
khalid@...nel.org, liam.howlett@...cle.com, linyongting@...edance.com,
lorenzo.stoakes@...cle.com, luto@...nel.org, markhemm@...glemail.com,
maz@...nel.org, mhiramat@...nel.org, mgorman@...e.de, mhocko@...e.com,
mingo@...hat.com, muchun.song@...ux.dev, neilb@...e.de,
osalvador@...e.de, pcc@...gle.com, peterz@...radead.org,
pfalcato@...e.de, rostedt@...dmis.org, rppt@...nel.org,
shakeel.butt@...ux.dev, surenb@...gle.com, tglx@...utronix.de,
vasily.averin@...ux.dev, vbabka@...e.cz, vincent.guittot@...aro.org,
viro@...iv.linux.org.uk, vschneid@...hat.com, willy@...radead.org,
x86@...nel.org, xhao@...ux.alibaba.com, linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-arch@...r.kernel.org
Subject: [PATCH v3 14/22] x86/mm: enable page table sharing
Enable x86 support for handling page faults in an mshare region by
redirecting page faults to operate on the mshare mm_struct and vmas
contained in it.
Some permissions checks are done using vma flags in architecture-specific
fault handling code so the actual vma needed to complete the handling
is acquired before calling handle_mm_fault(). Because of this an
ARCH_SUPPORTS_MSHARE config option is added.
Signed-off-by: Anthony Yznaga <anthony.yznaga@...cle.com>
---
arch/Kconfig | 3 +++
arch/x86/Kconfig | 1 +
arch/x86/mm/fault.c | 40 +++++++++++++++++++++++++++++++++++++++-
mm/Kconfig | 2 +-
4 files changed, 44 insertions(+), 2 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index d1b4ffd6e085..2e10a11fc442 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -1676,6 +1676,9 @@ config HAVE_ARCH_PFN_VALID
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
bool
+config ARCH_SUPPORTS_MSHARE
+ bool
+
config ARCH_SUPPORTS_PAGE_TABLE_CHECK
bool
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 58d890fe2100..1ad252eec417 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -124,6 +124,7 @@ config X86
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEBUG_PAGEALLOC
select ARCH_SUPPORTS_HUGETLBFS
+ select ARCH_SUPPORTS_MSHARE if X86_64
select ARCH_SUPPORTS_PAGE_TABLE_CHECK if X86_64
select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
select ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP if NR_CPUS <= 4096
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 998bd807fc7b..2a7df3aa13b4 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1215,6 +1215,8 @@ void do_user_addr_fault(struct pt_regs *regs,
struct mm_struct *mm;
vm_fault_t fault;
unsigned int flags = FAULT_FLAG_DEFAULT;
+ bool is_shared_vma;
+ unsigned long addr;
tsk = current;
mm = tsk->mm;
@@ -1328,6 +1330,12 @@ void do_user_addr_fault(struct pt_regs *regs,
if (!vma)
goto lock_mmap;
+ /* mshare does not support per-VMA locks yet */
+ if (vma_is_mshare(vma)) {
+ vma_end_read(vma);
+ goto lock_mmap;
+ }
+
if (unlikely(access_error(error_code, vma))) {
bad_area_access_error(regs, error_code, address, NULL, vma);
count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
@@ -1356,17 +1364,38 @@ void do_user_addr_fault(struct pt_regs *regs,
lock_mmap:
retry:
+ addr = address;
+ is_shared_vma = false;
vma = lock_mm_and_find_vma(mm, address, regs);
if (unlikely(!vma)) {
bad_area_nosemaphore(regs, error_code, address);
return;
}
+ if (unlikely(vma_is_mshare(vma))) {
+ fault = find_shared_vma(&vma, &addr);
+
+ if (fault) {
+ mmap_read_unlock(mm);
+ goto done;
+ }
+
+ if (!vma) {
+ mmap_read_unlock(mm);
+ bad_area_nosemaphore(regs, error_code, address);
+ return;
+ }
+
+ is_shared_vma = true;
+ }
+
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
if (unlikely(access_error(error_code, vma))) {
+ if (unlikely(is_shared_vma))
+ mmap_read_unlock(vma->vm_mm);
bad_area_access_error(regs, error_code, address, mm, vma);
return;
}
@@ -1384,7 +1413,14 @@ void do_user_addr_fault(struct pt_regs *regs,
* userland). The return to userland is identified whenever
* FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
*/
- fault = handle_mm_fault(vma, address, flags, regs);
+ fault = handle_mm_fault(vma, addr, flags, regs);
+
+ /*
+ * If the lock on the shared mm has been released, release the lock
+ * on the task's mm now.
+ */
+ if (unlikely(is_shared_vma) && (fault & (VM_FAULT_COMPLETED | VM_FAULT_RETRY)))
+ mmap_read_unlock(mm);
if (fault_signal_pending(fault, regs)) {
/*
@@ -1412,6 +1448,8 @@ void do_user_addr_fault(struct pt_regs *regs,
goto retry;
}
+ if (unlikely(is_shared_vma))
+ mmap_read_unlock(vma->vm_mm);
mmap_read_unlock(mm);
done:
if (likely(!(fault & VM_FAULT_ERROR)))
diff --git a/mm/Kconfig b/mm/Kconfig
index 8b50e9785729..824da2a481f9 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -1402,7 +1402,7 @@ config FIND_NORMAL_PAGE
config MSHARE
bool "Mshare"
- depends on MMU
+ depends on MMU && ARCH_SUPPORTS_MSHARE
help
Enable msharefs: A pseudo filesystem that allows multiple processes
to share kernel resources for mapping shared pages. A file created on
--
2.47.1
Powered by blists - more mailing lists