Message-Id: <20250812134655.68614-1-linyongting@bytedance.com>
Date: Tue, 12 Aug 2025 21:46:55 +0800
From: Yongting Lin <linyongting@...edance.com>
To: anthony.yznaga@...cle.com
Cc: akpm@...ux-foundation.org,
	andreyknvl@...il.com,
	arnd@...db.de,
	brauner@...nel.org,
	catalin.marinas@....com,
	dave.hansen@...el.com,
	david@...hat.com,
	ebiederm@...ssion.com,
	khalid@...nel.org,
	linux-arch@...r.kernel.org,
	linux-kernel@...r.kernel.org,
	linux-mm@...ck.org,
	luto@...nel.org,
	markhemm@...glemail.com,
	maz@...nel.org,
	mhiramat@...nel.org,
	neilb@...e.de,
	pcc@...gle.com,
	rostedt@...dmis.org,
	vasily.averin@...ux.dev,
	viro@...iv.linux.org.uk,
	willy@...radead.org,
	xhao@...ux.alibaba.com
Subject: Re: [PATCH v2 13/20] x86/mm: enable page table sharing

Hi,

On 4/4/25 10:18 AM, Anthony Yznaga wrote:
> Enable x86 support for handling page faults in an mshare region by
> redirecting page faults to operate on the mshare mm_struct and vmas
> contained in it.
> Some permissions checks are done using vma flags in architecture-specific
> fault handling code so the actual vma needed to complete the handling
> is acquired before calling handle_mm_fault(). Because of this an
> ARCH_SUPPORTS_MSHARE config option is added.
>
> Signed-off-by: Anthony Yznaga <anthony.yznaga@...cle.com>
> ---
>  arch/Kconfig        |  3 +++
>  arch/x86/Kconfig    |  1 +
>  arch/x86/mm/fault.c | 37 ++++++++++++++++++++++++++++++++++++-
>  mm/Kconfig          |  2 +-
>  4 files changed, 41 insertions(+), 2 deletions(-)
>
> diff --git a/arch/Kconfig b/arch/Kconfig
> index 9f6eb09ef12d..2e000fefe9b3 100644
> --- a/arch/Kconfig
> +++ b/arch/Kconfig
> @@ -1652,6 +1652,9 @@ config HAVE_ARCH_PFN_VALID
>  config ARCH_SUPPORTS_DEBUG_PAGEALLOC
>  	bool
>  
> +config ARCH_SUPPORTS_MSHARE
> +	bool
> +
>  config ARCH_SUPPORTS_PAGE_TABLE_CHECK
>  	bool
>  
> diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
> index 1502fd0c3c06..1f1779decb44 100644
> --- a/arch/x86/Kconfig
> +++ b/arch/x86/Kconfig
> @@ -125,6 +125,7 @@ config X86
>  	select ARCH_SUPPORTS_ACPI
>  	select ARCH_SUPPORTS_ATOMIC_RMW
>  	select ARCH_SUPPORTS_DEBUG_PAGEALLOC
> +	select ARCH_SUPPORTS_MSHARE		if X86_64
>  	select ARCH_SUPPORTS_PAGE_TABLE_CHECK	if X86_64
>  	select ARCH_SUPPORTS_NUMA_BALANCING	if X86_64
>  	select ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP	if NR_CPUS <= 4096
> diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
> index 296d294142c8..49659d2f9316 100644
> --- a/arch/x86/mm/fault.c
> +++ b/arch/x86/mm/fault.c
> @@ -1216,6 +1216,8 @@ void do_user_addr_fault(struct pt_regs *regs,
>  	struct mm_struct *mm;
>  	vm_fault_t fault;
>  	unsigned int flags = FAULT_FLAG_DEFAULT;
> +	bool is_shared_vma;
> +	unsigned long addr;
>  
>  	tsk = current;
>  	mm = tsk->mm;
> @@ -1329,6 +1331,12 @@ void do_user_addr_fault(struct pt_regs *regs,
>  	if (!vma)
>  		goto lock_mmap;
>  
> +	/* mshare does not support per-VMA locks yet */
> +	if (vma_is_mshare(vma)) {
> +		vma_end_read(vma);
> +		goto lock_mmap;
> +	}
> +
>  	if (unlikely(access_error(error_code, vma))) {
>  		bad_area_access_error(regs, error_code, address, NULL, vma);
>  		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
> @@ -1357,17 +1365,38 @@ void do_user_addr_fault(struct pt_regs *regs,
>  lock_mmap:
>  
>  retry:
> +	addr = address;
> +	is_shared_vma = false;
>  	vma = lock_mm_and_find_vma(mm, address, regs);
>  	if (unlikely(!vma)) {
>  		bad_area_nosemaphore(regs, error_code, address);
>  		return;
>  	}
>  
> +	if (unlikely(vma_is_mshare(vma))) {
> +		fault = find_shared_vma(&vma, &addr);
> +
> +		if (fault) {
> +			mmap_read_unlock(mm);
> +			goto done;
> +		}
> +
> +		if (!vma) {
> +			mmap_read_unlock(mm);
> +			bad_area_nosemaphore(regs, error_code, address);
> +			return;
> +		}
> +
> +		is_shared_vma = true;
> +	}
> +
>  	/*
>  	 * Ok, we have a good vm_area for this memory access, so
>  	 * we can handle it..
>  	 */
>  	if (unlikely(access_error(error_code, vma))) {
> +		if (unlikely(is_shared_vma))
> +			mmap_read_unlock(vma->vm_mm);
>  		bad_area_access_error(regs, error_code, address, mm, vma);
>  		return;
>  	}
> @@ -1385,7 +1414,11 @@ void do_user_addr_fault(struct pt_regs *regs,
>  	 * userland). The return to userland is identified whenever
>  	 * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
>  	 */
> -	fault = handle_mm_fault(vma, address, flags, regs);
> +	fault = handle_mm_fault(vma, addr, flags, regs);
> +
> +	if (unlikely(is_shared_vma) && ((fault & VM_FAULT_COMPLETED) ||
> +	    (fault & VM_FAULT_RETRY) || fault_signal_pending(fault, regs)))
> +		mmap_read_unlock(mm);

I was backporting the mshare patches to a 5.15 kernel and running some
basic tests, and I found a potential issue.

Reaching this point means find_shared_vma() has executed successfully
and host_mm->mmap_lock has been taken.

When the returned fault value has VM_FAULT_COMPLETED or VM_FAULT_RETRY set,
or fault_signal_pending(fault, regs) is true, there is no chance in the code
that follows to release the locks on both mm and host_mm (i.e. vma->vm_mm).

As a result, vma->vm_mm->mmap_lock needs to be released here as well.

So the hunk should look like the following:

-	fault = handle_mm_fault(vma, address, flags, regs);
+	fault = handle_mm_fault(vma, addr, flags, regs);
+
+	if (unlikely(is_shared_vma) && ((fault & VM_FAULT_COMPLETED) ||
+	    (fault & VM_FAULT_RETRY) || fault_signal_pending(fault, regs))) {
+		mmap_read_unlock(vma->vm_mm);
+		mmap_read_unlock(mm);
+	}
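
For readability, here is a comment-only sketch of the locking sequence as
described above (a paraphrase of the quoted hunks, not actual kernel code;
that find_shared_vma() leaves the host mm's mmap_lock held on success is
taken from the statement earlier in this mail):

	/*
	 * lock_mm_and_find_vma(mm, address, regs)  // takes mm->mmap_lock
	 * if (vma_is_mshare(vma))
	 *         find_shared_vma(&vma, &addr)     // on success, host mm
	 *                                          // (vma->vm_mm) lock held too
	 * fault = handle_mm_fault(vma, addr, flags, regs)
	 *
	 * On VM_FAULT_COMPLETED, VM_FAULT_RETRY, or a pending fatal signal
	 * the original hunk drops only mm->mmap_lock; the amended hunk above
	 * drops vma->vm_mm's lock there as well.  The normal completion path
	 * further down drops both locks.
	 */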

>  
>  	if (fault_signal_pending(fault, regs)) {
>  		/*
> @@ -1413,6 +1446,8 @@ void do_user_addr_fault(struct pt_regs *regs,
>  		goto retry;
>  	}
>  
> +	if (unlikely(is_shared_vma))
> +		mmap_read_unlock(vma->vm_mm);
>  	mmap_read_unlock(mm);
>  done:
>  	if (likely(!(fault & VM_FAULT_ERROR)))
> diff --git a/mm/Kconfig b/mm/Kconfig
> index e6c90db83d01..8a5a159457f2 100644
> --- a/mm/Kconfig
> +++ b/mm/Kconfig
> @@ -1344,7 +1344,7 @@ config PT_RECLAIM
>  
>  config MSHARE
>  	bool "Mshare"
> -	depends on MMU
> +	depends on MMU && ARCH_SUPPORTS_MSHARE
>  	help
>  	  Enable msharefs: A ram-based filesystem that allows multiple
>  	  processes to share page table entries for shared pages. A file

Yongting Lin.
