Message-ID: <CAAhV-H5ny_e9G5qRUMYW-d5LqjXv8rvsGMz9BQPt6+npyPmCOA@mail.gmail.com>
Date: Sun, 28 Sep 2025 21:58:10 +0800
From: Huacai Chen <chenhuacai@...nel.org>
To: Wentao Guan <guanwentao@...ontech.com>
Cc: kernel@...0n.name, linux-kernel@...r.kernel.org, loongarch@...ts.linux.dev,
zhanjun@...ontech.com, niecheng1@...ontech.com
Subject: Re: [PATCH] LoongArch: mm: try VMA lock-based page fault handling first
Hi, Wentao,
Thanks for your patch, but it is full of bugs...
On Thu, Sep 25, 2025 at 5:33 PM Wentao Guan <guanwentao@...ontech.com> wrote:
>
> Attempt VMA lock-based page fault handling first, and fall back to the
> existing mmap_lock-based handling if that fails.
>
> The "ebizzy -mTRp" on 3A6000 shows that PER_VMA_LOCK can
> improve the benchmark by about 20.7%.
>
> This is the loongarch variant of "x86/mm: try VMA lock-based page fault
> handling first".
>
> Signed-off-by: Wentao Guan <guanwentao@...ontech.com>
> ---
> ebizzy-0.3 (can be downloaded via phoronix-test-suite):
> before patch:
> 97800 records/s
> real 10.00 s user 0.25 s sys 13.54 s
> 97835 records/s
> real 10.00 s user 0.27 s sys 13.51 s
> 97929 records/s
> real 10.00 s user 0.26 s sys 13.53 s
> 97736 records/s
> real 10.00 s user 0.31 s sys 13.48 s
> 97914 records/s
> real 10.00 s user 0.30 s sys 13.50 s
> 97916 records/s
> real 10.00 s user 0.31 s sys 13.48 s
> 97857 records/s
> real 10.00 s user 0.28 s sys 13.51 s
> 97927 records/s
> real 10.00 s user 0.24 s sys 13.55 s
> 97962 records/s
> real 10.00 s user 0.41 s sys 13.39 s
> 97501 records/s
> real 10.00 s user 0.20 s sys 13.53 s
> after patch:
> 117645 records/s
> real 10.00 s user 0.31 s sys 23.17 s
> 118207 records/s
> real 10.00 s user 0.37 s sys 23.18 s
> 118426 records/s
> real 10.00 s user 0.32 s sys 23.14 s
> 118172 records/s
> real 10.00 s user 0.44 s sys 23.07 s
> 118548 records/s
> real 10.00 s user 0.45 s sys 23.04 s
> 118011 records/s
> real 10.00 s user 0.32 s sys 23.15 s
> 118143 records/s
> real 10.00 s user 0.41 s sys 23.09 s
> 118181 records/s
> real 10.00 s user 0.42 s sys 23.12 s
> 117721 records/s
> real 10.00 s user 0.34 s sys 23.17 s
> 118138 records/s
> real 10.00 s user 0.42 s sys 23.04 s
> ---
>  arch/loongarch/Kconfig    |  1 +
>  arch/loongarch/mm/fault.c | 49 +++++++++++++++++++++++++++++++++++++++
>  2 files changed, 50 insertions(+)
>
> diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
> index 0631a6b11281b..1c517954157c0 100644
> --- a/arch/loongarch/Kconfig
> +++ b/arch/loongarch/Kconfig
> @@ -69,6 +69,7 @@ config LOONGARCH
>  	select ARCH_SUPPORTS_LTO_CLANG_THIN
>  	select ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS
>  	select ARCH_SUPPORTS_NUMA_BALANCING
> +	select ARCH_SUPPORTS_PER_VMA_LOCK
>  	select ARCH_SUPPORTS_RT
>  	select ARCH_USE_BUILTIN_BSWAP
>  	select ARCH_USE_CMPXCHG_LOCKREF
> diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
> index deefd9617d008..d2dc3e194dd76 100644
> --- a/arch/loongarch/mm/fault.c
> +++ b/arch/loongarch/mm/fault.c
> @@ -215,6 +215,53 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
>  		flags |= FAULT_FLAG_USER;
>
>  	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
> +	if (!(flags & FAULT_FLAG_USER))
> +		goto lock_mmap;
> +
> +	vma = lock_vma_under_rcu(mm, address);
> +	if (!vma)
> +		goto lock_mmap;
> +
> +	if (write) {
> +		flags |= FAULT_FLAG_WRITE;
> +		if (!(vma->vm_flags & VM_WRITE)) {
> +			vma_end_read(vma);
> +			si_code = SEGV_ACCERR;
We need a count_vm_vma_lock_event(VMA_LOCK_SUCCESS) here: the fault is
still fully handled under the VMA lock, so it should be accounted as a
success.
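That is, roughly (a sketch; this mirrors how the x86 VMA-lock path
accounts an access error that is still resolved under the VMA lock):

	vma_end_read(vma);
	si_code = SEGV_ACCERR;
	count_vm_vma_lock_event(VMA_LOCK_SUCCESS);	/* handled under the VMA lock */
	goto bad_area_nosemaphore;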
> +			goto bad_area_nosemaphore;
> +		}
> +	} else {
> +		if (!(vma->vm_flags & VM_EXEC) && address == exception_era(regs)) {
> +			vma_end_read(vma);
> +			si_code = SEGV_ACCERR;
The same.
> +			goto bad_area_nosemaphore;
> +		}
> +		if (!(vma->vm_flags & (VM_READ | VM_WRITE)) && address != exception_era(regs)) {
> +			vma_end_read(vma);
> +			si_code = SEGV_ACCERR;
The same.
> +			goto bad_area_nosemaphore;
> +		}
> +	}
> +
> +	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
> +	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
> +		vma_end_read(vma);
> +
> +	if (!(fault & VM_FAULT_RETRY)) {
> +		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
> +		goto done;
> +	}
> +	count_vm_vma_lock_event(VMA_LOCK_RETRY);
> +	if (fault & VM_FAULT_MAJOR)
> +		flags |= FAULT_FLAG_TRIED;
> +
> +	/* Quick path to respond to signals */
> +	if (fault_signal_pending(fault, regs)) {
> +		if (!user_mode(regs))
> +			no_context(regs, write, address);
> +		return;
> +	}
> +lock_mmap:
The lock_mmap label lands at exactly the same place as retry, but since
the other architectures are laid out like this as well, let's keep it as
is (see the sketch below for the alternative)...
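Just to illustrate the point (a hypothetical alternative, not a
request), the extra label could be dropped by jumping to retry directly:

	/* hypothetical: reuse the existing retry label */
	if (!(flags & FAULT_FLAG_USER))
		goto retry;
	...
	if (!vma)
		goto retry;

But keeping lock_mmap makes this file line up with the other
architectures, which is worth more than saving one label.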
> +
>  retry:
>  	vma = lock_mm_and_find_vma(mm, address, regs);
>  	if (unlikely(!vma))
> @@ -292,6 +339,8 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
>  	}
>
>  	mmap_read_unlock(mm);
> +done:
We need error handling here: the VMA-lock fast path goes to done, so
the VM_FAULT_ERROR checks must be reachable from this label too,
otherwise errors from the fast path are silently ignored.
> +	return;
>  }
>
>  asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
I think the whole corrected patch should look like this:
diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
index deefd9617d00..c87e7d38090d 100644
--- a/arch/loongarch/mm/fault.c
+++ b/arch/loongarch/mm/fault.c
@@ -215,6 +215,58 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
 		flags |= FAULT_FLAG_USER;

 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
+	if (!(flags & FAULT_FLAG_USER))
+		goto lock_mmap;
+
+	vma = lock_vma_under_rcu(mm, address);
+	if (!vma)
+		goto lock_mmap;
+
+	if (write) {
+		flags |= FAULT_FLAG_WRITE;
+		if (!(vma->vm_flags & VM_WRITE)) {
+			vma_end_read(vma);
+			si_code = SEGV_ACCERR;
+			count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+			goto bad_area_nosemaphore;
+		}
+	} else {
+		if (!(vma->vm_flags & VM_EXEC) && address == exception_era(regs)) {
+			vma_end_read(vma);
+			si_code = SEGV_ACCERR;
+			count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+			goto bad_area_nosemaphore;
+		}
+		if (!(vma->vm_flags & (VM_READ | VM_WRITE)) && address != exception_era(regs)) {
+			vma_end_read(vma);
+			si_code = SEGV_ACCERR;
+			count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+			goto bad_area_nosemaphore;
+		}
+	}
+
+	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
+	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
+		vma_end_read(vma);
+
+	if (!(fault & VM_FAULT_RETRY)) {
+		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+		goto done;
+	}
+
+	count_vm_vma_lock_event(VMA_LOCK_RETRY);
+	if (fault & VM_FAULT_MAJOR)
+		flags |= FAULT_FLAG_TRIED;
+
+	/* Quick path to respond to signals */
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			no_context(regs, write, address);
+		return;
+	}
+lock_mmap:
+
 retry:
 	vma = lock_mm_and_find_vma(mm, address, regs);
 	if (unlikely(!vma))
@@ -276,8 +328,10 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
 		 */
 		goto retry;
 	}
+	mmap_read_unlock(mm);
+
+done:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
-		mmap_read_unlock(mm);
 		if (fault & VM_FAULT_OOM) {
 			do_out_of_memory(regs, write, address);
 			return;
@@ -290,8 +344,6 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
 		}
 		BUG();
 	}
-
-	mmap_read_unlock(mm);
 }

 asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
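By the way, once this lands you can verify that the VMA-lock path is
really being taken: with CONFIG_PER_VMA_LOCK_STATS enabled, the
count_vm_vma_lock_event() counters are exported through /proc/vmstat
(exact counter names may vary by tree, so double-check yours):

	grep vma_lock /proc/vmstat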
Huacai
> --
> 2.20.1
>