[<prev] [next>] [day] [month] [year] [list]
Message-Id: <20250928174341.307295-1-guanwentao@uniontech.com>
Date: Mon, 29 Sep 2025 01:43:41 +0800
From: Wentao Guan <guanwentao@...ontech.com>
To: chenhuacai@...nel.org
Cc: kernel@...0n.name,
linux-kernel@...r.kernel.org,
loongarch@...ts.linux.dev,
zhanjun@...ontech.com,
niecheng1@...ontech.com,
Wentao Guan <guanwentao@...ontech.com>
Subject: [PATCH v2] LoongArch: mm: try VMA lock-based page fault handling first
Attempt VMA lock-based page fault handling first, and fall back to the
existing mmap_lock-based handling if that fails.
The "ebizzy -mTRp" on 3A6000 shows that PER_VMA_LOCK can
improve the benchmark by about 19.0% (97837.7 to 116430.8).
This is the loongarch variant of "x86/mm: try VMA lock-based page fault
handling first".
Let us discuss the four fault cases after handle_mm_fault:
1.fault_signal_pending(fault, regs):
handle before goto done.
2.fault & VM_FAULT_COMPLETED:
fallthrough to return.
3.fault & VM_FAULT_RETRY:
handle before goto done.
4.fault & VM_FAULT_ERROR:
reuse the origin way to handle.
Signed-off-by: Wentao Guan <guanwentao@...ontech.com>
---
v2 changelog:
1. Fix a bug in the VM_FAULT_ERROR path and add the missing
   count_vm_vma_lock_event() calls.
2. Update the test results.
ebizzy-0.3(can download by phoronix-test-suite):
before patch:
97800 records/s
real 10.00 s user 0.25 s sys 13.54 s
97835 records/s
real 10.00 s user 0.27 s sys 13.51 s
97929 records/s
real 10.00 s user 0.26 s sys 13.53 s
97736 records/s
real 10.00 s user 0.31 s sys 13.48 s
97914 records/s
real 10.00 s user 0.30 s sys 13.50 s
97916 records/s
real 10.00 s user 0.31 s sys 13.48 s
97857 records/s
real 10.00 s user 0.28 s sys 13.51 s
97927 records/s
real 10.00 s user 0.24 s sys 13.55 s
97962 records/s
real 10.00 s user 0.41 s sys 13.39 s
97501 records/s
real 10.00 s user 0.20 s sys 13.53 s
after patch:
117938 records/s
real 10.00 s user 0.40 s sys 23.48 s
116762 records/s
real 10.00 s user 0.39 s sys 23.20 s
116412 records/s
real 10.00 s user 0.37 s sys 23.22 s
116047 records/s
real 10.00 s user 0.45 s sys 23.04 s
116324 records/s
real 10.00 s user 0.45 s sys 23.08 s
115854 records/s
real 10.00 s user 0.33 s sys 23.17 s
116350 records/s
real 10.00 s user 0.38 s sys 23.15 s
116040 records/s
real 10.00 s user 0.34 s sys 23.16 s
116021 records/s
real 10.00 s user 0.36 s sys 23.10 s
116560 records/s
real 10.00 s user 0.37 s sys 23.23 s
---
arch/loongarch/Kconfig | 1 +
arch/loongarch/mm/fault.c | 55 ++++++++++++++++++++++++++++++++++++---
2 files changed, 53 insertions(+), 3 deletions(-)
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 0631a6b11281b..1c517954157c0 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -69,6 +69,7 @@ config LOONGARCH
select ARCH_SUPPORTS_LTO_CLANG_THIN
select ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS
select ARCH_SUPPORTS_NUMA_BALANCING
+ select ARCH_SUPPORTS_PER_VMA_LOCK
select ARCH_SUPPORTS_RT
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
index deefd9617d008..425b20f3b6b83 100644
--- a/arch/loongarch/mm/fault.c
+++ b/arch/loongarch/mm/fault.c
@@ -215,6 +215,56 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
flags |= FAULT_FLAG_USER;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+ if (!(flags & FAULT_FLAG_USER))
+ goto lock_mmap;
+
+ vma = lock_vma_under_rcu(mm, address);
+ if (!vma)
+ goto lock_mmap;
+
+ if (write) {
+ flags |= FAULT_FLAG_WRITE;
+ if (!(vma->vm_flags & VM_WRITE)) {
+ vma_end_read(vma);
+ si_code = SEGV_ACCERR;
+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+ goto bad_area_nosemaphore;
+ }
+ } else {
+ if (!(vma->vm_flags & VM_EXEC) && address == exception_era(regs)) {
+ vma_end_read(vma);
+ si_code = SEGV_ACCERR;
+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+ goto bad_area_nosemaphore;
+ }
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE)) && address != exception_era(regs)) {
+ vma_end_read(vma);
+ si_code = SEGV_ACCERR;
+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+ goto bad_area_nosemaphore;
+ }
+ }
+
+ fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
+ if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
+ vma_end_read(vma);
+
+ if (!(fault & VM_FAULT_RETRY)) {
+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+ goto done;
+ }
+ count_vm_vma_lock_event(VMA_LOCK_RETRY);
+ if (fault & VM_FAULT_MAJOR)
+ flags |= FAULT_FLAG_TRIED;
+
+ /* Quick path to respond to signals */
+ if (fault_signal_pending(fault, regs)) {
+ if (!user_mode(regs))
+ no_context(regs, write, address);
+ return;
+ }
+lock_mmap:
+
retry:
vma = lock_mm_and_find_vma(mm, address, regs);
if (unlikely(!vma))
@@ -276,8 +326,9 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
*/
goto retry;
}
+ mmap_read_unlock(mm);
+done:
if (unlikely(fault & VM_FAULT_ERROR)) {
- mmap_read_unlock(mm);
if (fault & VM_FAULT_OOM) {
do_out_of_memory(regs, write, address);
return;
@@ -290,8 +341,6 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
}
BUG();
}
-
- mmap_read_unlock(mm);
}
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
--
2.20.1
Powered by blists - more mailing lists