Message-ID: <ee4d5664-a426-48d3-ad40-b33ac5f44a7f@os.amperecomputing.com>
Date: Wed, 5 Jun 2024 10:31:51 -0700
From: Yang Shi <yang@...amperecomputing.com>
To: Catalin Marinas <catalin.marinas@....com>
Cc: will@...nel.org, anshuman.khandual@....com, scott@...amperecomputing.com,
cl@...two.org, linux-arm-kernel@...ts.infradead.org,
linux-kernel@...r.kernel.org
Subject: Re: [v3 PATCH] arm64: mm: force write fault for atomic RMW instructions
>> @@ -557,6 +587,11 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
>>  		/* It was write fault */
>>  		vm_flags = VM_WRITE;
>>  		mm_flags |= FAULT_FLAG_WRITE;
>> +	} else if (is_el0_atomic_instr(regs)) {
>> +		/* Force write fault */
>> +		vm_flags = VM_WRITE;
>> +		mm_flags |= FAULT_FLAG_WRITE;
>> +		force_write = true;
>>  	} else {
>>  		/* It was read fault */
>>  		vm_flags = VM_READ;
>> @@ -586,6 +621,14 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
>>  	if (!vma)
>>  		goto lock_mmap;
>>  
>> +	/* vma flags don't allow write, undo force write */
>> +	if (force_write && !(vma->vm_flags & VM_WRITE)) {
>> +		vm_flags |= VM_READ;
>> +		if (!alternative_has_cap_unlikely(ARM64_HAS_EPAN))
>> +			vm_flags |= VM_EXEC;
>> +		mm_flags &= ~FAULT_FLAG_WRITE;
>> +	}
> Ah, this revert to the non-write flags doesn't look great as we
> basically duplicate the 'else' block in the original check. So, it
> probably looks better as per your earlier patch to just do the
> instruction read just before the !(vma->vm_flags & flags) check,
> something like:
>
> if ((vma->vm_flags & VM_WRITE) && is_el0_atomic_instr(regs)) {
> 	vm_flags = VM_WRITE;
> 	mm_flags |= FAULT_FLAG_WRITE;
> }
>
> This way we also only read the instruction if the vma is writeable. I
> think it's fine to do this under the vma lock since we have
> pagefault_disable() for the insn read.
I think we also need to skip this for write faults and instruction faults.
Something like:
@@ -529,6 +557,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 	unsigned int mm_flags = FAULT_FLAG_DEFAULT;
 	unsigned long addr = untagged_addr(far);
 	struct vm_area_struct *vma;
+	bool may_force_write = false;
 
 	if (kprobe_page_fault(regs, esr))
 		return 0;
@@ -565,6 +594,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 		/* If EPAN is absent then exec implies read */
 		if (!alternative_has_cap_unlikely(ARM64_HAS_EPAN))
 			vm_flags |= VM_EXEC;
+		may_force_write = true;
 	}
 
 	if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) {
@@ -586,6 +616,12 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 	if (!vma)
 		goto lock_mmap;
 
+	if (may_force_write && (vma->vm_flags & VM_WRITE) &&
+	    is_el0_atomic_instr(regs)) {
+		vm_flags = VM_WRITE;
+		mm_flags |= FAULT_FLAG_WRITE;
+	}
+
 	if (!(vma->vm_flags & vm_flags)) {
 		vma_end_read(vma);
 		goto lock_mmap;
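
For reference, the instruction read Catalin mentions (done with
pagefault_disable() so it is safe under the per-vma lock) would look roughly
like the sketch below. This is only an illustration, not the helper from the
patch: insn_is_atomic_rmw() is a stand-in name for the actual decode of the
LSE atomics / exclusives.

static bool is_el0_atomic_instr(struct pt_regs *regs)
{
	__le32 insn_le;
	u32 insn;

	if (!user_mode(regs))
		return false;

	/*
	 * We may be holding the per-vma lock, so the uaccess must not sleep
	 * or fault: pagefault_disable() makes get_user() return -EFAULT
	 * instead of taking a nested page fault.
	 */
	pagefault_disable();
	if (get_user(insn_le, (__le32 __user *)instruction_pointer(regs))) {
		pagefault_enable();
		return false;
	}
	pagefault_enable();

	insn = le32_to_cpu(insn_le);

	/* Stand-in: the real patch decodes LSE atomics / exclusives here. */
	return insn_is_atomic_rmw(insn);
}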
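
For completeness, the user-space pattern this is aimed at looks roughly like
the snippet below (not from the thread, just an illustration; whether the
compiler emits an LSE atomic such as ldadd here depends on -march and the
toolchain). The point is that the very first touch of the page is the atomic
RMW itself, so reporting it as a read fault maps the page read-only and a
second, write fault follows immediately.

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* Fresh anonymous mapping: nothing is populated yet. */
	long *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/*
	 * First access is an atomic RMW (e.g. ldadd with -march=armv8.1-a
	 * and later).  Without forcing a write fault this takes a read
	 * fault first, then a second fault to make the page writable.
	 */
	__atomic_fetch_add(p, 1, __ATOMIC_RELAXED);

	printf("%ld\n", *p);
	munmap(p, 4096);
	return 0;
}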