From 1e3060ecdb479a3dfd587a5870e0351e0b1b5ddc Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Sat, 28 Feb 2015 17:38:17 +1100
Subject: [PATCH 1/2] Move bulk of x86 __do_page_fault() to a generic_page_fault()

(Add various hooks that other archs will need etc...)

Signed-off-by: Benjamin Herrenschmidt
---
 arch/x86/include/asm/fault.h |  99 +++++++++++++++++
 arch/x86/mm/fault.c          | 253 +++----------------------------------------
 include/linux/fault.h        |  11 ++
 mm/Makefile                  |   2 +-
 mm/fault.c                   | 171 +++++++++++++++++++++++++++++
 5 files changed, 296 insertions(+), 240 deletions(-)
 create mode 100644 arch/x86/include/asm/fault.h
 create mode 100644 include/linux/fault.h
 create mode 100644 mm/fault.c

diff --git a/arch/x86/include/asm/fault.h b/arch/x86/include/asm/fault.h
new file mode 100644
index 0000000..7c1712e1
--- /dev/null
+++ b/arch/x86/include/asm/fault.h
@@ -0,0 +1,99 @@
+#ifndef _ASM_X86_FAULT_H
+#define _ASM_X86_FAULT_H
+
+#include
+#include
+
+/*
+ * Page fault error code bits:
+ *
+ *   bit 0 ==	 0: no page found	1: protection fault
+ *   bit 1 ==	 0: read access		1: write access
+ *   bit 2 ==	 0: kernel-mode access	1: user-mode access
+ *   bit 3 ==				1: use of reserved bit detected
+ *   bit 4 ==				1: fault was an instruction fetch
+ */
+enum x86_pf_error_code {
+
+	PF_PROT		=	1 << 0,
+	PF_WRITE	=	1 << 1,
+	PF_USER		=	1 << 2,
+	PF_RSVD		=	1 << 3,
+	PF_INSTR	=	1 << 4,
+};
+
+static inline bool fault_is_user(struct pt_regs *regs, unsigned long err_code)
+{
+	return err_code & PF_USER;
+}
+
+static inline bool fault_is_write(struct pt_regs *regs, unsigned long err_code)
+{
+	return err_code & PF_WRITE;
+}
+
+/* Return type for do_page_fault */
+typedef void gpf_ret_t;
+
+#define FAULT_NO_ERR
+
+/* Check if the stack is allowed to grow during a user page fault */
+static inline bool stack_can_grow(struct pt_regs *regs, unsigned long err_code,
+				  unsigned long address,
+				  struct vm_area_struct *vma)
+{
+	/*
+	 * Accessing the stack below %sp is always a bug.
+	 * The large cushion allows instructions like enter
+	 * and pusha to work. ("enter $65535, $31" pushes
+	 * 32 pointers and then decrements %sp by 65535.)
+	 */
+	return address + 65536 + 32 * sizeof(unsigned long) >= regs->sp;
+}
+
+/* Access validity check */
+static inline bool access_error(struct pt_regs *regs, unsigned long err_code,
+				struct vm_area_struct *vma)
+{
+	if (err_code & PF_WRITE) {
+		/* write, present and write, not present: */
+		if (unlikely(!(vma->vm_flags & VM_WRITE)))
+			return true;
+		return false;
+	}
+
+	/* read, present: */
+	if (unlikely(err_code & PF_PROT))
+		return true;
+
+	/* read, not present: */
+	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
+		return true;
+
+	return false;
+}
+
+/* Error handlers */
+
+gpf_ret_t handle_bad_area(struct pt_regs *regs, unsigned long error_code,
+			  unsigned long address, int si_code);
+
+
+void no_context(struct pt_regs *regs, unsigned long error_code,
+		unsigned long address, int signal, int si_code);
+
+static inline gpf_ret_t handle_kernel_fault(struct pt_regs *regs,
+					    unsigned long error_code,
+					    unsigned long address, int sig,
+					    int si_code)
+{
+	no_context(regs, error_code, address, sig, si_code);
+}
+
+gpf_ret_t do_sigbus(struct pt_regs *regs, unsigned long error_code,
+		    unsigned long address, unsigned int fault);
+
+static inline void arch_account_major_fault(void) { }
+
+
+#endif /* _ASM_X86_FAULT_H */
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index ede025f..b7ca60a 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -13,6 +13,7 @@
 #include			/* hstate_index_to_shift	*/
 #include			/* prefetchw			*/
 #include			/* exception_enter(), ...	*/
 
+#include
 #include			/* dotraplinkage, ...		*/
 #include			/* pgd_*(), ...			*/
@@ -24,24 +25,6 @@
 #include
 
 /*
- * Page fault error code bits:
- *
- *   bit 0 ==	 0: no page found	1: protection fault
- *   bit 1 ==	 0: read access		1: write access
- *   bit 2 ==	 0: kernel-mode access	1: user-mode access
- *   bit 3 ==				1: use of reserved bit detected
- *   bit 4 ==				1: fault was an instruction fetch
- */
-enum x86_pf_error_code {
-
-	PF_PROT		=	1 << 0,
-	PF_WRITE	=	1 << 1,
-	PF_USER		=	1 << 2,
-	PF_RSVD		=	1 << 3,
-	PF_INSTR	=	1 << 4,
-};
-
-/*
  * Returns 0 if mmiotrace is disabled, or if the fault is not
  * handled by mmiotrace:
  */
@@ -643,7 +626,7 @@ pgtable_bad(struct pt_regs *regs, unsigned long error_code,
 	oops_end(flags, regs, sig);
 }
 
-static noinline void
+noinline void
 no_context(struct pt_regs *regs, unsigned long error_code,
 	   unsigned long address, int signal, int si_code)
 {
@@ -748,8 +731,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
 	printk(KERN_CONT "\n");
 }
 
-static void
-__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+static void __bad_area(struct pt_regs *regs, unsigned long error_code,
 		       unsigned long address, int si_code)
 {
 	struct task_struct *tsk = current;
@@ -804,44 +786,20 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 	no_context(regs, error_code, address, SIGSEGV, si_code);
 }
 
-static noinline void
-bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
-		     unsigned long address)
-{
-	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
-}
-
-static void
-__bad_area(struct pt_regs *regs, unsigned long error_code,
-	   unsigned long address, int si_code)
-{
-	struct mm_struct *mm = current->mm;
-
-	/*
-	 * Something tried to access memory that isn't in our memory map..
-	 * Fix it, but check if it's kernel or user first..
-	 */
-	up_read(&mm->mmap_sem);
-
-	__bad_area_nosemaphore(regs, error_code, address, si_code);
-}
-
-static noinline void
-bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
+static inline void bad_area(struct pt_regs *regs, unsigned long error_code,
+			    unsigned long address)
 {
 	__bad_area(regs, error_code, address, SEGV_MAPERR);
 }
 
-static noinline void
-bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
-		      unsigned long address)
+gpf_ret_t handle_bad_area(struct pt_regs *regs, unsigned long error_code,
+			  unsigned long address, int si_code)
 {
-	__bad_area(regs, error_code, address, SEGV_ACCERR);
+	__bad_area(regs, error_code, address, si_code);
 }
 
-static void
-do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
-	  unsigned int fault)
+gpf_ret_t do_sigbus(struct pt_regs *regs, unsigned long error_code,
+		    unsigned long address, unsigned int fault)
 {
 	struct task_struct *tsk = current;
 	int code = BUS_ADRERR;
@@ -871,40 +829,6 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
 	force_sig_info_fault(SIGBUS, code, address, tsk, fault);
 }
 
-static noinline void
-mm_fault_error(struct pt_regs *regs, unsigned long error_code,
-	       unsigned long address, unsigned int fault)
-{
-	if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
-		no_context(regs, error_code, address, 0, 0);
-		return;
-	}
-
-	if (fault & VM_FAULT_OOM) {
-		/* Kernel mode? Handle exceptions or die: */
-		if (!(error_code & PF_USER)) {
-			no_context(regs, error_code, address,
-				   SIGSEGV, SEGV_MAPERR);
-			return;
-		}
-
-		/*
-		 * We ran out of memory, call the OOM killer, and return the
-		 * userspace (which will retry the fault, or kill us if we got
-		 * oom-killed):
-		 */
-		pagefault_out_of_memory();
-	} else {
-		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
-			     VM_FAULT_HWPOISON_LARGE))
-			do_sigbus(regs, error_code, address, fault);
-		else if (fault & VM_FAULT_SIGSEGV)
-			bad_area_nosemaphore(regs, error_code, address);
-		else
-			BUG();
-	}
-}
-
 static int spurious_fault_check(unsigned long error_code, pte_t *pte)
 {
 	if ((error_code & PF_WRITE) && !pte_write(*pte))
@@ -998,27 +922,6 @@ NOKPROBE_SYMBOL(spurious_fault);
 int show_unhandled_signals = 1;
 
-static inline int
-access_error(unsigned long error_code, struct vm_area_struct *vma)
-{
-	if (error_code & PF_WRITE) {
-		/* write, present and write, not present: */
-		if (unlikely(!(vma->vm_flags & VM_WRITE)))
-			return 1;
-		return 0;
-	}
-
-	/* read, present: */
-	if (unlikely(error_code & PF_PROT))
-		return 1;
-
-	/* read, not present: */
-	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
-		return 1;
-
-	return 0;
-}
-
 static int fault_in_kernel_space(unsigned long address)
 {
 	return address >= TASK_SIZE_MAX;
 }
@@ -1054,11 +957,8 @@ static noinline void
 __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 		unsigned long address)
 {
-	struct vm_area_struct *vma;
 	struct task_struct *tsk;
 	struct mm_struct *mm;
-	int fault, major = 0;
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	tsk = current;
 	mm = tsk->mm;
@@ -1107,7 +1007,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 		 * Don't take the mm semaphore here. If we fixup a prefetch
 		 * fault we could otherwise deadlock:
 		 */
-		bad_area_nosemaphore(regs, error_code, address);
+		bad_area(regs, error_code, address);
 
 		return;
 	}
@@ -1120,7 +1020,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 		pgtable_bad(regs, error_code, address);
 
 	if (unlikely(smap_violation(error_code, regs))) {
-		bad_area_nosemaphore(regs, error_code, address);
+		bad_area(regs, error_code, address);
 		return;
 	}
 
@@ -1129,7 +1029,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	 * in an atomic region then we must not take the fault:
 	 */
 	if (unlikely(in_atomic() || !mm)) {
-		bad_area_nosemaphore(regs, error_code, address);
+		bad_area(regs, error_code, address);
 		return;
 	}
 
@@ -1143,137 +1043,12 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	if (user_mode_vm(regs)) {
 		local_irq_enable();
 		error_code |= PF_USER;
-		flags |= FAULT_FLAG_USER;
 	} else {
 		if (regs->flags & X86_EFLAGS_IF)
 			local_irq_enable();
 	}
 
-	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
-
-	if (error_code & PF_WRITE)
-		flags |= FAULT_FLAG_WRITE;
-
-	/*
-	 * When running in the kernel we expect faults to occur only to
-	 * addresses in user space. All other faults represent errors in
-	 * the kernel and should generate an OOPS. Unfortunately, in the
-	 * case of an erroneous fault occurring in a code path which already
-	 * holds mmap_sem we will deadlock attempting to validate the fault
-	 * against the address space. Luckily the kernel only validly
-	 * references user space from well defined areas of code, which are
-	 * listed in the exceptions table.
-	 *
-	 * As the vast majority of faults will be valid we will only perform
-	 * the source reference check when there is a possibility of a
-	 * deadlock. Attempt to lock the address space, if we cannot we then
-	 * validate the source. If this is invalid we can skip the address
-	 * space check, thus avoiding the deadlock:
-	 */
-	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
-		if ((error_code & PF_USER) == 0 &&
-		    !search_exception_tables(regs->ip)) {
-			bad_area_nosemaphore(regs, error_code, address);
-			return;
-		}
-retry:
-		down_read(&mm->mmap_sem);
-	} else {
-		/*
-		 * The above down_read_trylock() might have succeeded in
-		 * which case we'll have missed the might_sleep() from
-		 * down_read():
-		 */
-		might_sleep();
-	}
-
-	vma = find_vma(mm, address);
-	if (unlikely(!vma)) {
-		bad_area(regs, error_code, address);
-		return;
-	}
-	if (likely(vma->vm_start <= address))
-		goto good_area;
-	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
-		bad_area(regs, error_code, address);
-		return;
-	}
-	if (error_code & PF_USER) {
-		/*
-		 * Accessing the stack below %sp is always a bug.
-		 * The large cushion allows instructions like enter
-		 * and pusha to work. ("enter $65535, $31" pushes
-		 * 32 pointers and then decrements %sp by 65535.)
-		 */
-		if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
-			bad_area(regs, error_code, address);
-			return;
-		}
-	}
-	if (unlikely(expand_stack(vma, address))) {
-		bad_area(regs, error_code, address);
-		return;
-	}
-
-	/*
-	 * Ok, we have a good vm_area for this memory access, so
-	 * we can handle it..
-	 */
-good_area:
-	if (unlikely(access_error(error_code, vma))) {
-		bad_area_access_error(regs, error_code, address);
-		return;
-	}
-
-	/*
-	 * If for any reason at all we couldn't handle the fault,
-	 * make sure we exit gracefully rather than endlessly redo
-	 * the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if
-	 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
-	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
-	major |= fault & VM_FAULT_MAJOR;
-
-	/*
-	 * If we need to retry the mmap_sem has already been released,
-	 * and if there is a fatal signal pending there is no guarantee
-	 * that we made any progress. Handle this case first.
-	 */
-	if (unlikely(fault & VM_FAULT_RETRY)) {
-		/* Retry at most once */
-		if (flags & FAULT_FLAG_ALLOW_RETRY) {
-			flags &= ~FAULT_FLAG_ALLOW_RETRY;
-			flags |= FAULT_FLAG_TRIED;
-			if (!fatal_signal_pending(tsk))
-				goto retry;
-		}
-
-		/* User mode? Just return to handle the fatal exception */
-		if (flags & FAULT_FLAG_USER)
-			return;
-
-		/* Not returning to user mode? Handle exceptions or die: */
-		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
-		return;
-	}
-
-	up_read(&mm->mmap_sem);
-	if (unlikely(fault & VM_FAULT_ERROR)) {
-		mm_fault_error(regs, error_code, address, fault);
-		return;
-	}
-
-	/*
-	 * Major/minor page fault accounting. If any of the events
-	 * returned VM_FAULT_MAJOR, we account it as a major fault.
-	 */
-	if (major) {
-		tsk->maj_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
-	} else {
-		tsk->min_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
-	}
+	generic_page_fault(regs, tsk, error_code, address);
 
 	check_v8086_mode(regs, address, tsk);
 }
diff --git a/include/linux/fault.h b/include/linux/fault.h
new file mode 100644
index 0000000..590d909
--- /dev/null
+++ b/include/linux/fault.h
@@ -0,0 +1,11 @@
+#ifndef __FAULT_H
+#define __FAULT_H
+
+/* Generic page fault stuff */
+
+#include
+
+gpf_ret_t generic_page_fault(struct pt_regs *regs, struct task_struct *tsk,
+			     unsigned long error_code, unsigned long address);
+
+#endif /* __FAULT_H */
diff --git a/mm/Makefile b/mm/Makefile
index 3c1caa2..f647ff1 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -8,7 +8,7 @@ KASAN_SANITIZE_slub.o := n
 mmu-y			:= nommu.o
 mmu-$(CONFIG_MMU)	:= gup.o highmem.o memory.o mincore.o \
 			   mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
-			   vmalloc.o pagewalk.o pgtable-generic.o
+			   vmalloc.o pagewalk.o pgtable-generic.o fault.o
 
 ifdef CONFIG_CROSS_MEMORY_ATTACH
 mmu-$(CONFIG_MMU)	+= process_vm_access.o
diff --git a/mm/fault.c b/mm/fault.c
new file mode 100644
index 0000000..bfeee0b
--- /dev/null
+++ b/mm/fault.c
@@ -0,0 +1,171 @@
+#include
+#include
+#include
+#include
+
+#include
+
+static noinline gpf_ret_t mm_fault_error(struct pt_regs *regs,
+					 unsigned long error_code,
+					 unsigned long address,
+					 unsigned int fault)
+{
+	if (fatal_signal_pending(current) && !fault_is_user(regs, error_code))
+		return handle_kernel_fault(regs, error_code, address, 0, 0);
+
+	if (fault & VM_FAULT_OOM) {
+		/* Kernel mode? Handle exceptions or die: */
+		if (!fault_is_user(regs, error_code))
+			return handle_kernel_fault(regs, error_code, address,
+						   SIGSEGV, SEGV_MAPERR);
+
+		/*
+		 * We ran out of memory, call the OOM killer, and return the
+		 * userspace (which will retry the fault, or kill us if we got
+		 * oom-killed):
+		 */
+		pagefault_out_of_memory();
+	} else {
+		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
+			     VM_FAULT_HWPOISON_LARGE))
+			return do_sigbus(regs, error_code, address, fault);
+		else if (fault & VM_FAULT_SIGSEGV)
+			return handle_bad_area(regs, error_code, address,
+					       SEGV_MAPERR);
+		else
+			BUG();
+	}
+	return FAULT_NO_ERR;
+}
+
+gpf_ret_t generic_page_fault(struct pt_regs *regs, struct task_struct *tsk,
+			     unsigned long error_code, unsigned long address)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm;
+	int fault, major = 0;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+	mm = tsk->mm;
+
+	if (fault_is_user(regs, error_code))
+		flags |= FAULT_FLAG_USER;
+
+	if (fault_is_write(regs, error_code))
+		flags |= FAULT_FLAG_WRITE;
+
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
+	/*
+	 * When running in the kernel we expect faults to occur only to
+	 * addresses in user space. All other faults represent errors in
+	 * the kernel and should generate an OOPS. Unfortunately, in the
+	 * case of an erroneous fault occurring in a code path which already
+	 * holds mmap_sem we will deadlock attempting to validate the fault
+	 * against the address space. Luckily the kernel only validly
+	 * references user space from well defined areas of code, which are
+	 * listed in the exceptions table.
+	 *
+	 * As the vast majority of faults will be valid we will only perform
+	 * the source reference check when there is a possibility of a
+	 * deadlock. Attempt to lock the address space, if we cannot we then
+	 * validate the source. If this is invalid we can skip the address
+	 * space check, thus avoiding the deadlock:
+	 */
+	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+		if (!fault_is_user(regs, error_code) &&
+		    !search_exception_tables(GET_IP(regs))) {
+			return handle_bad_area(regs, error_code, address,
+					       SEGV_MAPERR);
+		}
+retry:
+		down_read(&mm->mmap_sem);
+	} else {
+		/*
+		 * The above down_read_trylock() might have succeeded in
+		 * which case we'll have missed the might_sleep() from
+		 * down_read():
+		 */
+		might_sleep();
+	}
+
+	vma = find_vma(mm, address);
+	if (unlikely(!vma))
+		goto bad_area;
+	if (likely(vma->vm_start <= address))
+		goto good_area;
+	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
+		goto bad_area;
+	if (unlikely(fault_is_user(regs, error_code) &&
+		     !stack_can_grow(regs, error_code, address, vma)))
+		goto bad_area;
+	if (unlikely(expand_stack(vma, address)))
+		goto bad_area;
+
+	/*
+	 * Ok, we have a good vm_area for this memory access, so
+	 * we can handle it..
+	 */
+good_area:
+	if (unlikely(access_error(regs, error_code, vma)))
+		goto bad_access;
+
+	/*
+	 * If for any reason at all we couldn't handle the fault,
+	 * make sure we exit gracefully rather than endlessly redo
+	 * the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if
+	 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
+	 */
+	fault = handle_mm_fault(mm, vma, address, flags);
+	major |= fault & VM_FAULT_MAJOR;
+
+	/*
+	 * If we need to retry the mmap_sem has already been released,
+	 * and if there is a fatal signal pending there is no guarantee
+	 * that we made any progress. Handle this case first.
+ */ + if (unlikely(fault & VM_FAULT_RETRY)) { + /* Retry at most once */ + if (flags & FAULT_FLAG_ALLOW_RETRY) { + flags &= ~FAULT_FLAG_ALLOW_RETRY; + flags |= FAULT_FLAG_TRIED; + if (!fatal_signal_pending(tsk)) + goto retry; + } + + /* User mode? Just return to handle the fatal exception */ + if (flags & FAULT_FLAG_USER) + return FAULT_NO_ERR; + + /* Not returning to user mode? Handle exceptions or die: */ + return handle_kernel_fault(regs, error_code, address, + SIGBUS, BUS_ADRERR); + } + + up_read(&mm->mmap_sem); + if (unlikely(fault & VM_FAULT_ERROR)) + return mm_fault_error(regs, error_code, address, fault); + + /* + * Major/minor page fault accounting. If any of the events + * returned VM_FAULT_MAJOR, we account it as a major fault. + */ + if (major) { + tsk->maj_flt++; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); + + /* Some archs want extra counting here */ + arch_account_major_fault(); + } else { + tsk->min_flt++; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); + } + return FAULT_NO_ERR; + + bad_area: + up_read(&mm->mmap_sem); + return handle_bad_area(regs, error_code, address, SEGV_MAPERR); + bad_access: + up_read(&mm->mmap_sem); + return handle_bad_area(regs, error_code, address, SEGV_ACCERR); +} -- 2.1.0