Message-ID: <389cd0bc-c42f-26c5-8402-53270cdb03d5@redhat.com>
Date: Mon, 2 Mar 2020 20:04:12 +0100
From: David Hildenbrand <david@...hat.com>
To: Peter Xu <peterx@...hat.com>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Cc: Andrea Arcangeli <aarcange@...hat.com>,
Martin Cracauer <cracauer@...s.org>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Mike Rapoport <rppt@...ux.vnet.ibm.com>,
"Kirill A . Shutemov" <kirill@...temov.name>,
Johannes Weiner <hannes@...xchg.org>,
"Dr . David Alan Gilbert" <dgilbert@...hat.com>,
Bobby Powers <bobbypowers@...il.com>,
Maya Gokhale <gokhale2@...l.gov>,
Jerome Glisse <jglisse@...hat.com>,
Mike Kravetz <mike.kravetz@...cle.com>,
Matthew Wilcox <willy@...radead.org>,
Marty McFadden <mcfadden8@...l.gov>,
Mel Gorman <mgorman@...e.de>, Hugh Dickins <hughd@...gle.com>,
Brian Geffon <bgeffon@...gle.com>,
Denis Plotnikov <dplotnikov@...tuozzo.com>,
Pavel Emelyanov <xemul@...tuozzo.com>
Subject: Re: [PATCH RESEND v6 03/16] mm: Introduce fault_signal_pending()
On 20.02.20 16:53, Peter Xu wrote:
> For most architectures, we've got a quick path to detect a fatal signal
> after handle_mm_fault(). Introduce a helper for that quick path.
>
> It cleans up the current code a bit so we don't need to duplicate the
> same check across archs. More importantly, this will be a unified
> place where we handle the signal immediately after an interrupted
> page fault, so it'll be much easier if we later want to change the
> signal-handling behavior for all the archs.
>
> Note that currently only some of the archs use this new helper,
> because some archs have their own way of handling signals. In the
> follow-up patches, we'll apply this helper to the rest of the
> archs.
>
> Another note is that the "regs" parameter in the new helper is not
> used yet; it'll be used very soon. It is kept in this patch only
> to avoid touching all the archs again in the follow-up patches.
>
> Signed-off-by: Peter Xu <peterx@...hat.com>
> ---
> arch/alpha/mm/fault.c | 2 +-
> arch/arm/mm/fault.c | 2 +-
> arch/hexagon/mm/vm_fault.c | 2 +-
> arch/ia64/mm/fault.c | 2 +-
> arch/m68k/mm/fault.c | 2 +-
> arch/microblaze/mm/fault.c | 2 +-
> arch/mips/mm/fault.c | 2 +-
> arch/nds32/mm/fault.c | 2 +-
> arch/nios2/mm/fault.c | 2 +-
> arch/openrisc/mm/fault.c | 2 +-
> arch/parisc/mm/fault.c | 2 +-
> arch/riscv/mm/fault.c | 2 +-
> arch/s390/mm/fault.c | 3 +--
> arch/sparc/mm/fault_32.c | 2 +-
> arch/sparc/mm/fault_64.c | 2 +-
> arch/unicore32/mm/fault.c | 2 +-
> arch/xtensa/mm/fault.c | 2 +-
> include/linux/sched/signal.h | 13 +++++++++++++
> 18 files changed, 30 insertions(+), 18 deletions(-)
>
> diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
> index 741e61ef9d3f..aea33b599037 100644
> --- a/arch/alpha/mm/fault.c
> +++ b/arch/alpha/mm/fault.c
> @@ -150,7 +150,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
> the fault. */
> fault = handle_mm_fault(vma, address, flags);
>
> - if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
> + if (fault_signal_pending(fault, regs))
> return;
>
> if (unlikely(fault & VM_FAULT_ERROR)) {
> diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
> index bd0f4821f7e1..937b81ff8649 100644
> --- a/arch/arm/mm/fault.c
> +++ b/arch/arm/mm/fault.c
> @@ -295,7 +295,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
> * signal first. We do not need to release the mmap_sem because
> * it would already be released in __lock_page_or_retry in
> * mm/filemap.c. */
> - if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
> + if (fault_signal_pending(fault, regs)) {
> if (!user_mode(regs))
> goto no_context;
> return 0;
> diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c
> index b3bc71680ae4..d19beaf11b4c 100644
> --- a/arch/hexagon/mm/vm_fault.c
> +++ b/arch/hexagon/mm/vm_fault.c
> @@ -91,7 +91,7 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
>
> fault = handle_mm_fault(vma, address, flags);
>
> - if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
> + if (fault_signal_pending(fault, regs))
> return;
>
> /* The most common case -- we are done. */
> diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
> index c2f299fe9e04..211b4f439384 100644
> --- a/arch/ia64/mm/fault.c
> +++ b/arch/ia64/mm/fault.c
> @@ -141,7 +141,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
> */
> fault = handle_mm_fault(vma, address, flags);
>
> - if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
> + if (fault_signal_pending(fault, regs))
> return;
>
> if (unlikely(fault & VM_FAULT_ERROR)) {
> diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
> index e9b1d7585b43..a455e202691b 100644
> --- a/arch/m68k/mm/fault.c
> +++ b/arch/m68k/mm/fault.c
> @@ -138,7 +138,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
> fault = handle_mm_fault(vma, address, flags);
> pr_debug("handle_mm_fault returns %x\n", fault);
>
> - if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
> + if (fault_signal_pending(fault, regs))
> return 0;
>
> if (unlikely(fault & VM_FAULT_ERROR)) {
> diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
> index e6a810b0c7ad..cdde01dcdfc3 100644
> --- a/arch/microblaze/mm/fault.c
> +++ b/arch/microblaze/mm/fault.c
> @@ -217,7 +217,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
> */
> fault = handle_mm_fault(vma, address, flags);
>
> - if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
> + if (fault_signal_pending(fault, regs))
> return;
>
> if (unlikely(fault & VM_FAULT_ERROR)) {
> diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
> index 1e8d00793784..0ee6fafc57bc 100644
> --- a/arch/mips/mm/fault.c
> +++ b/arch/mips/mm/fault.c
> @@ -154,7 +154,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
> */
> fault = handle_mm_fault(vma, address, flags);
>
> - if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
> + if (fault_signal_pending(fault, regs))
> return;
>
> perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
> diff --git a/arch/nds32/mm/fault.c b/arch/nds32/mm/fault.c
> index 906dfb25353c..0e63f81eff5b 100644
> --- a/arch/nds32/mm/fault.c
> +++ b/arch/nds32/mm/fault.c
> @@ -214,7 +214,7 @@ void do_page_fault(unsigned long entry, unsigned long addr,
> * signal first. We do not need to release the mmap_sem because it
> * would already be released in __lock_page_or_retry in mm/filemap.c.
> */
> - if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
> + if (fault_signal_pending(fault, regs)) {
> if (!user_mode(regs))
> goto no_context;
> return;
> diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
> index 6a2e716b959f..704ace8ca0ee 100644
> --- a/arch/nios2/mm/fault.c
> +++ b/arch/nios2/mm/fault.c
> @@ -133,7 +133,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
> */
> fault = handle_mm_fault(vma, address, flags);
>
> - if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
> + if (fault_signal_pending(fault, regs))
> return;
>
> if (unlikely(fault & VM_FAULT_ERROR)) {
> diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
> index 5d4d3a9691d0..85c7eb0c0186 100644
> --- a/arch/openrisc/mm/fault.c
> +++ b/arch/openrisc/mm/fault.c
> @@ -161,7 +161,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
>
> fault = handle_mm_fault(vma, address, flags);
>
> - if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
> + if (fault_signal_pending(fault, regs))
> return;
>
> if (unlikely(fault & VM_FAULT_ERROR)) {
> diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
> index adbd5e2144a3..f9be1d1cb43f 100644
> --- a/arch/parisc/mm/fault.c
> +++ b/arch/parisc/mm/fault.c
> @@ -304,7 +304,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
>
> fault = handle_mm_fault(vma, address, flags);
>
> - if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
> + if (fault_signal_pending(fault, regs))
> return;
>
> if (unlikely(fault & VM_FAULT_ERROR)) {
> diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
> index cf7248e07f43..1d3869e9ddef 100644
> --- a/arch/riscv/mm/fault.c
> +++ b/arch/riscv/mm/fault.c
> @@ -117,7 +117,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
> * signal first. We do not need to release the mmap_sem because it
> * would already be released in __lock_page_or_retry in mm/filemap.c.
> */
> - if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(tsk))
> + if (fault_signal_pending(fault, regs))
> return;
>
> if (unlikely(fault & VM_FAULT_ERROR)) {
> diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
> index 7b0bb475c166..179cf92a56e5 100644
> --- a/arch/s390/mm/fault.c
> +++ b/arch/s390/mm/fault.c
> @@ -480,8 +480,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
> * the fault.
> */
> fault = handle_mm_fault(vma, address, flags);
> - /* No reason to continue if interrupted by SIGKILL. */
> - if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
> + if (fault_signal_pending(fault, regs)) {
> fault = VM_FAULT_SIGNAL;
> if (flags & FAULT_FLAG_RETRY_NOWAIT)
> goto out_up;
> diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
> index 89976c9b936c..6efbeb227644 100644
> --- a/arch/sparc/mm/fault_32.c
> +++ b/arch/sparc/mm/fault_32.c
> @@ -237,7 +237,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
> */
> fault = handle_mm_fault(vma, address, flags);
>
> - if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
> + if (fault_signal_pending(fault, regs))
> return;
>
> if (unlikely(fault & VM_FAULT_ERROR)) {
> diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
> index 8b7ddbd14b65..dd1ed6555831 100644
> --- a/arch/sparc/mm/fault_64.c
> +++ b/arch/sparc/mm/fault_64.c
> @@ -425,7 +425,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
>
> fault = handle_mm_fault(vma, address, flags);
>
> - if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
> + if (fault_signal_pending(fault, regs))
> goto exit_exception;
>
> if (unlikely(fault & VM_FAULT_ERROR)) {
> diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c
> index 76342de9cf8c..59d0e6ec2cfc 100644
> --- a/arch/unicore32/mm/fault.c
> +++ b/arch/unicore32/mm/fault.c
> @@ -250,7 +250,7 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
> * signal first. We do not need to release the mmap_sem because
> * it would already be released in __lock_page_or_retry in
> * mm/filemap.c. */
> - if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
> + if (fault_signal_pending(fault, regs))
> return 0;
>
> if (!(fault & VM_FAULT_ERROR) && (flags & FAULT_FLAG_ALLOW_RETRY)) {
> diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
> index bee30a77cd70..59515905d4ad 100644
> --- a/arch/xtensa/mm/fault.c
> +++ b/arch/xtensa/mm/fault.c
> @@ -110,7 +110,7 @@ void do_page_fault(struct pt_regs *regs)
> */
> fault = handle_mm_fault(vma, address, flags);
>
> - if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
> + if (fault_signal_pending(fault, regs))
> return;
>
> if (unlikely(fault & VM_FAULT_ERROR)) {
> diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
> index 88050259c466..4c87ffce64d1 100644
> --- a/include/linux/sched/signal.h
> +++ b/include/linux/sched/signal.h
> @@ -369,6 +369,19 @@ static inline int signal_pending_state(long state, struct task_struct *p)
> return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
> }
>
> +/*
> + * This should only be used in fault handlers to decide whether we
> + * should stop the current fault routine to handle the signals
> + * instead, especially in the case where the fault got interrupted
> + * with a VM_FAULT_RETRY.
> + */
> +static inline bool fault_signal_pending(unsigned int fault_flags,
> + struct pt_regs *regs)
> +{
> + return unlikely((fault_flags & VM_FAULT_RETRY) &&
> + fatal_signal_pending(current));
> +}
> +
> /*
> * Reevaluate whether the task has signals pending delivery.
> * Wake the task if so.
>
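The conversions all follow the same pattern, so just one note: I assume
the call sites in the archs still to be converted will end up looking
like the arm/nds32 hunks above. A minimal sketch of that pattern
(illustrative only, with "no_context" standing in for the usual
arch-specific kernel-fault fixup label):

	fault = handle_mm_fault(vma, address, flags);

	/* Bail out early if a fatal signal interrupted a retried fault. */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;	/* kernel fault: exception fixup */
		return;			/* user fault: signal handled on return */
	}
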
Reviewed-by: David Hildenbrand <david@...hat.com>
--
Thanks,
David / dhildenb