[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <573508582.2432.1529686598096.JavaMail.zimbra@efficios.com>
Date: Fri, 22 Jun 2018 12:56:38 -0400 (EDT)
From: Mathieu Desnoyers <mathieu.desnoyers@...icios.com>
To: Will Deacon <will.deacon@....com>,
Thomas Gleixner <tglx@...utronix.de>
Cc: linux-kernel <linux-kernel@...r.kernel.org>,
Peter Zijlstra <peterz@...radead.org>,
"Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>,
Boqun Feng <boqun.feng@...il.com>,
Ingo Molnar <mingo@...hat.com>
Subject: Re: [PATCH v3] rseq: Avoid infinite recursion when delivering
SIGSEGV
----- On Jun 22, 2018, at 6:45 AM, Will Deacon will.deacon@....com wrote:
> When delivering a signal to a task that is using rseq, we call into
> __rseq_handle_notify_resume() so that the registers pushed in the
> sigframe are updated to reflect the state of the restartable sequence
> (for example, ensuring that the signal returns to the abort handler if
> necessary).
>
> However, if the rseq management fails due to an unrecoverable fault when
> accessing userspace or certain combinations of RSEQ_CS_* flags, then we
> will attempt to deliver a SIGSEGV. This has the potential for infinite
> recursion if the rseq code continuously fails on signal delivery.
>
> Avoid this problem by using force_sigsegv() instead of force_sig(), which
> is explicitly designed to reset the SEGV handler to SIG_DFL in the case
> of a recursive fault. In doing so, remove rseq_signal_deliver() from the
> internal rseq API and have an optional struct ksignal * parameter to
> rseq_handle_notify_resume() instead.
>
> Signed-off-by: Will Deacon <will.deacon@....com>
Acked-by: Mathieu Desnoyers <mathieu.desnoyers@...icios.com>
Thomas, do you want to pick up Will's rseq fix into the tip tree ?
Thanks,
Mathieu
> ---
>
> v2 -> v3: Reintroduce rseq_signal_deliver()
>
> arch/arm/kernel/signal.c | 4 ++--
> arch/powerpc/kernel/signal.c | 4 ++--
> arch/x86/entry/common.c | 2 +-
> arch/x86/kernel/signal.c | 2 +-
> include/linux/sched.h | 18 +++++++++++-------
> kernel/rseq.c | 7 ++++---
> 6 files changed, 21 insertions(+), 16 deletions(-)
>
> diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
> index f09e9d66d605..dec130e7078c 100644
> --- a/arch/arm/kernel/signal.c
> +++ b/arch/arm/kernel/signal.c
> @@ -544,7 +544,7 @@ static void handle_signal(struct ksignal *ksig, struct
> pt_regs *regs)
> * Increment event counter and perform fixup for the pre-signal
> * frame.
> */
> - rseq_signal_deliver(regs);
> + rseq_signal_deliver(ksig, regs);
>
> /*
> * Set up the stack frame
> @@ -666,7 +666,7 @@ do_work_pending(struct pt_regs *regs, unsigned int
> thread_flags, int syscall)
> } else {
> clear_thread_flag(TIF_NOTIFY_RESUME);
> tracehook_notify_resume(regs);
> - rseq_handle_notify_resume(regs);
> + rseq_handle_notify_resume(NULL, regs);
> }
> }
> local_irq_disable();
> diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
> index 17fe4339ba59..b3e8db376ecd 100644
> --- a/arch/powerpc/kernel/signal.c
> +++ b/arch/powerpc/kernel/signal.c
> @@ -134,7 +134,7 @@ static void do_signal(struct task_struct *tsk)
> /* Re-enable the breakpoints for the signal stack */
> thread_change_pc(tsk, tsk->thread.regs);
>
> - rseq_signal_deliver(tsk->thread.regs);
> + rseq_signal_deliver(&ksig, tsk->thread.regs);
>
> if (is32) {
> if (ksig.ka.sa.sa_flags & SA_SIGINFO)
> @@ -170,7 +170,7 @@ void do_notify_resume(struct pt_regs *regs, unsigned long
> thread_info_flags)
> if (thread_info_flags & _TIF_NOTIFY_RESUME) {
> clear_thread_flag(TIF_NOTIFY_RESUME);
> tracehook_notify_resume(regs);
> - rseq_handle_notify_resume(regs);
> + rseq_handle_notify_resume(NULL, regs);
> }
>
> user_enter();
> diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
> index 92190879b228..3b2490b81918 100644
> --- a/arch/x86/entry/common.c
> +++ b/arch/x86/entry/common.c
> @@ -164,7 +164,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32
> cached_flags)
> if (cached_flags & _TIF_NOTIFY_RESUME) {
> clear_thread_flag(TIF_NOTIFY_RESUME);
> tracehook_notify_resume(regs);
> - rseq_handle_notify_resume(regs);
> + rseq_handle_notify_resume(NULL, regs);
> }
>
> if (cached_flags & _TIF_USER_RETURN_NOTIFY)
> diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
> index 445ca11ff863..92a3b312a53c 100644
> --- a/arch/x86/kernel/signal.c
> +++ b/arch/x86/kernel/signal.c
> @@ -692,7 +692,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
> * Increment event counter and perform fixup for the pre-signal
> * frame.
> */
> - rseq_signal_deliver(regs);
> + rseq_signal_deliver(ksig, regs);
>
> /* Set up the stack frame */
> if (is_ia32_frame(ksig)) {
> diff --git a/include/linux/sched.h b/include/linux/sched.h
> index 87bf02d93a27..c54dda3cd9e5 100644
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -1799,20 +1799,22 @@ static inline void rseq_set_notify_resume(struct
> task_struct *t)
> set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
> }
>
> -void __rseq_handle_notify_resume(struct pt_regs *regs);
> +void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
>
> -static inline void rseq_handle_notify_resume(struct pt_regs *regs)
> +static inline void rseq_handle_notify_resume(struct ksignal *ksig,
> + struct pt_regs *regs)
> {
> if (current->rseq)
> - __rseq_handle_notify_resume(regs);
> + __rseq_handle_notify_resume(ksig, regs);
> }
>
> -static inline void rseq_signal_deliver(struct pt_regs *regs)
> +static inline void rseq_signal_deliver(struct ksignal *ksig,
> + struct pt_regs *regs)
> {
> preempt_disable();
> __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
> preempt_enable();
> - rseq_handle_notify_resume(regs);
> + rseq_handle_notify_resume(ksig, regs);
> }
>
> /* rseq_preempt() requires preemption to be disabled. */
> @@ -1864,10 +1866,12 @@ static inline void rseq_execve(struct task_struct *t)
> static inline void rseq_set_notify_resume(struct task_struct *t)
> {
> }
> -static inline void rseq_handle_notify_resume(struct pt_regs *regs)
> +static inline void rseq_handle_notify_resume(struct ksignal *ksig,
> + struct pt_regs *regs)
> {
> }
> -static inline void rseq_signal_deliver(struct pt_regs *regs)
> +static inline void rseq_signal_deliver(struct ksignal *ksig,
> + struct pt_regs *regs)
> {
> }
> static inline void rseq_preempt(struct task_struct *t)
> diff --git a/kernel/rseq.c b/kernel/rseq.c
> index ae306f90c514..22b6acf1ad63 100644
> --- a/kernel/rseq.c
> +++ b/kernel/rseq.c
> @@ -251,10 +251,10 @@ static int rseq_ip_fixup(struct pt_regs *regs)
> * respect to other threads scheduled on the same CPU, and with respect
> * to signal handlers.
> */
> -void __rseq_handle_notify_resume(struct pt_regs *regs)
> +void __rseq_handle_notify_resume(struct ksignal *ksig, struct pt_regs *regs)
> {
> struct task_struct *t = current;
> - int ret;
> + int ret, sig;
>
> if (unlikely(t->flags & PF_EXITING))
> return;
> @@ -268,7 +268,8 @@ void __rseq_handle_notify_resume(struct pt_regs *regs)
> return;
>
> error:
> - force_sig(SIGSEGV, t);
> + sig = ksig ? ksig->sig : 0;
> + force_sigsegv(sig, t);
> }
>
> #ifdef CONFIG_DEBUG_RSEQ
> --
> 2.1.4
--
Mathieu Desnoyers
EfficiOS Inc.
http://www.efficios.com
Powered by blists - more mailing lists