lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Wed, 20 Jun 2018 12:55:35 -0400 (EDT)
From:   Mathieu Desnoyers <mathieu.desnoyers@...icios.com>
To:     Will Deacon <will.deacon@....com>
Cc:     linux-kernel <linux-kernel@...r.kernel.org>,
        Peter Zijlstra <peterz@...radead.org>,
        "Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>,
        Boqun Feng <boqun.feng@...il.com>
Subject: Re: [RFC PATCH] rseq: Avoid infinite recursion when delivering
 SIGSEGV

----- On Jun 20, 2018, at 12:36 PM, Will Deacon will.deacon@....com wrote:

> When delivering a signal to a task that is using rseq, we call into
> __rseq_handle_notify_resume() so that the registers pushed in the
> sigframe are updated to reflect the state of the restartable sequence
> (for example, ensuring that the signal returns to the abort handler if
> necessary).
> 
> However, if the rseq management fails due to an unrecoverable fault when
> accessing userspace or certain combinations of RSEQ_CS_* flags, then we
> will attempt to deliver a SIGSEGV. This has the potential for infinite
> recursion if the rseq code continuously fails on signal delivery.
> 
> Avoid this problem by using force_sigsegv() instead of force_sig(), which
> is explicitly designed to reset the SEGV handler to SIG_DFL in the case
> of a recursive fault.

Your approach looks good. One nit below implementation-wise:

> 
> Signed-off-by: Will Deacon <will.deacon@....com>
> ---
> 
> Sending as an RFC since I only spotted this via code inspection and haven't
> tried to trigger it yet.
> 
> arch/arm/kernel/signal.c     |  2 +-
> arch/powerpc/kernel/signal.c |  2 +-
> arch/x86/kernel/signal.c     |  2 +-
> include/linux/sched.h        | 20 ++++++++++++++------
> kernel/rseq.c                |  7 ++++---
> 5 files changed, 21 insertions(+), 12 deletions(-)
> 
> diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
> index f09e9d66d605..6434a62591e8 100644
> --- a/arch/arm/kernel/signal.c
> +++ b/arch/arm/kernel/signal.c
> @@ -544,7 +544,7 @@ static void handle_signal(struct ksignal *ksig, struct
> pt_regs *regs)
> 	 * Increment event counter and perform fixup for the pre-signal
> 	 * frame.
> 	 */
> -	rseq_signal_deliver(regs);
> +	rseq_signal_deliver(ksig, regs);
> 
> 	/*
> 	 * Set up the stack frame
> diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
> index 17fe4339ba59..af822e764569 100644
> --- a/arch/powerpc/kernel/signal.c
> +++ b/arch/powerpc/kernel/signal.c
> @@ -134,7 +134,7 @@ static void do_signal(struct task_struct *tsk)
> 	/* Re-enable the breakpoints for the signal stack */
> 	thread_change_pc(tsk, tsk->thread.regs);
> 
> -	rseq_signal_deliver(tsk->thread.regs);
> +	rseq_signal_deliver(&ksig, tsk->thread.regs);
> 
> 	if (is32) {
>         	if (ksig.ka.sa.sa_flags & SA_SIGINFO)
> diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
> index 445ca11ff863..92a3b312a53c 100644
> --- a/arch/x86/kernel/signal.c
> +++ b/arch/x86/kernel/signal.c
> @@ -692,7 +692,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
> 	 * Increment event counter and perform fixup for the pre-signal
> 	 * frame.
> 	 */
> -	rseq_signal_deliver(regs);
> +	rseq_signal_deliver(ksig, regs);
> 
> 	/* Set up the stack frame */
> 	if (is_ia32_frame(ksig)) {
> diff --git a/include/linux/sched.h b/include/linux/sched.h
> index 87bf02d93a27..277f8ccb0661 100644
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -1799,20 +1799,27 @@ static inline void rseq_set_notify_resume(struct
> task_struct *t)
> 		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
> }
> 
> -void __rseq_handle_notify_resume(struct pt_regs *regs);
> +void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
> 
> -static inline void rseq_handle_notify_resume(struct pt_regs *regs)
> +static inline void _rseq_handle_notify_resume(struct ksignal *ksig,
> +					      struct pt_regs *regs)
> {
> 	if (current->rseq)
> -		__rseq_handle_notify_resume(regs);
> +		__rseq_handle_notify_resume(ksig, regs);
> +}
> +
> +static inline void rseq_handle_notify_resume(struct pt_regs *regs)
> +{
> +	_rseq_handle_notify_resume(NULL, regs);
> }


If we add _rseq_handle_notify_resume() here, we'd need the CONFIG_RSEQ=n
code to also have an empty static inline for it.

So I'm not sure we want all 3 of:

void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);

static inline void _rseq_handle_notify_resume(struct ksignal *ksig,
				      struct pt_regs *regs)

static inline void rseq_handle_notify_resume(struct pt_regs *regs)

Instead, can we just have:

__rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);

static inline void rseq_handle_notify_resume(struct ksignal *ksig,
				      struct pt_regs *regs)

and change the callers of rseq_handle_notify_resume() to pass a NULL
pointer when they are not within signal delivery?

Thanks,

Mathieu

> 
> -static inline void rseq_signal_deliver(struct pt_regs *regs)
> +static inline void rseq_signal_deliver(struct ksignal *ksig,
> +				       struct pt_regs *regs)
> {
> 	preempt_disable();
> 	__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
> 	preempt_enable();
> -	rseq_handle_notify_resume(regs);
> +	_rseq_handle_notify_resume(ksig, regs);
> }
> 
> /* rseq_preempt() requires preemption to be disabled. */
> @@ -1867,7 +1874,8 @@ static inline void rseq_set_notify_resume(struct
> task_struct *t)
> static inline void rseq_handle_notify_resume(struct pt_regs *regs)
> {
> }
> -static inline void rseq_signal_deliver(struct pt_regs *regs)
> +static inline void rseq_signal_deliver(struct ksignal *ksig,
> +				       struct pt_regs *regs)
> {
> }
> static inline void rseq_preempt(struct task_struct *t)
> diff --git a/kernel/rseq.c b/kernel/rseq.c
> index ae306f90c514..22b6acf1ad63 100644
> --- a/kernel/rseq.c
> +++ b/kernel/rseq.c
> @@ -251,10 +251,10 @@ static int rseq_ip_fixup(struct pt_regs *regs)
>  * respect to other threads scheduled on the same CPU, and with respect
>  * to signal handlers.
>  */
> -void __rseq_handle_notify_resume(struct pt_regs *regs)
> +void __rseq_handle_notify_resume(struct ksignal *ksig, struct pt_regs *regs)
> {
> 	struct task_struct *t = current;
> -	int ret;
> +	int ret, sig;
> 
> 	if (unlikely(t->flags & PF_EXITING))
> 		return;
> @@ -268,7 +268,8 @@ void __rseq_handle_notify_resume(struct pt_regs *regs)
> 	return;
> 
> error:
> -	force_sig(SIGSEGV, t);
> +	sig = ksig ? ksig->sig : 0;
> +	force_sigsegv(sig, t);
> }
> 
> #ifdef CONFIG_DEBUG_RSEQ
> --
> 2.1.4

-- 
Mathieu Desnoyers
EfficiOS Inc.
http://www.efficios.com

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ