Date:   Wed, 6 May 2020 19:09:27 +0200
From:   Alexandre Chartre <alexandre.chartre@...cle.com>
To:     Thomas Gleixner <tglx@...utronix.de>,
        LKML <linux-kernel@...r.kernel.org>
Cc:     x86@...nel.org, "Paul E. McKenney" <paulmck@...nel.org>,
        Andy Lutomirski <luto@...nel.org>,
        Frederic Weisbecker <frederic@...nel.org>,
        Paolo Bonzini <pbonzini@...hat.com>,
        Sean Christopherson <sean.j.christopherson@...el.com>,
        Masami Hiramatsu <mhiramat@...nel.org>,
        Petr Mladek <pmladek@...e.com>,
        Steven Rostedt <rostedt@...dmis.org>,
        Joel Fernandes <joel@...lfernandes.org>,
        Boris Ostrovsky <boris.ostrovsky@...cle.com>,
        Juergen Gross <jgross@...e.com>,
        Brian Gerst <brgerst@...il.com>,
        Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
        Josh Poimboeuf <jpoimboe@...hat.com>,
        Will Deacon <will@...nel.org>,
        "Peter Zijlstra (Intel)" <peterz@...radead.org>
Subject: Re: [patch V4 part 1 36/36] rcu: Make RCU IRQ enter/exit functions
 rely on in_nmi()


On 5/5/20 3:16 PM, Thomas Gleixner wrote:
> From: Paul E. McKenney <paulmck@...nel.org>
> 
> The rcu_nmi_enter_common() and rcu_nmi_exit_common() functions take an
> "irq" parameter that indicates whether these functions are invoked from
> an irq handler (irq==true) or an NMI handler (irq==false).  However,
> recent changes have applied notrace to a few critical functions such
> that rcu_nmi_enter_common() and rcu_nmi_exit_common() may now rely
> on in_nmi().  Note that in_nmi() itself works no differently than
> before; rather, tracing is now prohibited in code regions where in_nmi()
> would incorrectly report NMI state.
> 
> Therefore remove the "irq" parameter and inline rcu_nmi_enter_common() and
> rcu_nmi_exit_common() into rcu_nmi_enter() and rcu_nmi_exit(),
> respectively.
> 
> Signed-off-by: Paul E. McKenney <paulmck@...nel.org>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
> Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
> ---
>   kernel/rcu/tree.c |   47 +++++++++++++++--------------------------------
>   1 file changed, 15 insertions(+), 32 deletions(-)
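
As a side note for readers less familiar with this area: the explicit "irq"
argument can be dropped because NMI context is already recorded in
preempt_count() by nmi_enter()/nmi_exit(). A simplified sketch of the test
this relies on (not the literal include/linux/preempt.h source):

	/*
	 * nmi_enter() sets the NMI bits in preempt_count() and nmi_exit()
	 * clears them, so in_nmi() is nonzero exactly while an NMI handler
	 * is running, and is expected to be zero when rcu_nmi_enter()/
	 * rcu_nmi_exit() are reached via rcu_irq_enter()/rcu_irq_exit().
	 */
	#define in_nmi()	(preempt_count() & NMI_MASK)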

I already sent a Reviewed-by for the first patches of this series, and went
through the remaining ones (20-36). I am not very familiar with some of these
areas, so for what it's worth:

Reviewed-by: Alexandre Chartre <alexandre.chartre@...cle.com>

alex.


> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -627,16 +627,18 @@ noinstr void rcu_user_enter(void)
>   }
>   #endif /* CONFIG_NO_HZ_FULL */
>   
> -/*
> +/**
> + * rcu_nmi_exit - inform RCU of exit from NMI context
> + *
>    * If we are returning from the outermost NMI handler that interrupted an
>    * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting
>    * to let the RCU grace-period handling know that the CPU is back to
>    * being RCU-idle.
>    *
> - * If you add or remove a call to rcu_nmi_exit_common(), be sure to test
> + * If you add or remove a call to rcu_nmi_exit(), be sure to test
>    * with CONFIG_RCU_EQS_DEBUG=y.
>    */
> -static __always_inline void rcu_nmi_exit_common(bool irq)
> +noinstr void rcu_nmi_exit(void)
>   {
>   	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
>   
> @@ -667,7 +669,7 @@ static __always_inline void rcu_nmi_exit
>   	trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
>   	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
>   
> -	if (irq)
> +	if (!in_nmi())
>   		rcu_prepare_for_idle();
>   	instr_end();
>   
> @@ -675,22 +677,11 @@ static __always_inline void rcu_nmi_exit
>   	rcu_dynticks_eqs_enter();
>   	// ... but is no longer watching here.
>   
> -	if (irq)
> +	if (!in_nmi())
>   		rcu_dynticks_task_enter();
>   }
>   
>   /**
> - * rcu_nmi_exit - inform RCU of exit from NMI context
> - *
> - * If you add or remove a call to rcu_nmi_exit(), be sure to test
> - * with CONFIG_RCU_EQS_DEBUG=y.
> - */
> -void noinstr rcu_nmi_exit(void)
> -{
> -	rcu_nmi_exit_common(false);
> -}
> -
> -/**
>    * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
>    *
>    * Exit from an interrupt handler, which might possibly result in entering
> @@ -712,7 +703,7 @@ void noinstr rcu_nmi_exit(void)
>   void noinstr rcu_irq_exit(void)
>   {
>   	lockdep_assert_irqs_disabled();
> -	rcu_nmi_exit_common(true);
> +	rcu_nmi_exit();
>   }
>   
>   /*
> @@ -801,7 +792,7 @@ void noinstr rcu_user_exit(void)
>   #endif /* CONFIG_NO_HZ_FULL */
>   
>   /**
> - * rcu_nmi_enter_common - inform RCU of entry to NMI context
> + * rcu_nmi_enter - inform RCU of entry to NMI context
>    * @irq: Is this call from rcu_irq_enter?
>    *
>    * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and
> @@ -810,10 +801,10 @@ void noinstr rcu_user_exit(void)
>    * long as the nesting level does not overflow an int.  (You will probably
>    * run out of stack space first.)
>    *
> - * If you add or remove a call to rcu_nmi_enter_common(), be sure to test
> + * If you add or remove a call to rcu_nmi_enter(), be sure to test
>    * with CONFIG_RCU_EQS_DEBUG=y.
>    */
> -static __always_inline void rcu_nmi_enter_common(bool irq)
> +noinstr void rcu_nmi_enter(void)
>   {
>   	long incby = 2;
>   	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
> @@ -831,18 +822,18 @@ static __always_inline void rcu_nmi_ente
>   	 */
>   	if (rcu_dynticks_curr_cpu_in_eqs()) {
>   
> -		if (irq)
> +		if (!in_nmi())
>   			rcu_dynticks_task_exit();
>   
>   		// RCU is not watching here ...
>   		rcu_dynticks_eqs_exit();
>   		// ... but is watching here.
>   
> -		if (irq)
> +		if (!in_nmi())
>   			rcu_cleanup_after_idle();
>   
>   		incby = 1;
> -	} else if (irq) {
> +	} else if (!in_nmi()) {
>   		instr_begin();
>   		if (tick_nohz_full_cpu(rdp->cpu) &&
>   		    rdp->dynticks_nmi_nesting == DYNTICK_IRQ_NONIDLE &&
> @@ -877,14 +868,6 @@ static __always_inline void rcu_nmi_ente
>   }
>   
>   /**
> - * rcu_nmi_enter - inform RCU of entry to NMI context
> - */
> -noinstr void rcu_nmi_enter(void)
> -{
> -	rcu_nmi_enter_common(false);
> -}
> -
> -/**
>    * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
>    *
>    * Enter an interrupt handler, which might possibly result in exiting
> @@ -909,7 +892,7 @@ noinstr void rcu_nmi_enter(void)
>   noinstr void rcu_irq_enter(void)
>   {
>   	lockdep_assert_irqs_disabled();
> -	rcu_nmi_enter_common(true);
> +	rcu_nmi_enter();
>   }
>   
>   /*
> 
