Message-Id: <20220302154810.42308-16-frederic@kernel.org>
Date: Wed, 2 Mar 2022 16:48:06 +0100
From: Frederic Weisbecker <frederic@...nel.org>
To: LKML <linux-kernel@...r.kernel.org>
Cc: Frederic Weisbecker <frederic@...nel.org>,
	Peter Zijlstra <peterz@...radead.org>,
	Phil Auld <pauld@...hat.com>,
	Alex Belits <abelits@...vell.com>,
	Nicolas Saenz Julienne <nsaenz@...nel.org>,
	Xiongfeng Wang <wangxiongfeng2@...wei.com>,
	Neeraj Upadhyay <quic_neeraju@...cinc.com>,
	Thomas Gleixner <tglx@...utronix.de>,
	Yu Liao <liaoyu15@...wei.com>,
	Boqun Feng <boqun.feng@...il.com>,
	"Paul E. McKenney" <paulmck@...nel.org>,
	Marcelo Tosatti <mtosatti@...hat.com>,
	Paul Gortmaker <paul.gortmaker@...driver.com>,
	Uladzislau Rezki <uladzislau.rezki@...y.com>,
	Joel Fernandes <joel@...lfernandes.org>
Subject: [PATCH 15/19] rcu/context-tracking: Remove unused and/or unnecessary middle functions

Some eqs functions are now only used internally by context tracking, so
their public declarations can be removed.

Also, middle functions such as rcu_user_*() and rcu_idle_*(), which do
nothing but call rcu_eqs_enter() and rcu_eqs_exit() directly, can be
wiped out as well.

Signed-off-by: Frederic Weisbecker <frederic@...nel.org>
Cc: Paul E. McKenney <paulmck@...nel.org>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Neeraj Upadhyay <quic_neeraju@...cinc.com>
Cc: Uladzislau Rezki <uladzislau.rezki@...y.com>
Cc: Joel Fernandes <joel@...lfernandes.org>
Cc: Boqun Feng <boqun.feng@...il.com>
Cc: Nicolas Saenz Julienne <nsaenz@...nel.org>
Cc: Marcelo Tosatti <mtosatti@...hat.com>
Cc: Xiongfeng Wang <wangxiongfeng2@...wei.com>
Cc: Yu Liao <liaoyu15@...wei.com>
Cc: Phil Auld <pauld@...hat.com>
Cc: Paul Gortmaker <paul.gortmaker@...driver.com>
Cc: Alex Belits <abelits@...vell.com>
---
 include/linux/hardirq.h   |   8 ---
 include/linux/rcutiny.h   |   2 -
 include/linux/rcutree.h   |   2 -
 kernel/context_tracking.c | 137 ++++++++++++--------------------------
 4 files changed, 44 insertions(+), 105 deletions(-)

diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 345cdbe9c1b7..d57cab4d4c06 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -92,14 +92,6 @@ void irq_exit_rcu(void);
 #define arch_nmi_exit()		do { } while (0)
 #endif
 
-#ifdef CONFIG_TINY_RCU
-static inline void rcu_nmi_enter(void) { }
-static inline void rcu_nmi_exit(void) { }
-#else
-extern void rcu_nmi_enter(void);
-extern void rcu_nmi_exit(void);
-#endif
-
 /*
  * NMI vs Tracing
  * --------------
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 9e07f8d9d544..f3326d88693a 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -96,8 +96,6 @@ static inline int rcu_needs_cpu(void)
 static inline void rcu_virt_note_context_switch(int cpu) { }
 static inline void rcu_cpu_stall_reset(void) { }
 static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
-static inline void rcu_idle_enter(void) { }
-static inline void rcu_idle_exit(void) { }
 static inline void rcu_irq_exit_check_preempt(void) { }
 #define rcu_is_idle_cpu(cpu) \
 	(is_idle_task(current) && !in_nmi() && !in_hardirq() && !in_serving_softirq())
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 408435ff7a06..54e84020e7e0 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -49,8 +49,6 @@ unsigned long start_poll_synchronize_rcu(void);
 bool poll_state_synchronize_rcu(unsigned long oldstate);
 void cond_synchronize_rcu(unsigned long oldstate);
 
-void rcu_idle_enter(void);
-void rcu_idle_exit(void);
 bool rcu_is_idle_cpu(int cpu);
 
 #ifdef CONFIG_PROVE_RCU
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index dc24a9782bbd..de247e758767 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -149,52 +149,17 @@ static noinstr void rcu_eqs_enter(bool user)
 }
 
 /**
- * rcu_idle_enter - inform RCU that current CPU is entering idle
- *
- * Enter idle mode, in other words, -leave- the mode in which RCU
- * read-side critical sections can occur. (Though RCU read-side
- * critical sections can occur in irq handlers in idle, a possibility
- * handled by irq_enter() and irq_exit().)
- *
- * If you add or remove a call to rcu_idle_enter(), be sure to test with
- * CONFIG_RCU_EQS_DEBUG=y.
- */
-void rcu_idle_enter(void)
-{
-	lockdep_assert_irqs_disabled();
-	rcu_eqs_enter(false);
-}
-
-#ifdef CONFIG_NO_HZ_FULL
-/**
- * rcu_user_enter - inform RCU that we are resuming userspace.
- *
- * Enter RCU idle mode right before resuming userspace. No use of RCU
- * is permitted between this call and rcu_user_exit(). This way the
- * CPU doesn't need to maintain the tick for RCU maintenance purposes
- * when the CPU runs in userspace.
- *
- * If you add or remove a call to rcu_user_enter(), be sure to test with
- * CONFIG_RCU_EQS_DEBUG=y.
- */
-noinstr void rcu_user_enter(void)
-{
-	rcu_eqs_enter(true);
-}
-#endif /* CONFIG_NO_HZ_FULL */
-
-/**
- * rcu_nmi_exit - inform RCU of exit from NMI context
+ * ct_nmi_exit - inform RCU of exit from NMI context
  *
  * If we are returning from the outermost NMI handler that interrupted an
  * RCU-idle period, update ct->dynticks and ct->dynticks_nmi_nesting
  * to let the RCU grace-period handling know that the CPU is back to
  * being RCU-idle.
  *
- * If you add or remove a call to rcu_nmi_exit(), be sure to test
+ * If you add or remove a call to ct_nmi_exit(), be sure to test
  * with CONFIG_RCU_EQS_DEBUG=y.
  */
-noinstr void rcu_nmi_exit(void)
+noinstr void ct_nmi_exit(void)
 {
 	struct context_tracking *ct = this_cpu_ptr(&context_tracking);
 
@@ -275,42 +240,7 @@ static void noinstr rcu_eqs_exit(bool user)
 }
 
 /**
- * rcu_idle_exit - inform RCU that current CPU is leaving idle
- *
- * Exit idle mode, in other words, -enter- the mode in which RCU
- * read-side critical sections can occur.
- *
- * If you add or remove a call to rcu_idle_exit(), be sure to test with
- * CONFIG_RCU_EQS_DEBUG=y.
- */
-void rcu_idle_exit(void)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	rcu_eqs_exit(false);
-	local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(rcu_idle_exit);
-
-#ifdef CONFIG_NO_HZ_FULL
-/**
- * rcu_user_exit - inform RCU that we are exiting userspace.
- *
- * Exit RCU idle mode while entering the kernel because it can
- * run a RCU read side critical section anytime.
- *
- * If you add or remove a call to rcu_user_exit(), be sure to test with
- * CONFIG_RCU_EQS_DEBUG=y.
- */
-void noinstr rcu_user_exit(void)
-{
-	rcu_eqs_exit(true);
-}
-#endif /* ifdef CONFIG_NO_HZ_FULL */
-
-/**
- * rcu_nmi_enter - inform RCU of entry to NMI context
+ * ct_nmi_enter - inform RCU of entry to NMI context
  *
  * If the CPU was idle from RCU's viewpoint, update ct->dynticks and
  * ct->dynticks_nmi_nesting to let the RCU grace-period handling know
@@ -318,10 +248,10 @@ void noinstr rcu_user_exit(void)
  * long as the nesting level does not overflow an int. (You will probably
  * run out of stack space first.)
  *
- * If you add or remove a call to rcu_nmi_enter(), be sure to test
+ * If you add or remove a call to ct_nmi_enter(), be sure to test
  * with CONFIG_RCU_EQS_DEBUG=y.
  */
-noinstr void rcu_nmi_enter(void)
+noinstr void ct_nmi_enter(void)
 {
 	long incby = 2;
 	struct context_tracking *ct = this_cpu_ptr(&context_tracking);
@@ -436,7 +366,13 @@ void noinstr __ct_user_enter(enum ctx_state state)
 			 * that will fire and reschedule once we resume in user/guest mode.
 			 */
 			rcu_irq_work_resched();
-			rcu_user_enter();
+			/*
+			 * Enter RCU idle mode right before resuming userspace. No use of RCU
+			 * is permitted between this call and rcu_eqs_exit(). This way the
+			 * CPU doesn't need to maintain the tick for RCU maintenance purposes
+			 * when the CPU runs in userspace.
+			 */
+			rcu_eqs_enter(true);
 		}
 		/*
 		 * Even if context tracking is disabled on this CPU, because it's outside
@@ -510,10 +446,10 @@ void noinstr __ct_user_exit(enum ctx_state state)
 	if (__this_cpu_read(context_tracking.state) == state) {
 		if (__this_cpu_read(context_tracking.active)) {
 			/*
-			 * We are going to run code that may use RCU. Inform
-			 * RCU core about that (ie: we may need the tick again).
+			 * Exit RCU idle mode while entering the kernel because it can
+			 * run a RCU read side critical section anytime.
 			 */
-			rcu_user_exit();
+			rcu_eqs_exit(true);
 			if (state == CONTEXT_USER) {
 				instrumentation_begin();
 				vtime_user_exit(current);
@@ -595,16 +531,41 @@ DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
 };
 EXPORT_SYMBOL_GPL(context_tracking);
 
-
+/**
+ * ct_idle_enter - inform RCU that current CPU is entering idle
+ *
+ * Enter idle mode, in other words, -leave- the mode in which RCU
+ * read-side critical sections can occur. (Though RCU read-side
+ * critical sections can occur in irq handlers in idle, a possibility
+ * handled by irq_enter() and irq_exit().)
+ *
+ * If you add or remove a call to ct_idle_enter(), be sure to test with
+ * CONFIG_RCU_EQS_DEBUG=y.
+ */
 void ct_idle_enter(void)
 {
-	rcu_idle_enter();
+	lockdep_assert_irqs_disabled();
+	rcu_eqs_enter(false);
 }
 EXPORT_SYMBOL_GPL(ct_idle_enter);
 
+/**
+ * ct_idle_exit - inform RCU that current CPU is leaving idle
+ *
+ * Exit idle mode, in other words, -enter- the mode in which RCU
+ * read-side critical sections can occur.
+ *
+ * If you add or remove a call to ct_idle_exit(), be sure to test with
+ * CONFIG_RCU_EQS_DEBUG=y.
+ */
 void ct_idle_exit(void)
 {
-	rcu_idle_exit();
+	unsigned long flags;
+
+	local_irq_save(flags);
+	rcu_eqs_exit(false);
+	local_irq_restore(flags);
+
 }
 EXPORT_SYMBOL_GPL(ct_idle_exit);
 
@@ -678,13 +639,3 @@ void ct_irq_exit_irqson(void)
 	ct_irq_exit();
 	local_irq_restore(flags);
 }
-
-noinstr void ct_nmi_enter(void)
-{
-	rcu_nmi_enter();
-}
-
-noinstr void ct_nmi_exit(void)
-{
-	rcu_nmi_exit();
-}
-- 
2.25.1
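[Editor's note] The sketch below illustrates the general pattern this patch applies, written as standalone userspace C rather than kernel code; the names core_enter(), wrapper_enter() and resume_user() are hypothetical stand-ins for rcu_eqs_enter(), rcu_user_enter() and __ct_user_enter(). A middle function that only forwards to a core helper adds no behavior, so callers can invoke the helper directly and the wrapper, along with its public declaration, can be deleted.

/*
 * Illustration only (hypothetical names): built with -DBEFORE, the caller
 * goes through a trivial middle function; built without it, the caller
 * uses the core helper directly, which is the shape this patch moves to.
 */
#include <stdio.h>

static void core_enter(int user)	/* stands in for rcu_eqs_enter() */
{
	printf("core_enter(user=%d)\n", user);
}

#ifdef BEFORE
/* Middle function: nothing but a forward to the core helper. */
static void wrapper_enter(void)		/* stands in for rcu_user_enter() */
{
	core_enter(1);
}

static void resume_user(void)		/* stands in for __ct_user_enter() */
{
	wrapper_enter();
}
#else
/* After the cleanup: the caller invokes the core helper directly. */
static void resume_user(void)
{
	core_enter(1);
}
#endif

int main(void)
{
	resume_user();
	return 0;
}

Compiled with or without -DBEFORE, the program makes the same single call into core_enter(); that equivalence is what makes dropping the middle layer a purely mechanical cleanup.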