Message-ID: <20240904111223.1035-5-kprateek.nayak@amd.com>
Date: Wed, 4 Sep 2024 11:12:22 +0000
From: K Prateek Nayak <kprateek.nayak@....com>
To: Ingo Molnar <mingo@...hat.com>, Peter Zijlstra <peterz@...radead.org>,
Juri Lelli <juri.lelli@...hat.com>, Vincent Guittot
<vincent.guittot@...aro.org>, Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>, Ben Segall <bsegall@...gle.com>, "Mel
Gorman" <mgorman@...e.de>, Valentin Schneider <vschneid@...hat.com>, "Thomas
Gleixner" <tglx@...utronix.de>
CC: Leonardo Bras <leobras@...hat.com>, "Paul E. McKenney"
<paulmck@...nel.org>, Rik van Riel <riel@...riel.com>, Thorsten Blum
<thorsten.blum@...lux.com>, Zqiang <qiang.zhang1211@...il.com>, Tejun Heo
<tj@...nel.org>, Lai Jiangshan <jiangshanlai@...il.com>, Caleb Sander Mateos
<csander@...estorage.com>, <linux-kernel@...r.kernel.org>, K Prateek Nayak
<kprateek.nayak@....com>, "Gautham R . Shenoy" <gautham.shenoy@....com>,
"Chen Yu" <yu.c.chen@...el.com>, Julia Lawall <Julia.Lawall@...ia.fr>,
"Sebastian Andrzej Siewior" <bigeasy@...utronix.de>
Subject: [RFC PATCH v2 4/5] softirq: Unify should_wake_ksoftirqd()

Define softirq_ctrl::cnt for !PREEMPT_RT kernels too, and unify
should_wake_ksoftirqd() so that it bases its decision on softirq_ctrl::cnt.

Since these counts can change quite frequently when running an
interrupt-heavy benchmark, declare the per-CPU softirq_ctrl as cacheline
aligned.

No functional change is intended: !PREEMPT_RT kernels do not increment
softirq_ctrl::cnt (yet), so should_wake_ksoftirqd() always returns true,
mimicking the current behavior.

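For illustration only, and not part of the change itself: a minimal
user-space C model of the unified predicate, with the per-CPU counter
approximated by a thread-local variable. As long as the counter is never
incremented (the !PREEMPT_RT situation today), the predicate always
returns true.

/* Illustrative user-space model, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for per-CPU softirq_ctrl::cnt (thread-local in this model). */
static _Thread_local int softirq_ctrl_cnt;

/*
 * Unified predicate: wake ksoftirqd only when no BH-disabled section is
 * active on this CPU.
 */
static bool should_wake_ksoftirqd(void)
{
	return !softirq_ctrl_cnt;
}

int main(void)
{
	/* !PREEMPT_RT today: cnt stays 0, so this always prints 1. */
	printf("%d\n", should_wake_ksoftirqd());

	/*
	 * Model of a BH-disabled section (PREEMPT_RT, or a future
	 * !PREEMPT_RT user of the count): the predicate flips to 0.
	 */
	softirq_ctrl_cnt += 1;
	printf("%d\n", should_wake_ksoftirqd());
	softirq_ctrl_cnt -= 1;

	return 0;
}
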
Signed-off-by: K Prateek Nayak <kprateek.nayak@....com>
---
v1..v2:
- New patch. Broken off from the approach discussed in
  https://lore.kernel.org/lkml/880f13fd-753d-2c5a-488a-d75c99e8dfa3@amd.com/
- Cacheline align softirq_ctrl since a benchmark stressing the IPI path
  showed sensitivity to softirq_ctrl being aligned vs unaligned (see the
  illustrative sketch below). This was also observed on v1 and has been
  elaborated on in
  https://lore.kernel.org/lkml/20240710090210.41856-4-kprateek.nayak@amd.com/
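
As a rough, hypothetical illustration of the alignment concern (plain
user-space C with made-up names, not kernel code): aligning a hot
structure to the cache line size keeps it from sharing a line with
whatever data happens to be laid out next to it, which is roughly what
DEFINE_PER_CPU_ALIGNED is expected to arrange for the per-CPU variable.

/* Hypothetical user-space sketch of the alignment idea, not kernel code. */
#include <stdalign.h>
#include <stdio.h>

#define NR_ENTRIES	8
#define CACHELINE	64	/* assumed cache line size */

/* Unaligned: neighbouring entries can end up on the same cache line. */
struct ctrl_packed {
	int cnt;
} packed_ctrl[NR_ENTRIES];

/* Aligned: each entry occupies (at least) its own cache line. */
struct ctrl_aligned {
	alignas(CACHELINE) int cnt;
} aligned_ctrl[NR_ENTRIES];

int main(void)
{
	printf("packed:  %zu bytes per entry\n", sizeof(packed_ctrl[0]));	/* 4 */
	printf("aligned: %zu bytes per entry\n", sizeof(aligned_ctrl[0]));	/* 64 */
	return 0;
}
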
---
kernel/softirq.c | 55 ++++++++++++++++++++++++------------------------
1 file changed, 27 insertions(+), 28 deletions(-)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index e70a51d737ee..d8902fbcdebf 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -88,23 +88,6 @@ EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
#endif
-/*
- * SOFTIRQ_OFFSET usage:
- *
- * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
- * to a per CPU counter and to task::softirqs_disabled_cnt.
- *
- * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
- * processing.
- *
- * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
- * on local_bh_disable or local_bh_enable.
- *
- * This lets us distinguish between whether we are currently processing
- * softirq and whether we just have bh disabled.
- */
-#ifdef CONFIG_PREEMPT_RT
-
/*
* RT accounts for BH disabled sections in task::softirqs_disabled_cnt and
* also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
@@ -118,14 +101,40 @@ EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
* the task which is in a softirq disabled section is preempted or blocks.
*/
struct softirq_ctrl {
+#ifdef CONFIG_PREEMPT_RT
local_lock_t lock;
+#endif
int cnt;
};
-static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
+static DEFINE_PER_CPU_ALIGNED(struct softirq_ctrl, softirq_ctrl) = {
+#ifdef CONFIG_PREEMPT_RT
.lock = INIT_LOCAL_LOCK(softirq_ctrl.lock),
+#endif
};
+static inline bool should_wake_ksoftirqd(void)
+{
+ return !this_cpu_read(softirq_ctrl.cnt);
+}
+
+/*
+ * SOFTIRQ_OFFSET usage:
+ *
+ * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
+ * to a per CPU counter and to task::softirqs_disabled_cnt.
+ *
+ * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
+ * processing.
+ *
+ * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
+ * on local_bh_disable or local_bh_enable.
+ *
+ * This lets us distinguish between whether we are currently processing
+ * softirq and whether we just have bh disabled.
+ */
+#ifdef CONFIG_PREEMPT_RT
+
/**
* local_bh_blocked() - Check for idle whether BH processing is blocked
*
@@ -270,11 +279,6 @@ static inline void ksoftirqd_run_end(void)
static inline void softirq_handle_begin(void) { }
static inline void softirq_handle_end(void) { }
-static inline bool should_wake_ksoftirqd(void)
-{
- return !this_cpu_read(softirq_ctrl.cnt);
-}
-
static inline void invoke_softirq(void)
{
if (should_wake_ksoftirqd())
@@ -419,11 +423,6 @@ static inline void ksoftirqd_run_end(void)
local_irq_enable();
}
-static inline bool should_wake_ksoftirqd(void)
-{
- return true;
-}
-
static inline void invoke_softirq(void)
{
if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
--
2.34.1