Message-ID: <20240430091740.1826862-28-vschneid@redhat.com>
Date: Tue, 30 Apr 2024 11:17:31 +0200
From: Valentin Schneider <vschneid@...hat.com>
To: rcu@...r.kernel.org,
linux-kernel@...r.kernel.org
Cc: Frederic Weisbecker <frederic@...nel.org>,
"Paul E. McKenney" <paulmck@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Neeraj Upadhyay <quic_neeraju@...cinc.com>,
Joel Fernandes <joel@...lfernandes.org>,
Josh Triplett <josh@...htriplett.org>,
Boqun Feng <boqun.feng@...il.com>,
Steven Rostedt <rostedt@...dmis.org>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Lai Jiangshan <jiangshanlai@...il.com>,
Zqiang <qiang.zhang1211@...il.com>
Subject: [PATCH v2 27/27] context_tracking, rcu: Rename rcu_dyntick trace event into rcu_watching
The "rcu_dyntick" naming convention has been turned into "rcu_watching" for
all helpers now, align the trace event to that.
To add to the confusion, the strings passed to the trace event are now
reversed: when RCU "starts" the dyntick / EQS state, it "stops" watching.
Signed-off-by: Valentin Schneider <vschneid@...hat.com>
---
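[Not part of the commit message, just a reviewer aid.]

A rough userspace sketch for eyeballing the renamed event and the reversed
strings via tracefs. It assumes CONFIG_RCU_TRACE=y, tracefs mounted at
/sys/kernel/tracing, and root privileges; the small program below is an
illustration, not something this patch adds.

/*
 * Sketch only: enable the rcu_watching trace event (formerly rcu_dyntick)
 * and stream the live trace. Each record shows the polarity string, the
 * old/new nesting values and the low bits of the RCU watching counter.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	/* Was events/rcu/rcu_dyntick/enable before this rename. */
	fd = open("/sys/kernel/tracing/events/rcu/rcu_watching/enable", O_WRONLY);
	if (fd < 0 || write(fd, "1", 1) != 1) {
		perror("enable rcu_watching");
		return 1;
	}
	close(fd);

	fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
	if (fd < 0) {
		perror("trace_pipe");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}

With the patch applied, going idle should now log "End"/"Endirq" where the
old rcu_dyntick event logged "Start"/"Startirq", and vice versa on kernel
entry, matching the reversal described above.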

 include/trace/events/rcu.h | 16 ++++++++--------
 kernel/context_tracking.c  | 10 +++++-----
 2 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 4066b6d51e46a..e51ef658437f0 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -466,40 +466,40 @@ TRACE_EVENT(rcu_stall_warning,
/*
* Tracepoint for dyntick-idle entry/exit events. These take 2 strings
* as argument:
- * polarity: "Start", "End", "StillNonIdle" for entering, exiting or still not
+ * polarity: "Start", "End", "StillWatching" for entering, exiting or still not
* being in dyntick-idle mode.
* context: "USER" or "IDLE" or "IRQ".
* NMIs nested in IRQs are inferred with nesting > 1 in IRQ context.
*
* These events also take a pair of numbers, which indicate the nesting
* depth before and after the event of interest, and a third number that is
- * the ->dynticks counter. Note that task-related and interrupt-related
+ * the RCU_WATCHING counter. Note that task-related and interrupt-related
* events use two separate counters, and that the "++=" and "--=" events
* for irq/NMI will change the counter by two, otherwise by one.
*/
-TRACE_EVENT_RCU(rcu_dyntick,
+TRACE_EVENT_RCU(rcu_watching,

-	TP_PROTO(const char *polarity, long oldnesting, long newnesting, int dynticks),
+	TP_PROTO(const char *polarity, long oldnesting, long newnesting, int counter),

-	TP_ARGS(polarity, oldnesting, newnesting, dynticks),
+	TP_ARGS(polarity, oldnesting, newnesting, counter),

	TP_STRUCT__entry(
		__field(const char *, polarity)
		__field(long, oldnesting)
		__field(long, newnesting)
-		__field(int, dynticks)
+		__field(int, counter)
	),

	TP_fast_assign(
		__entry->polarity = polarity;
		__entry->oldnesting = oldnesting;
		__entry->newnesting = newnesting;
-		__entry->dynticks = dynticks;
+		__entry->counter = counter;
	),

	TP_printk("%s %lx %lx %#3x", __entry->polarity,
		  __entry->oldnesting, __entry->newnesting,
-		  __entry->dynticks & 0xfff)
+		  __entry->counter & 0xfff)
);

/*
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index dfa64f21d900d..fdbb584b8e797 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -137,7 +137,7 @@ static void noinstr ct_kernel_exit(bool user, int offset)
instrumentation_begin();
lockdep_assert_irqs_disabled();
- trace_rcu_dyntick(TPS("Start"), ct_nesting(), 0, ct_rcu_watching());
+ trace_rcu_watching(TPS("End"), ct_nesting(), 0, ct_rcu_watching());
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
rcu_preempt_deferred_qs(current);
@@ -182,7 +182,7 @@ static void noinstr ct_kernel_enter(bool user, int offset)
// instrumentation for the noinstr ct_kernel_enter_state()
instrument_atomic_write(&ct->state, sizeof(ct->state));
- trace_rcu_dyntick(TPS("End"), ct_nesting(), 1, ct_rcu_watching());
+ trace_rcu_watching(TPS("Start"), ct_nesting(), 1, ct_rcu_watching());
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
WRITE_ONCE(ct->nesting, 1);
WARN_ON_ONCE(ct_nmi_nesting());
@@ -219,7 +219,7 @@ void noinstr ct_nmi_exit(void)
* leave it in non-RCU-idle state.
*/
if (ct_nmi_nesting() != 1) {
- trace_rcu_dyntick(TPS("--="), ct_nmi_nesting(), ct_nmi_nesting() - 2,
+ trace_rcu_watching(TPS("--="), ct_nmi_nesting(), ct_nmi_nesting() - 2,
ct_rcu_watching());
WRITE_ONCE(ct->nmi_nesting, /* No store tearing. */
ct_nmi_nesting() - 2);
@@ -228,7 +228,7 @@ void noinstr ct_nmi_exit(void)
}
/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
- trace_rcu_dyntick(TPS("Startirq"), ct_nmi_nesting(), 0, ct_rcu_watching());
+ trace_rcu_watching(TPS("Endirq"), ct_nmi_nesting(), 0, ct_rcu_watching());
WRITE_ONCE(ct->nmi_nesting, 0); /* Avoid store tearing. */
// instrumentation for the noinstr ct_kernel_exit_state()
@@ -294,7 +294,7 @@ void noinstr ct_nmi_enter(void)
instrumentation_begin();
}
- trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
+ trace_rcu_watching(incby == 1 ? TPS("Startirq") : TPS("++="),
ct_nmi_nesting(),
ct_nmi_nesting() + incby, ct_rcu_watching());
instrumentation_end();
--
2.43.0