Message-ID: <20230519102715.435618812@infradead.org>
Date: Fri, 19 May 2023 12:21:02 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: bigeasy@...utronix.de
Cc: mark.rutland@....com, maz@...nel.org, catalin.marinas@....com,
will@...nel.org, chenhuacai@...nel.org, kernel@...0n.name,
hca@...ux.ibm.com, gor@...ux.ibm.com, agordeev@...ux.ibm.com,
borntraeger@...ux.ibm.com, svens@...ux.ibm.com,
pbonzini@...hat.com, wanpengli@...cent.com, vkuznets@...hat.com,
tglx@...utronix.de, mingo@...hat.com, bp@...en8.de,
dave.hansen@...ux.intel.com, x86@...nel.org, hpa@...or.com,
jgross@...e.com, boris.ostrovsky@...cle.com,
daniel.lezcano@...aro.org, kys@...rosoft.com,
haiyangz@...rosoft.com, wei.liu@...nel.org, decui@...rosoft.com,
rafael@...nel.org, peterz@...radead.org, longman@...hat.com,
boqun.feng@...il.com, pmladek@...e.com, senozhatsky@...omium.org,
rostedt@...dmis.org, john.ogness@...utronix.de,
juri.lelli@...hat.com, vincent.guittot@...aro.org,
dietmar.eggemann@....com, bsegall@...gle.com, mgorman@...e.de,
bristot@...hat.com, vschneid@...hat.com, jstultz@...gle.com,
sboyd@...nel.org, linux-kernel@...r.kernel.org,
loongarch@...ts.linux.dev, linux-s390@...r.kernel.org,
kvm@...r.kernel.org, linux-hyperv@...r.kernel.org,
linux-pm@...r.kernel.org
Subject: [PATCH v2 04/13] arm64/arch_timer: Provide noinstr sched_clock_read() functions

With the intent to provide local_clock_noinstr(), a variant of
local_clock() that's safe to be called from noinstr code (with the
assumption that any such code will already be non-preemptible),
prepare for this by providing a noinstr sched_clock_read() function.

Specifically, preempt_enable_*() calls out to schedule(), which upsets
noinstr validation efforts.
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
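Note below the fold (illustrative, not part of the changelog): the
change splits each counter read in two. A noinstr inner function
performs the read without touching preemption and relies on its
(noinstr) caller already being non-preemptible; a notrace wrapper
preserves the old preempt-safe behaviour for everyone else. A minimal
sketch of the pattern, with _read_stable()/read_stable() as
hypothetical stand-ins for the per-counter functions in the patch:

    /* noinstr: no preemption toggling; the caller (e.g. the upcoming
     * local_clock_noinstr()) guarantees it is non-preemptible across
     * the read. */
    static noinstr u64 _read_stable(void)
    {
        return __arch_counter_get_cntvct_stable();
    }

    /* notrace wrapper: keeps the old behaviour for regular callers;
     * preempt_enable_notrace() can call out to schedule(), which is
     * why this one cannot be noinstr. */
    static notrace u64 read_stable(void)
    {
        u64 val;

        preempt_disable_notrace();
        val = __arch_counter_get_cntvct_stable();
        preempt_enable_notrace();

        return val;
    }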
arch/arm64/include/asm/arch_timer.h | 8 ----
drivers/clocksource/arm_arch_timer.c | 60 ++++++++++++++++++++++++++---------
2 files changed, 47 insertions(+), 21 deletions(-)
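Also illustrative: the memory-mapped path below becomes noinstr as
well; it switches from readl_relaxed() to __raw_readl() to avoid the
instrumented I/O accessors and keeps the usual hi/lo/hi idiom for
reading a 64-bit counter exposed as two 32-bit registers. A generic
sketch of that idiom, with read_counter_hi_lo_hi() as a hypothetical
helper:

    /* Read hi, then lo, then hi again; if the high word changed, the
     * low word wrapped between the reads, so retry. */
    static u64 read_counter_hi_lo_hi(void __iomem *lo, void __iomem *hi)
    {
        u32 cnt_hi, cnt_lo, tmp_hi;

        do {
            cnt_hi = __raw_readl(hi);
            cnt_lo = __raw_readl(lo);
            tmp_hi = __raw_readl(hi);
        } while (cnt_hi != tmp_hi);

        return ((u64)cnt_hi << 32) | cnt_lo;
    }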
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -88,13 +88,7 @@ static inline notrace u64 arch_timer_rea
#define arch_timer_reg_read_stable(reg) \
({ \
- u64 _val; \
- \
- preempt_disable_notrace(); \
- _val = erratum_handler(read_ ## reg)(); \
- preempt_enable_notrace(); \
- \
- _val; \
+ erratum_handler(read_ ## reg)(); \
})
/*
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -191,22 +191,40 @@ u32 arch_timer_reg_read(int access, enum
return val;
}
-static notrace u64 arch_counter_get_cntpct_stable(void)
+static noinstr u64 _arch_counter_get_cntpct_stable(void)
{
return __arch_counter_get_cntpct_stable();
}
-static notrace u64 arch_counter_get_cntpct(void)
+static notrace u64 arch_counter_get_cntpct_stable(void)
+{
+ u64 val;
+
+ preempt_disable_notrace();
+ val = __arch_counter_get_cntpct_stable();
+ preempt_enable_notrace();
+
+ return val;
+}
+
+static noinstr u64 arch_counter_get_cntpct(void)
{
return __arch_counter_get_cntpct();
}
-static notrace u64 arch_counter_get_cntvct_stable(void)
+static noinstr u64 _arch_counter_get_cntvct_stable(void)
{
return __arch_counter_get_cntvct_stable();
}
-static notrace u64 arch_counter_get_cntvct(void)
+static notrace u64 arch_counter_get_cntvct_stable(void)
+{
+ u64 val;
+
+ preempt_disable_notrace();
+ val = __arch_counter_get_cntvct_stable();
+ preempt_enable_notrace();
+
+ return val;
+}
+
+static noinstr u64 arch_counter_get_cntvct(void)
{
return __arch_counter_get_cntvct();
}
@@ -753,14 +771,14 @@ static int arch_timer_set_next_event_phy
return 0;
}
-static u64 arch_counter_get_cnt_mem(struct arch_timer *t, int offset_lo)
+static noinstr u64 arch_counter_get_cnt_mem(struct arch_timer *t, int offset_lo)
{
u32 cnt_lo, cnt_hi, tmp_hi;
do {
- cnt_hi = readl_relaxed(t->base + offset_lo + 4);
- cnt_lo = readl_relaxed(t->base + offset_lo);
- tmp_hi = readl_relaxed(t->base + offset_lo + 4);
+ cnt_hi = __raw_readl(t->base + offset_lo + 4);
+ cnt_lo = __raw_readl(t->base + offset_lo);
+ tmp_hi = __raw_readl(t->base + offset_lo + 4);
} while (cnt_hi != tmp_hi);
return ((u64) cnt_hi << 32) | cnt_lo;
@@ -1060,7 +1078,7 @@ bool arch_timer_evtstrm_available(void)
return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available);
}
-static u64 arch_counter_get_cntvct_mem(void)
+static noinstr u64 arch_counter_get_cntvct_mem(void)
{
return arch_counter_get_cnt_mem(arch_timer_mem, CNTVCT_LO);
}
@@ -1074,6 +1092,13 @@ struct arch_timer_kvm_info *arch_timer_g
static void __init arch_counter_register(unsigned type)
{
+ /*
+ * Default to cp15 based access because arm64 uses this function for
+ * sched_clock() before DT is probed and the cp15 method is guaranteed
+ * to exist on arm64. arm doesn't use this before DT is probed so even
+ * if we don't have the cp15 accessors we won't have a problem.
+ */
+ u64 (*scr)(void) = arch_counter_get_cntvct;
u64 start_count;
int width;
@@ -1083,21 +1108,28 @@ static void __init arch_counter_register
if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
- if (arch_timer_counter_has_wa())
+ if (arch_timer_counter_has_wa()) {
rd = arch_counter_get_cntvct_stable;
- else
+ scr = _arch_counter_get_cntvct_stable;
+ } else {
rd = arch_counter_get_cntvct;
+ scr = arch_counter_get_cntvct;
+ }
} else {
- if (arch_timer_counter_has_wa())
+ if (arch_timer_counter_has_wa()) {
rd = arch_counter_get_cntpct_stable;
- else
+ scr = _arch_counter_get_cntpct_stable;
+ } else {
rd = arch_counter_get_cntpct;
+ scr = arch_counter_get_cntpct;
+ }
}
arch_timer_read_counter = rd;
clocksource_counter.vdso_clock_mode = vdso_default;
} else {
arch_timer_read_counter = arch_counter_get_cntvct_mem;
+ scr = arch_counter_get_cntvct_mem;
}
width = arch_counter_get_width();
@@ -1113,7 +1145,7 @@ static void __init arch_counter_register
timecounter_init(&arch_timer_kvm_info.timecounter,
&cyclecounter, start_count);
- sched_clock_register(arch_timer_read_counter, width, arch_timer_rate);
+ sched_clock_register(scr, width, arch_timer_rate);
}
static void arch_timer_stop(struct clock_event_device *clk)