Message-ID: <86sfgpnjsg.wl-maz@kernel.org>
Date: Thu, 05 Jan 2023 14:22:07 +0000
From: Marc Zyngier <maz@...nel.org>
To: Yogesh Lal <quic_ylal@...cinc.com>
Cc: <mark.rutland@....com>, <daniel.lezcano@...aro.org>,
<tglx@...utronix.de>, <linux-arm-kernel@...ts.infradead.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"linux-arm-msm@...r.kernel.org" <linux-arm-msm@...r.kernel.org>
Subject: Re: ERRATUM_858921 is broken on 5.15 kernel
On Thu, 05 Jan 2023 13:33:48 +0000,
Yogesh Lal <quic_ylal@...cinc.com> wrote:
>
> Hi,
>
> We are observing an issue on A73 cores where ERRATUM_858921 is broken.
>
> On the 5.15 kernel, the workaround (arm64_858921_read_cntpct_el0 /
> arm64_858921_read_cntvct_el0) is enabled by arch_timer_enable_workaround()
> during timer registration, via the following path.
Have you checked whether the issue is still present on 6.1?
>
> arch_timer_enable_workaround() -> atomic_set(&timer_unstable_counter_workaround_in_use, 1);
>
> [code snap]
> static
> void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa,
>                                   bool local)
> {
>         int i;
>
>         if (local) {
>                 __this_cpu_write(timer_unstable_counter_workaround, wa);
>         } else {
>                 for_each_possible_cpu(i)
>                         per_cpu(timer_unstable_counter_workaround, i) = wa;
>         }
>
>         if (wa->read_cntvct_el0 || wa->read_cntpct_el0)
>                 atomic_set(&timer_unstable_counter_workaround_in_use, 1);
>
>
> Based on the above workaround enablement, the appropriate counter read
> function is selected:
>
> static void __init arch_counter_register(unsigned type)
> {
>         u64 start_count;
>
>         /* Register the CP15 based counter if we have one */
>         if (type & ARCH_TIMER_TYPE_CP15) {
>                 u64 (*rd)(void);
>
>                 if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
>                     arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
>                         if (arch_timer_counter_has_wa())
>                                 rd = arch_counter_get_cntvct_stable;
>                         else
>                                 rd = arch_counter_get_cntvct;
>                 } else {
>                         if (arch_timer_counter_has_wa())
>                                 rd = arch_counter_get_cntpct_stable;
>                         else
>                                 rd = arch_counter_get_cntpct;
>                 }
> [snap]
>         /* 56 bits minimum, so we assume worst case rollover */
>         sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
>
>
> As our boot cores are not impacted by the erratum,
> sched_clock_register() registers the !arch_timer_counter_has_wa()
> callback.
>
> Later, when an erratum-impacted core boots up, sched_clock_register()
> has already been called with the !arch_timer_counter_has_wa() path.
> Since sched_clock_register() is not per-CPU based,
> arch_timer_read_counter will always point to the
> !arch_timer_counter_has_wa() function.
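If I understand the above correctly, the ordering can be reduced to the
following standalone sketch (illustrative names only, built in userspace,
not the actual kernel code):

#include <stdbool.h>
#include <stdio.h>

/* models timer_unstable_counter_workaround_in_use */
static bool wa_in_use;

/* stand-ins for arch_counter_get_cntvct{,_stable} */
static unsigned long long read_plain(void)  { return 0; }
static unsigned long long read_stable(void) { return 0; }

/* models arch_timer_read_counter */
static unsigned long long (*read_counter)(void);

/* models arch_counter_register(), run once from the boot CPU */
static void counter_register(void)
{
        read_counter = wa_in_use ? read_stable : read_plain;
}

/* models arch_timer_enable_workaround(), run when the A73 comes up */
static void enable_workaround(void)
{
        wa_in_use = true;
        /* read_counter is not updated here on 5.15 -- hence the bug */
}

int main(void)
{
        counter_register();     /* boot CPU: no erratum, picks read_plain */
        enable_workaround();    /* affected CPU onlines later */

        printf("workaround in use: %d, stable accessor installed: %d\n",
               wa_in_use, read_counter == read_stable);  /* -> 1, 0 */
        return 0;
}

In other words, the read function is chosen exactly once, before the
affected CPU has had a chance to flag the erratum, which is what the
change below addresses by also updating arch_timer_read_counter from
arch_timer_enable_workaround().
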
Please try the following hack, compile-tested only as I do not have
access to any affected HW, and report whether or not it solves your
issue. Note that it is based on 6.2-rc2.
Thanks,
M.
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index e09d4427f604..a7cf0a2c86d3 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -230,6 +230,28 @@ static u64 arch_counter_read_cc(const struct cyclecounter *cc)
return arch_timer_read_counter();
}
+static bool arch_timer_counter_has_wa(void);
+
+static u64 (*arch_counter_get_read_fn(void))(void)
+{
+ u64 (*rd)(void);
+
+ if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
+ arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
+ if (arch_timer_counter_has_wa())
+ rd = arch_counter_get_cntvct_stable;
+ else
+ rd = arch_counter_get_cntvct;
+ } else {
+ if (arch_timer_counter_has_wa())
+ rd = arch_counter_get_cntpct_stable;
+ else
+ rd = arch_counter_get_cntpct;
+ }
+
+ return rd;
+}
+
static struct clocksource clocksource_counter = {
.name = "arch_sys_counter",
.id = CSID_ARM_ARCH_COUNTER,
@@ -571,8 +593,10 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa
per_cpu(timer_unstable_counter_workaround, i) = wa;
}
- if (wa->read_cntvct_el0 || wa->read_cntpct_el0)
+ if (wa->read_cntvct_el0 || wa->read_cntpct_el0) {
atomic_set(&timer_unstable_counter_workaround_in_use, 1);
+ arch_timer_read_counter = arch_counter_get_read_fn();
+ }
/*
* Don't use the vdso fastpath if errata require using the
@@ -641,7 +665,7 @@ static bool arch_timer_counter_has_wa(void)
#else
#define arch_timer_check_ool_workaround(t,a) do { } while(0)
#define arch_timer_this_cpu_has_cntvct_wa() ({false;})
-#define arch_timer_counter_has_wa() ({false;})
+static bool arch_timer_counter_has_wa(void) { return false; }
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
static __always_inline irqreturn_t timer_handler(const int access,
@@ -1079,22 +1103,7 @@ static void __init arch_counter_register(unsigned type)
/* Register the CP15 based counter if we have one */
if (type & ARCH_TIMER_TYPE_CP15) {
- u64 (*rd)(void);
-
- if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
- arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
- if (arch_timer_counter_has_wa())
- rd = arch_counter_get_cntvct_stable;
- else
- rd = arch_counter_get_cntvct;
- } else {
- if (arch_timer_counter_has_wa())
- rd = arch_counter_get_cntpct_stable;
- else
- rd = arch_counter_get_cntpct;
- }
-
- arch_timer_read_counter = rd;
+ arch_timer_read_counter = arch_counter_get_read_fn();
clocksource_counter.vdso_clock_mode = vdso_default;
} else {
arch_timer_read_counter = arch_counter_get_cntvct_mem;
--
Without deviation from the norm, progress is not possible.