Message-ID: <20190408160802.GS6139@lakrids.cambridge.arm.com>
Date: Mon, 8 Apr 2019 17:08:03 +0100
From: Mark Rutland <mark.rutland@....com>
To: Marc Zyngier <marc.zyngier@....com>
Cc: linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
Russell King <linux@....linux.org.uk>,
Will Deacon <will.deacon@....com>,
Catalin Marinas <catalin.marinas@....com>,
Daniel Lezcano <daniel.lezcano@...aro.org>,
Wim Van Sebroeck <wim@...ux-watchdog.org>,
Guenter Roeck <linux@...ck-us.net>,
Valentin Schneider <valentin.schneider@....com>
Subject: Re: [PATCH 7/7] clocksource/arm_arch_timer: Use arch_timer_read_counter to access stable counters

On Mon, Apr 08, 2019 at 04:49:07PM +0100, Marc Zyngier wrote:
> Instead of always going via arch_counter_get_cntvct_stable to
> access the counter workaround, let's have arch_timer_read_counter
> to point to the right method.

Nit: s/to point/point/

> For that, we need to track whether any CPU in the system has a
> workaround for the counter. This is done by having an atomic
> variable tracking this.
>
> Signed-off-by: Marc Zyngier <marc.zyngier@....com>

Acked-by: Mark Rutland <mark.rutland@....com>

Mark.
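
As an aside, for anyone skimming the thread: stripped of the erratum
plumbing, the shape of the change described above is plain
function-pointer dispatch, chosen once at counter registration rather
than re-checked on every read. A minimal, standalone sketch follows;
everything in it apart from the general idea of arch_timer_read_counter
(counter_wa_in_use, read_counter_plain(), read_counter_stable(),
counter_register()) is an illustrative stand-in, not the kernel's
helpers.

/*
 * Simplified userspace sketch of the dispatch pattern; all names here
 * are hypothetical stand-ins for the kernel's machinery.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Set once if any CPU needs a counter workaround. */
static atomic_int counter_wa_in_use;

static uint64_t read_counter_plain(void)  { return 42; /* direct counter read */ }
static uint64_t read_counter_stable(void) { return 42; /* out-of-line, erratum-safe read */ }

/* Every caller goes through this pointer; no per-read workaround check. */
static uint64_t (*read_counter)(void) = read_counter_plain;

static void counter_register(void)
{
	/* Pick the accessor once, at registration time. */
	read_counter = atomic_load(&counter_wa_in_use) ? read_counter_stable
						       : read_counter_plain;
}

int main(void)
{
	atomic_store(&counter_wa_in_use, 1);	/* pretend an erratum was detected */
	counter_register();
	printf("counter: %llu\n", (unsigned long long)read_counter());
	return 0;
}

The upshot is that systems without an affected CPU never go near the
workaround machinery on the read path.
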
> ---
>  arch/arm/include/asm/arch_timer.h    | 14 ++++++--
>  arch/arm64/include/asm/arch_timer.h  | 16 ++++++++--
>  drivers/clocksource/arm_arch_timer.c | 48 +++++++++++++++++++++++++---
>  3 files changed, 70 insertions(+), 8 deletions(-)
>
> diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
> index 3f0a0191f763..4b66ecd6be99 100644
> --- a/arch/arm/include/asm/arch_timer.h
> +++ b/arch/arm/include/asm/arch_timer.h
> @@ -83,7 +83,7 @@ static inline u32 arch_timer_get_cntfrq(void)
>  	return val;
>  }
>  
> -static inline u64 arch_counter_get_cntpct(void)
> +static inline u64 __arch_counter_get_cntpct(void)
>  {
>  	u64 cval;
>  
> @@ -92,7 +92,12 @@ static inline u64 arch_counter_get_cntpct(void)
>  	return cval;
>  }
>  
> -static inline u64 arch_counter_get_cntvct(void)
> +static inline u64 __arch_counter_get_cntpct_stable(void)
> +{
> +	return __arch_counter_get_cntpct();
> +}
> +
> +static inline u64 __arch_counter_get_cntvct(void)
>  {
>  	u64 cval;
>  
> @@ -101,6 +106,11 @@ static inline u64 arch_counter_get_cntvct(void)
>  	return cval;
>  }
>  
> +static inline u64 __arch_counter_get_cntvct_stable(void)
> +{
> +	return __arch_counter_get_cntvct();
> +}
> +
>  static inline u32 arch_timer_get_cntkctl(void)
>  {
>  	u32 cntkctl;
> diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
> index 5502ea049b63..48b2100f4aaa 100644
> --- a/arch/arm64/include/asm/arch_timer.h
> +++ b/arch/arm64/include/asm/arch_timer.h
> @@ -174,18 +174,30 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl)
>  	isb();
>  }
>  
> -static inline u64 arch_counter_get_cntpct(void)
> +static inline u64 __arch_counter_get_cntpct_stable(void)
>  {
>  	isb();
>  	return arch_timer_reg_read_stable(cntpct_el0);
>  }
>  
> -static inline u64 arch_counter_get_cntvct(void)
> +static inline u64 __arch_counter_get_cntpct(void)
> +{
> +	isb();
> +	return read_sysreg(cntpct_el0);
> +}
> +
> +static inline u64 __arch_counter_get_cntvct_stable(void)
>  {
>  	isb();
>  	return arch_timer_reg_read_stable(cntvct_el0);
>  }
>  
> +static inline u64 __arch_counter_get_cntvct(void)
> +{
> +	isb();
> +	return read_sysreg(cntvct_el0);
> +}
> +
>  static inline int arch_timer_arch_init(void)
>  {
>  	return 0;
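
(For context, not part of this patch: the _stable variants above go via
arch_timer_reg_read_stable(), which is roughly a per-CPU lookup of the
erratum workaround's out-of-line accessor, falling back to the plain
sysreg read. A simplified sketch of that shape, from memory; preemption
handling is omitted and this is not the exact macro:

static inline u64 cntvct_stable_sketch(void)
{
	const struct arch_timer_erratum_workaround *wa;

	/* Use this CPU's out-of-line accessor if an erratum installed one. */
	wa = __this_cpu_read(timer_unstable_counter_workaround);
	if (wa && wa->read_cntvct_el0)
		return wa->read_cntvct_el0();

	/* Otherwise a plain system register read is fine. */
	return read_sysreg(cntvct_el0);
}

So the cost of the workaround indirection is only paid on the _stable
path, which with this patch is only installed when a counter workaround
is actually in use.)
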
> diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
> index da487fbfada3..5fcccc467868 100644
> --- a/drivers/clocksource/arm_arch_timer.c
> +++ b/drivers/clocksource/arm_arch_timer.c
> @@ -152,6 +152,26 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
>  	return val;
>  }
>  
> +static u64 arch_counter_get_cntpct_stable(void)
> +{
> +	return __arch_counter_get_cntpct_stable();
> +}
> +
> +static u64 arch_counter_get_cntpct(void)
> +{
> +	return __arch_counter_get_cntpct();
> +}
> +
> +static u64 arch_counter_get_cntvct_stable(void)
> +{
> +	return __arch_counter_get_cntvct_stable();
> +}
> +
> +static u64 arch_counter_get_cntvct(void)
> +{
> +	return __arch_counter_get_cntvct();
> +}
> +
>  /*
>   * Default to cp15 based access because arm64 uses this function for
>   * sched_clock() before DT is probed and the cp15 method is guaranteed
> @@ -372,6 +392,7 @@ static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
>  DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
>  EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
>  
> +static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);
>  
>  static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
>  						struct clock_event_device *clk)
> @@ -550,6 +571,9 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa
>  			per_cpu(timer_unstable_counter_workaround, i) = wa;
>  	}
>  
> +	if (wa->read_cntvct_el0 || wa->read_cntpct_el0)
> +		atomic_set(&timer_unstable_counter_workaround_in_use, 1);
> +
>  	/*
>  	 * Don't use the vdso fastpath if errata require using the
>  	 * out-of-line counter accessor. We may change our mind pretty
> @@ -606,9 +630,15 @@ static bool arch_timer_this_cpu_has_cntvct_wa(void)
>  {
>  	return has_erratum_handler(read_cntvct_el0);
>  }
> +
> +static bool arch_timer_counter_has_wa(void)
> +{
> +	return atomic_read(&timer_unstable_counter_workaround_in_use);
> +}
>  #else
>  #define arch_timer_check_ool_workaround(t,a)		do { } while(0)
>  #define arch_timer_this_cpu_has_cntvct_wa()		({false;})
> +#define arch_timer_counter_has_wa()			({false;})
>  #endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
>  
>  static __always_inline irqreturn_t timer_handler(const int access,
> @@ -957,12 +987,22 @@ static void __init arch_counter_register(unsigned type)
>  
>  	/* Register the CP15 based counter if we have one */
>  	if (type & ARCH_TIMER_TYPE_CP15) {
> +		u64 (*rd)(void);
> +
>  		if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
> -		    arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI)
> -			arch_timer_read_counter = arch_counter_get_cntvct;
> -		else
> -			arch_timer_read_counter = arch_counter_get_cntpct;
> +		    arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
> +			if (arch_timer_counter_has_wa())
> +				rd = arch_counter_get_cntvct_stable;
> +			else
> +				rd = arch_counter_get_cntvct;
> +		} else {
> +			if (arch_timer_counter_has_wa())
> +				rd = arch_counter_get_cntpct_stable;
> +			else
> +				rd = arch_counter_get_cntpct;
> +		}
>  
> +		arch_timer_read_counter = rd;
>  		clocksource_counter.archdata.vdso_direct = vdso_default;
>  	} else {
>  		arch_timer_read_counter = arch_counter_get_cntvct_mem;
> --
> 2.20.1
>