Message-ID: <20240614-padded-mammal-d956735c1293@wendy>
Date: Fri, 14 Jun 2024 09:22:47 +0100
From: Conor Dooley <conor.dooley@...rochip.com>
To: Jesse Taube <jesse@...osinc.com>
CC: <linux-riscv@...ts.infradead.org>, Jonathan Corbet <corbet@....net>, Paul
Walmsley <paul.walmsley@...ive.com>, Palmer Dabbelt <palmer@...belt.com>,
Albert Ou <aou@...s.berkeley.edu>, Conor Dooley <conor@...nel.org>, Rob
Herring <robh@...nel.org>, Krzysztof Kozlowski <krzk+dt@...nel.org>,
Clément Léger <cleger@...osinc.com>, Evan Green
<evan@...osinc.com>, Andrew Jones <ajones@...tanamicro.com>, Charlie Jenkins
<charlie@...osinc.com>, Xiao Wang <xiao.w.wang@...el.com>, Andy Chiu
<andy.chiu@...ive.com>, Eric Biggers <ebiggers@...gle.com>, Greentime Hu
<greentime.hu@...ive.com>, Björn Töpel
<bjorn@...osinc.com>, Heiko Stuebner <heiko@...ech.de>, Costa Shulyupin
<costa.shul@...hat.com>, Andrew Morton <akpm@...ux-foundation.org>, Baoquan
He <bhe@...hat.com>, Anup Patel <apatel@...tanamicro.com>, Zong Li
<zong.li@...ive.com>, Sami Tolvanen <samitolvanen@...gle.com>, Ben Dooks
<ben.dooks@...ethink.co.uk>, Alexandre Ghiti <alexghiti@...osinc.com>,
"Gustavo A. R. Silva" <gustavoars@...nel.org>, Erick Archer
<erick.archer@....com>, Joel Granados <j.granados@...sung.com>,
<linux-doc@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<devicetree@...r.kernel.org>
Subject: Re: [PATCH v2 3/6] RISC-V: Check scalar unaligned access on all CPUs

On Thu, Jun 13, 2024 at 03:16:12PM -0400, Jesse Taube wrote:
> Originally, the check_unaligned_access_emulated_all_cpus function
> only checked the boot hart. This fixes the function to check all
> harts.

This seems like it should be split out and get a Fixes: tag & a cc:
stable.
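
For the split-out patch, the usual trailers would look something like
the below (the hash and subject here are only placeholders for whichever
commit introduced the boot-hart-only check):

  Fixes: 123456789abc ("<subject of the offending commit>")
  Cc: stable@vger.kernel.org
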
> Check for Zicclsm before checking for unaligned access. This will
> greatly reduce the boot up time as finding the access speed is no longer
> necessary.
>
> Signed-off-by: Jesse Taube <jesse@...osinc.com>
> ---
> V1 -> V2:
> - New patch
> ---
> arch/riscv/kernel/traps_misaligned.c | 23 ++++++----------------
> arch/riscv/kernel/unaligned_access_speed.c | 23 +++++++++++++---------
> 2 files changed, 20 insertions(+), 26 deletions(-)
>
> diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
> index b62d5a2f4541..8fadbe00dd62 100644
> --- a/arch/riscv/kernel/traps_misaligned.c
> +++ b/arch/riscv/kernel/traps_misaligned.c
> @@ -526,31 +526,17 @@ int handle_misaligned_store(struct pt_regs *regs)
> return 0;
> }
>
> -static bool check_unaligned_access_emulated(int cpu)
> +static void check_unaligned_access_emulated(struct work_struct *unused)
> {
> + int cpu = smp_processor_id();
> long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
> unsigned long tmp_var, tmp_val;
> - bool misaligned_emu_detected;
>
> *mas_ptr = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
>
> __asm__ __volatile__ (
> " "REG_L" %[tmp], 1(%[ptr])\n"
> : [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory");
> -
> - misaligned_emu_detected = (*mas_ptr == RISCV_HWPROBE_MISALIGNED_EMULATED);
> - /*
> - * If unaligned_ctl is already set, this means that we detected that all
> - * CPUS uses emulated misaligned access at boot time. If that changed
> - * when hotplugging the new cpu, this is something we don't handle.
> - */
> - if (unlikely(unaligned_ctl && !misaligned_emu_detected)) {
> - pr_crit("CPU misaligned accesses non homogeneous (expected all emulated)\n");
> - while (true)
> - cpu_relax();
> - }
> -
> - return misaligned_emu_detected;
> }
>
> bool check_unaligned_access_emulated_all_cpus(void)
> @@ -562,8 +548,11 @@ bool check_unaligned_access_emulated_all_cpus(void)
> * accesses emulated since tasks requesting such control can run on any
> * CPU.
> */
> + schedule_on_each_cpu(check_unaligned_access_emulated);
> +
> for_each_online_cpu(cpu)
> - if (!check_unaligned_access_emulated(cpu))
> + if (per_cpu(misaligned_access_speed, cpu)
> + != RISCV_HWPROBE_MISALIGNED_EMULATED)
> return false;
>
> unaligned_ctl = true;
> diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c
> index a9a6bcb02acf..70c1588fc353 100644
> --- a/arch/riscv/kernel/unaligned_access_speed.c
> +++ b/arch/riscv/kernel/unaligned_access_speed.c
> @@ -259,23 +259,28 @@ static int check_unaligned_access_speed_all_cpus(void)
> kfree(bufs);
> return 0;
> }
> +#endif /* CONFIG_RISCV_PROBE_UNALIGNED_ACCESS */
>
> static int check_unaligned_access_all_cpus(void)
> {
> - bool all_cpus_emulated = check_unaligned_access_emulated_all_cpus();
> + bool all_cpus_emulated;
> + int cpu;
>
> + if (riscv_has_extension_unlikely(RISCV_ISA_EXT_ZICCLSM)) {
> + for_each_online_cpu(cpu) {
> + per_cpu(misaligned_access_speed, cpu) = RISCV_HWPROBE_MISALIGNED_FAST;
> + }
> + return 0;
> + }
> +
> + all_cpus_emulated = check_unaligned_access_emulated_all_cpus();
> +
> +#ifdef CONFIG_RISCV_PROBE_UNALIGNED_ACCESS

Can we make this an IS_ENABLED() please?
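
Something along these lines, perhaps (untested sketch - it also assumes
check_unaligned_access_speed_all_cpus() stays visible to the compiler,
via a declaration or a stub, when the option is off, so that the
compile-time-false branch can be discarded):

        /* Speed probing is only built with CONFIG_RISCV_PROBE_UNALIGNED_ACCESS */
        if (!all_cpus_emulated &&
            IS_ENABLED(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS))
                return check_unaligned_access_speed_all_cpus();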

Thanks,
Conor.

> if (!all_cpus_emulated)
> return check_unaligned_access_speed_all_cpus();
> +#endif
>
> return 0;
> }
> -#else /* CONFIG_RISCV_PROBE_UNALIGNED_ACCESS */
> -static int check_unaligned_access_all_cpus(void)
> -{
> - check_unaligned_access_emulated_all_cpus();
> -
> - return 0;
> -}
> -#endif
>
> arch_initcall(check_unaligned_access_all_cpus);
> --
> 2.43.0
>