Message-ID: <fca2a328-f2c4-4df3-9086-06b8a12b8da4@sifive.com>
Date: Fri, 10 Jan 2025 17:35:53 -0600
From: Samuel Holland <samuel.holland@...ive.com>
To: Clément Léger <cleger@...osinc.com>
Cc: Paul Walmsley <paul.walmsley@...ive.com>,
	Palmer Dabbelt <palmer@...belt.com>, Anup Patel <anup@...infault.org>,
	Atish Patra <atishp@...shpatra.org>, linux-riscv@...ts.infradead.org,
	linux-kernel@...r.kernel.org, kvm@...r.kernel.org,
	kvm-riscv@...ts.infradead.org
Subject: Re: [PATCH 2/6] riscv: request misaligned exception delegation from
	SBI

Hi Clément,

On 2025-01-06 9:48 AM, Clément Léger wrote:
> Now that the kernel can handle misaligned accesses in S-mode, request
> misaligned access exception delegation from SBI. This uses the FWFT SBI
> extension defined in SBI version 3.0.
>
> Signed-off-by: Clément Léger <cleger@...osinc.com>
> ---
> arch/riscv/include/asm/cpufeature.h | 1 +
> arch/riscv/kernel/traps_misaligned.c | 59 ++++++++++++++++++++++
> arch/riscv/kernel/unaligned_access_speed.c | 2 +
> 3 files changed, 62 insertions(+)
>
> diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
> index 4bd054c54c21..cd406fe37df8 100644
> --- a/arch/riscv/include/asm/cpufeature.h
> +++ b/arch/riscv/include/asm/cpufeature.h
> @@ -62,6 +62,7 @@ void __init riscv_user_isa_enable(void);
> 	_RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), _validate)
>
> bool check_unaligned_access_emulated_all_cpus(void);
> +void unaligned_access_init(void);
> #if defined(CONFIG_RISCV_SCALAR_MISALIGNED)
> void check_unaligned_access_emulated(struct work_struct *work __always_unused);
> void unaligned_emulation_finish(void);
> diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
> index 7cc108aed74e..4aca600527e9 100644
> --- a/arch/riscv/kernel/traps_misaligned.c
> +++ b/arch/riscv/kernel/traps_misaligned.c
> @@ -16,6 +16,7 @@
> #include <asm/entry-common.h>
> #include <asm/hwprobe.h>
> #include <asm/cpufeature.h>
> +#include <asm/sbi.h>
> #include <asm/vector.h>
>
> #define INSN_MATCH_LB 0x3
> @@ -689,3 +690,61 @@ bool check_unaligned_access_emulated_all_cpus(void)
> 	return false;
> }
> #endif
> +
> +#ifdef CONFIG_RISCV_SBI
> +
> +struct misaligned_deleg_req {
> +	bool enable;
> +	int error;
> +};
> +
> +static void
> +cpu_unaligned_sbi_request_delegation(void *arg)
> +{
> +	struct misaligned_deleg_req *req = arg;
> +	struct sbiret ret;
> +
> +	ret = sbi_ecall(SBI_EXT_FWFT, SBI_EXT_FWFT_SET,
> +			SBI_FWFT_MISALIGNED_EXC_DELEG, req->enable, 0, 0, 0, 0);
> +	if (ret.error)
> +		req->error = 1;
> +}
> +
> +static void unaligned_sbi_request_delegation(void)
> +{
> +	struct misaligned_deleg_req req = {true, 0};
> +
> +	on_each_cpu(cpu_unaligned_sbi_request_delegation, &req, 1);
> +	if (!req.error) {
> +		pr_info("SBI misaligned access exception delegation ok\n");
> +		/*
> +		 * Note that we don't have to take any specific action here, if
> +		 * the delegation is successful, then
> +		 * check_unaligned_access_emulated() will verify that indeed the
> +		 * platform traps on misaligned accesses.
> +		 */
> +		return;
> +	}
> +
> +	/*
> +	 * If at least delegation request failed on one hart, revert misaligned
> +	 * delegation for all harts, if we don't do that, we'll panic at
> +	 * misaligned delegation check time (see
> +	 * check_unaligned_access_emulated()).
> +	 */
> +	req.enable = false;
> +	req.error = 0;
> +	on_each_cpu(cpu_unaligned_sbi_request_delegation, &req, 1);
> +	if (req.error)
> +		panic("Failed to disable misaligned delegation for all CPUs\n");

This logic doesn't handle the case where the delegation request failed on every
CPU, so there's nothing to revert. This causes a panic in a KVM guest inside
qemu-softmmu (the host kernel detects MISALIGNED_SCALAR_FAST, so
unaligned_ctl_available() returns false, and all FWFT calls fail).
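
Something along these lines might work instead (a rough, untested sketch; the
atomic success/failure counters are just one way to tell the "nothing was
delegated" case apart from a partial failure, not something I've compiled):

struct misaligned_deleg_req {
	bool enable;
	atomic_t succeeded;
	atomic_t failed;
};

static void cpu_unaligned_sbi_request_delegation(void *arg)
{
	struct misaligned_deleg_req *req = arg;
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_FWFT, SBI_EXT_FWFT_SET,
			SBI_FWFT_MISALIGNED_EXC_DELEG, req->enable, 0, 0, 0, 0);
	if (ret.error)
		atomic_inc(&req->failed);
	else
		atomic_inc(&req->succeeded);
}

static void unaligned_sbi_request_delegation(void)
{
	/* Counters start at zero; only .enable needs an explicit value. */
	struct misaligned_deleg_req req = { .enable = true };

	on_each_cpu(cpu_unaligned_sbi_request_delegation, &req, 1);
	if (!atomic_read(&req.failed)) {
		pr_info("SBI misaligned access exception delegation ok\n");
		return;
	}

	/* Every hart failed (e.g. FWFT refused the request): nothing to revert. */
	if (!atomic_read(&req.succeeded))
		return;

	/*
	 * Mixed result: revert delegation on all harts so that
	 * check_unaligned_access_emulated() sees consistent behavior.
	 */
	req.enable = false;
	atomic_set(&req.succeeded, 0);
	atomic_set(&req.failed, 0);
	on_each_cpu(cpu_unaligned_sbi_request_delegation, &req, 1);
	if (atomic_read(&req.failed))
		panic("Failed to disable misaligned delegation for all CPUs\n");
}

With that, the all-failed case (like the KVM guest above) just returns and lets
check_unaligned_access_emulated() report whatever the platform actually does,
instead of panicking in the revert path.
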
Regards,
Samuel
> +
> +}
> +
> +void unaligned_access_init(void)
> +{
> +	if (sbi_probe_extension(SBI_EXT_FWFT) > 0)
> +		unaligned_sbi_request_delegation();
> +}
> +#else
> +void unaligned_access_init(void) {}
> +#endif
> diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c
> index 91f189cf1611..1e3166100837 100644
> --- a/arch/riscv/kernel/unaligned_access_speed.c
> +++ b/arch/riscv/kernel/unaligned_access_speed.c
> @@ -403,6 +403,8 @@ static int check_unaligned_access_all_cpus(void)
> {
> 	bool all_cpus_emulated, all_cpus_vec_unsupported;
>
> +	unaligned_access_init();
> +
> 	all_cpus_emulated = check_unaligned_access_emulated_all_cpus();
> 	all_cpus_vec_unsupported = check_vector_unaligned_access_emulated_all_cpus();
>