Message-ID: <20250210213549.1867704-3-cleger@rivosinc.com>
Date: Mon, 10 Feb 2025 22:35:35 +0100
From: Clément Léger <cleger@...osinc.com>
To: Paul Walmsley <paul.walmsley@...ive.com>,
Palmer Dabbelt <palmer@...belt.com>,
Anup Patel <anup@...infault.org>,
Atish Patra <atishp@...shpatra.org>,
Shuah Khan <shuah@...nel.org>,
Jonathan Corbet <corbet@....net>,
linux-riscv@...ts.infradead.org,
linux-kernel@...r.kernel.org,
linux-doc@...r.kernel.org,
kvm@...r.kernel.org,
kvm-riscv@...ts.infradead.org,
linux-kselftest@...r.kernel.org
Cc: Clément Léger <cleger@...osinc.com>,
Samuel Holland <samuel.holland@...ive.com>
Subject: [PATCH v2 02/15] riscv: misaligned: request misaligned exception from SBI

Now that the kernel can handle misaligned accesses in S-mode, request
misaligned access exception delegation from SBI. This uses the FWFT SBI
extension defined in SBI version 3.0.
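
For reference, here is a minimal sketch (not part of this patch) of what
the FWFT "set" call looks like from S-mode, using the SBI_EXT_FWFT,
SBI_EXT_FWFT_SET and SBI_FWFT_MISALIGNED_EXC_DELEG definitions from
<asm/sbi.h> added earlier in this series. The helper name below is purely
illustrative; the patch itself uses sbi_request_unaligned_delegation():

/*
 * Illustrative sketch only: ask the SBI implementation to delegate
 * misaligned access exceptions to S-mode via the FWFT extension
 * (SBI v3.0). The feature is per-hart, which is why the patch issues
 * the request on every CPU with on_each_cpu() and again from the CPU
 * hotplug online callback.
 */
static int fwft_enable_misaligned_deleg(void)
{
	struct sbiret ret;

	/* fwft_set(feature = MISALIGNED_EXC_DELEG, value = 1, flags = 0) */
	ret = sbi_ecall(SBI_EXT_FWFT, SBI_EXT_FWFT_SET,
			SBI_FWFT_MISALIGNED_EXC_DELEG, 1, 0, 0, 0, 0);

	return sbi_err_map_linux_errno(ret.error);
}
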
Signed-off-by: Clément Léger <cleger@...osinc.com>
---
arch/riscv/include/asm/cpufeature.h | 3 +-
arch/riscv/kernel/traps_misaligned.c | 96 +++++++++++++++++++++-
arch/riscv/kernel/unaligned_access_speed.c | 11 ++-
3 files changed, 105 insertions(+), 5 deletions(-)
diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index 569140d6e639..ad7d26788e6a 100644
--- a/arch/riscv/include/asm/cpufeature.h
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -64,8 +64,9 @@ void __init riscv_user_isa_enable(void);
_RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), _validate)
bool check_unaligned_access_emulated_all_cpus(void);
+void unaligned_access_init(void);
+int cpu_online_unaligned_access_init(unsigned int cpu);
#if defined(CONFIG_RISCV_SCALAR_MISALIGNED)
-void check_unaligned_access_emulated(struct work_struct *work __always_unused);
void unaligned_emulation_finish(void);
bool unaligned_ctl_available(void);
DECLARE_PER_CPU(long, misaligned_access_speed);
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index 7cc108aed74e..66eef398bad4 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -16,6 +16,7 @@
#include <asm/entry-common.h>
#include <asm/hwprobe.h>
#include <asm/cpufeature.h>
+#include <asm/sbi.h>
#include <asm/vector.h>
#define INSN_MATCH_LB 0x3
@@ -635,7 +636,7 @@ bool check_vector_unaligned_access_emulated_all_cpus(void)
static bool unaligned_ctl __read_mostly;
-void check_unaligned_access_emulated(struct work_struct *work __always_unused)
+static void check_unaligned_access_emulated(struct work_struct *work __always_unused)
{
int cpu = smp_processor_id();
long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
@@ -646,6 +647,13 @@ void check_unaligned_access_emulated(struct work_struct *work __always_unused)
__asm__ __volatile__ (
" "REG_L" %[tmp], 1(%[ptr])\n"
: [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory");
+}
+
+static int cpu_online_check_unaligned_access_emulated(unsigned int cpu)
+{
+ long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
+
+ check_unaligned_access_emulated(NULL);
/*
* If unaligned_ctl is already set, this means that we detected that all
@@ -654,9 +662,10 @@ void check_unaligned_access_emulated(struct work_struct *work __always_unused)
*/
if (unlikely(unaligned_ctl && (*mas_ptr != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED))) {
pr_crit("CPU misaligned accesses non homogeneous (expected all emulated)\n");
- while (true)
- cpu_relax();
+ return -EINVAL;
}
+
+ return 0;
}
bool check_unaligned_access_emulated_all_cpus(void)
@@ -688,4 +697,85 @@ bool check_unaligned_access_emulated_all_cpus(void)
{
return false;
}
+static int cpu_online_check_unaligned_access_emulated(unsigned int cpu)
+{
+ return 0;
+}
#endif
+
+#ifdef CONFIG_RISCV_SBI
+
+struct misaligned_deleg_req {
+ atomic_t error;
+};
+
+static int sbi_request_unaligned_delegation(void)
+{
+ struct sbiret ret;
+
+ ret = sbi_ecall(SBI_EXT_FWFT, SBI_EXT_FWFT_SET,
+ SBI_FWFT_MISALIGNED_EXC_DELEG, 1, 0, 0, 0, 0);
+
+ return sbi_err_map_linux_errno(ret.error);
+}
+
+static bool misaligned_traps_delegated;
+
+static int cpu_online_sbi_unaligned_setup(unsigned int cpu)
+{
+ if (sbi_request_unaligned_delegation() && misaligned_traps_delegated) {
+ pr_crit("Misaligned trap delegation non homogeneous (expected delegated)");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+cpu_unaligned_sbi_request_delegation(void *arg)
+{
+ struct misaligned_deleg_req *req = arg;
+
+ if (sbi_request_unaligned_delegation())
+ atomic_set(&req->error, 1);
+}
+
+static void unaligned_sbi_request_delegation(void)
+{
+ struct misaligned_deleg_req req = {.error = ATOMIC_INIT(0)};
+
+ on_each_cpu(cpu_unaligned_sbi_request_delegation, &req, 1);
+ if (atomic_read(&req.error) == 0) {
+ pr_info("SBI misaligned access exception delegation ok\n");
+ misaligned_traps_delegated = true;
+ /*
+ * Note that we don't have to take any specific action here, if
+ * the delegation is successful, then
+ * check_unaligned_access_emulated() will verify that indeed the
+ * platform traps on misaligned accesses.
+ */
+ }
+}
+
+void unaligned_access_init(void)
+{
+ if (sbi_probe_extension(SBI_EXT_FWFT) > 0)
+ unaligned_sbi_request_delegation();
+}
+#else
+void unaligned_access_init(void) {}
+
+static int cpu_online_sbi_unaligned_setup(unsigned int cpu __always_unused)
+{
+ return 0;
+}
+#endif
+
+int cpu_online_unaligned_access_init(unsigned int cpu)
+{
+ int ret = cpu_online_sbi_unaligned_setup(cpu);
+ if (ret)
+ return ret;
+
+ return cpu_online_check_unaligned_access_emulated(cpu);
+}
diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c
index 91f189cf1611..2f3aba073297 100644
--- a/arch/riscv/kernel/unaligned_access_speed.c
+++ b/arch/riscv/kernel/unaligned_access_speed.c
@@ -188,13 +188,20 @@ arch_initcall_sync(lock_and_set_unaligned_access_static_branch);
static int riscv_online_cpu(unsigned int cpu)
{
+ int ret;
static struct page *buf;
/* We are already set since the last check */
if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN)
goto exit;
- check_unaligned_access_emulated(NULL);
+ ret = cpu_online_unaligned_access_init(cpu);
+ if (ret)
+ return ret;
+
+ if (per_cpu(misaligned_access_speed, cpu) == RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED)
+ goto exit;
+
buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
if (!buf) {
pr_warn("Allocation failure, not measuring misaligned performance\n");
@@ -403,6 +410,8 @@ static int check_unaligned_access_all_cpus(void)
{
bool all_cpus_emulated, all_cpus_vec_unsupported;
+ unaligned_access_init();
+
all_cpus_emulated = check_unaligned_access_emulated_all_cpus();
all_cpus_vec_unsupported = check_vector_unaligned_access_emulated_all_cpus();
--
2.47.2