Message-Id: <20230704140924.315594-8-cleger@rivosinc.com>
Date:   Tue,  4 Jul 2023 16:09:22 +0200
From:   Clément Léger <cleger@...osinc.com>
To:     Paul Walmsley <paul.walmsley@...ive.com>,
        Palmer Dabbelt <palmer@...belt.com>,
        Albert Ou <aou@...s.berkeley.edu>
Cc:     Clément Léger <cleger@...osinc.com>,
        Stafford Horne <shorne@...il.com>,
        Brian Cain <bcain@...cinc.com>,
        Kefeng Wang <wangkefeng.wang@...wei.com>,
        "Russell King (Oracle)" <rmk+kernel@...linux.org.uk>,
        Michael Ellerman <mpe@...erman.id.au>,
        Sunil V L <sunilvl@...tanamicro.com>,
        Anup Patel <apatel@...tanamicro.com>,
        Atish Patra <atishp@...osinc.com>,
        Andrew Jones <ajones@...tanamicro.com>,
        Conor Dooley <conor.dooley@...rochip.com>,
        Heiko Stuebner <heiko@...ech.de>, Guo Ren <guoren@...nel.org>,
        Alexandre Ghiti <alexghiti@...osinc.com>,
        Masahiro Yamada <masahiroy@...nel.org>,
        Xianting Tian <xianting.tian@...ux.alibaba.com>,
        Sia Jee Heng <jeeheng.sia@...rfivetech.com>,
        Li Zhengyu <lizhengyu3@...wei.com>,
        Jisheng Zhang <jszhang@...nel.org>,
        "Gautham R. Shenoy" <gautham.shenoy@....com>,
        Mark Rutland <mark.rutland@....com>,
        Peter Zijlstra <peterz@...radead.org>,
        Marc Zyngier <maz@...nel.org>,
        Björn Töpel <bjorn@...osinc.com>,
        Krzysztof Kozlowski <krzysztof.kozlowski@...aro.org>,
        Evan Green <evan@...osinc.com>,
        linux-riscv@...ts.infradead.org, linux-kernel@...r.kernel.org
Subject: [RFC V2 PATCH 7/9] riscv: report misaligned accesses emulation to hwprobe

hwprobe provides a way to report whether misaligned accesses are
emulated. In order to correctly populate that feature, we can check
whether misaligned accesses actually trap when the SBI has delegated
misaligned access handling to us. The check is done from kernel mode by
performing a misaligned access and catching the resulting trap with an
exception table entry.
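
For reference, userspace consumes the value populated here through the
riscv_hwprobe() syscall. The snippet below is a minimal, illustrative
sketch (not part of this patch); it assumes the hwprobe uapi
definitions (<asm/hwprobe.h>, __NR_riscv_hwprobe,
RISCV_HWPROBE_KEY_CPUPERF_0 and the RISCV_HWPROBE_MISALIGNED_* values)
are available to userspace:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/hwprobe.h>

int main(void)
{
	struct riscv_hwprobe pair = {
		.key = RISCV_HWPROBE_KEY_CPUPERF_0,
	};

	/* One key/value pair, all online CPUs (cpusetsize = 0, cpus = NULL). */
	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
		return 1;

	if ((pair.value & RISCV_HWPROBE_MISALIGNED_MASK) ==
	    RISCV_HWPROBE_MISALIGNED_EMULATED)
		printf("misaligned accesses are emulated\n");

	return 0;
}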

Signed-off-by: Clément Léger <cleger@...osinc.com>
---
 arch/riscv/include/asm/cpufeature.h  |  2 ++
 arch/riscv/kernel/setup.c            |  2 ++
 arch/riscv/kernel/traps_misaligned.c | 32 ++++++++++++++++++++++++++++
 3 files changed, 36 insertions(+)

diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index 808d5403f2ac..7e968499db49 100644
--- a/arch/riscv/include/asm/cpufeature.h
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -20,4 +20,6 @@ DECLARE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
 
 DECLARE_PER_CPU(long, misaligned_access_speed);
 
+void __init misaligned_emulation_init(void);
+
 #endif
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 36b026057503..820a8158e4f7 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -23,6 +23,7 @@
 
 #include <asm/alternative.h>
 #include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
 #include <asm/cpu_ops.h>
 #include <asm/early_ioremap.h>
 #include <asm/pgtable.h>
@@ -284,6 +285,7 @@ void __init setup_arch(char **cmdline_p)
 
 	init_resources();
 	sbi_init();
+	misaligned_emulation_init();
 
 #ifdef CONFIG_KASAN
 	kasan_init();
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index 39ec6caa6234..243ef9314734 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -14,6 +14,8 @@
 #include <asm/ptrace.h>
 #include <asm/csr.h>
 #include <asm/entry-common.h>
+#include <asm/hwprobe.h>
+#include <asm/cpufeature.h>
 
 #define INSN_MATCH_LB			0x3
 #define INSN_MASK_LB			0x707f
@@ -441,3 +443,33 @@ int handle_misaligned_store(struct pt_regs *regs)
 
 	return 0;
 }
+
+void __init misaligned_emulation_init(void)
+{
+	int cpu;
+	unsigned long emulated = 1, tmp_var;
+
+	/* Temporarily disable the kernel's misaligned access emulation so the
+	 * probe below takes the exception table fixup instead of being emulated.
+	 */
+	unaligned_enabled = 0;
+
+	__asm__ __volatile__ (
+		"1:\n"
+		"	ld %[tmp], 1(%[ptr])\n"
+		"	li %[emulated], 0\n"
+		"2:\n"
+		_ASM_EXTABLE(1b, 2b)
+	: [emulated] "+r" (emulated), [tmp] "=r" (tmp_var)
+	: [ptr] "r" (&tmp_var)
+	: "memory" );
+
+	unaligned_enabled = 1;
+	if (!emulated)
+		return;
+
+	for_each_possible_cpu(cpu) {
+		per_cpu(misaligned_access_speed, cpu) =
+					RISCV_HWPROBE_MISALIGNED_EMULATED;
+	}
+}
-- 
2.40.1
