lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250923174903.76283-14-ada.coupriediaz@arm.com>
Date: Tue, 23 Sep 2025 18:49:00 +0100
From: Ada Couprie Diaz <ada.coupriediaz@....com>
To: linux-arm-kernel@...ts.infradead.org
Cc: Catalin Marinas <catalin.marinas@....com>,
	Will Deacon <will@...nel.org>,
	Marc Zyngier <maz@...nel.org>,
	Oliver Upton <oliver.upton@...ux.dev>,
	Ard Biesheuvel <ardb@...nel.org>,
	Joey Gouly <joey.gouly@....com>,
	Suzuki K Poulose <suzuki.poulose@....com>,
	Zenghui Yu <yuzenghui@...wei.com>,
	Andrey Ryabinin <ryabinin.a.a@...il.com>,
	Alexander Potapenko <glider@...gle.com>,
	Andrey Konovalov <andreyknvl@...il.com>,
	Dmitry Vyukov <dvyukov@...gle.com>,
	Vincenzo Frascino <vincenzo.frascino@....com>,
	linux-kernel@...r.kernel.org,
	kvmarm@...ts.linux.dev,
	kasan-dev@...glegroups.com,
	Mark Rutland <mark.rutland@....com>,
	Ada Couprie Diaz <ada.coupriediaz@....com>
Subject: [RFC PATCH 13/16] arm64/insn: introduce missing is_store/is_load helpers

The current helpers only cover single and pair load/stores.
Introduce new helpers to cover exclusive, load acquire, store release
and the LSE atomics, as the LSE atomics both load and store.

To gather all of them in one call, introduce `aarch64_insn_is_load()`,
`aarch64_insn_is_store()`, and `aarch64_insn_is_ldst()` helpers which
check if the instruction is a load, store or either.

Signed-off-by: Ada Couprie Diaz <ada.coupriediaz@....com>
---
Note: I made the LSE atomics part of the is_{load,store} helpers
as they are used as such by `aarch64_insn_encode_ldst_size()`,
but it could make sense to not have them in the helpers and just
call them together where needed.
---
 arch/arm64/include/asm/insn.h | 53 +++++++++++++++++++++++++++++++++++
 1 file changed, 53 insertions(+)

diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 4ba4d5c50137..44435eede1f3 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -520,6 +520,23 @@ static __always_inline bool aarch64_insn_is_barrier(u32 insn)
 	       aarch64_insn_is_pssbb(insn);
 }
 
+#ifdef CONFIG_ARM64_LSE_ATOMICS
+static __always_inline bool aarch64_insn_is_lse_atomic(u32 insn)
+{
+	return aarch64_insn_is_ldadd(insn) ||
+	       aarch64_insn_is_ldclr(insn) ||
+	       aarch64_insn_is_ldeor(insn) ||
+	       aarch64_insn_is_ldset(insn) ||
+	       aarch64_insn_is_swp(insn) ||
+	       aarch64_insn_is_cas(insn);
+}
+#else /* CONFIG_ARM64_LSE_ATOMICS */
+static __always_inline bool aarch64_insn_is_lse_atomic(u32 insn)
+{
+	return false;
+}
+#endif /* CONFIG_ARM64_LSE_ATOMICS */
+
 static __always_inline bool aarch64_insn_is_store_single(u32 insn)
 {
 	return aarch64_insn_is_store_imm(insn) ||
@@ -534,6 +551,21 @@ static __always_inline bool aarch64_insn_is_store_pair(u32 insn)
 	       aarch64_insn_is_stp_post(insn);
 }
 
+static __always_inline bool aarch64_insn_is_store_ex_or_rel(u32 insn)
+{
+	return aarch64_insn_is_store_ex(insn) ||
+	       aarch64_insn_is_store_ex(insn & (~BIT(15))) ||
+	       aarch64_insn_is_store_rel(insn);
+}
+
+static __always_inline bool aarch64_insn_is_store(u32 insn)
+{
+	return aarch64_insn_is_store_single(insn) ||
+	       aarch64_insn_is_store_pair(insn) ||
+	       aarch64_insn_is_store_ex_or_rel(insn) ||
+	       aarch64_insn_is_lse_atomic(insn);
+}
+
 static __always_inline bool aarch64_insn_is_load_single(u32 insn)
 {
 	return aarch64_insn_is_load_imm(insn) ||
@@ -548,6 +580,27 @@ static __always_inline bool aarch64_insn_is_load_pair(u32 insn)
 	       aarch64_insn_is_ldp_post(insn);
 }
 
+static __always_inline bool aarch64_insn_is_load_ex_or_acq(u32 insn)
+{
+	return aarch64_insn_is_load_ex(insn) ||
+	       aarch64_insn_is_load_ex(insn & (~BIT(15))) ||
+	       aarch64_insn_is_load_acq(insn);
+}
+
+static __always_inline bool aarch64_insn_is_load(u32 insn)
+{
+	return aarch64_insn_is_load_single(insn) ||
+	       aarch64_insn_is_load_pair(insn) ||
+	       aarch64_insn_is_load_ex_or_acq(insn) ||
+	       aarch64_insn_is_lse_atomic(insn);
+}
+
+static __always_inline bool aarch64_insn_is_ldst(u32 insn)
+{
+	return aarch64_insn_is_load(insn) ||
+	       aarch64_insn_is_store(insn);
+}
+
 static __always_inline bool aarch64_insn_uses_literal(u32 insn)
 {
 	/* ldr/ldrsw (literal), prfm */
-- 
2.43.0


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ