Message-ID: <20240610204821.230388-5-torvalds@linux-foundation.org>
Date: Mon, 10 Jun 2024 13:48:18 -0700
From: Linus Torvalds <torvalds@...ux-foundation.org>
To: Peter Anvin <hpa@...or.com>,
Ingo Molnar <mingo@...nel.org>,
Borislav Petkov <bp@...en8.de>,
Thomas Gleixner <tglx@...utronix.de>,
Rasmus Villemoes <linux@...musvillemoes.dk>,
Josh Poimboeuf <jpoimboe@...nel.org>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>
Cc: Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
the arch/x86 maintainers <x86@...nel.org>,
linux-arm-kernel@...ts.infradead.org,
linux-arch <linux-arch@...r.kernel.org>,
Linus Torvalds <torvalds@...ux-foundation.org>
Subject: [PATCH 4/7] arm64: add 'runtime constant' support

This implements the runtime constant infrastructure for arm64, allowing
the dcache d_hash() function to be generated using the hash table address
as a constant, followed by a constant shift of the hash index.
Signed-off-by: Linus Torvalds <torvalds@...ux-foundation.org>
---
arch/arm64/include/asm/runtime-const.h | 75 ++++++++++++++++++++++++++
arch/arm64/kernel/vmlinux.lds.S        |  3 ++
2 files changed, 78 insertions(+)
create mode 100644 arch/arm64/include/asm/runtime-const.h
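
Note for reviewers (not part of the commit message): the read side of this
interface is consumed by the fs/dcache.c side of this series, roughly as
sketched below. The sketch is illustrative only - the exact signature and
surrounding code may differ - but dentry_hashtable and d_hash_shift are the
two symbols wired up by the vmlinux.lds.S hunk at the end of this patch.

	#include <linux/list_bl.h>
	#include <asm/runtime-const.h>

	/* As in fs/dcache.c: filled in during boot, constant afterwards */
	static unsigned int d_hash_shift __ro_after_init;
	static struct hlist_bl_head *dentry_hashtable __ro_after_init;

	static inline struct hlist_bl_head *d_hash(unsigned int hash)
	{
		/* the hash table base is a movz/movk immediate patched at boot ... */
		return runtime_const_ptr(dentry_hashtable) +
			/* ... and the shift count is patched into the lsr */
			runtime_const_shift_right_32(hash, d_hash_shift);
	}
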
diff --git a/arch/arm64/include/asm/runtime-const.h b/arch/arm64/include/asm/runtime-const.h
new file mode 100644
index 000000000000..02462b2cb6f9
--- /dev/null
+++ b/arch/arm64/include/asm/runtime-const.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RUNTIME_CONST_H
+#define _ASM_RUNTIME_CONST_H
+
+#define runtime_const_ptr(sym) ({				\
+	typeof(sym) __ret;					\
+	asm_inline("1:\t"					\
+		"movz %0, #0xcdef\n\t"				\
+		"movk %0, #0x89ab, lsl #16\n\t"			\
+		"movk %0, #0x4567, lsl #32\n\t"			\
+		"movk %0, #0x0123, lsl #48\n\t"			\
+		".pushsection runtime_ptr_" #sym ",\"a\"\n\t"	\
+		".long 1b - .\n\t"				\
+		".popsection"					\
+		:"=r" (__ret));					\
+	__ret; })
+
+#define runtime_const_shift_right_32(val, sym) ({		\
+	unsigned long __ret;					\
+	asm_inline("1:\t"					\
+		"lsr %w0,%w1,#12\n\t"				\
+		".pushsection runtime_shift_" #sym ",\"a\"\n\t"	\
+		".long 1b - .\n\t"				\
+		".popsection"					\
+		:"=r" (__ret)					\
+		:"r" (0u+(val)));				\
+	__ret; })
+
+#define runtime_const_init(type, sym) do {		\
+	extern s32 __start_runtime_##type##_##sym[];	\
+	extern s32 __stop_runtime_##type##_##sym[];	\
+	runtime_const_fixup(__runtime_fixup_##type,	\
+		(unsigned long)(sym),			\
+		__start_runtime_##type##_##sym,		\
+		__stop_runtime_##type##_##sym);		\
+} while (0)
+
+// 16-bit immediate for wide move (movz and movk) in bits 5..20
+static inline void __runtime_fixup_16(unsigned int *p, unsigned int val)
+{
+	unsigned int insn = *p;
+	insn &= 0xffe0001f;
+	insn |= (val & 0xffff) << 5;
+	*p = insn;
+}
+
+static inline void __runtime_fixup_ptr(void *where, unsigned long val)
+{
+	unsigned int *p = lm_alias(where);
+	__runtime_fixup_16(p, val);
+	__runtime_fixup_16(p+1, val >> 16);
+	__runtime_fixup_16(p+2, val >> 32);
+	__runtime_fixup_16(p+3, val >> 48);
+}
+
+// Immediate value is 6 bits starting at bit #16
+static inline void __runtime_fixup_shift(void *where, unsigned long val)
+{
+	unsigned int *p = lm_alias(where);
+	unsigned int insn = *p;
+	insn &= 0xffc0ffff;
+	insn |= (val & 63) << 16;
+	*p = insn;
+}
+
+static inline void runtime_const_fixup(void (*fn)(void *, unsigned long),
+	unsigned long val, s32 *start, s32 *end)
+{
+	while (start < end) {
+		fn(*start + (void *)start, val);
+		start++;
+	}
+}
+
+#endif
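
[ Aside, not part of the patch: the fixup helpers are pure bit manipulation
  and can be sanity-checked entirely in userspace. Below is a minimal
  standalone sketch that mirrors __runtime_fixup_16()/__runtime_fixup_ptr()
  above; the instruction words carry only the imm16 fields (bits 5..20),
  with the opcode bits left zero for clarity, and 0xffff800012345678 is
  just an example value. ]

	#include <stdint.h>
	#include <stdio.h>

	/* Same operation as __runtime_fixup_16(): replace bits 5..20 */
	static void fixup_16(uint32_t *p, uint32_t val)
	{
		uint32_t insn = *p;
		insn &= 0xffe0001f;		/* clear the imm16 field */
		insn |= (val & 0xffff) << 5;	/* insert the new 16-bit chunk */
		*p = insn;
	}

	/* Same operation as __runtime_fixup_ptr(): patch movz + movk*3 */
	static void fixup_ptr(uint32_t *p, uint64_t val)
	{
		fixup_16(p + 0, val);		/* movz: bits  0..15 */
		fixup_16(p + 1, val >> 16);	/* movk: bits 16..31 */
		fixup_16(p + 2, val >> 32);	/* movk: bits 32..47 */
		fixup_16(p + 3, val >> 48);	/* movk: bits 48..63 */
	}

	int main(void)
	{
		/* imm16 fields of the 0x0123456789abcdef placeholder sequence */
		uint32_t insn[4] = {
			0xcdefu << 5, 0x89abu << 5, 0x4567u << 5, 0x0123u << 5
		};
		uint64_t target = 0xffff800012345678ull;
		uint64_t out = 0;
		int i;

		fixup_ptr(insn, target);

		/* reassemble the four immediates; should print the target */
		for (i = 0; i < 4; i++)
			out |= (uint64_t)((insn[i] >> 5) & 0xffff) << (16 * i);
		printf("patched value: %#llx\n", (unsigned long long)out);
		return 0;
	}
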
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 755a22d4f840..55a8e310ea12 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -264,6 +264,9 @@ SECTIONS
 		EXIT_DATA
 	}
+	RUNTIME_CONST(shift, d_hash_shift)
+	RUNTIME_CONST(ptr, dentry_hashtable)
+
 	PERCPU_SECTION(L1_CACHE_BYTES)
 	HYPERVISOR_PERCPU_SECTION
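
[ For completeness, also not part of this patch: the two RUNTIME_CONST()
  entries collect the runtime_shift_d_hash_shift and
  runtime_ptr_dentry_hashtable sections emitted by the .pushsection
  directives in the header, bracketed by the __start_runtime_* and
  __stop_runtime_* symbols that runtime_const_init() walks. The boot-time
  patching then ends up looking roughly like the sketch below; the helper
  name is made up, and in the real series the calls live in the dcache
  setup code, after dentry_hashtable and d_hash_shift have their final
  values. ]

	/* Sketch only: patch every recorded instruction with the final values */
	static void __init patch_dcache_runtime_constants(void)
	{
		/* rewrite each lsr immediate recorded in runtime_shift_d_hash_shift */
		runtime_const_init(shift, d_hash_shift);
		/* rewrite each movz/movk group recorded in runtime_ptr_dentry_hashtable */
		runtime_const_init(ptr, dentry_hashtable);
	}
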
--
2.45.1.209.gc6f12300df