[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20251013143444.3999-38-david.kaplan@amd.com>
Date: Mon, 13 Oct 2025 09:34:25 -0500
From: David Kaplan <david.kaplan@....com>
To: Thomas Gleixner <tglx@...utronix.de>, Borislav Petkov <bp@...en8.de>,
Peter Zijlstra <peterz@...radead.org>, Josh Poimboeuf <jpoimboe@...nel.org>,
Pawan Gupta <pawan.kumar.gupta@...ux.intel.com>, Ingo Molnar
<mingo@...hat.com>, Dave Hansen <dave.hansen@...ux.intel.com>,
<x86@...nel.org>, "H . Peter Anvin" <hpa@...or.com>
CC: Alexander Graf <graf@...zon.com>, Boris Ostrovsky
<boris.ostrovsky@...cle.com>, <linux-kernel@...r.kernel.org>
Subject: [RFC PATCH 37/56] x86/alternative: Reset alternatives
These functions reset the kernel code back to the original form it was in at
boot time. Retpoline and alternative bytes were stored when those sites were
first patched on boot. For returns, all returns are simply a jmp to
__x86_return_thunk, so patch that in instead.
Signed-off-by: David Kaplan <david.kaplan@....com>
---
arch/x86/include/asm/alternative.h | 7 +++
arch/x86/kernel/alternative.c | 76 ++++++++++++++++++++++++++++++
2 files changed, 83 insertions(+)
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 24a4afbf163b..936e555c13ce 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -97,6 +97,13 @@ extern int alternatives_patched;
struct module;
+#ifdef CONFIG_DYNAMIC_MITIGATIONS
+extern void reset_retpolines(s32 *start, s32 *end, struct module *mod);
+extern void reset_returns(s32 *start, s32 *end, struct module *mod);
+extern void reset_alternatives(struct alt_instr *start, struct alt_instr *end,
+ struct module *mod);
+#endif
+
struct alt_site {
u8 *pbytes;
u8 len;
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 17b93763d1be..b67116ae883c 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -3292,3 +3292,79 @@ void __ref smp_text_poke_single(void *addr, const void *opcode, size_t len, cons
smp_text_poke_batch_add(addr, opcode, len, emulate);
smp_text_poke_batch_finish();
}
+
+#ifdef CONFIG_DYNAMIC_MITIGATIONS
+/*
+ * Restore the original instruction bytes at every retpoline site in
+ * [start, end), undoing prior retpoline patching.  The saved bytes come
+ * from retpoline_sites for the core kernel, or mod->arch.retpoline_sites
+ * for a module (recorded when the sites were first patched at boot).
+ */
+void reset_retpolines(s32 *start, s32 *end, struct module *mod)
+{
+ s32 *s;
+ u32 idx = 0;
+ struct retpoline_site *sites;
+
+ /* Select the saved-bytes table: core kernel vs. module. */
+ if (!mod)
+ sites = retpoline_sites;
+ else
+ sites = mod->arch.retpoline_sites;
+
+ /* No saved bytes means the sites were never recorded; warn and bail. */
+ if (WARN_ON(!sites))
+ return;
+
+ for (s = start; s < end; s++, idx++) {
+ /* Each entry is a self-relative offset to the patch site. */
+ void *addr = (void *)s + *s;
+
+ if (!should_patch(addr, mod))
+ continue;
+ /*
+ * This indirect might have been removed due to a static call
+ * transform. If so, ignore it.
+ */
+ if (*(u8 *)addr == INT3_INSN_OPCODE)
+ continue;
+
+ /* A zero length means nothing was saved for this site; skip. */
+ if (sites[idx].len)
+ text_poke_early(addr, sites[idx].bytes, sites[idx].len);
+ }
+}
+
+/*
+ * Reset every return site in [start, end) to its boot-time form.  Unlike
+ * retpolines/alternatives, no per-site bytes are saved: every return is
+ * originally a jmp to __x86_return_thunk, so that instruction is simply
+ * regenerated and patched back in.
+ */
+void reset_returns(s32 *start, s32 *end, struct module *mod)
+{
+ s32 *s;
+
+ for (s = start; s < end; s++) {
+ /* Each entry is a self-relative offset to the return site. */
+ void *addr = (void *)s + *s;
+ u8 bytes[JMP32_INSN_SIZE];
+
+ if (!should_patch(addr, mod))
+ continue;
+
+ /* Generate jmp __x86_return_thunk */
+ __text_gen_insn(bytes, JMP32_INSN_OPCODE, addr,
+ &__x86_return_thunk, JMP32_INSN_SIZE);
+ text_poke_early(addr, bytes, JMP32_INSN_SIZE);
+ }
+}
+
+/*
+ * Restore the original instruction bytes at every alternative site in
+ * [start, end), undoing prior alternative patching.  The saved bytes come
+ * from alt_sites for the core kernel, or mod->arch.alt_sites for a module
+ * (recorded when the alternatives were first applied at boot).
+ */
+void reset_alternatives(struct alt_instr *start, struct alt_instr *end, struct module *mod)
+{
+ struct alt_instr *s;
+ u32 idx = 0;
+ struct alt_site *sites;
+
+ /* Select the saved-bytes table: core kernel vs. module. */
+ if (!mod)
+ sites = alt_sites;
+ else
+ sites = mod->arch.alt_sites;
+
+ /* No saved bytes means the sites were never recorded; warn and bail. */
+ if (WARN_ON(!sites))
+ return;
+
+ for (s = start; s < end; s++, idx++) {
+ u8 *addr = instr_va(s);
+
+ if (!should_patch(addr, mod))
+ continue;
+
+ /* A zero length means nothing was saved for this site; skip. */
+ if (sites[idx].len)
+ text_poke_early(addr, sites[idx].pbytes, sites[idx].len);
+ }
+}
+#endif
--
2.34.1
Powered by blists - more mailing lists