lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20251013143444.3999-39-david.kaplan@amd.com>
Date: Mon, 13 Oct 2025 09:34:26 -0500
From: David Kaplan <david.kaplan@....com>
To: Thomas Gleixner <tglx@...utronix.de>, Borislav Petkov <bp@...en8.de>,
	Peter Zijlstra <peterz@...radead.org>, Josh Poimboeuf <jpoimboe@...nel.org>,
	Pawan Gupta <pawan.kumar.gupta@...ux.intel.com>, Ingo Molnar
	<mingo@...hat.com>, Dave Hansen <dave.hansen@...ux.intel.com>,
	<x86@...nel.org>, "H . Peter Anvin" <hpa@...or.com>
CC: Alexander Graf <graf@...zon.com>, Boris Ostrovsky
	<boris.ostrovsky@...cle.com>, <linux-kernel@...r.kernel.org>
Subject: [RFC PATCH 38/56] x86/callthunks: Reset callthunks

Define functions to restore call sites back to their original bytes.
This is done by checking if each annotated call is pointing to the
expected thunk and if so, adjusting the call target to point back at the
original destination.

Signed-off-by: David Kaplan <david.kaplan@....com>
---
 arch/x86/include/asm/alternative.h |  7 +++
 arch/x86/include/asm/module.h      |  1 +
 arch/x86/kernel/callthunks.c       | 73 ++++++++++++++++++++++++++++++
 3 files changed, 81 insertions(+)

diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 936e555c13ce..00e60195d768 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -133,6 +133,10 @@ extern void callthunks_patch_module_calls(struct callthunk_sites *sites,
 					  struct module *mod);
 extern void *callthunks_translate_call_dest(void *dest);
 extern int x86_call_depth_emit_accounting(u8 **pprog, void *func, void *ip);
+#ifdef CONFIG_DYNAMIC_MITIGATIONS
+extern void reset_builtin_callthunks(void);
+extern void reset_module_callthunks(struct callthunk_sites *cs, struct module *mod);
+#endif
 #else
 static __always_inline void callthunks_patch_builtin_calls(void) {}
 static __always_inline void
@@ -147,6 +151,9 @@ static __always_inline int x86_call_depth_emit_accounting(u8 **pprog,
 {
 	return 0;
 }
+static __always_inline void reset_builtin_callthunks(void) {}
+static __always_inline void reset_module_callthunks(struct callthunk_sites *cs,
+						    struct module *mod) {}
 #endif
 
 #ifdef CONFIG_MITIGATION_ITS
diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
index d0c39b921408..58d7f1017a14 100644
--- a/arch/x86/include/asm/module.h
+++ b/arch/x86/include/asm/module.h
@@ -23,6 +23,7 @@ struct mod_arch_specific {
 	struct alt_site *alt_sites;
 	struct retpoline_site *retpoline_sites;
 	int num_retpoline_sites;
+	bool callthunks_initialized;
 #endif
 };
 
diff --git a/arch/x86/kernel/callthunks.c b/arch/x86/kernel/callthunks.c
index 758e655f36a8..3e6f00e19814 100644
--- a/arch/x86/kernel/callthunks.c
+++ b/arch/x86/kernel/callthunks.c
@@ -336,6 +336,10 @@ void noinline callthunks_patch_module_calls(struct callthunk_sites *cs,
 	mutex_lock(&text_mutex);
 	callthunks_setup(cs, &ct);
 	mutex_unlock(&text_mutex);
+
+#ifdef CONFIG_DYNAMIC_MITIGATIONS
+	mod->arch.callthunks_initialized = true;
+#endif
 }
 #endif /* CONFIG_MODULES */
 
@@ -381,3 +385,72 @@ static int __init callthunks_debugfs_init(void)
 }
 __initcall(callthunks_debugfs_init);
 #endif
+
+#ifdef CONFIG_DYNAMIC_MITIGATIONS
+/*
+ * Walk the annotated call-site table [start, end) and restore any call
+ * that currently targets a call-depth-tracking thunk back to its original
+ * destination (the address immediately past the thunk template).
+ * Sites outside @ct, sites whose destination cannot be decoded, and sites
+ * that do not match the thunk template are left untouched.
+ */
+static void reset_call_sites(s32 *start, s32 *end, const struct core_text *ct)
+{
+	s32 *s;
+
+	for (s = start; s < end; s++) {
+		void *dest;
+		u8 bytes[8];
+		u8 insn_buff[MAX_PATCH_LEN];
+		/* Table entries are self-relative offsets to the call site. */
+		void *addr = (void *)s + *s;
+
+		/* Only reset sites that lie within the text region @ct. */
+		if (!within_coretext(ct, addr))
+			continue;
+
+		dest = call_get_dest(addr);
+		if (!dest || WARN_ON_ONCE(IS_ERR(dest)))
+			continue;
+
+		/*
+		 * Render the thunk template relocated for 'dest' so it can
+		 * be byte-compared against the code the call points at.
+		 */
+		memcpy(insn_buff, skl_call_thunk_template, SKL_TMPL_SIZE);
+		text_poke_apply_relocation(insn_buff, dest, SKL_TMPL_SIZE,
+					skl_call_thunk_template, SKL_TMPL_SIZE);
+		/* Check for the thunk */
+		if (bcmp(dest, insn_buff, SKL_TMPL_SIZE))
+			continue;
+
+		/* Set new destination to be after the thunk */
+		dest += SKL_TMPL_SIZE;
+		__text_gen_insn(bytes, CALL_INSN_OPCODE, addr, dest, CALL_INSN_SIZE);
+		text_poke_early(addr, bytes, CALL_INSN_SIZE);
+	}
+}
+
+/*
+ * Reset every call site described by @cs that falls within the text
+ * region @ct, with debug tracing around the operation.
+ */
+static void callthunks_reset(struct callthunk_sites *cs, const struct core_text *ct)
+{
+	prdbg("Resetting call sites %s\n", ct->name);
+	reset_call_sites(cs->call_start, cs->call_end, ct);
+	prdbg("Resetting call sites done %s\n", ct->name);
+}
+
+/*
+ * Undo call thunk patching of the built-in kernel text. No-op unless
+ * thunks were previously installed (thunks_initialized); clears the flag
+ * afterwards so patching can be re-applied later.
+ *
+ * NOTE(review): no locking visible here — presumably the caller holds
+ * text_mutex or otherwise serializes against patching; confirm at the
+ * call site.
+ */
+void reset_builtin_callthunks(void)
+{
+	struct callthunk_sites cs = {
+		.call_start	= __call_sites,
+		.call_end	= __call_sites_end,
+	};
+
+	if (!thunks_initialized)
+		return;
+
+	callthunks_reset(&cs, &builtin_coretext);
+	thunks_initialized = false;
+}
+
+/*
+ * Undo call thunk patching for a module's text. Builds a core_text
+ * descriptor covering the module's MOD_TEXT region, then resets the call
+ * sites in @cs that fall inside it. No-op unless the module was
+ * previously patched (arch.callthunks_initialized); clears the flag
+ * afterwards.
+ */
+void reset_module_callthunks(struct callthunk_sites *cs, struct module *mod)
+{
+	struct core_text ct = {
+		.base = (unsigned long)mod->mem[MOD_TEXT].base,
+		.end  = (unsigned long)mod->mem[MOD_TEXT].base + mod->mem[MOD_TEXT].size,
+		.name = mod->name,
+	};
+
+	if (!mod->arch.callthunks_initialized)
+		return;
+
+	callthunks_reset(cs, &ct);
+	mod->arch.callthunks_initialized = false;
+}
+#endif
-- 
2.34.1


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ