[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20220716230953.858048083@linutronix.de>
Date: Sun, 17 Jul 2022 01:17:42 +0200 (CEST)
From: Thomas Gleixner <tglx@...utronix.de>
To: LKML <linux-kernel@...r.kernel.org>
Cc: x86@...nel.org, Linus Torvalds <torvalds@...ux-foundation.org>,
Tim Chen <tim.c.chen@...ux.intel.com>,
Josh Poimboeuf <jpoimboe@...nel.org>,
Andrew Cooper <Andrew.Cooper3@...rix.com>,
Pawan Gupta <pawan.kumar.gupta@...ux.intel.com>,
Johannes Wikner <kwikner@...z.ch>,
Alyssa Milburn <alyssa.milburn@...ux.intel.com>,
Jann Horn <jannh@...gle.com>, "H.J. Lu" <hjl.tools@...il.com>,
Joao Moreira <joao.moreira@...el.com>,
Joseph Nuzman <joseph.nuzman@...el.com>,
Steven Rostedt <rostedt@...dmis.org>
Subject: [patch 20/38] x86/alternatives: Provide text_poke_[copy|set]_locked()
The upcoming call thunk patching must hold text_mutex and needs access to
text_poke_copy() and text_poke_set(), which take text_mutex.
Provide _locked suffixed variants to expose the inner workings.
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
---
arch/x86/include/asm/text-patching.h | 2 +
arch/x86/kernel/alternative.c | 48 +++++++++++++++++++++--------------
2 files changed, 32 insertions(+), 18 deletions(-)
--- a/arch/x86/include/asm/text-patching.h
+++ b/arch/x86/include/asm/text-patching.h
@@ -45,6 +45,8 @@ extern void *text_poke(void *addr, const
extern void text_poke_sync(void);
extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len);
extern void *text_poke_copy(void *addr, const void *opcode, size_t len);
+extern void *text_poke_copy_locked(void *addr, const void *opcode, size_t len);
+extern void *text_poke_set_locked(void *addr, int c, size_t len);
extern void *text_poke_set(void *addr, int c, size_t len);
extern int poke_int3_handler(struct pt_regs *regs);
extern void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate);
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -1225,6 +1225,26 @@ void *text_poke_kgdb(void *addr, const v
return __text_poke(text_poke_memcpy, addr, opcode, len);
}
+void *text_poke_copy_locked(void *addr, const void *opcode, size_t len)
+{
+ unsigned long start = (unsigned long)addr;
+ size_t patched = 0;
+
+ if (WARN_ON_ONCE(core_kernel_text(start)))
+ return NULL;
+
+ while (patched < len) {
+ unsigned long ptr = start + patched;
+ size_t s;
+
+ s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched);
+
+ __text_poke(text_poke_memcpy, (void *)ptr, opcode + patched, s);
+ patched += s;
+ }
+ return addr;
+}
+
/**
* text_poke_copy - Copy instructions into (an unused part of) RX memory
* @addr: address to modify
@@ -1239,23 +1259,29 @@ void *text_poke_kgdb(void *addr, const v
*/
void *text_poke_copy(void *addr, const void *opcode, size_t len)
{
+ mutex_lock(&text_mutex);
+ addr = text_poke_copy_locked(addr, opcode, len);
+ mutex_unlock(&text_mutex);
+ return addr;
+}
+
+void *text_poke_set_locked(void *addr, int c, size_t len)
+{
unsigned long start = (unsigned long)addr;
size_t patched = 0;
if (WARN_ON_ONCE(core_kernel_text(start)))
return NULL;
- mutex_lock(&text_mutex);
while (patched < len) {
unsigned long ptr = start + patched;
size_t s;
s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched);
- __text_poke(text_poke_memcpy, (void *)ptr, opcode + patched, s);
+ __text_poke(text_poke_memset, (void *)ptr, (void *)&c, s);
patched += s;
}
- mutex_unlock(&text_mutex);
return addr;
}
@@ -1270,22 +1296,8 @@ void *text_poke_copy(void *addr, const v
*/
void *text_poke_set(void *addr, int c, size_t len)
{
- unsigned long start = (unsigned long)addr;
- size_t patched = 0;
-
- if (WARN_ON_ONCE(core_kernel_text(start)))
- return NULL;
-
mutex_lock(&text_mutex);
- while (patched < len) {
- unsigned long ptr = start + patched;
- size_t s;
-
- s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched);
-
- __text_poke(text_poke_memset, (void *)ptr, (void *)&c, s);
- patched += s;
- }
+ addr = text_poke_set_locked(addr, c, len);
mutex_unlock(&text_mutex);
return addr;
}
Powered by blists - more mailing lists