lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20200325220700.GZ2452@worktop.programming.kicks-ass.net>
Date:   Wed, 25 Mar 2020 23:07:00 +0100
From:   Peter Zijlstra <peterz@...radead.org>
To:     Linus Torvalds <torvalds@...ux-foundation.org>
Cc:     Peter Anvin <hpa@...or.com>, Andy Lutomirski <luto@...capital.net>,
        the arch/x86 maintainers <x86@...nel.org>,
        Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
        Steven Rostedt <rostedt@...dmis.org>,
        Masami Hiramatsu <mhiramat@...nel.org>,
        Daniel Bristot de Oliveira <bristot@...hat.com>,
        Jason Baron <jbaron@...mai.com>,
        Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...nel.org>, Nadav Amit <namit@...are.com>,
        Andrew Lutomirski <luto@...nel.org>,
        Ard Biesheuvel <ard.biesheuvel@...aro.org>,
        Josh Poimboeuf <jpoimboe@...hat.com>
Subject: Re: [RESEND][PATCH v3 14/17] static_call: Add static_cond_call()

On Wed, Mar 25, 2020 at 01:52:00PM -0700, Linus Torvalds wrote:
> On Wed, Mar 25, 2020 at 12:35 PM <hpa@...or.com> wrote:
> >
> > "movl $0,%eax" is five bytes, the same length as a call. Doesn't work for a tailcall, still, although if the sequence:
> >
> >     jmp tailcall
> >     retq
> >
> > ... can be generated at the tailcall site then the jmp can get patched out.
> 
> No, the problem is literally that the whole approach depends on the
> compiler just generating normal code for the static calls.
> 
> And the tailcall is the only interesting case. The normal call thing
> can be trivially just a single instruction (a mov like you say, but
> also easily just a xor padded with prefixes).

So I got the text poking bit written, and that turned out to be the
simple part :/ Find below.

Then we can do:

#define static_void_call(name) \
	if (STATIC_CALL_NAME(name).func) \
		((typeof(STATIC_CALL_TRAMP(name))*)STATIC_CALL_NAME(name).func)

Which works, as evidenced by it being the current static_cond_call(),
but it is non-optimal code-gen for the case where func will never be
NULL, and also there is no way to write a !void version of the same.

The best I can come up with is something like:

#define static_call(name, args...) ({ \
	typeof(STATIC_CALL_TRAMP(name)(args)) __ret = (typeof(STATIC_CALL_TRAMP(name)(args)))0; \
	if (STATIC_CALL_NAME(name).func) \
		__ret = ((typeof(STATIC_CALL_TRAMP(name))*)STATIC_CALL_NAME(name).func)(args); \
	__ret; })

Which has a different (and IMO less natural) syntax.

That then brings us to the HAVE_STATIC_CALL variant; there we need to
somehow make the void vs !void thing persistent, and there I ran out of
ideas.

Initially I figured we could do something like:

#define annotate_void_call() ({ \
	asm volatile("%c0:\n\t" \
		     ".pushsection .discard.void_call\n\t" \
		     ".long %c0b - .\n\t" \
		     ".popsection\n\t" : : "i" (__COUNTER__)); \
})

#define static_void_call(name) \
	annotate_void_call(); \
	STATIC_CALL_TRAMP(name)

But that doesn't actually work for something like:

	static_void_call(foo)(static_call(bar)());

Where the argument setup of the call includes another static call.
Arguably this is quite insane, and we could just say:
"don't-do-that-then", but it does show how fragile this is.

Anyway, let me ponder this a little more... brain is starting to give
out anyway. More tomorrow.


---
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 346c98d5261e..240996338f66 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -947,6 +947,7 @@ struct text_poke_loc {
 	s32 rel32;
 	u8 opcode;
 	const u8 text[POKE_MAX_OPCODE_SIZE];
+	u8 multi;
 };

 struct bp_patching_desc {
@@ -1103,8 +1104,8 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
 		.refs = ATOMIC_INIT(1),
 	};
 	unsigned char int3 = INT3_INSN_OPCODE;
+	int do_sync, do_multi = 0;
 	unsigned int i;
-	int do_sync;

 	lockdep_assert_held(&text_mutex);

@@ -1119,11 +1120,24 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
 	/*
 	 * First step: add a int3 trap to the address that will be patched.
 	 */
-	for (i = 0; i < nr_entries; i++)
+	for (i = 0; i < nr_entries; i++) {
 		text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
+		do_multi |= tp[i].multi;
+	}

 	text_poke_sync();

+	if (do_multi) {
+		/*
+		 * In case the 'old' text consisted of multiple instructions
+		 * we need to wait for an rcu_tasks quiescence period to ensure
+		 * all potentially preempted tasks have normally scheduled.
+		 * This ensures no tasks still have their instruction pointer
+		 * pointed at what will become the middle of an instruction.
+		 */
+		synchronize_rcu_tasks();
+	}
+
 	/*
 	 * Second step: update all but the first byte of the patched range.
 	 */
@@ -1176,10 +1190,28 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
 {
 	struct insn insn;

+	/*
+	 * Determine if the 'old' text at @addr consists of multiple
+	 * instructions. Make an exception for INT3 and RET, since
+	 * they don't (necessarily) continue to execute the following
+	 * instructions.
+	 */
+	kernel_insn_init(&insn, addr, MAX_INSN_SIZE);
+	insn_get_length(&insn);
+	tp->multi = (insn.length < len) &&
+		   (insn.opcode.bytes[0] != RET_INSN_OPCODE &&
+		    insn.opcode.bytes[0] != INT3_INSN_OPCODE);
+
+	/*
+	 * Copy the 'new' text into the text_poke vector.
+	 */
 	memcpy((void *)tp->text, opcode, len);
 	if (!emulate)
 		emulate = opcode;

+	/*
+	 * Decode the instruction poke_int3_handler() needs to emulate.
+	 */
 	kernel_insn_init(&insn, emulate, MAX_INSN_SIZE);
 	insn_get_length(&insn);

diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
index 1e82e2486e76..2055e2d3674d 100644
--- a/arch/x86/kernel/static_call.c
+++ b/arch/x86/kernel/static_call.c
@@ -9,8 +9,13 @@ enum insn_type {
 	nop = 1,  /* site cond-call */
 	jmp = 2,  /* tramp / site tail-call */
 	ret = 3,  /* tramp / site cond-tail-call */
+	null = 4,
+	null_ret = 5,
 };

+static const u8 null_insn[5] =     { 0xb8, 0x00, 0x00, 0x00, 0x00 }; /* movl $0, %eax */
+static const u8 null_ret_insn[5] = { 0x31, 0xc0, 0xc3, 0x90, 0x90 }; /* xorl %eax, %eax; ret; nop; nop; */
+
 static void __ref __static_call_transform(void *insn, enum insn_type type, void *func)
 {
 	int size = CALL_INSN_SIZE;
@@ -34,6 +39,14 @@ static void __ref __static_call_transform(void *insn, enum insn_type type, void
 		size = RET_INSN_SIZE;
 		break;

+	case null:
+		code = null_insn;
+		break;
+
+	case null_ret:
+		code = null_ret_insn;
+		break;
+
 	default: /* GCC is a moron -- it figures @code can be uninitialized below */
 		BUG();
 	}

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ