Message-ID: <e35a5b13-ea12-42d2-b569-ff1d69ef58d1@email.android.com>
Date:	Wed, 27 Oct 2010 15:24:05 -0700
From:	"H. Peter Anvin" <hpa@...or.com>
To:	Jason Baron <jbaron@...hat.com>, rostedt@...dmis.org, mingo@...e.hu
CC:	mathieu.desnoyers@...ymtl.ca, tglx@...utronix.de,
	andi@...stfloor.org, roland@...hat.com, rth@...hat.com,
	masami.hiramatsu.pt@...achi.com, fweisbec@...il.com,
	avi@...hat.com, davem@...emloft.net, vgoyal@...hat.com,
	sam@...nborg.org, tony@...eyournoodle.com,
	ddaney@...iumnetworks.com, dsd@...top.org,
	linux-kernel@...r.kernel.org
Subject: Re: [PATCH 1/2] move arch_init_ideal_nop5 later

The exception test is broken anyway for reasons I already explained, so it would be better to just drop it.
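For illustration, dropping the runtime exception test would mean picking the 5-byte NOP from CPU feature bits instead of executing it and catching a possible fault. The following is only a minimal sketch of that idea, not the posted patch: it reuses ideal_nop5 and IDEAL_NOP_SIZE_5 from the patch quoted below, assumes X86_FEATURE_NOPL (declared in <asm/cpufeature.h>) indicates support for the 0f 1f long-NOP encoding, and hard-codes the byte sequences rather than relying on any particular nop macro.

/*
 * Minimal sketch, not the posted patch: choose the 5-byte NOP from CPU
 * feature bits instead of probing it and relying on the exception table.
 * X86_FEATURE_NOPL is assumed to mean the 0f 1f /0 long NOP is supported.
 * Assumes <asm/cpufeature.h> and <linux/string.h> are already included.
 */
static void __init arch_init_ideal_nop5(void)
{
	/* 0f 1f 44 00 00: P6-style 5-byte NOPL */
	static const unsigned char p6_nop5[] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };
	/* 66 66 66 66 90: operand-size-prefixed single-byte NOP fallback */
	static const unsigned char prefixed_nop5[] = { 0x66, 0x66, 0x66, 0x66, 0x90 };

	if (boot_cpu_has(X86_FEATURE_NOPL))
		memcpy(ideal_nop5, p6_nop5, IDEAL_NOP_SIZE_5);
	else
		memcpy(ideal_nop5, prefixed_nop5, IDEAL_NOP_SIZE_5);
}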

"Jason Baron" <jbaron@...hat.com> wrote:

>arch_init_ideal_nop5() was being called from setup_arch() before
>the exception table was set up. Move it later, into
>alternative_instructions().
>
>Fixes a boot hang on OLPC's XO-1 laptop, which is based on the
>Geode LX processor.
>
>
>Reported-by: Daniel Drake <dsd@...top.org>
>Signed-off-by: Jason Baron <jbaron@...hat.com>
>---
> arch/x86/include/asm/alternative.h |    1 -
> arch/x86/kernel/alternative.c      |  132 ++++++++++++++++++------------------
> arch/x86/kernel/setup.c            |    6 --
> 3 files changed, 67 insertions(+), 72 deletions(-)
>
>diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
>index 76561d2..2a7f618 100644
>--- a/arch/x86/include/asm/alternative.h
>+++ b/arch/x86/include/asm/alternative.h
>@@ -186,7 +186,6 @@ extern void *text_poke_smp(void *addr, const void *opcode, size_t len);
> #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
> #define IDEAL_NOP_SIZE_5 5
> extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
>-extern void arch_init_ideal_nop5(void);
> #else
> static inline void arch_init_ideal_nop5(void) {}
> #endif
>diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
>index a36bb90..9f39a1c 100644
>--- a/arch/x86/kernel/alternative.c
>+++ b/arch/x86/kernel/alternative.c
>@@ -452,6 +452,71 @@ extern struct paravirt_patch_site __start_parainstructions[],
> 	__stop_parainstructions[];
> #endif	/* CONFIG_PARAVIRT */
> 
>+#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
>+
>+unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
>+
>+static void __init arch_init_ideal_nop5(void)
>+{
>+	extern const unsigned char ftrace_test_p6nop[];
>+	extern const unsigned char ftrace_test_nop5[];
>+	extern const unsigned char ftrace_test_jmp[];
>+	int faulted = 0;
>+
>+	/*
>+	 * There is no good nop for all x86 archs.
>+	 * We will default to using the P6_NOP5, but first we
>+	 * will test to make sure that the nop will actually
>+	 * work on this CPU. If it faults, we will then
>+	 * go to a lesser efficient 5 byte nop. If that fails
>+	 * we then just use a jmp as our nop. This isn't the most
>+	 * efficient nop, but we can not use a multi part nop
>+	 * since we would then risk being preempted in the middle
>+	 * of that nop, and if we enabled tracing then, it might
>+	 * cause a system crash.
>+	 *
>+	 * TODO: check the cpuid to determine the best nop.
>+	 */
>+	asm volatile (
>+		"ftrace_test_jmp:"
>+		"jmp ftrace_test_p6nop\n"
>+		"nop\n"
>+		"nop\n"
>+		"nop\n"  /* 2 byte jmp + 3 bytes */
>+		"ftrace_test_p6nop:"
>+		P6_NOP5
>+		"jmp 1f\n"
>+		"ftrace_test_nop5:"
>+		".byte 0x66,0x66,0x66,0x66,0x90\n"
>+		"1:"
>+		".section .fixup, \"ax\"\n"
>+		"2:	movl $1, %0\n"
>+		"	jmp ftrace_test_nop5\n"
>+		"3:	movl $2, %0\n"
>+		"	jmp 1b\n"
>+		".previous\n"
>+		_ASM_EXTABLE(ftrace_test_p6nop, 2b)
>+		_ASM_EXTABLE(ftrace_test_nop5, 3b)
>+		: "=r"(faulted) : "0" (faulted));
>+
>+	switch (faulted) {
>+	case 0:
>+		pr_info("converting mcount calls to 0f 1f 44 00 00\n");
>+		memcpy(ideal_nop5, ftrace_test_p6nop, IDEAL_NOP_SIZE_5);
>+		break;
>+	case 1:
>+		pr_info("converting mcount calls to 66 66 66 66 90\n");
>+		memcpy(ideal_nop5, ftrace_test_nop5, IDEAL_NOP_SIZE_5);
>+		break;
>+	case 2:
>+		pr_info("converting mcount calls to jmp . + 5\n");
>+		memcpy(ideal_nop5, ftrace_test_jmp, IDEAL_NOP_SIZE_5);
>+		break;
>+	}
>+
>+}
>+#endif
>+
> void __init alternative_instructions(void)
> {
> 	/* The patching is not fully atomic, so try to avoid local interruptions
>@@ -508,6 +573,8 @@ void __init alternative_instructions(void)
> 				(unsigned long)__smp_locks_end);
> 
> 	restart_nmi();
>+
>+	arch_init_ideal_nop5();
> }
> 
> /**
>@@ -641,68 +708,3 @@ void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
> 	__stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
> 	return addr;
> }
>-
>-#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
>-
>-unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
>-
>-void __init arch_init_ideal_nop5(void)
>-{
>-	extern const unsigned char ftrace_test_p6nop[];
>-	extern const unsigned char ftrace_test_nop5[];
>-	extern const unsigned char ftrace_test_jmp[];
>-	int faulted = 0;
>-
>-	/*
>-	 * There is no good nop for all x86 archs.
>-	 * We will default to using the P6_NOP5, but first we
>-	 * will test to make sure that the nop will actually
>-	 * work on this CPU. If it faults, we will then
>-	 * go to a lesser efficient 5 byte nop. If that fails
>-	 * we then just use a jmp as our nop. This isn't the most
>-	 * efficient nop, but we can not use a multi part nop
>-	 * since we would then risk being preempted in the middle
>-	 * of that nop, and if we enabled tracing then, it might
>-	 * cause a system crash.
>-	 *
>-	 * TODO: check the cpuid to determine the best nop.
>-	 */
>-	asm volatile (
>-		"ftrace_test_jmp:"
>-		"jmp ftrace_test_p6nop\n"
>-		"nop\n"
>-		"nop\n"
>-		"nop\n"  /* 2 byte jmp + 3 bytes */
>-		"ftrace_test_p6nop:"
>-		P6_NOP5
>-		"jmp 1f\n"
>-		"ftrace_test_nop5:"
>-		".byte 0x66,0x66,0x66,0x66,0x90\n"
>-		"1:"
>-		".section .fixup, \"ax\"\n"
>-		"2:	movl $1, %0\n"
>-		"	jmp ftrace_test_nop5\n"
>-		"3:	movl $2, %0\n"
>-		"	jmp 1b\n"
>-		".previous\n"
>-		_ASM_EXTABLE(ftrace_test_p6nop, 2b)
>-		_ASM_EXTABLE(ftrace_test_nop5, 3b)
>-		: "=r"(faulted) : "0" (faulted));
>-
>-	switch (faulted) {
>-	case 0:
>-		pr_info("converting mcount calls to 0f 1f 44 00 00\n");
>-		memcpy(ideal_nop5, ftrace_test_p6nop, IDEAL_NOP_SIZE_5);
>-		break;
>-	case 1:
>-		pr_info("converting mcount calls to 66 66 66 66 90\n");
>-		memcpy(ideal_nop5, ftrace_test_nop5, IDEAL_NOP_SIZE_5);
>-		break;
>-	case 2:
>-		pr_info("converting mcount calls to jmp . + 5\n");
>-		memcpy(ideal_nop5, ftrace_test_jmp, IDEAL_NOP_SIZE_5);
>-		break;
>-	}
>-
>-}
>-#endif
>diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
>index 0ac571d..850059d 100644
>--- a/arch/x86/kernel/setup.c
>+++ b/arch/x86/kernel/setup.c
>@@ -112,7 +112,6 @@
> #include <asm/numa_64.h>
> #endif
> #include <asm/mce.h>
>-#include <asm/alternative.h>
> 
> /*
> * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
>@@ -695,7 +694,6 @@ void __init setup_arch(char **cmdline_p)
> {
> 	int acpi = 0;
> 	int k8 = 0;
>-	unsigned long flags;
> 
> #ifdef CONFIG_X86_32
> 	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
>@@ -1055,10 +1053,6 @@ void __init setup_arch(char **cmdline_p)
> 	x86_init.oem.banner();
> 
> 	mcheck_init();
>-
>-	local_irq_save(flags);
>-	arch_init_ideal_nop5();
>-	local_irq_restore(flags);
> }
> 
> #ifdef CONFIG_X86_32
>-- 
>1.7.1

-- 
Sent from my mobile phone.  Please pardon any lack of formatting.
