Message-Id: <92216dce371922b561cac40b7358cd285fce4979.1512109321.git.luto@kernel.org>
Date:   Thu, 30 Nov 2017 22:29:45 -0800
From:   Andy Lutomirski <luto@...nel.org>
To:     x86@...nel.org
Cc:     linux-kernel@...r.kernel.org, Borislav Petkov <bp@...en8.de>,
        Brian Gerst <brgerst@...il.com>,
        David Laight <David.Laight@...lab.com>,
        Kees Cook <keescook@...omium.org>,
        Peter Zijlstra <peterz@...radead.org>,
        Andy Lutomirski <luto@...nel.org>
Subject: [PATCH 5/6] Fixup "x86/entry/64: Move the IST stacks into cpu_entry_area"

I'm not entirely certain, but I suspect this caused the last kbuild
bot error.  I wasn't able to reproduce it, but it seems plausible.

Add to the commit log:

The IST stacks are unlike the rest of cpu_entry_area: they're used
even for entries from kernel mode.  This means that they should be
set up before we load the final IDT.  Since the kernel sets up all
possible CPUs' percpu areas early during boot on the BP, move
cpu_entry_area setup to trap_init() and do it for all CPUs at once.
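
For reference, here's a rough sketch of the boot ordering this relies
on (simplified and not part of the patch; the exact call sites live in
init/main.c):

	start_kernel()
		setup_per_cpu_areas();		/* percpu space for all possible CPUs */
		...
		trap_init()
			setup_cpu_entry_areas();	/* this patch: map every CPU's area */
			idt_setup_traps();		/* gates installed after the areas exist */

Since setup_per_cpu_areas() has already run by the time trap_init()
is called, walking for_each_possible_cpu() there is safe, and the IST
stacks are mapped before any IST-using IDT entry can fire.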

Signed-off-by: Andy Lutomirski <luto@...nel.org>
---
 arch/x86/include/asm/fixmap.h |  2 ++
 arch/x86/kernel/cpu/common.c  | 26 +++++++++++++++++++-------
 arch/x86/kernel/traps.c       |  6 ++++++
 3 files changed, 27 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 5a1013df456e..9a4caed665fd 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -242,5 +242,7 @@ static inline struct SYSENTER_stack *cpu_SYSENTER_stack(int cpu)
 	return &get_cpu_entry_area((cpu))->tss.SYSENTER_stack;
 }
 
+extern void setup_cpu_entry_areas(void);
+
 #endif /* !__ASSEMBLY__ */
 #endif /* _ASM_X86_FIXMAP_H */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 1509f09abf5e..c0f11a684acf 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -490,7 +490,8 @@ void load_percpu_segment(int cpu)
 	load_stack_canary_segment();
 }
 
-static void set_percpu_fixmap_pages(int fixmap_index, void *ptr, int pages, pgprot_t prot)
+static void __init
+set_percpu_fixmap_pages(int fixmap_index, void *ptr, int pages, pgprot_t prot)
 {
 	int i;
 
@@ -520,7 +521,7 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
 #endif
 
 /* Setup the fixmap mappings only once per-processor */
-static inline void setup_cpu_entry_area(int cpu)
+static void __init setup_cpu_entry_area(int cpu)
 {
 #ifdef CONFIG_X86_64
 	extern char _entry_trampoline[];
@@ -569,7 +570,7 @@ static inline void setup_cpu_entry_area(int cpu)
 				PAGE_KERNEL);
 
 #ifdef CONFIG_X86_32
-	this_cpu_write(cpu_entry_area, get_cpu_entry_area(cpu));
+	per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
 #endif
 
 #ifdef CONFIG_X86_64
@@ -586,6 +587,21 @@ static inline void setup_cpu_entry_area(int cpu)
 #endif
 }
 
+void __init setup_cpu_entry_areas(void)
+{
+	int cpu;
+
+	/*
+	 * For better or for worse, the kernel allocates percpu space
+	 * for all possible CPUs early in BP startup.  Map every CPU's
+	 * cpu_entry_area right off the bat so that they're available
+	 * before anything in AP boot could need them.
+	 */
+	for_each_possible_cpu(cpu) {
+		setup_cpu_entry_area(cpu);
+	}
+}
+
 /* Load the original GDT from the per-cpu structure */
 void load_direct_gdt(int cpu)
 {
@@ -1658,8 +1674,6 @@ void cpu_init(void)
 	initialize_tlbstate_and_flush();
 	enter_lazy_tlb(&init_mm, me);
 
-	setup_cpu_entry_area(cpu);
-
 	/*
 	 * Initialize the TSS.  sp0 points to the entry trampoline stack
 	 * regardless of what task is running.
@@ -1718,8 +1732,6 @@ void cpu_init(void)
 	initialize_tlbstate_and_flush();
 	enter_lazy_tlb(&init_mm, curr);
 
-	setup_cpu_entry_area(cpu);
-
 	/*
 	 * Initialize the TSS.  Don't bother initializing sp0, as the initial
 	 * task never enters user mode.
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 61e26b03afd8..b70aec60ebbd 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -946,6 +946,12 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
 
 void __init trap_init(void)
 {
+	/*
+	 * We need cpu_entry_area working before any IST-using entries could
+	 * happen.
+	 */
+	setup_cpu_entry_areas();
+
 	idt_setup_traps();
 
 	/*
-- 
2.13.6
