Message-ID: <tip-a4af767ae59cc579569bbfe49513a0037d5989ee@git.kernel.org>
Date:   Wed, 17 Apr 2019 07:09:32 -0700
From:   tip-bot for Thomas Gleixner <tipbot@...or.com>
To:     linux-tip-commits@...r.kernel.org
Cc:     peterz@...radead.org, dave.hansen@...ux.intel.com,
        jpoimboe@...hat.com, mingo@...nel.org, hpa@...or.com,
        sean.j.christopherson@...el.com, mingo@...hat.com,
        tglx@...utronix.de, bp@...e.de, x86@...nel.org,
        linux-kernel@...r.kernel.org, luto@...nel.org
Subject: [tip:x86/irq] x86/cpu_entry_area: Prepare for IST guard pages

Commit-ID:  a4af767ae59cc579569bbfe49513a0037d5989ee
Gitweb:     https://git.kernel.org/tip/a4af767ae59cc579569bbfe49513a0037d5989ee
Author:     Thomas Gleixner <tglx@...utronix.de>
AuthorDate: Sun, 14 Apr 2019 17:59:48 +0200
Committer:  Borislav Petkov <bp@...e.de>
CommitDate: Wed, 17 Apr 2019 12:58:21 +0200

x86/cpu_entry_area: Prepare for IST guard pages

To allow guard pages between the IST stacks, each stack needs to be
mapped individually.
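
The guard pages themselves are introduced by a follow-up change. As a
rough, illustrative sketch only (the struct and constant names below are
assumptions for illustration, not taken from this patch), the
cpu_entry_area side of the exception stacks would then look roughly like
this, so that an overflow of one IST stack faults on an unmapped guard
page instead of silently running into the neighbouring stack:

	/* Illustrative sketch, not part of this patch */
	#define EXCEPTION_STKSZ		PAGE_SIZE	/* assumed stack size for the sketch */

	struct cea_exception_stacks_sketch {
		char	DF_stack_guard[PAGE_SIZE];	/* left unmapped: guard page */
		char	DF_stack[EXCEPTION_STKSZ];	/* mapped via cea_map_stack(DF) */
		char	NMI_stack_guard[PAGE_SIZE];
		char	NMI_stack[EXCEPTION_STKSZ];
		char	DB_stack_guard[PAGE_SIZE];
		char	DB_stack[EXCEPTION_STKSZ];
		char	MCE_stack_guard[PAGE_SIZE];
		char	MCE_stack[EXCEPTION_STKSZ];
	};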

Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Signed-off-by: Borislav Petkov <bp@...e.de>
Cc: "H. Peter Anvin" <hpa@...or.com>
Cc: Andy Lutomirski <luto@...nel.org>
Cc: Dave Hansen <dave.hansen@...ux.intel.com>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: Josh Poimboeuf <jpoimboe@...hat.com>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Sean Christopherson <sean.j.christopherson@...el.com>
Cc: x86-ml <x86@...nel.org>
Link: https://lkml.kernel.org/r/20190414160144.592691557@linutronix.de
---
 arch/x86/mm/cpu_entry_area.c | 37 ++++++++++++++++++++++++++++++-------
 1 file changed, 30 insertions(+), 7 deletions(-)

diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index 6a09b84c13fe..2b1407662a6d 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -77,6 +77,34 @@ static void __init percpu_setup_debug_store(unsigned int cpu)
 #endif
 }
 
+#ifdef CONFIG_X86_64
+
+#define cea_map_stack(name) do {					\
+	npages = sizeof(estacks->name## _stack) / PAGE_SIZE;		\
+	cea_map_percpu_pages(cea->estacks.name## _stack,		\
+			estacks->name## _stack, npages, PAGE_KERNEL);	\
+	} while (0)
+
+static void __init percpu_setup_exception_stacks(unsigned int cpu)
+{
+	struct exception_stacks *estacks = per_cpu_ptr(&exception_stacks, cpu);
+	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
+	unsigned int npages;
+
+	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
+	/*
+	 * The exceptions stack mappings in the per cpu area are protected
+	 * by guard pages so each stack must be mapped separately.
+	 */
+	cea_map_stack(DF);
+	cea_map_stack(NMI);
+	cea_map_stack(DB);
+	cea_map_stack(MCE);
+}
+#else
+static inline void percpu_setup_exception_stacks(unsigned int cpu) {}
+#endif
+
 /* Setup the fixmap mappings only once per-processor */
 static void __init setup_cpu_entry_area(unsigned int cpu)
 {
@@ -134,13 +162,8 @@ static void __init setup_cpu_entry_area(unsigned int cpu)
 	per_cpu(cpu_entry_area, cpu) = cea;
 #endif
 
-#ifdef CONFIG_X86_64
-	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
-	BUILD_BUG_ON(sizeof(exception_stacks) !=
-		     sizeof(((struct cpu_entry_area *)0)->estacks));
-	cea_map_percpu_pages(&cea->estacks, &per_cpu(exception_stacks, cpu),
-			     sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL);
-#endif
+	percpu_setup_exception_stacks(cpu);
+
 	percpu_setup_debug_store(cpu);
 }
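
For readability, this is what the cea_map_stack(DF) invocation in the
hunk above expands to after the ## token pasting (shown purely for
illustration; npages, estacks and cea are the local variables of
percpu_setup_exception_stacks()):

	do {
		/* one page per PAGE_SIZE worth of DF stack */
		npages = sizeof(estacks->DF_stack) / PAGE_SIZE;
		/* map only the DF stack, leaving the area around it unmapped */
		cea_map_percpu_pages(cea->estacks.DF_stack,
				estacks->DF_stack, npages, PAGE_KERNEL);
	} while (0);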
 
