Date:   Tue, 5 Dec 2017 00:18:42 +0100
From:   Borislav Petkov <bp@...e.de>
To:     Thomas Gleixner <tglx@...utronix.de>
Cc:     LKML <linux-kernel@...r.kernel.org>, x86@...nel.org,
        Linus Torvalds <torvalds@...ux-foundation.org>,
        Andy Lutomirski <luto@...nel.org>,
        Peter Zijlstra <peterz@...radead.org>,
        Dave Hansen <dave.hansen@...el.com>,
        Greg KH <gregkh@...uxfoundation.org>, keescook@...gle.com,
        hughd@...gle.com, Brian Gerst <brgerst@...il.com>,
        Josh Poimboeuf <jpoimboe@...hat.com>,
        Denys Vlasenko <dvlasenk@...hat.com>,
        Rik van Riel <riel@...hat.com>,
        Boris Ostrovsky <boris.ostrovsky@...cle.com>,
        Juergen Gross <jgross@...e.com>,
        David Laight <David.Laight@...lab.com>,
        Eduardo Valentin <eduval@...zon.com>, aliguori@...zon.com,
        Will Deacon <will.deacon@....com>, daniel.gruss@...k.tugraz.at
Subject: Re: [patch 27/60] x86/cpufeatures: Add X86_BUG_CPU_INSECURE

On Mon, Dec 04, 2017 at 03:07:33PM +0100, Thomas Gleixner wrote:
> From: Thomas Gleixner <tglx@...utronix.de>
> 
> Many x86 CPUs leak information to user space due to missing isolation of
> user space and kernel space page tables. There are many well documented
> ways to exploit that.
> 
> The upcoming software mitigation of isolating the user and kernel space
> page tables needs a misfeature flag so code can be made runtime
> conditional.
> 
> Add two BUG bits: One which indicates that the CPU is affected and one that
> the software mitigation is enabled.
> 
> Assume for now that _ALL_ x86 CPUs are affected by this. Exceptions can be
> made later.
> 
> Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
> 
> ---
>  arch/x86/include/asm/cpufeatures.h |    2 ++
>  arch/x86/kernel/cpu/common.c       |    4 ++++
>  2 files changed, 6 insertions(+)
> 
> --- a/arch/x86/include/asm/cpufeatures.h
> +++ b/arch/x86/include/asm/cpufeatures.h
> @@ -340,5 +340,7 @@
>  #define X86_BUG_SWAPGS_FENCE		X86_BUG(11) /* SWAPGS without input dep on GS */
>  #define X86_BUG_MONITOR			X86_BUG(12) /* IPI required to wake up remote CPU */
>  #define X86_BUG_AMD_E400		X86_BUG(13) /* CPU is among the affected by Erratum 400 */
> +#define X86_BUG_CPU_INSECURE		X86_BUG(14) /* CPU is insecure and needs kernel page table isolation */
> +#define X86_BUG_CPU_SECURE_MODE_KPTI	X86_BUG(15) /* Kernel Page Table Isolation enabled*/

Right, if this second one is going to denote that the workaround is
enabled, let's make it a feature bit with a shorter name:

#define X86_FEATURE_KPTI
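
That way, forcing and testing the mitigation goes through the usual
cpufeature helpers instead of the *_bug() variants. Roughly, as a sketch
only (the example_* function names below are made up; the real call sites
are in the delta diff):

#include <asm/cpufeature.h>	/* setup_force_cpu_cap(), static_cpu_has() */
#include <asm/cpufeatures.h>	/* would carry X86_FEATURE_KPTI */

/* Boot time: force the synthetic bit once we decide to enable the isolation. */
static void __init example_kpti_enable(void)
{
	setup_force_cpu_cap(X86_FEATURE_KPTI);
}

/* Runtime-conditional code then simply tests the feature bit. */
static inline bool example_kpti_active(void)
{
	return static_cpu_has(X86_FEATURE_KPTI);
}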

Delta diff below.

---
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 4dd0bda9fe09..604b62a5a2fe 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -212,7 +212,7 @@ For 32-bit we have the following conventions - kernel is built with
 .endm
 
 .macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
-	ALTERNATIVE "jmp .Lend_\@", "", X86_BUG_CPU_SECURE_MODE_KPTI
+	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KPTI
 	mov	%cr3, \scratch_reg
 	ADJUST_KERNEL_CR3 \scratch_reg
 	mov	\scratch_reg, %cr3
@@ -223,7 +223,7 @@ For 32-bit we have the following conventions - kernel is built with
 	PER_CPU_VAR(cpu_tlbstate) + TLB_STATE_user_pcid_flush_mask
 
 .macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
-	ALTERNATIVE "jmp .Lend_\@", "", X86_BUG_CPU_SECURE_MODE_KPTI
+	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KPTI
 	mov	%cr3, \scratch_reg
 
 	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID
@@ -259,7 +259,7 @@ For 32-bit we have the following conventions - kernel is built with
 .endm
 
 .macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
-	ALTERNATIVE "jmp .Ldone_\@", "", X86_BUG_CPU_SECURE_MODE_KPTI
+	ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_KPTI
 	movq	%cr3, \scratch_reg
 	movq	\scratch_reg, \save_reg
 	/*
@@ -282,7 +282,7 @@ For 32-bit we have the following conventions - kernel is built with
 .endm
 
 .macro RESTORE_CR3 scratch_reg:req save_reg:req
-	ALTERNATIVE "jmp .Lend_\@", "", X86_BUG_CPU_SECURE_MODE_KPTI
+	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KPTI
 
 	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID
 
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 6e905acb4e97..b367c23e7d83 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -198,10 +198,10 @@
 #define X86_FEATURE_CAT_L2		( 7*32+ 5) /* Cache Allocation Technology L2 */
 #define X86_FEATURE_CDP_L3		( 7*32+ 6) /* Code and Data Prioritization L3 */
 #define X86_FEATURE_INVPCID_SINGLE	( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
-
 #define X86_FEATURE_HW_PSTATE		( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK	( 7*32+ 9) /* AMD ProcFeedbackInterface */
 #define X86_FEATURE_SME			( 7*32+10) /* AMD Secure Memory Encryption */
+#define X86_FEATURE_KPTI		( 7*32+11) /* Kernel Page Table Isolation enabled */
 
 #define X86_FEATURE_INTEL_PPIN		( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_INTEL_PT		( 7*32+15) /* Intel Processor Trace */
@@ -342,6 +342,5 @@
 #define X86_BUG_MONITOR			X86_BUG(12) /* IPI required to wake up remote CPU */
 #define X86_BUG_AMD_E400		X86_BUG(13) /* CPU is among the affected by Erratum 400 */
 #define X86_BUG_CPU_INSECURE		X86_BUG(14) /* CPU is insecure and needs kernel page table isolation */
-#define X86_BUG_CPU_SECURE_MODE_KPTI	X86_BUG(15) /* Kernel Page Table Isolation enabled*/
 
 #endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 0405960cee25..d1bf0b3a8232 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -210,7 +210,7 @@ static inline bool pgd_userspace_access(pgd_t pgd)
 static inline pgd_t kpti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
 {
 #ifdef CONFIG_KERNEL_PAGE_TABLE_ISOLATION
-	if (!static_cpu_has_bug(X86_BUG_CPU_SECURE_MODE_KPTI))
+	if (!static_cpu_has(X86_FEATURE_KPTI))
 		return pgd;
 
 	if (pgd_userspace_access(pgd)) {
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 55ebfd144f18..d84167c036c0 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -330,7 +330,7 @@ static inline void invalidate_pcid_other(void)
 	 * are set as _PAGE_GLOBAL.  We have no shared nonglobals
 	 * and nothing to do here.
 	 */
-	if (!static_cpu_has_bug(X86_BUG_CPU_SECURE_MODE_KPTI))
+	if (!static_cpu_has(X86_FEATURE_KPTI))
 		return;
 
 	this_cpu_write(cpu_tlbstate.invalidate_other, true);
@@ -374,7 +374,7 @@ static inline void invalidate_user_asid(u16 asid)
 	if (!cpu_feature_enabled(X86_FEATURE_PCID))
 		return;
 
-	if (!static_cpu_has_bug(X86_BUG_CPU_SECURE_MODE_KPTI))
+	if (!static_cpu_has(X86_FEATURE_KPTI))
 		return;
 
 	__set_bit(kern_pcid(asid),
@@ -438,7 +438,7 @@ static inline void __native_flush_tlb_single(unsigned long addr)
 
 	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
 
-	if (!static_cpu_has_bug(X86_BUG_CPU_SECURE_MODE_KPTI))
+	if (!static_cpu_has(X86_FEATURE_KPTI))
 		return;
 
 	/*
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index b38a426a9855..4aa7b1efa6d8 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1481,7 +1481,7 @@ void syscall_init(void)
 		(entry_SYSCALL_64_trampoline - _entry_trampoline);
 
 	wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
-	if (static_cpu_has_bug(X86_BUG_CPU_SECURE_MODE_KPTI))
+	if (static_cpu_has(X86_FEATURE_KPTI))
 		wrmsrl(MSR_LSTAR, SYSCALL64_entry_trampoline);
 	else
 		wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index f63a2b00d775..15dfdb76523d 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -53,7 +53,7 @@ static void set_ldt_and_map(struct ldt_struct *ldt)
 	void *fixva;
 	int idx, i;
 
-	if (!static_cpu_has_bug(X86_BUG_CPU_SECURE_MODE_KPTI)) {
+	if (!static_cpu_has(X86_FEATURE_KPTI)) {
 		set_ldt(ldt->entries_va, ldt->nr_entries);
 		return;
 	}
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index f9dfc20234e9..f18041e7d4d2 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -504,7 +504,7 @@ void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd)
 void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user)
 {
 #ifdef CONFIG_KERNEL_PAGE_TABLE_ISOLATION
-	if (user && static_cpu_has_bug(X86_BUG_CPU_SECURE_MODE_KPTI))
+	if (user && static_cpu_has(X86_FEATURE_KPTI))
 		pgd = kernel_to_user_pgdp(pgd);
 #endif
 	ptdump_walk_pgd_level_core(m, pgd, false, false);
@@ -516,7 +516,7 @@ static void ptdump_walk_user_pgd_level_checkwx(void)
 #ifdef CONFIG_KERNEL_PAGE_TABLE_ISOLATION
 	pgd_t *pgd = (pgd_t *) &init_top_pgt;
 
-	if (!static_cpu_has_bug(X86_BUG_CPU_SECURE_MODE_KPTI))
+	if (!static_cpu_has(X86_FEATURE_KPTI))
 		return;
 
 	pr_info("x86/mm: Checking user space page tables\n");
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index ffd55531206e..d65bc503da44 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -164,7 +164,7 @@ static int page_size_mask;
 
 static void enable_global_pages(void)
 {
-	if (!static_cpu_has_bug(X86_BUG_CPU_SECURE_MODE_KPTI))
+	if (!static_cpu_has(X86_FEATURE_KPTI))
 		__supported_pte_mask |= _PAGE_GLOBAL;
 }
 
diff --git a/arch/x86/mm/kpti.c b/arch/x86/mm/kpti.c
index a3b39c01e028..b8f2e300e26c 100644
--- a/arch/x86/mm/kpti.c
+++ b/arch/x86/mm/kpti.c
@@ -61,7 +61,7 @@ void __init kpti_check_boottime_disable(void)
 		enable = false;
 	}
 	if (enable)
-		setup_force_cpu_bug(X86_BUG_CPU_SECURE_MODE_KPTI);
+		setup_force_cpu_cap(X86_FEATURE_KPTI);
 }
 
 /*
@@ -236,7 +236,7 @@ static void __init kpti_init_all_pgds(void)
  */
 void __init kpti_init(void)
 {
-	if (!static_cpu_has_bug(X86_BUG_CPU_SECURE_MODE_KPTI))
+	if (!static_cpu_has(X86_FEATURE_KPTI))
 		return;
 
 	pr_info("enabled\n");
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 20f6cc4e49b8..430c6ba24ad7 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -42,7 +42,7 @@ void clear_asid_other(void)
 	 * This is only expected to be set if we have disabled
 	 * kernel _PAGE_GLOBAL pages.
 	 */
-	if (!static_cpu_has_bug(X86_BUG_CPU_SECURE_MODE_KPTI)) {
+	if (!static_cpu_has(X86_FEATURE_KPTI)) {
 		WARN_ON_ONCE(1);
 		return;
 	}

-- 
Regards/Gruss,
    Boris.

SUSE Linux GmbH, GF: Felix Imendörffer, Jane Smithard, Graham Norton, HRB 21284 (AG Nürnberg)