Message-Id: <20171227164615.223499127@linuxfoundation.org>
Date: Wed, 27 Dec 2017 17:46:01 +0100
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>, stable@...r.kernel.org,
        "Peter Zijlstra (Intel)" <peterz@...radead.org>,
        Andy Lutomirski <luto@...nel.org>,
        Boris Ostrovsky <boris.ostrovsky@...cle.com>,
        Borislav Petkov <bp@...en8.de>,
        Brian Gerst <brgerst@...il.com>,
        Dave Hansen <dave.hansen@...ux.intel.com>,
        David Laight <David.Laight@...lab.com>,
        Denys Vlasenko <dvlasenk@...hat.com>,
        Eduardo Valentin <eduval@...zon.com>,
        "H. Peter Anvin" <hpa@...or.com>,
        Josh Poimboeuf <jpoimboe@...hat.com>,
        Juergen Gross <jgross@...e.com>,
        Linus Torvalds <torvalds@...ux-foundation.org>,
        Thomas Gleixner <tglx@...utronix.de>,
        Will Deacon <will.deacon@....com>,
        aliguori@...zon.com, daniel.gruss@...k.tugraz.at,
        hughd@...gle.com, keescook@...gle.com, linux-mm@...ck.org,
        Ingo Molnar <mingo@...nel.org>
Subject: [PATCH 4.14 28/74] x86/mm: Create asm/invpcid.h

4.14-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Peter Zijlstra <peterz@...radead.org>

commit 1a3b0caeb77edeac5ce5fa05e6a61c474c9a9745 upstream.

Unclutter tlbflush.h a little.

Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Cc: Andy Lutomirski <luto@...nel.org>
Cc: Boris Ostrovsky <boris.ostrovsky@...cle.com>
Cc: Borislav Petkov <bp@...en8.de>
Cc: Brian Gerst <brgerst@...il.com>
Cc: Dave Hansen <dave.hansen@...ux.intel.com>
Cc: David Laight <David.Laight@...lab.com>
Cc: Denys Vlasenko <dvlasenk@...hat.com>
Cc: Eduardo Valentin <eduval@...zon.com>
Cc: Greg KH <gregkh@...uxfoundation.org>
Cc: H. Peter Anvin <hpa@...or.com>
Cc: Josh Poimboeuf <jpoimboe@...hat.com>
Cc: Juergen Gross <jgross@...e.com>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Will Deacon <will.deacon@....com>
Cc: aliguori@...zon.com
Cc: daniel.gruss@...k.tugraz.at
Cc: hughd@...gle.com
Cc: keescook@...gle.com
Cc: linux-mm@...ck.org
Signed-off-by: Ingo Molnar <mingo@...nel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
 arch/x86/include/asm/invpcid.h  |   53 ++++++++++++++++++++++++++++++++++++++++
 arch/x86/include/asm/tlbflush.h |   49 ------------------------------------
 2 files changed, 54 insertions(+), 48 deletions(-)

--- /dev/null
+++ b/arch/x86/include/asm/invpcid.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_INVPCID
+#define _ASM_X86_INVPCID
+
+static inline void __invpcid(unsigned long pcid, unsigned long addr,
+			     unsigned long type)
+{
+	struct { u64 d[2]; } desc = { { pcid, addr } };
+
+	/*
+	 * The memory clobber is because the whole point is to invalidate
+	 * stale TLB entries and, especially if we're flushing global
+	 * mappings, we don't want the compiler to reorder any subsequent
+	 * memory accesses before the TLB flush.
+	 *
+	 * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
+	 * invpcid (%rcx), %rax in long mode.
+	 */
+	asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
+		      : : "m" (desc), "a" (type), "c" (&desc) : "memory");
+}
+
+#define INVPCID_TYPE_INDIV_ADDR		0
+#define INVPCID_TYPE_SINGLE_CTXT	1
+#define INVPCID_TYPE_ALL_INCL_GLOBAL	2
+#define INVPCID_TYPE_ALL_NON_GLOBAL	3
+
+/* Flush all mappings for a given pcid and addr, not including globals. */
+static inline void invpcid_flush_one(unsigned long pcid,
+				     unsigned long addr)
+{
+	__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
+}
+
+/* Flush all mappings for a given PCID, not including globals. */
+static inline void invpcid_flush_single_context(unsigned long pcid)
+{
+	__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
+}
+
+/* Flush all mappings, including globals, for all PCIDs. */
+static inline void invpcid_flush_all(void)
+{
+	__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
+}
+
+/* Flush all mappings for all PCIDs except globals. */
+static inline void invpcid_flush_all_nonglobals(void)
+{
+	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
+}
+
+#endif /* _ASM_X86_INVPCID */
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -9,54 +9,7 @@
 #include <asm/cpufeature.h>
 #include <asm/special_insns.h>
 #include <asm/smp.h>
-
-static inline void __invpcid(unsigned long pcid, unsigned long addr,
-			     unsigned long type)
-{
-	struct { u64 d[2]; } desc = { { pcid, addr } };
-
-	/*
-	 * The memory clobber is because the whole point is to invalidate
-	 * stale TLB entries and, especially if we're flushing global
-	 * mappings, we don't want the compiler to reorder any subsequent
-	 * memory accesses before the TLB flush.
-	 *
-	 * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
-	 * invpcid (%rcx), %rax in long mode.
-	 */
-	asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
-		      : : "m" (desc), "a" (type), "c" (&desc) : "memory");
-}
-
-#define INVPCID_TYPE_INDIV_ADDR		0
-#define INVPCID_TYPE_SINGLE_CTXT	1
-#define INVPCID_TYPE_ALL_INCL_GLOBAL	2
-#define INVPCID_TYPE_ALL_NON_GLOBAL	3
-
-/* Flush all mappings for a given pcid and addr, not including globals. */
-static inline void invpcid_flush_one(unsigned long pcid,
-				     unsigned long addr)
-{
-	__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
-}
-
-/* Flush all mappings for a given PCID, not including globals. */
-static inline void invpcid_flush_single_context(unsigned long pcid)
-{
-	__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
-}
-
-/* Flush all mappings, including globals, for all PCIDs. */
-static inline void invpcid_flush_all(void)
-{
-	__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
-}
-
-/* Flush all mappings for all PCIDs except globals. */
-static inline void invpcid_flush_all_nonglobals(void)
-{
-	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
-}
+#include <asm/invpcid.h>
 
 static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
 {
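
For readers skimming the archive, a minimal hypothetical userspace sketch of what the moved helpers wrap: it only mirrors how __invpcid() above assembles the two-word INVPCID descriptor (the memory operand) and the type selector (the register operand) and prints them, since the real instruction is privileged and can only be issued inside the kernel. The mock_invpcid() name and the example PCID/address values are illustrative, not part of the patch.

/*
 * Illustrative only: a userspace mock of the operands __invpcid() builds.
 * d[0] carries the PCID (only the low 12 bits are meaningful), d[1] the
 * linear address; the invalidation type travels in a register.  Nothing
 * here executes the privileged instruction.
 */
#include <stdint.h>
#include <stdio.h>

#define INVPCID_TYPE_INDIV_ADDR		0
#define INVPCID_TYPE_SINGLE_CTXT	1
#define INVPCID_TYPE_ALL_INCL_GLOBAL	2
#define INVPCID_TYPE_ALL_NON_GLOBAL	3

static void mock_invpcid(unsigned long pcid, unsigned long addr,
			 unsigned long type)
{
	/* Same shape as the kernel's descriptor: two 64-bit words in memory. */
	struct { uint64_t d[2]; } desc = { { pcid, addr } };

	printf("invpcid type=%lu desc={pcid=%#llx, addr=%#llx}\n",
	       type,
	       (unsigned long long)desc.d[0],
	       (unsigned long long)desc.d[1]);
}

int main(void)
{
	/* Flush a single address in PCID 5, leaving global mappings alone. */
	mock_invpcid(5, 0x7f0000001000UL, INVPCID_TYPE_INDIV_ADDR);

	/* Flush everything, including globals, for all PCIDs. */
	mock_invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);

	return 0;
}

In the kernel itself, callers of the real helpers are expected to check that the CPU advertises INVPCID support (the X86_FEATURE_INVPCID feature bit) and fall back to CR3/CR4-based TLB flushing when it is absent.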