Message-Id: <e5e86fddcafbe4daedf58cf43e954cd6c70a836e.1480638597.git.luto@kernel.org>
Date:   Thu,  1 Dec 2016 16:35:00 -0800
From:   Andy Lutomirski <luto@...nel.org>
To:     x86@...nel.org
Cc:     One Thousand Gnomes <gnomes@...rguk.ukuu.org.uk>,
        Borislav Petkov <bp@...en8.de>,
        "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
        Brian Gerst <brgerst@...il.com>,
        Matthew Whitehead <tedheadster@...il.com>,
        Henrique de Moraes Holschuh <hmh@....eng.br>,
        Peter Zijlstra <peterz@...radead.org>,
        Andy Lutomirski <luto@...nel.org>
Subject: [PATCH v2 4/6] x86/paravirt: Make sync_core() be a paravirt op

I want to change sync_core() to use MOV to CR2, but that won't work
the way we want on Xen PV, and the easiest fix is to make
sync_core() be a paravirt op.  Make it so.

A real paravirt guru could probably microoptimize this.  I doubt it
matters much, though, as sync_core() is mostly used during boot.
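
For reference only (not part of this patch, and the function name below is
made up for illustration): the MOV-to-CR2 idea mentioned above relies on
MOV to a control register being an architecturally serializing instruction
on bare metal, while a Xen PV guest can't execute it directly, which is why
sync_core() needs to become a paravirt op first.  A rough sketch of what
such a native sequence might look like:

static inline void sync_core_via_cr2_sketch(void)
{
	unsigned long cr2;

	/*
	 * MOV to a control register serializes the instruction stream
	 * on native hardware.  Read CR2 first and write the same value
	 * back so the fault address that a page-fault handler would
	 * read is preserved.
	 */
	asm volatile("mov %%cr2, %0; mov %0, %%cr2"
		     : "=r" (cr2) : : "memory");
}

The exact instruction sequence the later patch in this series ends up using
may well differ; this is only meant to show why the operation has to be
routed through pv_cpu_ops on Xen PV.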

Signed-off-by: Andy Lutomirski <luto@...nel.org>
---
 arch/x86/include/asm/paravirt.h       | 5 +++++
 arch/x86/include/asm/paravirt_types.h | 2 ++
 arch/x86/include/asm/processor.h      | 3 ++-
 arch/x86/kernel/paravirt.c            | 2 ++
 4 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index ce932812f142..7e76b72aa698 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -28,6 +28,11 @@ static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
 	PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
 }
 
+static inline void sync_core(void)
+{
+	PVOP_VCALL0(pv_cpu_ops.sync_core);
+}
+
 /*
  * These special macros can be used to get or set a debugging register
  */
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 0f400c0e4979..e4d2cb2c0165 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -177,6 +177,8 @@ struct pv_cpu_ops {
 
 	void (*start_context_switch)(struct task_struct *prev);
 	void (*end_context_switch)(struct task_struct *next);
+
+	void (*sync_core)(void);
 };
 
 struct pv_irq_ops {
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 64fbc937d586..c4402053c663 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -507,6 +507,7 @@ static inline void load_sp0(struct tss_struct *tss,
 }
 
 #define set_iopl_mask native_set_iopl_mask
+#define sync_core native_sync_core
 #endif /* CONFIG_PARAVIRT */
 
 /* Free all resources held by a thread. */
@@ -591,7 +592,7 @@ static __always_inline void cpu_relax(void)
 #define cpu_relax_lowlatency() cpu_relax()
 
 /* Stop speculative execution and prefetching of modified code. */
-static inline void sync_core(void)
+static inline void native_sync_core(void)
 {
 	int tmp;
 
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index bbf3d5933eaa..4d6a20ecbc78 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -373,6 +373,8 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
 
 	.start_context_switch = paravirt_nop,
 	.end_context_switch = paravirt_nop,
+
+	.sync_core = native_sync_core,
 };
 
 /* At this point, native_get/set_debugreg has real function entries */
-- 
2.9.3
