Message-ID: <YAFmLzVnVzzUit4T@hirez.programming.kicks-ass.net>
Date:   Fri, 15 Jan 2021 10:53:51 +0100
From:   Peter Zijlstra <peterz@...radead.org>
To:     Jason Baron <jbaron@...mai.com>
Cc:     pbonzini@...hat.com, seanjc@...gle.com, kvm@...r.kernel.org,
        x86@...nel.org, linux-kernel@...r.kernel.org,
        Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
        Andrea Arcangeli <aarcange@...hat.com>
Subject: Re: [PATCH v2 3/3] KVM: x86: use static calls to reduce kvm_x86_ops
 overhead

On Fri, Jan 15, 2021 at 10:45:49AM +0100, Peter Zijlstra wrote:
> On Thu, Jan 14, 2021 at 10:27:56PM -0500, Jason Baron wrote:
> > diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> > index 5060922..9d4492b 100644
> > --- a/arch/x86/include/asm/kvm_host.h
> > +++ b/arch/x86/include/asm/kvm_host.h
> > @@ -1350,7 +1350,7 @@ void kvm_arch_free_vm(struct kvm *kvm);
> >  static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
> >  {
> >  	if (kvm_x86_ops.tlb_remote_flush &&
> > -	    !kvm_x86_ops.tlb_remote_flush(kvm))
> > +	    !static_call(kvm_x86_tlb_remote_flush)(kvm))
> >  		return 0;
> >  	else
> >  		return -ENOTSUPP;
> 
> Would you be able to use something like this?
> 
>   https://lkml.kernel.org/r/20201110101307.GO2651@hirez.programming.kicks-ass.net
> 
> we could also add __static_call_return1(), if that would help.

Something like so on top of the above.

--- a/arch/x86/kernel/static_call.c
+++ b/arch/x86/kernel/static_call.c
@@ -17,6 +17,7 @@ static void __ref __static_call_transfor
 	 * data16 data16 xorq %rax, %rax - a single 5 byte instruction that clears %rax
 	 */
 	static const u8 ret0[5] = { 0x66, 0x66, 0x48, 0x31, 0xc0 };
+	static const u8 ret1[5] = { 0xb8, 0x01, 0x00, 0x00, 0x00 }; /* mov $0x1,%eax */
 	int size = CALL_INSN_SIZE;
 	const void *emulate = NULL;
 	const void *code;
@@ -27,7 +28,11 @@ static void __ref __static_call_transfor
 		if (func == &__static_call_return0) {
 			emulate = code;
 			code = ret0;
+		} else if (func == &__static_call_return1) {
+			emulate = code;
+			code = ret1;
 		}
+
 		break;
 
 	case NOP:
--- a/include/linux/static_call.h
+++ b/include/linux/static_call.h
@@ -137,6 +137,7 @@ extern void arch_static_call_transform(v
 #ifdef CONFIG_HAVE_STATIC_CALL_INLINE
 
 extern long __static_call_return0(void);
+extern long __static_call_return1(void);
 
 extern int __init static_call_init(void);
 
@@ -190,6 +191,7 @@ extern int static_call_text_reserved(voi
 #elif defined(CONFIG_HAVE_STATIC_CALL)
 
 static inline long __static_call_return0(void) { return 0; }
+static inline long __static_call_return1(void) { return 1; }
 
 static inline int static_call_init(void) { return 0; }
 
@@ -239,6 +241,7 @@ static inline int static_call_text_reser
 #else /* Generic implementation */
 
 static inline long __static_call_return0(void) { return 0; }
+static inline long __static_call_return1(void) { return 1; }
 
 static inline int static_call_init(void) { return 0; }
 
--- a/kernel/static_call.c
+++ b/kernel/static_call.c
@@ -443,6 +443,11 @@ long __static_call_return0(void)
 	return 0;
 }
 
+long __static_call_return1(void)
+{
+	return 1;
+}
+
 #ifdef CONFIG_STATIC_CALL_SELFTEST
 
 static int func_a(int x)

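For illustration only, a minimal sketch (not part of the patch) of how the KVM
side from the quoted hunk could then lose the NULL test: point the static call
at __static_call_return1() whenever the vendor module leaves tlb_remote_flush
empty. The kvm_x86_tlb_remote_flush static call is assumed to be declared as in
Jason's v2 series; the kvm_ops_update_tlb_remote_flush() helper below is a name
made up for the example.

/* Hypothetical setup helper -- assumes kvm_x86_tlb_remote_flush is declared
 * elsewhere (per the v2 series) and __static_call_return1() exists as above. */
static void kvm_ops_update_tlb_remote_flush(void)
{
	if (kvm_x86_ops.tlb_remote_flush)
		static_call_update(kvm_x86_tlb_remote_flush,
				   kvm_x86_ops.tlb_remote_flush);
	else
		/* No callback? Route the call to the "return 1" stub. */
		static_call_update(kvm_x86_tlb_remote_flush,
				   (void *)__static_call_return1);
}

static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
	/*
	 * No kvm_x86_ops.tlb_remote_flush test needed: an absent callback
	 * returns 1 through the static call, so we fall through to -ENOTSUPP.
	 */
	if (!static_call(kvm_x86_tlb_remote_flush)(kvm))
		return 0;

	return -ENOTSUPP;
}

Either way the fast path becomes a single direct call, without loading
kvm_x86_ops.tlb_remote_flush on every invocation.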