Message-Id: <20210507164456.1033-1-jon@nutanix.com>
Date:   Fri,  7 May 2021 12:44:50 -0400
From:   Jon Kohler <jon@...anix.com>
To:     unlisted-recipients:; (no To-header on input)
Cc:     Jon Kohler <jon@...anix.com>, Babu Moger <babu.moger@....com>,
        Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
        x86@...nel.org, "H. Peter Anvin" <hpa@...or.com>,
        Paolo Bonzini <pbonzini@...hat.com>,
        Sean Christopherson <seanjc@...gle.com>,
        Vitaly Kuznetsov <vkuznets@...hat.com>,
        Wanpeng Li <wanpengli@...cent.com>,
        Jim Mattson <jmattson@...gle.com>,
        Joerg Roedel <joro@...tes.org>,
        Dave Hansen <dave.hansen@...el.com>,
        Fenghua Yu <fenghua.yu@...el.com>,
        Yu-cheng Yu <yu-cheng.yu@...el.com>,
        Tony Luck <tony.luck@...el.com>,
        Uros Bizjak <ubizjak@...il.com>,
        Petteri Aimonen <jpa@....mail.kapsi.fi>,
        Kan Liang <kan.liang@...ux.intel.com>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Mike Rapoport <rppt@...nel.org>,
        Benjamin Thiel <b.thiel@...teo.de>,
        Fan Yang <Fan_Yang@...u.edu.cn>,
        Juergen Gross <jgross@...e.com>,
        Dave Jiang <dave.jiang@...el.com>,
        "Peter Zijlstra (Intel)" <peterz@...radead.org>,
        Ricardo Neri <ricardo.neri-calderon@...ux.intel.com>,
        Arvind Sankar <nivedita@...m.mit.edu>,
        linux-kernel@...r.kernel.org, kvm@...r.kernel.org
Subject: [PATCH] KVM: x86: add hint to skip hidden rdpkru under kvm_load_host_xsave_state

kvm_load_host_xsave_state handles xsave on vm exit, part of which is
managing memory protection key state. The latest arch.pkru is updated
with a rdpkru, and if that doesn't match the base host_pkru (which
happens about 70% of the time in our instrumentation), we issue a
__write_pkru.

__write_pkru issues another rdpkru internally to try to avoid the
wrpkru, so we end up reading the same value back to back even though
at this call site we already know we must go straight to wrpkru. The
internal comparison always fails here, costing a guaranteed branch
miss and extra work that can be skipped.
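
For illustration, the pre-patch exit path effectively does the
following (a simplified sketch, not the literal kernel source):

	/* simplified sketch of the pre-patch VM-exit path */
	vcpu->arch.pkru = rdpkru();		/* first read */
	if (vcpu->arch.pkru != vcpu->arch.host_pkru)
		__write_pkru(vcpu->arch.host_pkru);
		/*
		 * __write_pkru() then internally re-reads:
		 *	if (pkru == rdpkru())	// second read
		 *		return;		// never taken here
		 *	wrpkru(pkru);
		 */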

To improve performance, add a hint to __write_pkru so that callers
can indicate whether they have already done the comparison
themselves. The compiler appears to eliminate this branch entirely
when the hint is constant true, so incrementally tighter code is
generated as well.
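
Since __write_pkru is a static inline, a constant-true hint should
let the compiler fold the comparison away entirely; roughly (a
sketch of the expected reduction, not verified disassembly):

	/*
	 * __write_pkru(val, true): !skip_comparison is false at
	 * compile time, so the rdpkru() compare is dead code and
	 * the inlined call reduces to just:
	 */
	wrpkru(val);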

While we're in this section of code, optimize the if-condition
ordering prior to __write_pkru in both
kvm_load_{guest|host}_xsave_state.

For both functions, flip the ordering of the || condition so that
arch.xcr0 & XFEATURE_MASK_PKRU is checked first; when instrumented
in our environment it appeared to be always true, and it is less
work overall than kvm_read_cr4_bits.

For kvm_load_guest_xsave_state, hoist arch.pkru != host_pkru ahead
one position. When instrumented, I saw this be true roughly ~70% of
the time, whereas the other conditions were almost always true.
With this change, we avoid the third condition check ~30% of the
time.
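
As a rough back-of-the-envelope using the instrumented frequencies
above (which are specific to our environment):

	/*
	 * expected work per exit in kvm_load_guest_xsave_state:
	 *	old order: the cr4/xcr0 checks run on ~100% of
	 *		exits, then the pkru compare
	 *	new order: the pkru compare runs on 100% of exits;
	 *		the xcr0 check runs only on the ~70% that
	 *		mismatch, and the costlier
	 *		kvm_read_cr4_bits almost never runs
	 */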

Cc: Babu Moger <babu.moger@....com>
Signed-off-by: Jon Kohler <jon@...anix.com>
---
 arch/x86/include/asm/fpu/internal.h  |  2 +-
 arch/x86/include/asm/pgtable.h       |  2 +-
 arch/x86/include/asm/special_insns.h | 10 ++++++----
 arch/x86/kvm/x86.c                   | 14 +++++++-------
 4 files changed, 15 insertions(+), 13 deletions(-)

diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 8d33ad80704f..0860bbadb422 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -583,7 +583,7 @@ static inline void switch_fpu_finish(struct fpu *new_fpu)
 		if (pk)
 			pkru_val = pk->pkru;
 	}
-	__write_pkru(pkru_val);
+	__write_pkru(pkru_val, false);

 	/*
 	 * Expensive PASID MSR write will be avoided in update_pasid() because
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index b1099f2d9800..8c8d4796b864 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -151,7 +151,7 @@ static inline void write_pkru(u32 pkru)
 	fpregs_lock();
 	if (pk)
 		pk->pkru = pkru;
-	__write_pkru(pkru);
+	__write_pkru(pkru, false);
 	fpregs_unlock();
 }

diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 2acd6cb62328..f4ac8ec3f096 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -104,13 +104,15 @@ static inline void wrpkru(u32 pkru)
 		     : : "a" (pkru), "c"(ecx), "d"(edx));
 }

-static inline void __write_pkru(u32 pkru)
+static inline void __write_pkru(u32 pkru, bool skip_comparison)
 {
 	/*
 	 * WRPKRU is relatively expensive compared to RDPKRU.
-	 * Avoid WRPKRU when it would not change the value.
+	 * Avoid WRPKRU when it would not change the value,
+	 * except when the caller has already done the
+	 * comparison, in which case we skip the redundant RDPKRU.
 	 */
-	if (pkru == rdpkru())
+	if (!skip_comparison && pkru == rdpkru())
 		return;

 	wrpkru(pkru);
@@ -122,7 +124,7 @@ static inline u32 rdpkru(void)
 	return 0;
 }

-static inline void __write_pkru(u32 pkru)
+static inline void __write_pkru(u32 pkru, bool skip_comparison)
 {
 }
 #endif
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cebdaa1e3cf5..cd95adbd140c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -912,10 +912,10 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
 	}

 	if (static_cpu_has(X86_FEATURE_PKU) &&
-	    (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) ||
-	     (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU)) &&
-	    vcpu->arch.pkru != vcpu->arch.host_pkru)
-		__write_pkru(vcpu->arch.pkru);
+	    vcpu->arch.pkru != vcpu->arch.host_pkru &&
+	    ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
+	     kvm_read_cr4_bits(vcpu, X86_CR4_PKE)))
+		__write_pkru(vcpu->arch.pkru, false);
 }
 EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);

@@ -925,11 +925,11 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
 		return;

 	if (static_cpu_has(X86_FEATURE_PKU) &&
-	    (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) ||
-	     (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU))) {
+	    ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
+	     kvm_read_cr4_bits(vcpu, X86_CR4_PKE))) {
 		vcpu->arch.pkru = rdpkru();
 		if (vcpu->arch.pkru != vcpu->arch.host_pkru)
-			__write_pkru(vcpu->arch.host_pkru);
+			__write_pkru(vcpu->arch.host_pkru, true);
 	}

 	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {
--
2.30.1 (Apple Git-130)
