Date:   Wed, 18 Dec 2019 13:45:47 -0600
From:   Tom Lendacky <thomas.lendacky@....com>
To:     kvm@...r.kernel.org, linux-kernel@...r.kernel.org
Cc:     Paolo Bonzini <pbonzini@...hat.com>,
        Sean Christopherson <sean.j.christopherson@...el.com>,
        Vitaly Kuznetsov <vkuznets@...hat.com>,
        Wanpeng Li <wanpengli@...cent.com>,
        Jim Mattson <jmattson@...gle.com>,
        Joerg Roedel <joro@...tes.org>,
        Brijesh Singh <brijesh.singh@....com>
Subject: [PATCH v1 2/2] KVM: SVM: Implement reserved bit callback to set MMIO SPTE mask

Register a reserved-bit mask callback that checks whether memory
encryption is supported and enabled:
  If enabled, the physical address width is reduced and the first bit
  above the last valid reduced physical address bit will always be
  reserved.

  If disabled, the physical address width is not reduced, so bit 51 can
  be used for the mask, unless the physical address width is 52, in
  which case there are no reserved bits and the mask returned is zero.
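
For illustration, the selection reduces to the following (a minimal
standalone sketch, not part of the patch; phys_bits stands in for
boot_cpu_data.x86_phys_bits, which is already adjusted downward when
memory encryption is enabled, and mem_encrypt stands in for the
SYSCFG MSR check):

  static inline u64 reserved_mask_sketch(unsigned int phys_bits,
					 bool mem_encrypt)
  {
	if (mem_encrypt)	/* width reduced: first bit above it */
		return 1ull << phys_bits;
	if (phys_bits == 52)	/* no reserved bits are available */
		return 0;
	return 1ull << 51;	/* default: bit 51 is reserved */
  }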

Fixes: 28a1f3ac1d0c ("kvm: x86: Set highest physical address bits in non-present/reserved SPTEs")
Signed-off-by: Tom Lendacky <thomas.lendacky@....com>
---
 arch/x86/kvm/svm.c | 42 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 122d4ce3b1ab..a769aab45841 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -7242,6 +7242,46 @@ static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
 		   (svm->vmcb->control.intercept & (1ULL << INTERCEPT_INIT));
 }
 
+static u64 svm_get_reserved_mask(void)
+{
+	u64 mask, msr;
+
+	/* The default mask, used when memory encryption is not enabled */
+	mask = 1ull << 51;
+
+	/* No support for memory encryption, use the default */
+	if (cpuid_eax(0x80000000) < 0x8000001f)
+		return mask;
+
+	/*
+	 * Check for memory encryption support. If memory encryption support
+	 * is enabled:
+	 *   The physical addressing width is reduced. The first bit above the
+	 *   new physical addressing limit will always be reserved.
+	 */
+	rdmsrl(MSR_K8_SYSCFG, msr);
+	if (msr & MSR_K8_SYSCFG_MEM_ENCRYPT) {
+		/*
+		 * x86_phys_bits has been adjusted as part of the memory
+		 * encryption support.
+		 */
+		mask = 1ull << boot_cpu_data.x86_phys_bits;
+
+		return mask;
+	}
+
+	/*
+	 * If memory encryption support is disabled:
+	 *   The physical addressing width is not reduced, so the default mask
+	 *   will always be reserved unless the physical addressing width is 52,
+	 *   in which case there are no reserved bits, so return an empty mask.
+	 */
+	if (IS_ENABLED(CONFIG_X86_64) && boot_cpu_data.x86_phys_bits == 52)
+		mask = 0;
+
+	return mask;
+}
+
 static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.cpu_has_kvm_support = has_svm,
 	.disabled_by_bios = is_disabled,
@@ -7379,6 +7419,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
 
 	.apic_init_signal_blocked = svm_apic_init_signal_blocked,
+
+	.get_reserved_mask = svm_get_reserved_mask,
 };
 
 static int __init svm_init(void)
-- 
2.17.1
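
For context, patch 1/2 of this series adds the get_reserved_mask hook
to kvm_x86_ops. A minimal sketch, assuming the common MMIO SPTE setup
consults the hook roughly like this (the exact wiring lives in patch
1/2 and is not shown here):

	u64 mask = 0;

	if (kvm_x86_ops->get_reserved_mask)
		mask = kvm_x86_ops->get_reserved_mask();
	/* ... fold mask into the MMIO SPTE mask as before ... */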
