Message-Id: <20230719144131.29052-8-binbin.wu@linux.intel.com>
Date: Wed, 19 Jul 2023 22:41:29 +0800
From: Binbin Wu <binbin.wu@...ux.intel.com>
To: kvm@...r.kernel.org, linux-kernel@...r.kernel.org
Cc: seanjc@...gle.com, pbonzini@...hat.com, chao.gao@...el.com,
kai.huang@...el.com, David.Laight@...LAB.COM,
robert.hu@...ux.intel.com, guang.zeng@...el.com,
binbin.wu@...ux.intel.com
Subject: [PATCH v10 7/9] KVM: VMX: Implement and wire get_untagged_addr() for LAM
Implement the LAM version of get_untagged_addr() in VMX.

Skip address untagging for instruction fetches, branch targets, and the
operand of INVLPG, as LAM doesn't apply to any of them. Also skip
untagging for implicit system accesses: LAM doesn't apply to the loading
of base addresses of memory management registers and segment registers,
and their values must still be canonical. (For now, the
get_untagged_addr() interface is not called for implicit system
accesses; handling them here is just future-proofing.)
Co-developed-by: Robert Hoo <robert.hu@...ux.intel.com>
Signed-off-by: Robert Hoo <robert.hu@...ux.intel.com>
Signed-off-by: Binbin Wu <binbin.wu@...ux.intel.com>
Reviewed-by: Chao Gao <chao.gao@...el.com>
---
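For reviewers, a minimal user-space sketch of the untagging math this
patch adds (lam_u57_untag() is a made-up name, and the sign_extend64()
below merely mirrors the kernel helper of the same name; neither is
kernel API):

#include <stdint.h>
#include <stdio.h>

/*
 * Local stand-in for the kernel's sign_extend64(): treat bit @index as
 * the sign bit and replicate it into all higher bits.
 */
static inline int64_t sign_extend64(uint64_t value, int index)
{
	int shift = 63 - index;

	return (int64_t)(value << shift) >> shift;
}

/*
 * User-address path with LAM_U57 active: metadata lives in bits 62:57,
 * so sign-extend from bit 56 and keep bit 63 (the user vs. supervisor
 * bit) untouched.
 */
static uint64_t lam_u57_untag(uint64_t gva)
{
	const uint64_t bit63 = 1ULL << 63;

	return ((uint64_t)sign_extend64(gva, 56) & ~bit63) | (gva & bit63);
}

int main(void)
{
	/* The 0x3f tag in bits 62:57 is stripped: prints 0x123456789abc. */
	printf("%#llx\n",
	       (unsigned long long)lam_u57_untag(0x7e00123456789abcULL));
	return 0;
}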
arch/x86/kvm/vmx/vmx.c | 35 +++++++++++++++++++++++++++++++++++
1 file changed, 35 insertions(+)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index bcee5dc3dd0b..abf6d42672cd 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -8177,6 +8177,39 @@ static void vmx_vm_destroy(struct kvm *kvm)
free_pages((unsigned long)kvm_vmx->pid_table, vmx_get_pid_table_order(kvm));
}
+static gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva,
+ unsigned int flags)
+{
+ unsigned long cr3_bits;
+ int lam_bit;
+
+ if (flags & (X86EMUL_F_FETCH | X86EMUL_F_BRANCH | X86EMUL_F_IMPLICIT |
+ X86EMUL_F_INVTLB))
+ return gva;
+
+ if (!is_64_bit_mode(vcpu))
+ return gva;
+
+ /*
+ * Bit 63 determines whether the address should be treated as a user
+ * address or a supervisor address.
+ */
+ if (!(gva & BIT_ULL(63))) {
+ cr3_bits = kvm_get_active_cr3_lam_bits(vcpu);
+ if (!(cr3_bits & (X86_CR3_LAM_U57 | X86_CR3_LAM_U48)))
+ return gva;
+
+ /* LAM_U48 is ignored if LAM_U57 is set. */
+ lam_bit = cr3_bits & X86_CR3_LAM_U57 ? 56 : 47;
+ } else {
+ if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_LAM_SUP))
+ return gva;
+
+ lam_bit = kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 56 : 47;
+ }
+ return (sign_extend64(gva, lam_bit) & ~BIT_ULL(63)) | (gva & BIT_ULL(63));
+}
+
static struct kvm_x86_ops vmx_x86_ops __initdata = {
.name = KBUILD_MODNAME,
@@ -8316,6 +8349,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.complete_emulated_msr = kvm_complete_insn_gp,
.vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
+
+ .get_untagged_addr = vmx_get_untagged_addr,
};
static unsigned int vmx_handle_intel_pt_intr(void)
--
2.25.1
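P.S. A worked trace of the final masking expression in
vmx_get_untagged_addr(), assuming the supervisor path with CR4.LAM_SUP=1
and CR4.LA57=1 (so lam_bit = 56) and a made-up tagged address:

	gva                    = 0xabffffff12345678  (bit 63 = 1, tag in bits 62:57, bit 56 = 1)
	sign_extend64(gva, 56) = 0xffffffff12345678  (bit 56 replicated into bits 63:57)
	 & ~BIT_ULL(63)        = 0x7fffffff12345678  (clear the sign-extended bit 63)
	 | (gva & BIT_ULL(63)) = 0xffffffff12345678  (restore the original bit 63)

The tag bits are overwritten with copies of bit 56 while bit 63 is
preserved, so a later canonicality check still sees the original
user/supervisor bit.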