Message-Id: <b4350d9d652551eb8f7d93d29ffe9e2a0120ef99.1705965635.git.isaku.yamahata@intel.com>
Date: Mon, 22 Jan 2024 15:54:03 -0800
From: isaku.yamahata@...el.com
To: kvm@...r.kernel.org,
linux-kernel@...r.kernel.org
Cc: isaku.yamahata@...el.com,
isaku.yamahata@...il.com,
Paolo Bonzini <pbonzini@...hat.com>,
erdemaktas@...gle.com,
Sean Christopherson <seanjc@...gle.com>,
Sagi Shahar <sagis@...gle.com>,
Kai Huang <kai.huang@...el.com>,
chen.bo@...el.com,
hang.yuan@...el.com,
tina.zhang@...el.com
Subject: [PATCH v18 087/121] KVM: TDX: Add a placeholder to handle TDX VM exit
From: Isaku Yamahata <isaku.yamahata@...el.com>
Wire up the handle_exit and handle_exit_irqoff methods and add a placeholder
to handle TDX VM exits. Add helper functions to retrieve the exit info, exit
qualification, etc.
Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
Reviewed-by: Paolo Bonzini <pbonzini@...hat.com>
---
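Note: the tdexit_*() helpers added below encode the register convention this
patch relies on, where the TDX module reports exit information in guest GPRs
on TD exit (RCX = exit qualification, RDX = extended exit qualification,
R8 = guest physical address, R9 = interrupt information).  The switch in
tdx_handle_exit() is deliberately left empty in this patch; a rough,
hypothetical sketch of how a later patch would extend it (the handler name
tdx_handle_ept_violation() is illustrative only, nothing here adds it):

	switch (exit_reason.basic) {
	case EXIT_REASON_EPT_VIOLATION:
		/* hypothetical: handle the GPA reported via R8 (tdexit_gpa()) */
		return tdx_handle_ept_violation(vcpu);
	default:
		break;
	}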
arch/x86/kvm/vmx/main.c | 37 ++++++++++++-
arch/x86/kvm/vmx/tdx.c | 110 +++++++++++++++++++++++++++++++++++++
arch/x86/kvm/vmx/x86_ops.h | 10 ++++
3 files changed, 154 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index 3751e6ed2388..1a20542f3a84 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -243,6 +243,25 @@ static bool vt_protected_apic_has_interrupt(struct kvm_vcpu *vcpu)
return tdx_protected_apic_has_interrupt(vcpu);
}
+static int vt_handle_exit(struct kvm_vcpu *vcpu,
+ enum exit_fastpath_completion fastpath)
+{
+ if (is_td_vcpu(vcpu))
+ return tdx_handle_exit(vcpu, fastpath);
+
+ return vmx_handle_exit(vcpu, fastpath);
+}
+
+static void vt_handle_exit_irqoff(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu)) {
+ tdx_handle_exit_irqoff(vcpu);
+ return;
+ }
+
+ vmx_handle_exit_irqoff(vcpu);
+}
+
static void vt_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
{
struct pi_desc *pi = vcpu_to_pi_desc(vcpu);
@@ -449,6 +468,18 @@ static void vt_request_immediate_exit(struct kvm_vcpu *vcpu)
vmx_request_immediate_exit(vcpu);
}
+static void vt_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
+ u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code)
+{
+ if (is_td_vcpu(vcpu)) {
+ tdx_get_exit_info(vcpu, reason, info1, info2, intr_info,
+ error_code);
+ return;
+ }
+
+ vmx_get_exit_info(vcpu, reason, info1, info2, intr_info, error_code);
+}
+
static u8 vt_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
if (is_td_vcpu(vcpu))
@@ -544,7 +575,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.vcpu_pre_run = vt_vcpu_pre_run,
.vcpu_run = vt_vcpu_run,
- .handle_exit = vmx_handle_exit,
+ .handle_exit = vt_handle_exit,
.skip_emulated_instruction = vmx_skip_emulated_instruction,
.update_emulated_instruction = vmx_update_emulated_instruction,
.set_interrupt_shadow = vt_set_interrupt_shadow,
@@ -579,7 +610,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.set_identity_map_addr = vmx_set_identity_map_addr,
.get_mt_mask = vt_get_mt_mask,
- .get_exit_info = vmx_get_exit_info,
+ .get_exit_info = vt_get_exit_info,
.vcpu_after_set_cpuid = vmx_vcpu_after_set_cpuid,
@@ -593,7 +624,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.load_mmu_pgd = vt_load_mmu_pgd,
.check_intercept = vmx_check_intercept,
- .handle_exit_irqoff = vmx_handle_exit_irqoff,
+ .handle_exit_irqoff = vt_handle_exit_irqoff,
.request_immediate_exit = vt_request_immediate_exit,
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 0f0f1ea8a712..eea36f990e17 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -164,6 +164,26 @@ static __always_inline hpa_t set_hkid_to_hpa(hpa_t pa, u16 hkid)
return pa | ((hpa_t)hkid << boot_cpu_data.x86_phys_bits);
}
+static __always_inline unsigned long tdexit_exit_qual(struct kvm_vcpu *vcpu)
+{
+ return kvm_rcx_read(vcpu);
+}
+
+static __always_inline unsigned long tdexit_ext_exit_qual(struct kvm_vcpu *vcpu)
+{
+ return kvm_rdx_read(vcpu);
+}
+
+static __always_inline unsigned long tdexit_gpa(struct kvm_vcpu *vcpu)
+{
+ return kvm_r8_read(vcpu);
+}
+
+static __always_inline unsigned long tdexit_intr_info(struct kvm_vcpu *vcpu)
+{
+ return kvm_r9_read(vcpu);
+}
+
static inline bool is_td_vcpu_created(struct vcpu_tdx *tdx)
{
return tdx->td_vcpu_created;
@@ -887,6 +907,12 @@ static noinstr void tdx_vcpu_enter_exit(struct vcpu_tdx *tdx)
WARN_ON_ONCE(!kvm_rebooting &&
(tdx->exit_reason.full & TDX_SW_ERROR) == TDX_SW_ERROR);
+ if ((u16)tdx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI &&
+ is_nmi(tdexit_intr_info(vcpu))) {
+ kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
+ vmx_do_nmi_irqoff();
+ kvm_after_interrupt(vcpu);
+ }
guest_state_exit_irqoff();
}
@@ -930,6 +956,25 @@ void tdx_inject_nmi(struct kvm_vcpu *vcpu)
td_management_write8(to_tdx(vcpu), TD_VCPU_PEND_NMI, 1);
}
+void tdx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_tdx *tdx = to_tdx(vcpu);
+ u16 exit_reason = tdx->exit_reason.basic;
+
+ if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
+ vmx_handle_external_interrupt_irqoff(vcpu,
+ tdexit_intr_info(vcpu));
+ else if (exit_reason == EXIT_REASON_EXCEPTION_NMI)
+ vmx_handle_exception_irqoff(vcpu, tdexit_intr_info(vcpu));
+}
+
+static int tdx_handle_triple_fault(struct kvm_vcpu *vcpu)
+{
+ vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
+ vcpu->mmio_needed = 0;
+ return 0;
+}
+
void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int pgd_level)
{
td_vmcs_write64(to_tdx(vcpu), SHARED_EPT_POINTER, root_hpa & PAGE_MASK);
@@ -1285,6 +1330,71 @@ void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
__vmx_deliver_posted_interrupt(vcpu, &tdx->pi_desc, vector);
}
+int tdx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t fastpath)
+{
+ union tdx_exit_reason exit_reason = to_tdx(vcpu)->exit_reason;
+
+ /* See the comment for tdh_sept_seamcall(). */
+ if (unlikely(exit_reason.full == (TDX_OPERAND_BUSY | TDX_OPERAND_ID_SEPT)))
+ return 1;
+
+ /*
+ * TDH.VP.ENTER checks the TD epoch, which contends with TDH.MEM.TRACK
+ * and other vCPUs' TDH.VP.ENTER.
+ */
+ if (unlikely(exit_reason.full == (TDX_OPERAND_BUSY | TDX_OPERAND_ID_TD_EPOCH)))
+ return 1;
+
+ if (unlikely(exit_reason.full == TDX_SEAMCALL_UD)) {
+ kvm_spurious_fault();
+ /*
+ * In the case of reboot or kexec, loop with TDH.VP.ENTER and
+ * TDX_SEAMCALL_UD to avoid unnecessary activity.
+ */
+ return 1;
+ }
+
+ if (unlikely(exit_reason.non_recoverable || exit_reason.error)) {
+ if (unlikely(exit_reason.basic == EXIT_REASON_TRIPLE_FAULT))
+ return tdx_handle_triple_fault(vcpu);
+
+ kvm_pr_unimpl("TD exit 0x%llx, %d hkid 0x%x hkid pa 0x%llx\n",
+ exit_reason.full, exit_reason.basic,
+ to_kvm_tdx(vcpu->kvm)->hkid,
+ set_hkid_to_hpa(0, to_kvm_tdx(vcpu->kvm)->hkid));
+ goto unhandled_exit;
+ }
+
+ WARN_ON_ONCE(fastpath != EXIT_FASTPATH_NONE);
+
+ switch (exit_reason.basic) {
+ default:
+ break;
+ }
+
+unhandled_exit:
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
+ vcpu->run->internal.ndata = 2;
+ vcpu->run->internal.data[0] = exit_reason.full;
+ vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
+ return 0;
+}
+
+void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
+ u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code)
+{
+ struct vcpu_tdx *tdx = to_tdx(vcpu);
+
+ *reason = tdx->exit_reason.full;
+
+ *info1 = tdexit_exit_qual(vcpu);
+ *info2 = tdexit_ext_exit_qual(vcpu);
+
+ *intr_info = tdexit_intr_info(vcpu);
+ *error_code = 0;
+}
+
static int tdx_get_capabilities(struct kvm_tdx_cmd *cmd)
{
struct kvm_tdx_capabilities __user *user_caps;
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index 58053daf52d1..7877debdce8a 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -155,11 +155,16 @@ void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void tdx_vcpu_put(struct kvm_vcpu *vcpu);
void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu);
+void tdx_handle_exit_irqoff(struct kvm_vcpu *vcpu);
+int tdx_handle_exit(struct kvm_vcpu *vcpu,
+ enum exit_fastpath_completion fastpath);
u8 tdx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
int trig_mode, int vector);
void tdx_inject_nmi(struct kvm_vcpu *vcpu);
+void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
+ u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code);
int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp);
@@ -192,11 +197,16 @@ static inline void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) {}
static inline void tdx_vcpu_put(struct kvm_vcpu *vcpu) {}
static inline void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) {}
static inline bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu) { return false; }
+static inline void tdx_handle_exit_irqoff(struct kvm_vcpu *vcpu) {}
+static inline int tdx_handle_exit(struct kvm_vcpu *vcpu,
+ enum exit_fastpath_completion fastpath) { return 0; }
static inline u8 tdx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) { return 0; }
static inline void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
int trig_mode, int vector) {}
static inline void tdx_inject_nmi(struct kvm_vcpu *vcpu) {}
+static inline void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason, u64 *info1,
+ u64 *info2, u32 *intr_info, u32 *error_code) {}
static inline int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp) { return -EOPNOTSUPP; }
--
2.25.1