Message-Id: <4f4aaf292008608a8717e9553c3315ee02f66b20.1708933498.git.isaku.yamahata@intel.com>
Date: Mon, 26 Feb 2024 00:26:51 -0800
From: isaku.yamahata@...el.com
To: kvm@...r.kernel.org,
linux-kernel@...r.kernel.org
Cc: isaku.yamahata@...el.com,
isaku.yamahata@...il.com,
Paolo Bonzini <pbonzini@...hat.com>,
erdemaktas@...gle.com,
Sean Christopherson <seanjc@...gle.com>,
Sagi Shahar <sagis@...gle.com>,
Kai Huang <kai.huang@...el.com>,
chen.bo@...el.com,
hang.yuan@...el.com,
tina.zhang@...el.com
Subject: [PATCH v19 109/130] KVM: TDX: Handle TDX PV port io hypercall
From: Isaku Yamahata <isaku.yamahata@...el.com>
Wire up the TDX PV port I/O hypercall (EXIT_REASON_IO_INSTRUCTION) to the KVM backend function.
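
For context, here is a hedged guest-side sketch (not part of this patch) of the
TDVMCALL that tdx_emulate_io() services.  Per the GHCI Instruction.IO register
convention assumed by the tdvmcall_a0_read()..tdvmcall_a3_read() helpers, the
guest passes the access size in R12, the direction in R13 (0 = in, 1 = out),
the port in R14 and, for an out, the value in R15; KVM reports the TDVMCALL
status back in R10 and the value read in R11.  tdg_vp_vmcall_io() below is a
hypothetical wrapper; real guest code goes through __tdx_hypercall() in
arch/x86/coco/tdx/.

/*
 * Hypothetical wrapper around TDG.VP.VMCALL<Instruction.IO>: returns the
 * status from R10 and stores the value KVM placed in R11 via *val.
 */
u64 tdg_vp_vmcall_io(unsigned int size, bool write, u16 port, u64 data, u64 *val);

static u32 tdx_guest_inl(u16 port)
{
	u64 val = 0;

	/* R12 = 4 (size), R13 = 0 (in), R14 = port, R15 unused for reads */
	if (tdg_vp_vmcall_io(4, false, port, 0, &val))
		return ~0u;	/* R10 != TDVMCALL_SUCCESS */

	return (u32)val;	/* value KVM returned in R11 */
}
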
Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
Reviewed-by: Paolo Bonzini <pbonzini@...hat.com>
---
v18:
- Fix the "out" case to set R10 and R11 correctly when user space handled the
  port out.
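
Below is a hedged sketch of the user space side that the R10/R11 fix above
matters for (handle_port_write()/handle_port_read() are hypothetical VMM
helpers, not KVM API).  When pio_out_emulated()/pio_in_emulated() cannot
complete the access in the kernel they return 0, so tdx_emulate_io() returns 0
and the vCPU exits with KVM_EXIT_IO.  The next KVM_RUN invokes the
complete_userspace_io callback registered by this patch, which must put
TDVMCALL_SUCCESS in R10 and the value read (or 0 for an out) in R11 before the
TD resumes.

#include <linux/kvm.h>

/* Hypothetical VMM device-model helpers. */
static void handle_port_write(unsigned short port, const void *data, unsigned int size);
static void handle_port_read(unsigned short port, void *data, unsigned int size);

/* run is the struct kvm_run mmap()ed from the vCPU fd. */
static void vmm_handle_exit(struct kvm_run *run)
{
	void *data;

	if (run->exit_reason != KVM_EXIT_IO)
		return;

	/* run->io.count is 1 for this TDVMCALL path. */
	data = (unsigned char *)run + run->io.data_offset;
	if (run->io.direction == KVM_EXIT_IO_OUT)
		handle_port_write(run->io.port, data, run->io.size);
	else
		handle_port_read(run->io.port, data, run->io.size);

	/*
	 * The next KVM_RUN re-enters the kernel, which calls
	 * vcpu->arch.complete_userspace_io, i.e. tdx_complete_pio_out()
	 * or tdx_complete_pio_in(), filling R10/R11 before the TD resumes.
	 */
}
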
---
arch/x86/kvm/vmx/tdx.c | 67 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 67 insertions(+)
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index a2caf2ae838c..55fc6cc6c816 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -1152,6 +1152,71 @@ static int tdx_emulate_hlt(struct kvm_vcpu *vcpu)
 	return kvm_emulate_halt_noskip(vcpu);
 }
 
+static int tdx_complete_pio_out(struct kvm_vcpu *vcpu)
+{
+	tdvmcall_set_return_code(vcpu, TDVMCALL_SUCCESS);
+	tdvmcall_set_return_val(vcpu, 0);
+	return 1;
+}
+
+static int tdx_complete_pio_in(struct kvm_vcpu *vcpu)
+{
+	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
+	unsigned long val = 0;
+	int ret;
+
+	WARN_ON_ONCE(vcpu->arch.pio.count != 1);
+
+	ret = ctxt->ops->pio_in_emulated(ctxt, vcpu->arch.pio.size,
+					 vcpu->arch.pio.port, &val, 1);
+	WARN_ON_ONCE(!ret);
+
+	tdvmcall_set_return_code(vcpu, TDVMCALL_SUCCESS);
+	tdvmcall_set_return_val(vcpu, val);
+
+	return 1;
+}
+
+static int tdx_emulate_io(struct kvm_vcpu *vcpu)
+{
+	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
+	unsigned long val = 0;
+	unsigned int port;
+	int size, ret;
+	bool write;
+
+	++vcpu->stat.io_exits;
+
+	size = tdvmcall_a0_read(vcpu);
+	write = tdvmcall_a1_read(vcpu);
+	port = tdvmcall_a2_read(vcpu);
+
+	if (size != 1 && size != 2 && size != 4) {
+		tdvmcall_set_return_code(vcpu, TDVMCALL_INVALID_OPERAND);
+		return 1;
+	}
+
+	if (write) {
+		val = tdvmcall_a3_read(vcpu);
+		ret = ctxt->ops->pio_out_emulated(ctxt, size, port, &val, 1);
+
+		/* No need for a complete_userspace_io callback. */
+		vcpu->arch.pio.count = 0;
+	} else
+		ret = ctxt->ops->pio_in_emulated(ctxt, size, port, &val, 1);
+
+	if (ret)
+		tdvmcall_set_return_val(vcpu, val);
+	else {
+		if (write)
+			vcpu->arch.complete_userspace_io = tdx_complete_pio_out;
+		else
+			vcpu->arch.complete_userspace_io = tdx_complete_pio_in;
+	}
+
+	return ret;
+}
+
 static int handle_tdvmcall(struct kvm_vcpu *vcpu)
 {
 	if (tdvmcall_exit_type(vcpu))
@@ -1162,6 +1227,8 @@ static int handle_tdvmcall(struct kvm_vcpu *vcpu)
 		return tdx_emulate_cpuid(vcpu);
 	case EXIT_REASON_HLT:
 		return tdx_emulate_hlt(vcpu);
+	case EXIT_REASON_IO_INSTRUCTION:
+		return tdx_emulate_io(vcpu);
 	default:
 		break;
 	}
--
2.25.1