Message-ID: <20230131013029.qtansdeuvwe7nlzt@yy-desk-7060>
Date: Tue, 31 Jan 2023 09:30:29 +0800
From: Yuan Yao <yuan.yao@...ux.intel.com>
To: isaku.yamahata@...el.com
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
isaku.yamahata@...il.com, Paolo Bonzini <pbonzini@...hat.com>,
erdemaktas@...gle.com, Sean Christopherson <seanjc@...gle.com>,
Sagi Shahar <sagis@...gle.com>,
David Matlack <dmatlack@...gle.com>
Subject: Re: [PATCH v11 099/113] KVM: TDX: Handle TDX PV map_gpa hypercall

On Thu, Jan 12, 2023 at 08:32:47AM -0800, isaku.yamahata@...el.com wrote:
> From: Isaku Yamahata <isaku.yamahata@...el.com>
>
> Wire up TDX PV map_gpa hypercall to the kvm/mmu backend.
>
> Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
> ---
> arch/x86/kvm/vmx/tdx.c | 53 ++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 53 insertions(+)
>
> diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> index 4bbde58510a4..486d0f0c6dd1 100644
> --- a/arch/x86/kvm/vmx/tdx.c
> +++ b/arch/x86/kvm/vmx/tdx.c
> @@ -1181,6 +1181,57 @@ static int tdx_emulate_wrmsr(struct kvm_vcpu *vcpu)
> return 1;
> }
>
> +static int tdx_map_gpa(struct kvm_vcpu *vcpu)
> +{
> + struct kvm *kvm = vcpu->kvm;
> + gpa_t gpa = tdvmcall_a0_read(vcpu);
> + gpa_t size = tdvmcall_a1_read(vcpu);
> + gpa_t end = gpa + size;
> + gfn_t s = gpa_to_gfn(gpa) & ~kvm_gfn_shared_mask(kvm);
> + gfn_t e = gpa_to_gfn(end) & ~kvm_gfn_shared_mask(kvm);
> + int i;
> +
> + if (!IS_ALIGNED(gpa, 4096) || !IS_ALIGNED(size, 4096) ||
> + end < gpa ||
> + end > kvm_gfn_shared_mask(kvm) << (PAGE_SHIFT + 1) ||
> + kvm_is_private_gpa(kvm, gpa) != kvm_is_private_gpa(kvm, end)) {
> + tdvmcall_set_return_code(vcpu, TDG_VP_VMCALL_INVALID_OPERAND);
> + return 1;
> + }
> +
> + /*
> + * Check how the requested region overlaps with the KVM memory slots.
> + * For simplicity, require that it must be contained within a memslot or
> + * it must not overlap with any memslots (MMIO).
> + */
> + for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
> + struct kvm_memslots *slots = __kvm_memslots(kvm, i);
> + struct kvm_memslot_iter iter;
> +
> + kvm_for_each_memslot_in_gfn_range(&iter, slots, s, e) {
> + struct kvm_memory_slot *slot = iter.slot;
> + gfn_t slot_s = slot->base_gfn;
> + gfn_t slot_e = slot->base_gfn + slot->npages;
> +
> + /* no overlap */
> + if (e < slot_s || s >= slot_e)
> + continue;
> +
> + /* contained in slot */
> + if (slot_s <= s && e <= slot_e) {
> + if (kvm_slot_can_be_private(slot))
> + return tdx_vp_vmcall_to_user(vcpu);
> + continue;
> + }
> +
> + break;
> + }
> + }
> +
> + tdvmcall_set_return_code(vcpu, TDG_VP_VMCALL_INVALID_OPERAND);
This returns TDG_VP_VMCALL_INVALID_OPERAND if the TD is running with
non-private slots, which looks incorrect to the caller in the TD guest
(the operands themselves are valid). Can we instead refuse to create the
TD (or the slot) if a private memory slot is the only slot type supported
for it?
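
Something like the sketch below is what I have in mind: reject a
shared-only slot for a TD when the slot is created, rather than failing
MAP_GPA at runtime. This is untested and only illustrative; is_td() is
the helper from this series, and hooking KVM_MR_CREATE in
kvm_arch_prepare_memory_region() is just my assumption about where such
a check could live:

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	/*
	 * Untested sketch: if a TD only supports private memory,
	 * refuse to create a slot that cannot be private, so that
	 * tdx_map_gpa() never has to fail a well-formed request.
	 */
	if (change == KVM_MR_CREATE && is_td(kvm) &&
	    !kvm_slot_can_be_private(new))
		return -EINVAL;

	/* ... existing checks ... */
	return 0;
}

With something like that in place, the memslot loop above could stay as
is, and TDG_VP_VMCALL_INVALID_OPERAND would only be returned for
genuinely malformed GPA ranges.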
> + return 1;
> +}
> +
> static int handle_tdvmcall(struct kvm_vcpu *vcpu)
> {
> if (tdvmcall_exit_type(vcpu))
> @@ -1206,6 +1257,8 @@ static int handle_tdvmcall(struct kvm_vcpu *vcpu)
> * guest TD doesn't make sense. No argument check is done.
> */
> return tdx_vp_vmcall_to_user(vcpu);
> + case TDG_VP_VMCALL_MAP_GPA:
> + return tdx_map_gpa(vcpu);
> default:
> break;
> }
> --
> 2.25.1
>