Message-ID: <20221216052642.GF527635@ls.amr.corp.intel.com>
Date: Thu, 15 Dec 2022 21:26:42 -0800
From: Isaku Yamahata <isaku.yamahata@...il.com>
To: "Huang, Kai" <kai.huang@...el.com>
Cc: "kvm@...r.kernel.org" <kvm@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"Yamahata, Isaku" <isaku.yamahata@...el.com>,
"sean.j.christopherson@...el.com" <sean.j.christopherson@...el.com>,
"pbonzini@...hat.com" <pbonzini@...hat.com>,
"Shahar, Sagi" <sagis@...gle.com>,
"Aktas, Erdem" <erdemaktas@...gle.com>,
"isaku.yamahata@...il.com" <isaku.yamahata@...il.com>,
"dmatlack@...gle.com" <dmatlack@...gle.com>,
"Christopherson,, Sean" <seanjc@...gle.com>
Subject: Re: [PATCH v10 105/108] KVM: TDX: Add methods to ignore accesses to CPU state
On Wed, Dec 14, 2022 at 11:43:14AM +0000,
"Huang, Kai" <kai.huang@...el.com> wrote:
> On Sat, 2022-10-29 at 23:23 -0700, isaku.yamahata@...el.com wrote:
> > +static u8 vt_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
> > +{
> > +	if (is_td_vcpu(vcpu)) {
> > +		if (is_mmio)
> > +			return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
> > +		return MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT;
> > +	}
> > +
> > +	return vmx_get_mt_mask(vcpu, gfn, is_mmio);
> > +}
>
> So you are returning WB for _ALL_ guest memory, including shared. Wouldn't this
> break MTRR handling for shared memory? For instance, IIUC we can still support
> assigning a device to a TDX guest when VT-d doesn't support coherent
> memory, in which case the guest's MTRR/PAT are honored. I think this should
> also apply to a TDX guest's shared memory?
You're right. So here is the updated change.
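For context, the vmx_get_mt_mask() behavior that the TDX variant mirrors for
shared GPAs looks roughly like the sketch below (my paraphrase of
arch/x86/kvm/vmx/vmx.c around this kernel version, not verbatim). The one
divergence is that guest CR0 cannot be read for a TD, so the TDX version
assumes CR0.CD = 0 instead of taking the CR0.CD branch:

static u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
	/* MMIO: guest CD/MTRR ignored, map UC; effective type is UC/WC per guest PAT. */
	if (is_mmio)
		return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;

	/* No noncoherent DMA: guest CD/MTRR/PAT all ignored, force WB. */
	if (!kvm_arch_has_noncoherent_dma(vcpu->kvm))
		return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;

	/* Guest disabled caching via CR0.CD. */
	if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
		if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
			return MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT;
		return (MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT) |
		       VMX_EPT_IPAT_BIT;
	}

	/* Otherwise honor the guest MTRRs for this gfn. */
	return kvm_mtrr_get_guest_memory_type(vcpu, gfn) << VMX_EPT_MT_EPTE_SHIFT;
}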
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -798,11 +798,8 @@ static int vt_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
 
 static u8 vt_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 {
-	if (is_td_vcpu(vcpu)) {
-		if (is_mmio)
-			return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
-		return MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT;
-	}
+	if (is_td_vcpu(vcpu))
+		return tdx_get_mt_mask(vcpu, gfn, is_mmio);
 
 	return vmx_get_mt_mask(vcpu, gfn, is_mmio);
 }
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index ac47b20e4e91..f1842eb32d6c 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -568,7 +568,29 @@ int tdx_vm_init(struct kvm *kvm)
 	return 0;
 }
 
+u8 tdx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
+{
+	if (is_mmio)
+		return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
+
+	if (!kvm_arch_has_noncoherent_dma(vcpu->kvm))
+		return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
+
+	/* Guest CR0 is unknown. Assume CR0.CD = 0. */
+
+	/* TDX private GPA is always WB. */
+	if (!(gfn & kvm_gfn_shared_mask(vcpu->kvm)))
+		return MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT;
+
+	/*
+	 * The MTRR MSRs are defined without awareness of the shared bit,
+	 * so clear it before the guest MTRR lookup.
+	 */
+	gfn = kvm_gfn_private(vcpu->kvm, gfn);
+	return kvm_mtrr_get_guest_memory_type(vcpu, gfn) << VMX_EPT_MT_EPTE_SHIFT;
+}
+
 int tdx_vcpu_create(struct kvm_vcpu *vcpu)
 {
 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
 	struct vcpu_tdx *tdx = to_tdx(vcpu);
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index 96f5546194d4..38fb888e1be9 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -168,6 +168,7 @@ int tdx_handle_exit(struct kvm_vcpu *vcpu,
 void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
 			   int trig_mode, int vector);
 void tdx_inject_nmi(struct kvm_vcpu *vcpu);
+u8 tdx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
 void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
 		       u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code);
 bool tdx_is_emulated_msr(u32 index, bool write);
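
For completeness, the shared-bit helpers used above come from earlier in this
series; roughly, they behave like the sketch below. This is my paraphrase: the
gfn_shared_mask field is how I recall the series recording the position of the
shared bit per VM, so treat the bodies as illustrative rather than exact:

static inline gfn_t kvm_gfn_shared_mask(struct kvm *kvm)
{
	/* Mask with only the TD's shared bit set (illustrative). */
	return kvm->arch.gfn_shared_mask;
}

static inline gfn_t kvm_gfn_private(struct kvm *kvm, gfn_t gfn)
{
	/* Clear the shared bit to get the canonical private GFN (illustrative). */
	return gfn & ~kvm_gfn_shared_mask(kvm);
}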
--
Isaku Yamahata <isaku.yamahata@...il.com>