Message-ID: <Yqz+CZlGCoQo7lMQ@google.com>
Date: Fri, 17 Jun 2022 22:19:53 +0000
From: Sean Christopherson <seanjc@...gle.com>
To: Peter Gonda <pgonda@...gle.com>
Cc: kvm@...r.kernel.org, Marc Orr <marcorr@...gle.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Tom Lendacky <thomas.lendacky@....com>,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH] KVM: SEV: Init target VMCBs in sev_migrate_from
On Fri, Jun 17, 2022, Peter Gonda wrote:
> @@ -1681,6 +1683,10 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
>
> 	list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list);
>
> +	kvm_for_each_vcpu(i, vcpu, dst_kvm) {
> +		sev_init_vmcb(to_svm(vcpu));
Curly braces are unnecessary.
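I.e. the same loop, just without the braces (sketch):

	kvm_for_each_vcpu(i, vcpu, dst_kvm)
		sev_init_vmcb(to_svm(vcpu));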
> +	}
> +
> 	/*
> 	 * If this VM has mirrors, "transfer" each mirror's refcount of the
> 	 * source to the destination (this KVM). The caller holds a reference
> @@ -1739,6 +1745,8 @@ static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
> 		src_svm->vmcb->control.ghcb_gpa = INVALID_PAGE;
> 		src_svm->vmcb->control.vmsa_pa = INVALID_PAGE;
> 		src_vcpu->arch.guest_state_protected = false;
> +
> +		sev_es_init_vmcb(dst_svm);
> 	}
> 	to_kvm_svm(src)->sev_info.es_active = false;
> 	to_kvm_svm(dst)->sev_info.es_active = true;
> @@ -2914,6 +2922,12 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
> 					    count, in);
> }
>
> +void sev_init_vmcb(struct vcpu_svm *svm)
> +{
> +	svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
> +	clr_exception_intercept(svm, UD_VECTOR);
I don't love separating SEV and SEV-ES VMCB initialization, especially since
they're both doing RMW operations and not straight writes.  E.g. migration ends
up reversing the order between the two relative to init_vmcb().  That's just
asking for a subtle bug to be introduced that bites only the migration path due
to the ordering difference.
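Roughly, the two orderings in play (just a sketch of the call sequences, not
literal code):

	/* Fresh VM, init_vmcb(): SEV updates first, then SEV-ES updates. */
	svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
	clr_exception_intercept(svm, UD_VECTOR);
	sev_es_init_vmcb(svm);

	/*
	 * Intra-host migration with this patch: sev_es_migrate_from() runs
	 * before sev_migrate_from(), so the destination VMCBs take the same
	 * RMW updates in the opposite order.
	 */
	sev_es_init_vmcb(dst_svm);	/* from sev_es_migrate_from() */
	sev_init_vmcb(dst_svm);		/* from sev_migrate_from() */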
What about using common top-level flows for SEV and SEV-ES so that the
sequencing between the two is more rigid?  The resulting sev_migrate_from() is
a little gross, but IMO it's worth having a fixed sequence, and the flip side
to the ugliness is that it documents some of the differences between SEV and
SEV-ES migration.
---
arch/x86/kvm/svm/sev.c | 75 +++++++++++++++++++++++++++---------------
arch/x86/kvm/svm/svm.c | 11 ++-----
arch/x86/kvm/svm/svm.h | 2 +-
3 files changed, 52 insertions(+), 36 deletions(-)
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 51fd985cf21d..9efb679d89d1 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1665,7 +1665,10 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
 {
 	struct kvm_sev_info *dst = &to_kvm_svm(dst_kvm)->sev_info;
 	struct kvm_sev_info *src = &to_kvm_svm(src_kvm)->sev_info;
+	struct kvm_vcpu *dst_vcpu, *src_vcpu;
+	struct vcpu_svm *dst_svm, *src_svm;
 	struct kvm_sev_info *mirror;
+	unsigned long i;
 
 	dst->active = true;
 	dst->asid = src->asid;
@@ -1704,27 +1707,22 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
 		list_del(&src->mirror_entry);
 		list_add_tail(&dst->mirror_entry, &owner_sev_info->mirror_vms);
 	}
-}
 
-static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
-{
-	unsigned long i;
-	struct kvm_vcpu *dst_vcpu, *src_vcpu;
-	struct vcpu_svm *dst_svm, *src_svm;
-
-	if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus))
-		return -EINVAL;
-
-	kvm_for_each_vcpu(i, src_vcpu, src) {
-		if (!src_vcpu->arch.guest_state_protected)
-			return -EINVAL;
-	}
-
-	kvm_for_each_vcpu(i, src_vcpu, src) {
-		src_svm = to_svm(src_vcpu);
-		dst_vcpu = kvm_get_vcpu(dst, i);
+	kvm_for_each_vcpu(i, dst_vcpu, dst_kvm) {
 		dst_svm = to_svm(dst_vcpu);
 
+		sev_init_vmcb(dst_svm);
+
+		if (!src->es_active)
+			continue;
+
+		/*
+		 * Note, the source is not required to have the same number of
+		 * vCPUs as the destination when migrating a vanilla SEV VM.
+		 */
+		src_vcpu = kvm_get_vcpu(src_kvm, i);
+		src_svm = to_svm(src_vcpu);
+
 		/*
 		 * Transfer VMSA and GHCB state to the destination. Nullify and
 		 * clear source fields as appropriate, the state now belongs to
@@ -1740,8 +1738,26 @@ static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
 		src_svm->vmcb->control.vmsa_pa = INVALID_PAGE;
 		src_vcpu->arch.guest_state_protected = false;
 	}
-	to_kvm_svm(src)->sev_info.es_active = false;
-	to_kvm_svm(dst)->sev_info.es_active = true;
+
+	dst->es_active = src->es_active;
+	src->es_active = false;
+}
+
+static int sev_check_source_vcpus(struct kvm *dst, struct kvm *src)
+{
+	struct kvm_vcpu *src_vcpu;
+	unsigned long i;
+
+	if (!sev_es_guest(src))
+		return 0;
+
+	if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus))
+		return -EINVAL;
+
+	kvm_for_each_vcpu(i, src_vcpu, src) {
+		if (!src_vcpu->arch.guest_state_protected)
+			return -EINVAL;
+	}
 
 	return 0;
 }
@@ -1789,11 +1805,9 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
 	if (ret)
 		goto out_dst_vcpu;
 
-	if (sev_es_guest(source_kvm)) {
-		ret = sev_es_migrate_from(kvm, source_kvm);
-		if (ret)
-			goto out_source_vcpu;
-	}
+	ret = sev_check_source_vcpus(kvm, source_kvm);
+	if (ret)
+		goto out_source_vcpu;
 
 	sev_migrate_from(kvm, source_kvm);
 	kvm_vm_dead(source_kvm);
@@ -2914,7 +2928,7 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
 					    count, in);
 }
 
-void sev_es_init_vmcb(struct vcpu_svm *svm)
+static void sev_es_init_vmcb(struct vcpu_svm *svm)
 {
 	struct kvm_vcpu *vcpu = &svm->vcpu;
 
@@ -2967,6 +2981,15 @@ void sev_es_init_vmcb(struct vcpu_svm *svm)
 	}
 }
 
+void sev_init_vmcb(struct vcpu_svm *svm)
+{
+	svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
+	clr_exception_intercept(svm, UD_VECTOR);
+
+	if (sev_es_guest(svm->vcpu.kvm))
+		sev_es_init_vmcb(svm);
+}
+
 void sev_es_vcpu_reset(struct vcpu_svm *svm)
 {
 	/*
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index c6cca0ce127b..a6bb67738005 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1259,15 +1259,8 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
 		svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
 	}
 
-	if (sev_guest(vcpu->kvm)) {
-		svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
-		clr_exception_intercept(svm, UD_VECTOR);
-
-		if (sev_es_guest(vcpu->kvm)) {
-			/* Perform SEV-ES specific VMCB updates */
-			sev_es_init_vmcb(svm);
-		}
-	}
+	if (sev_guest(vcpu->kvm))
+		sev_init_vmcb(svm);
 
 	svm_hv_init_vmcb(vmcb);
 	init_vmcb_after_set_cpuid(vcpu);
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 128993feb4c6..444a7a67122a 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -653,10 +653,10 @@ void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_unsetup(void);
int sev_cpu_init(struct svm_cpu_data *sd);
+void sev_init_vmcb(struct vcpu_svm *svm);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
-void sev_es_init_vmcb(struct vcpu_svm *svm);
void sev_es_vcpu_reset(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa);
base-commit: 8baacf67c76c560fed954ac972b63e6e59a6fba0