[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20230118152721.GA24742@linuxonhyperv3.guj3yctzbm1etfxqx2vob5hsef.xx.internal.cloudapp.net>
Date: Wed, 18 Jan 2023 07:27:21 -0800
From: Jeremi Piotrowski <jpiotrowski@...ux.microsoft.com>
To: Michael Roth <michael.roth@....com>
Cc: kvm@...r.kernel.org, linux-coco@...ts.linux.dev,
linux-mm@...ck.org, linux-crypto@...r.kernel.org, x86@...nel.org,
linux-kernel@...r.kernel.org, tglx@...utronix.de, mingo@...hat.com,
jroedel@...e.de, thomas.lendacky@....com, hpa@...or.com,
ardb@...nel.org, pbonzini@...hat.com, seanjc@...gle.com,
vkuznets@...hat.com, wanpengli@...cent.com, jmattson@...gle.com,
luto@...nel.org, dave.hansen@...ux.intel.com, slp@...hat.com,
pgonda@...gle.com, peterz@...radead.org,
srinivas.pandruvada@...ux.intel.com, rientjes@...gle.com,
dovmurik@...ux.ibm.com, tobin@....com, bp@...en8.de,
vbabka@...e.cz, kirill@...temov.name, ak@...ux.intel.com,
tony.luck@...el.com, marcorr@...gle.com,
sathyanarayanan.kuppuswamy@...ux.intel.com, alpergun@...gle.com,
dgilbert@...hat.com, jarkko@...nel.org, ashish.kalra@....com,
harald@...fian.com, Brijesh Singh <brijesh.singh@....com>
Subject: Re: [PATCH RFC v7 44/64] KVM: SVM: Remove the long-lived GHCB host
map
On Wed, Dec 14, 2022 at 01:40:36PM -0600, Michael Roth wrote:
> From: Brijesh Singh <brijesh.singh@....com>
>
> On VMGEXIT, sev_handle_vmgexit() creates a host mapping for the GHCB GPA,
> and unmaps it just before VM-entry. This long-lived GHCB map is used by
> the VMGEXIT handler through accessors such as ghcb_{set,get}_xxx().
>
> A long-lived GHCB map can cause an issue when SEV-SNP is enabled. When
> SEV-SNP is enabled the mapped GPA needs to be protected against a page
> state change.
>
> To eliminate the long-lived GHCB mapping, update the GHCB sync operations
> to explicitly map the GHCB before access and unmap it after access is
> complete. This requires that the setting of the GHCB's sw_exit_info_{1,2}
> fields be done during sev_es_sync_to_ghcb(), so create two new fields in
> the vcpu_svm struct to hold these values when required to be set outside
> of the GHCB mapping.
>
> Signed-off-by: Brijesh Singh <brijesh.singh@....com>
> Signed-off-by: Ashish Kalra <ashish.kalra@....com>
> [mdr: defer per_cpu() assignment and order it with barrier() to fix case
> where kvm_vcpu_map() causes reschedule on different CPU]
> Signed-off-by: Michael Roth <michael.roth@....com>
> ---
> arch/x86/kvm/svm/sev.c | 131 ++++++++++++++++++++++++++---------------
> arch/x86/kvm/svm/svm.c | 18 +++---
> arch/x86/kvm/svm/svm.h | 24 +++++++-
> 3 files changed, 116 insertions(+), 57 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index d5c6e48055fb..6ac0cb6e3484 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -2921,15 +2921,40 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
> kvfree(svm->sev_es.ghcb_sa);
> }
>
> +static inline int svm_map_ghcb(struct vcpu_svm *svm, struct kvm_host_map *map)
> +{
> + struct vmcb_control_area *control = &svm->vmcb->control;
> + u64 gfn = gpa_to_gfn(control->ghcb_gpa);
> +
> + if (kvm_vcpu_map(&svm->vcpu, gfn, map)) {
> + /* Unable to map GHCB from guest */
> + pr_err("error mapping GHCB GFN [%#llx] from guest\n", gfn);
> + return -EFAULT;
> + }
> +
> + return 0;
> +}
> +
> +static inline void svm_unmap_ghcb(struct vcpu_svm *svm, struct kvm_host_map *map)
> +{
> + kvm_vcpu_unmap(&svm->vcpu, map, true);
> +}
> +
> static void dump_ghcb(struct vcpu_svm *svm)
> {
> - struct ghcb *ghcb = svm->sev_es.ghcb;
> + struct kvm_host_map map;
> unsigned int nbits;
> + struct ghcb *ghcb;
> +
> + if (svm_map_ghcb(svm, &map))
> + return;
> +
> + ghcb = map.hva;
dump_ghcb() is called from sev_es_validate_vmgexit() with the GHCB already
mapped. How about passing a 'struct kvm_host_map *' (or a 'struct ghcb *') as a
parameter, to avoid mapping the GHCB a second time?
>
> /* Re-use the dump_invalid_vmcb module parameter */
> if (!dump_invalid_vmcb) {
> pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
> - return;
> + goto e_unmap;
> }
>
> nbits = sizeof(ghcb->save.valid_bitmap) * 8;
> @@ -2944,12 +2969,21 @@ static void dump_ghcb(struct vcpu_svm *svm)
> pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
> ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb));
> pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);
> +
> +e_unmap:
> + svm_unmap_ghcb(svm, &map);
> }
>
> -static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
> +static bool sev_es_sync_to_ghcb(struct vcpu_svm *svm)
> {
> struct kvm_vcpu *vcpu = &svm->vcpu;
> - struct ghcb *ghcb = svm->sev_es.ghcb;
> + struct kvm_host_map map;
> + struct ghcb *ghcb;
> +
> + if (svm_map_ghcb(svm, &map))
> + return false;
> +
> + ghcb = map.hva;
>
> /*
> * The GHCB protocol so far allows for the following data
> @@ -2963,13 +2997,24 @@ static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
> ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
> ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
> ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
> +
> + /*
> + * Copy the return values from the exit_info_{1,2}.
> + */
> + ghcb_set_sw_exit_info_1(ghcb, svm->sev_es.ghcb_sw_exit_info_1);
> + ghcb_set_sw_exit_info_2(ghcb, svm->sev_es.ghcb_sw_exit_info_2);
> +
> + trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, ghcb);
> +
> + svm_unmap_ghcb(svm, &map);
> +
> + return true;
> }
>
> -static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
> +static void sev_es_sync_from_ghcb(struct vcpu_svm *svm, struct ghcb *ghcb)
> {
> struct vmcb_control_area *control = &svm->vmcb->control;
> struct kvm_vcpu *vcpu = &svm->vcpu;
> - struct ghcb *ghcb = svm->sev_es.ghcb;
> u64 exit_code;
>
> /*
> @@ -3013,20 +3058,25 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
> memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
> }
>
> -static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
> +static int sev_es_validate_vmgexit(struct vcpu_svm *svm, u64 *exit_code)
> {
> - struct kvm_vcpu *vcpu;
> + struct kvm_vcpu *vcpu = &svm->vcpu;
> + struct kvm_host_map map;
> struct ghcb *ghcb;
> - u64 exit_code;
> u64 reason;
>
> - ghcb = svm->sev_es.ghcb;
> + if (svm_map_ghcb(svm, &map))
> + return -EFAULT;
> +
> + ghcb = map.hva;
> +
> + trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);
>
> /*
> * Retrieve the exit code now even though it may not be marked valid
> * as it could help with debugging.
> */
> - exit_code = ghcb_get_sw_exit_code(ghcb);
> + *exit_code = ghcb_get_sw_exit_code(ghcb);
>
> /* Only GHCB Usage code 0 is supported */
> if (ghcb->ghcb_usage) {
> @@ -3119,6 +3169,9 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
> goto vmgexit_err;
> }
>
> + sev_es_sync_from_ghcb(svm, ghcb);
> +
> + svm_unmap_ghcb(svm, &map);
> return 0;
>
> vmgexit_err:
> @@ -3129,10 +3182,10 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
> ghcb->ghcb_usage);
> } else if (reason == GHCB_ERR_INVALID_EVENT) {
> vcpu_unimpl(vcpu, "vmgexit: exit code %#llx is not valid\n",
> - exit_code);
> + *exit_code);
> } else {
> vcpu_unimpl(vcpu, "vmgexit: exit code %#llx input is not valid\n",
> - exit_code);
> + *exit_code);
> dump_ghcb(svm);
> }
>
> @@ -3142,6 +3195,8 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
> ghcb_set_sw_exit_info_1(ghcb, 2);
> ghcb_set_sw_exit_info_2(ghcb, reason);
>
> + svm_unmap_ghcb(svm, &map);
> +
> /* Resume the guest to "return" the error code. */
> return 1;
> }
Powered by blists - more mailing lists