Message-ID: <419e72d3-5142-41c0-61d8-21b9db14ec5b@amd.com>
Date: Fri, 21 Mar 2025 09:36:59 -0500
From: Tom Lendacky <thomas.lendacky@....com>
To: kvm@...r.kernel.org, linux-kernel@...r.kernel.org, x86@...nel.org
Cc: Paolo Bonzini <pbonzini@...hat.com>,
Sean Christopherson <seanjc@...gle.com>, Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>, Ingo Molnar <mingo@...hat.com>,
Thomas Gleixner <tglx@...utronix.de>, Michael Roth <michael.roth@....com>
Subject: Re: [PATCH 1/5] KVM: SVM: Decrypt SEV VMSA in dump_vmcb() if
debugging is enabled
On 3/20/25 08:26, Tom Lendacky wrote:
> An SEV-ES/SEV-SNP VM save area (VMSA) can be decrypted if the guest
> policy allows debugging. Update the dump_vmcb() routine to output
> some of the SEV VMSA contents when decryption is possible. This can
> be useful for debugging purposes.
>
> Signed-off-by: Tom Lendacky <thomas.lendacky@....com>
> ---
> arch/x86/kvm/svm/sev.c | 98 ++++++++++++++++++++++++++++++++++++++++++
> arch/x86/kvm/svm/svm.c | 13 ++++++
> arch/x86/kvm/svm/svm.h | 11 +++++
> 3 files changed, 122 insertions(+)
>
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index 661108d65ee7..6e3f5042d9ce 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -563,6 +563,8 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
> if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params)))
> return -EFAULT;
>
> + sev->policy = params.policy;
> +
> memset(&start, 0, sizeof(start));
>
> dh_blob = NULL;
> @@ -2220,6 +2222,8 @@ static int snp_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
> if (params.policy & SNP_POLICY_MASK_SINGLE_SOCKET)
> return -EINVAL;
>
> + sev->policy = params.policy;
> +
> sev->snp_context = snp_context_create(kvm, argp);
> if (!sev->snp_context)
> return -ENOTTY;
> @@ -4975,3 +4979,97 @@ int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
>
> return level;
> }
> +
> +struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu)
> +{
> + struct vcpu_svm *svm = to_svm(vcpu);
> + struct vmcb_save_area *vmsa;
> + struct kvm_sev_info *sev;
> + int error = 0;
> + int ret;
> +
> + if (!sev_es_guest(vcpu->kvm))
> + return NULL;
> +
> + /*
> + * If the VMSA has not yet been encrypted, return a pointer to the
> + * current un-encrypted VMSA.
> + */
> + if (!vcpu->arch.guest_state_protected)
> + return (struct vmcb_save_area *)svm->sev_es.vmsa;
> +
> + sev = to_kvm_sev_info(vcpu->kvm);
> +
> + /* Check if the SEV policy allows debugging */
> + if (sev_snp_guest(vcpu->kvm)) {
> + if (!(sev->policy & SNP_POLICY_DEBUG))
> + return NULL;
> + } else {
> + if (sev->policy & SEV_POLICY_NODBG)
> + return NULL;
> + }
> +
> + if (sev_snp_guest(vcpu->kvm)) {
> + struct sev_data_snp_dbg dbg = {0};
> +
> + vmsa = snp_alloc_firmware_page(__GFP_ZERO);
> + if (!vmsa)
> + return NULL;
> +
> + dbg.gctx_paddr = __psp_pa(sev->snp_context);
> + dbg.src_addr = svm->vmcb->control.vmsa_pa;
> + dbg.dst_addr = __psp_pa(vmsa);
> +
> + ret = sev_issue_cmd(vcpu->kvm, SEV_CMD_SNP_DBG_DECRYPT, &dbg, &error);
> +
> + /*
> + * Return the target page to a hypervisor page no matter what.
> + * If this fails, the page can't be used, so leak it and don't
> + * try to use it.
> + */
> + if (snp_page_reclaim(vcpu->kvm, PHYS_PFN(__pa(vmsa))))
> + return NULL;
And actually, when snp_page_reclaim() fails, I should call snp_leak_pages()
here to record the leaked page. I'll add that in the next version.
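Roughly along these lines (untested sketch; it assumes snp_leak_pages()
takes a PFN and a page count, and that the reclaim path doesn't already
record the leak itself):

		/*
		 * Return the target page to a hypervisor page no matter what.
		 * If this fails, the page can't be used, so record it as
		 * leaked and don't try to use it.
		 */
		if (snp_page_reclaim(vcpu->kvm, PHYS_PFN(__pa(vmsa)))) {
			snp_leak_pages(PHYS_PFN(__pa(vmsa)), 1);
			return NULL;
		}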
Thanks,
Tom
> +
> + if (ret) {
> + pr_err("SEV: SNP_DBG_DECRYPT failed ret=%d, fw_error=%d (%#x)\n",
> + ret, error, error);
> + free_page((unsigned long)vmsa);
> +
> + return NULL;
> + }
> + } else {
> + struct sev_data_dbg dbg = {0};
> + struct page *vmsa_page;
> +
> + vmsa_page = alloc_page(GFP_KERNEL);
> + if (!vmsa_page)
> + return NULL;
> +
> + vmsa = page_address(vmsa_page);
> +
> + dbg.handle = sev->handle;
> + dbg.src_addr = svm->vmcb->control.vmsa_pa;
> + dbg.dst_addr = __psp_pa(vmsa);
> + dbg.len = PAGE_SIZE;
> +
> + ret = sev_issue_cmd(vcpu->kvm, SEV_CMD_DBG_DECRYPT, &dbg, &error);
> + if (ret) {
> + pr_err("SEV: SEV_CMD_DBG_DECRYPT failed ret=%d, fw_error=%d (0x%x)\n",
> + ret, error, error);
> + __free_page(vmsa_page);
> +
> + return NULL;
> + }
> + }
> +
> + return vmsa;
> +}
> +
> +void sev_free_decrypted_vmsa(struct kvm_vcpu *vcpu, struct vmcb_save_area *vmsa)
> +{
> + /* If the VMSA has not yet been encrypted, nothing was allocated */
> + if (!vcpu->arch.guest_state_protected || !vmsa)
> + return;
> +
> + free_page((unsigned long)vmsa);
> +}
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index e67de787fc71..21477871073c 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -3423,6 +3423,15 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
> pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
> pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
> pr_err("%-20s%016llx\n", "vmsa_pa:", control->vmsa_pa);
> +
> + if (sev_es_guest(vcpu->kvm)) {
> + save = sev_decrypt_vmsa(vcpu);
> + if (!save)
> + goto no_vmsa;
> +
> + save01 = save;
> + }
> +
> pr_err("VMCB State Save Area:\n");
> pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
> "es:",
> @@ -3493,6 +3502,10 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
> pr_err("%-15s %016llx %-13s %016llx\n",
> "excp_from:", save->last_excp_from,
> "excp_to:", save->last_excp_to);
> +
> +no_vmsa:
> + if (sev_es_guest(vcpu->kvm))
> + sev_free_decrypted_vmsa(vcpu, save);
> }
>
> static bool svm_check_exit_valid(u64 exit_code)
> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index ea44c1da5a7c..66979ddc3659 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -98,6 +98,7 @@ struct kvm_sev_info {
> unsigned int asid; /* ASID used for this guest */
> unsigned int handle; /* SEV firmware handle */
> int fd; /* SEV device fd */
> + unsigned long policy;
> unsigned long pages_locked; /* Number of pages locked */
> struct list_head regions_list; /* List of registered regions */
> u64 ap_jump_table; /* SEV-ES AP Jump Table address */
> @@ -114,6 +115,9 @@ struct kvm_sev_info {
> struct mutex guest_req_mutex; /* Must acquire before using bounce buffers */
> };
>
> +#define SEV_POLICY_NODBG BIT_ULL(0)
> +#define SNP_POLICY_DEBUG BIT_ULL(19)
> +
> struct kvm_svm {
> struct kvm kvm;
>
> @@ -756,6 +760,8 @@ void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu);
> int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
> void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
> int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn);
> +struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu);
> +void sev_free_decrypted_vmsa(struct kvm_vcpu *vcpu, struct vmcb_save_area *vmsa);
> #else
> static inline struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
> {
> @@ -787,6 +793,11 @@ static inline int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
> return 0;
> }
>
> +static inline struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu)
> +{
> + return NULL;
> +}
> +static inline void sev_free_decrypted_vmsa(struct kvm_vcpu *vcpu, struct vmcb_save_area *vmsa) {}
> #endif
>
> /* vmenter.S */