Message-ID: <673b9be2-67b0-7386-0f9a-abfe103e6bc4@amd.com>
Date: Fri, 21 Mar 2025 09:40:11 -0500
From: Tom Lendacky <thomas.lendacky@....com>
To: kvm@...r.kernel.org, linux-kernel@...r.kernel.org, x86@...nel.org
Cc: Paolo Bonzini <pbonzini@...hat.com>,
Sean Christopherson <seanjc@...gle.com>, Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>, Ingo Molnar <mingo@...hat.com>,
Thomas Gleixner <tglx@...utronix.de>, Michael Roth <michael.roth@....com>
Subject: Re: [PATCH 1/5] KVM: SVM: Decrypt SEV VMSA in dump_vmcb() if
debugging is enabled
On 3/21/25 09:36, Tom Lendacky wrote:
> On 3/20/25 08:26, Tom Lendacky wrote:
>> An SEV-ES/SEV-SNP VM save area (VMSA) can be decrypted if the guest
>> policy allows debugging. Update the dump_vmcb() routine to output
>> some of the SEV VMSA contents if possible. This can be useful for
>> debug purposes.
>>
>> Signed-off-by: Tom Lendacky <thomas.lendacky@....com>
>> ---
>> + /*
>> + * Return the target page to a hypervisor page no matter what.
>> + * If this fails, the page can't be used, so leak it and don't
>> + * try to use it.
>> + */
>> + if (snp_page_reclaim(vcpu->kvm, PHYS_PFN(__pa(vmsa))))
>> + return NULL;
>
> And actually I should call snp_leak_pages() here to record that. I'll add
> that to the next version.
Err... snp_page_reclaim() already does that. Nevermind.
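(For anyone following along: snp_page_reclaim() already records the leak on its failure path, roughly along these lines. This is a paraphrased sketch, not the exact upstream body:

	static int snp_page_reclaim(struct kvm *kvm, u64 pfn)
	{
		struct sev_data_snp_page_reclaim data = {0};
		int fw_err, rc;

		data.paddr = __sme_set(pfn << PAGE_SHIFT);
		rc = sev_do_cmd(SEV_CMD_SNP_PAGE_RECLAIM, &data, &fw_err);
		if (rc) {
			/*
			 * Reclaim failed: the page can't safely be handed back
			 * to the hypervisor, so it is recorded as leaked here.
			 * Callers don't need a separate snp_leak_pages() call.
			 */
			snp_leak_pages(pfn, 1);
			return -EIO;
		}

		return 0;
	}
)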
Thanks,
Tom
>
> Thanks,
> Tom
>
>> +
>> + if (ret) {
>> + pr_err("SEV: SNP_DBG_DECRYPT failed ret=%d, fw_error=%d (%#x)\n",
>> + ret, error, error);
>> + free_page((unsigned long)vmsa);
>> +
>> + return NULL;
>> + }
>> + } else {
>> + struct sev_data_dbg dbg = {0};
>> + struct page *vmsa_page;
>> +
>> + vmsa_page = alloc_page(GFP_KERNEL);
>> + if (!vmsa_page)
>> + return NULL;
>> +
>> + vmsa = page_address(vmsa_page);
>> +
>> + dbg.handle = sev->handle;
>> + dbg.src_addr = svm->vmcb->control.vmsa_pa;
>> + dbg.dst_addr = __psp_pa(vmsa);
>> + dbg.len = PAGE_SIZE;
>> +
>> + ret = sev_issue_cmd(vcpu->kvm, SEV_CMD_DBG_DECRYPT, &dbg, &error);
>> + if (ret) {
>> + pr_err("SEV: SEV_CMD_DBG_DECRYPT failed ret=%d, fw_error=%d (0x%x)\n",
>> + ret, error, error);
>> + __free_page(vmsa_page);
>> +
>> + return NULL;
>> + }
>> + }
>> +
>> + return vmsa;
>> +}
>> +
>> +void sev_free_decrypted_vmsa(struct kvm_vcpu *vcpu, struct vmcb_save_area *vmsa)
>> +{
>> + /* If the VMSA has not yet been encrypted, nothing was allocated */
>> + if (!vcpu->arch.guest_state_protected || !vmsa)
>> + return;
>> +
>> + free_page((unsigned long)vmsa);
>> +}
>> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
>> index e67de787fc71..21477871073c 100644
>> --- a/arch/x86/kvm/svm/svm.c
>> +++ b/arch/x86/kvm/svm/svm.c
>> @@ -3423,6 +3423,15 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
>> pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
>> pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
>> pr_err("%-20s%016llx\n", "vmsa_pa:", control->vmsa_pa);
>> +
>> + if (sev_es_guest(vcpu->kvm)) {
>> + save = sev_decrypt_vmsa(vcpu);
>> + if (!save)
>> + goto no_vmsa;
>> +
>> + save01 = save;
>> + }
>> +
>> pr_err("VMCB State Save Area:\n");
>> pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
>> "es:",
>> @@ -3493,6 +3502,10 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
>> pr_err("%-15s %016llx %-13s %016llx\n",
>> "excp_from:", save->last_excp_from,
>> "excp_to:", save->last_excp_to);
>> +
>> +no_vmsa:
>> + if (sev_es_guest(vcpu->kvm))
>> + sev_free_decrypted_vmsa(vcpu, save);
>> }
>>
>> static bool svm_check_exit_valid(u64 exit_code)
>> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
>> index ea44c1da5a7c..66979ddc3659 100644
>> --- a/arch/x86/kvm/svm/svm.h
>> +++ b/arch/x86/kvm/svm/svm.h
>> @@ -98,6 +98,7 @@ struct kvm_sev_info {
>> unsigned int asid; /* ASID used for this guest */
>> unsigned int handle; /* SEV firmware handle */
>> int fd; /* SEV device fd */
>> + unsigned long policy;
>> unsigned long pages_locked; /* Number of pages locked */
>> struct list_head regions_list; /* List of registered regions */
>> u64 ap_jump_table; /* SEV-ES AP Jump Table address */
>> @@ -114,6 +115,9 @@ struct kvm_sev_info {
>> struct mutex guest_req_mutex; /* Must acquire before using bounce buffers */
>> };
>>
>> +#define SEV_POLICY_NODBG BIT_ULL(0)
>> +#define SNP_POLICY_DEBUG BIT_ULL(19)
>> +
>> struct kvm_svm {
>> struct kvm kvm;
>>
>> @@ -756,6 +760,8 @@ void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu);
>> int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
>> void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
>> int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn);
>> +struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu);
>> +void sev_free_decrypted_vmsa(struct kvm_vcpu *vcpu, struct vmcb_save_area *vmsa);
>> #else
>> static inline struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
>> {
>> @@ -787,6 +793,11 @@ static inline int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
>> return 0;
>> }
>>
>> +static inline struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu)
>> +{
>> + return NULL;
>> +}
>> +static inline void sev_free_decrypted_vmsa(struct kvm_vcpu *vcpu, struct vmcb_save_area *vmsa) {}
>> #endif
>>
>> /* vmenter.S */