Message-ID: <e60f352abde6bfa9c989d63213d4fb04c3721c11.1724795971.git.thomas.lendacky@amd.com>
Date: Tue, 27 Aug 2024 16:59:25 -0500
From: Tom Lendacky <thomas.lendacky@....com>
To: <kvm@...r.kernel.org>, <linux-kernel@...r.kernel.org>, <x86@...nel.org>,
<linux-coco@...ts.linux.dev>
CC: Paolo Bonzini <pbonzini@...hat.com>, Sean Christopherson
<seanjc@...gle.com>, Borislav Petkov <bp@...en8.de>, Dave Hansen
<dave.hansen@...ux.intel.com>, Ingo Molnar <mingo@...hat.com>, "Thomas
Gleixner" <tglx@...utronix.de>, Michael Roth <michael.roth@....com>, "Ashish
Kalra" <ashish.kalra@....com>, Joerg Roedel <jroedel@...e.de>, Roy Hopkins
<roy.hopkins@...e.com>
Subject: [RFC PATCH 1/7] KVM: SVM: Implement GET_APIC_IDS NAE event

Implement the GET_APIC_IDS NAE event to gather and return the list of
APIC IDs for all vCPUs in the guest.

Signed-off-by: Tom Lendacky <thomas.lendacky@....com>
---
arch/x86/include/asm/sev-common.h | 1 +
arch/x86/include/uapi/asm/svm.h | 1 +
arch/x86/kvm/svm/sev.c | 84 ++++++++++++++++++++++++++++++-
3 files changed, 85 insertions(+), 1 deletion(-)
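
For reference, below is a rough guest-side sketch of how this NAE event
might be exercised, inferred only from the hypervisor behavior added in
this patch. It is not part of the patch itself: the GHCB plumbing
(vc_ghcb_invalidate(), ghcb_set_rax(), sev_es_ghcb_hv_call()) is assumed
to be the usual helpers in arch/x86/kernel/sev.c, and 'desc' is assumed
to point to page-aligned memory the guest has already made shared so the
hypervisor can write to it.

        /* Response buffer layout, mirroring sev_apic_id_desc below */
        struct sev_apic_id_desc {
                u32 num_entries;
                u32 apic_ids[];
        };

        /*
         * desc/npages: a shared response buffer and its size in pages.
         * RAX carries the buffer size in pages, SW_EXITINFO1 carries the
         * GPA of the buffer.
         */
        static int snp_get_apic_ids(struct ghcb *ghcb,
                                    struct sev_apic_id_desc *desc, u64 npages)
        {
                u32 i;

                vc_ghcb_invalidate(ghcb);
                ghcb_set_rax(ghcb, npages);

                if (sev_es_ghcb_hv_call(ghcb, NULL, SVM_VMGEXIT_GET_APIC_IDS,
                                        __pa(desc), 0) != ES_OK)
                        return -EIO;

                /*
                 * If the buffer was too small, the hypervisor reports the
                 * number of pages required back in RAX and does not fill
                 * in the buffer.
                 */
                if (ghcb_rax_is_valid(ghcb) && ghcb->save.rax > npages)
                        return -ENOSPC;

                for (i = 0; i < desc->num_entries; i++)
                        pr_info("vCPU %u has APIC ID %u\n", i, desc->apic_ids[i]);

                return 0;
        }

The exact failure signaling (required page count in RAX vs. an error code
in SW_EXITINFO1) follows from the handler below and may not match the
final GHCB specification wording.
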
diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
index 98726c2b04f8..d63c861ef91f 100644
--- a/arch/x86/include/asm/sev-common.h
+++ b/arch/x86/include/asm/sev-common.h
@@ -136,6 +136,7 @@ enum psc_op {
#define GHCB_HV_FT_SNP BIT_ULL(0)
#define GHCB_HV_FT_SNP_AP_CREATION BIT_ULL(1)
+#define GHCB_HV_FT_APIC_ID_LIST BIT_ULL(4)
#define GHCB_HV_FT_SNP_MULTI_VMPL BIT_ULL(5)
/*
diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
index 1814b413fd57..f8fa3c4c0322 100644
--- a/arch/x86/include/uapi/asm/svm.h
+++ b/arch/x86/include/uapi/asm/svm.h
@@ -115,6 +115,7 @@
#define SVM_VMGEXIT_AP_CREATE_ON_INIT 0
#define SVM_VMGEXIT_AP_CREATE 1
#define SVM_VMGEXIT_AP_DESTROY 2
+#define SVM_VMGEXIT_GET_APIC_IDS 0x80000017
#define SVM_VMGEXIT_SNP_RUN_VMPL 0x80000018
#define SVM_VMGEXIT_HV_FEATURES 0x8000fffd
#define SVM_VMGEXIT_TERM_REQUEST 0x8000fffe
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 532df12b43c5..199bdc7c7db1 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -39,7 +39,9 @@
#define GHCB_VERSION_DEFAULT 2ULL
#define GHCB_VERSION_MIN 1ULL
-#define GHCB_HV_FT_SUPPORTED (GHCB_HV_FT_SNP | GHCB_HV_FT_SNP_AP_CREATION)
+#define GHCB_HV_FT_SUPPORTED (GHCB_HV_FT_SNP | \
+ GHCB_HV_FT_SNP_AP_CREATION | \
+ GHCB_HV_FT_APIC_ID_LIST)
/* enable/disable SEV support */
static bool sev_enabled = true;
@@ -3390,6 +3392,10 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
if (!kvm_ghcb_rax_is_valid(svm))
goto vmgexit_err;
break;
+ case SVM_VMGEXIT_GET_APIC_IDS:
+ if (!kvm_ghcb_rax_is_valid(svm))
+ goto vmgexit_err;
+ break;
case SVM_VMGEXIT_NMI_COMPLETE:
case SVM_VMGEXIT_AP_HLT_LOOP:
case SVM_VMGEXIT_AP_JUMP_TABLE:
@@ -4124,6 +4130,77 @@ static int snp_handle_ext_guest_req(struct vcpu_svm *svm, gpa_t req_gpa, gpa_t r
return 1; /* resume guest */
}
+struct sev_apic_id_desc {
+ u32 num_entries;
+ u32 apic_ids[];
+};
+
+static void sev_get_apic_ids(struct vcpu_svm *svm)
+{
+ struct ghcb *ghcb = svm->sev_es.ghcb;
+ struct kvm_vcpu *vcpu = &svm->vcpu, *loop_vcpu;
+ struct kvm *kvm = vcpu->kvm;
+ unsigned int id_desc_size;
+ struct sev_apic_id_desc *desc;
+ kvm_pfn_t pfn;
+ gpa_t gpa;
+ u64 pages;
+ unsigned long i;
+ int n;
+
+ pages = vcpu->arch.regs[VCPU_REGS_RAX];
+
+ /* Each APIC ID is 32 bits in size, so make sure there is room */
+ n = atomic_read(&kvm->online_vcpus);
+ /* TODO: is this possible? */
+ if (n < 0)
+ return;
+
+ id_desc_size = sizeof(*desc);
+ id_desc_size += n * sizeof(desc->apic_ids[0]);
+ if (id_desc_size > (pages * PAGE_SIZE)) {
+ vcpu->arch.regs[VCPU_REGS_RAX] = PFN_UP(id_desc_size);
+ return;
+ }
+
+ gpa = svm->vmcb->control.exit_info_1;
+
+ ghcb_set_sw_exit_info_1(ghcb, 2);
+ ghcb_set_sw_exit_info_2(ghcb, 5);
+
+ if (!page_address_valid(vcpu, gpa))
+ return;
+
+ pfn = gfn_to_pfn(kvm, gpa_to_gfn(gpa));
+ if (is_error_noslot_pfn(pfn))
+ return;
+
+ if (!pages)
+ return;
+
+ /* Allocate a buffer to hold the APIC IDs */
+ desc = kvzalloc(id_desc_size, GFP_KERNEL_ACCOUNT);
+ if (!desc)
+ return;
+
+ desc->num_entries = n;
+ kvm_for_each_vcpu(i, loop_vcpu, kvm) {
+ /* TODO: is this possible? */
+ if (i >= n)
+ break;
+
+ desc->apic_ids[i] = loop_vcpu->vcpu_id;
+ }
+
+ if (!kvm_write_guest(kvm, gpa, desc, id_desc_size)) {
+ /* IDs were successfully written */
+ ghcb_set_sw_exit_info_1(ghcb, 0);
+ ghcb_set_sw_exit_info_2(ghcb, 0);
+ }
+
+ kvfree(desc);
+}
+
static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
{
struct vmcb_control_area *control = &svm->vmcb->control;
@@ -4404,6 +4481,11 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
case SVM_VMGEXIT_EXT_GUEST_REQUEST:
ret = snp_handle_ext_guest_req(svm, control->exit_info_1, control->exit_info_2);
break;
+ case SVM_VMGEXIT_GET_APIC_IDS:
+ sev_get_apic_ids(svm);
+
+ ret = 1;
+ break;
case SVM_VMGEXIT_UNSUPPORTED_EVENT:
vcpu_unimpl(vcpu,
"vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
--
2.43.2