[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250923050942.206116-5-Neeraj.Upadhyay@amd.com>
Date: Tue, 23 Sep 2025 10:39:11 +0530
From: Neeraj Upadhyay <Neeraj.Upadhyay@....com>
To: <kvm@...r.kernel.org>, <seanjc@...gle.com>, <pbonzini@...hat.com>
CC: <linux-kernel@...r.kernel.org>, <Thomas.Lendacky@....com>,
<nikunj@....com>, <Santosh.Shukla@....com>, <Vasant.Hegde@....com>,
<Suravee.Suthikulpanit@....com>, <bp@...en8.de>, <David.Kaplan@....com>,
<huibo.wang@....com>, <naveen.rao@....com>, <pgonda@...gle.com>,
<linux-kselftest@...r.kernel.org>, <shuah@...nel.org>, <tiala@...rosoft.com>
Subject: [RFC PATCH v2 04/35] Add GHCB allocations and helpers
From: Peter Gonda <pgonda@...gle.com>
Add GHCB management functionality, similar to the ucall management.
Allows selftest vCPUs to acquire GHCBs for their usage.
Cc: Vishal Annapurve <vannapurve@...gle.com>
Cc: Ackerley Tng <ackerleytng@...gle.com>
Cc: Paolo Bonzini <pbonzini@...hat.com>
Cc: Claudio Imbrenda <imbrenda@...ux.ibm.com>
Cc: Sean Christopherson <seanjc@...gle.com>
Cc: Carlos Bilbao <carlos.bilbao@....com>
Cc: Tom Lendacky <thomas.lendacky@....com>
Cc: Michael Roth <michael.roth@....com>
Cc: kvm@...r.kernel.org
Cc: linux-kselftest@...r.kernel.org
Signed-off-by: Peter Gonda <pgonda@...gle.com>
Signed-off-by: Neeraj Upadhyay <Neeraj.Upadhyay@....com>
---
tools/testing/selftests/kvm/include/x86/sev.h | 2 +
.../testing/selftests/kvm/lib/x86/processor.c | 9 +++
tools/testing/selftests/kvm/lib/x86/sev.c | 78 +++++++++++++++++++
3 files changed, 89 insertions(+)
diff --git a/tools/testing/selftests/kvm/include/x86/sev.h b/tools/testing/selftests/kvm/include/x86/sev.h
index 008b4169f5e2..6cda0acd22e4 100644
--- a/tools/testing/selftests/kvm/include/x86/sev.h
+++ b/tools/testing/selftests/kvm/include/x86/sev.h
@@ -46,6 +46,8 @@ static inline bool is_sev_vm(struct kvm_vm *vm)
return is_sev_es_vm(vm) || vm->type == KVM_X86_SEV_VM;
}
+int ghcb_nr_pages_required(uint64_t page_size);
+
void sev_vm_launch(struct kvm_vm *vm, uint32_t policy);
void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement);
void sev_vm_launch_finish(struct kvm_vm *vm);
diff --git a/tools/testing/selftests/kvm/lib/x86/processor.c b/tools/testing/selftests/kvm/lib/x86/processor.c
index d4c19ac885a9..d72eb96efb7c 100644
--- a/tools/testing/selftests/kvm/lib/x86/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86/processor.c
@@ -651,6 +651,15 @@ void kvm_arch_vm_post_create(struct kvm_vm *vm)
sync_global_to_guest(vm, guest_tsc_khz);
}
+int kvm_arch_vm_additional_pages_required(struct vm_shape shape, uint64_t page_size)
+{
+	/* SEV-ES and SNP guests need extra backing pages for per-vCPU GHCBs. */
+	bool needs_ghcbs = shape.type == KVM_X86_SEV_ES_VM ||
+			   shape.type == KVM_X86_SNP_VM;
+
+	return needs_ghcbs ? ghcb_nr_pages_required(page_size) : 0;
+}
+
void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
{
struct kvm_regs regs;
diff --git a/tools/testing/selftests/kvm/lib/x86/sev.c b/tools/testing/selftests/kvm/lib/x86/sev.c
index c3a9838f4806..c400faa9cc5f 100644
--- a/tools/testing/selftests/kvm/lib/x86/sev.c
+++ b/tools/testing/selftests/kvm/lib/x86/sev.c
@@ -3,6 +3,80 @@
#include <stdbool.h>
#include "sev.h"
+#include "linux/bitmap.h"
+#include "svm.h"
+#include "svm_util.h"
+
+/* One shared GHCB page plus the bookkeeping needed to hand it to a vCPU. */
+struct ghcb_entry {
+	struct ghcb ghcb;
+
+	/* Guest physical address of this GHCB. */
+	void *gpa;
+
+	/* Host virtual address of this struct. */
+	struct ghcb_entry *hva;
+};
+
+/* Pool of GHCBs, one slot per possible vCPU, with an allocation bitmap. */
+struct ghcb_header {
+	struct ghcb_entry ghcbs[KVM_MAX_VCPUS];
+	DECLARE_BITMAP(in_use, KVM_MAX_VCPUS);
+};
+
+/* Guest VA of the pool; published into the guest image by ghcb_init(). */
+static struct ghcb_header *ghcb_pool;
+
+int ghcb_nr_pages_required(uint64_t page_size)
+{
+	/* Bytes needed to back the whole pool, rounded up to a page multiple. */
+	uint64_t pool_bytes = align_up(sizeof(struct ghcb_header), page_size);
+
+	return pool_bytes / page_size;
+}
+
+/*
+ * Allocate the GHCB pool in guest memory shared with the host, record each
+ * entry's addresses, and publish the pool's guest VA to the guest.
+ */
+void ghcb_init(struct kvm_vm *vm)
+{
+	struct ghcb_header *hdr;
+	vm_vaddr_t gva;
+	int idx;
+
+	/* GHCBs must live in a shared (unencrypted) region. */
+	gva = vm_vaddr_alloc_shared(vm, sizeof(*hdr), KVM_UTIL_MIN_VADDR,
+				    MEM_REGION_DATA);
+	hdr = (struct ghcb_header *)addr_gva2hva(vm, gva);
+	memset(hdr, 0, sizeof(*hdr));
+
+	/* Stash each entry's host VA and the GPA of its GHCB page. */
+	for (idx = 0; idx < KVM_MAX_VCPUS; ++idx) {
+		struct ghcb_entry *entry = &hdr->ghcbs[idx];
+
+		entry->hva = entry;
+		entry->gpa = (void *)addr_hva2gpa(vm, &entry->ghcb);
+	}
+
+	/* Make the pool's guest VA visible to guest code via ghcb_pool. */
+	write_guest_global(vm, ghcb_pool, (struct ghcb_header *)gva);
+}
+
+/*
+ * Claim a free GHCB from the pool for the calling vCPU.  Returns NULL if the
+ * pool was never initialized or every slot is already in use.
+ */
+static struct ghcb_entry *ghcb_alloc(void)
+{
+	struct ghcb_entry *entry;
+	int i;
+
+	/* Pool is set up by ghcb_init(); nothing to hand out before that. */
+	if (!ghcb_pool)
+		return NULL;
+
+	/* Atomically claim the first free slot in the in_use bitmap. */
+	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+		if (!test_and_set_bit(i, ghcb_pool->in_use)) {
+			entry = &ghcb_pool->ghcbs[i];
+			memset(&entry->ghcb, 0, sizeof(entry->ghcb));
+			return entry;
+		}
+	}
+
+	return NULL;
+}
+
+/* Return a GHCB to the pool by clearing its slot in the in_use bitmap. */
+static void ghcb_free(struct ghcb_entry *entry)
+{
+	/* Pointer subtraction yields the entry's index within the pool. */
+	size_t idx = entry - ghcb_pool->ghcbs;
+
+	clear_bit(idx, ghcb_pool->in_use);
+}
+
/*
* sparsebit_next_clear() can return 0 if [x, 2**64-1] are all set, and the
@@ -88,7 +162,11 @@ void sev_vm_launch(struct kvm_vm *vm, uint32_t policy)
struct kvm_sev_guest_status status;
int ctr;
+ if (is_sev_es_vm(vm))
+ ghcb_init(vm);
+
vm_sev_ioctl(vm, KVM_SEV_LAUNCH_START, &launch_start);
+
vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status);
TEST_ASSERT_EQ(status.policy, policy);
--
2.34.1
Powered by blists - more mailing lists