Message-ID: <20240409133959.2888018-5-pgonda@google.com>
Date: Tue, 9 Apr 2024 06:39:57 -0700
From: Peter Gonda <pgonda@...gle.com>
To: pgonda@...gle.com, seanjc@...gle.com
Cc: linux-kernel@...r.kernel.org, Vishal Annapurve <vannapurve@...gle.com>,
Ackerley Tng <ackerleytng@...gle.com>, Paolo Bonzini <pbonzini@...hat.com>,
Claudio Imbrenda <imbrenda@...ux.ibm.com>, Carlos Bilbao <carlos.bilbao@....com>,
Tom Lendacky <thomas.lendacky@....com>, Michael Roth <michael.roth@....com>, kvm@...r.kernel.org,
linux-kselftest@...r.kernel.org
Subject: [PATCH 4/6] Add GHCB allocations and helpers
Add GHCB management functionality, similar to the existing ucall management,
so that selftest vCPUs can acquire GHCBs for their own use.
Cc: Vishal Annapurve <vannapurve@...gle.com>
Cc: Ackerley Tng <ackerleytng@...gle.com>
Cc: Paolo Bonzini <pbonzini@...hat.com>
Cc: Claudio Imbrenda <imbrenda@...ux.ibm.com>
Cc: Sean Christopherson <seanjc@...gle.com>
Cc: Carlos Bilbao <carlos.bilbao@....com>
Cc: Tom Lendacky <thomas.lendacky@....com>
Cc: Michael Roth <michael.roth@....com>
Cc: kvm@...r.kernel.org
Cc: linux-kselftest@...r.kernel.org
Signed-off-by: Peter Gonda <pgonda@...gle.com>
---
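A rough usage sketch for reviewers (illustrative only, not part of this
patch): it assumes ghcb_alloc()/ghcb_free() are eventually made reachable
from guest code later in the series (they are static here), and it
open-codes the GHCB MSR index (0xc0010130) and VMGEXIT ("rep; vmmcall")
from the GHCB spec rather than relying on any helper added by this patch.

/*
 * Hypothetical guest-side flow: grab a GHCB from the pool, point the
 * GHCB MSR at its GPA, fill in the GHCB for the desired exit, exit to
 * the hypervisor, and return the entry to the pool when done.
 */
static void guest_ghcb_example(void)
{
	struct ghcb_entry *entry = ghcb_alloc();

	GUEST_ASSERT(entry);

	/* Publish this vCPU's GHCB location to the hypervisor. */
	wrmsr(0xc0010130, (uint64_t)entry->gpa);

	/* Populate entry->ghcb as needed, then exit via VMGEXIT. */
	__asm__ __volatile__("rep; vmmcall");

	ghcb_free(entry);
}
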
.../selftests/kvm/include/x86_64/sev.h | 2 +
.../selftests/kvm/lib/x86_64/processor.c | 8 ++
tools/testing/selftests/kvm/lib/x86_64/sev.c | 76 ++++++++++++++++++
3 files changed, 86 insertions(+)
diff --git a/tools/testing/selftests/kvm/include/x86_64/sev.h b/tools/testing/selftests/kvm/include/x86_64/sev.h
index 8a1bf88474c9..bfd481707f67 100644
--- a/tools/testing/selftests/kvm/include/x86_64/sev.h
+++ b/tools/testing/selftests/kvm/include/x86_64/sev.h
@@ -27,6 +27,8 @@ enum sev_guest_state {
#define GHCB_MSR_TERM_REQ 0x100
+int ghcb_nr_pages_required(uint64_t page_size);
+
void sev_vm_launch(struct kvm_vm *vm, uint32_t policy);
void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement);
void sev_vm_launch_finish(struct kvm_vm *vm);
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index 49288fe10cd3..fd94a1bd82c9 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -584,6 +584,14 @@ void kvm_arch_vm_post_create(struct kvm_vm *vm)
sev_es_vm_init(vm);
}
+int kvm_arch_vm_additional_pages_required(struct vm_shape shape, uint64_t page_size)
+{
+	if (shape.subtype == VM_SUBTYPE_SEV_ES)
+		return ghcb_nr_pages_required(page_size);
+
+	return 0;
+}
+
void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
{
struct kvm_regs regs;
diff --git a/tools/testing/selftests/kvm/lib/x86_64/sev.c b/tools/testing/selftests/kvm/lib/x86_64/sev.c
index e248d3364b9c..27ae1d3b1355 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/sev.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/sev.c
@@ -4,6 +4,79 @@
#include <stdbool.h>
#include "sev.h"
+#include "linux/bitmap.h"
+#include "svm.h"
+#include "svm_util.h"
+
+struct ghcb_entry {
+	struct ghcb ghcb;
+
+	/* Guest physical address of this GHCB. */
+	void *gpa;
+
+	/* Host virtual address of this struct. */
+	struct ghcb_entry *hva;
+};
+
+struct ghcb_header {
+	struct ghcb_entry ghcbs[KVM_MAX_VCPUS];
+	DECLARE_BITMAP(in_use, KVM_MAX_VCPUS);
+};
+
+static struct ghcb_header *ghcb_pool;
+
+int ghcb_nr_pages_required(uint64_t page_size)
+{
+	return align_up(sizeof(struct ghcb_header), page_size) / page_size;
+}
+
+void ghcb_init(struct kvm_vm *vm)
+{
+	struct ghcb_header *hdr;
+	struct ghcb_entry *entry;
+	vm_vaddr_t vaddr;
+	int i;
+
+	vaddr = vm_vaddr_alloc_shared(vm, sizeof(*hdr), KVM_UTIL_MIN_VADDR,
+				      MEM_REGION_DATA);
+	hdr = (struct ghcb_header *)addr_gva2hva(vm, vaddr);
+	memset(hdr, 0, sizeof(*hdr));
+
+	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+		entry = &hdr->ghcbs[i];
+		entry->hva = entry;
+		entry->gpa = addr_hva2gpa(vm, &entry->ghcb);
+	}
+
+	write_guest_global(vm, ghcb_pool, (struct ghcb_header *)vaddr);
+}
+
+static struct ghcb_entry *ghcb_alloc(void)
+{
+	struct ghcb_entry *entry;
+	int i;
+
+	if (!ghcb_pool)
+		goto ghcb_failed;
+
+	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+		if (!test_and_set_bit(i, ghcb_pool->in_use)) {
+			entry = &ghcb_pool->ghcbs[i];
+			memset(&entry->ghcb, 0, sizeof(entry->ghcb));
+			return entry;
+		}
+	}
+
+ghcb_failed:
+	return NULL;
+}
+
+static void ghcb_free(struct ghcb_entry *entry)
+{
+	/* Beware, here be pointer arithmetic. */
+	clear_bit(entry - ghcb_pool->ghcbs, ghcb_pool->in_use);
+}
+
/*
* sparsebit_next_clear() can return 0 if [x, 2**64-1] are all set, and the
@@ -44,6 +117,9 @@ void sev_vm_launch(struct kvm_vm *vm, uint32_t policy)
struct kvm_sev_guest_status status;
int ctr;
+	if (policy & SEV_POLICY_ES)
+		ghcb_init(vm);
+
vm_sev_ioctl(vm, KVM_SEV_LAUNCH_START, &launch_start);
vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status);
--
2.44.0.478.gd926399ef9-goog