Message-ID: <20250401161106.790710-9-pbonzini@redhat.com>
Date: Tue, 1 Apr 2025 18:10:45 +0200
From: Paolo Bonzini <pbonzini@...hat.com>
To: linux-kernel@...r.kernel.org,
kvm@...r.kernel.org
Cc: roy.hopkins@...e.com,
seanjc@...gle.com,
thomas.lendacky@....com,
ashish.kalra@....com,
michael.roth@....com,
jroedel@...e.de,
nsaenz@...zon.com,
anelkz@...zon.de,
James.Bottomley@...senPartnership.com
Subject: [PATCH 08/29] KVM: move vcpu_array to struct kvm_plane
Different planes may have only a subset of the vCPUs available in
the initial plane; therefore, vcpu_array must also be moved to
struct kvm_plane. New functions allow accessing the vCPUs of
a struct kvm_plane and, as usual, the older names automatically
go through kvm->planes[0].
Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
---
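A usage sketch, for illustration only and not part of the patch:
count_plane_vcpus() below is a hypothetical helper showing how the new
per-plane iterator is meant to be used. The legacy
kvm_for_each_vcpu(idx, vcpup, kvm) keeps working and now simply expands
to kvm_for_each_plane_vcpu(idx, vcpup, kvm->planes[0]).

static int count_plane_vcpus(struct kvm *kvm, int plane_id)
{
	struct kvm_plane *plane = kvm->planes[plane_id];
	struct kvm_vcpu *vcpu;
	unsigned long i;
	int n = 0;

	if (!plane)
		return 0;

	/*
	 * The iterator skips vCPUs whose ->plane is still -1, i.e.
	 * vCPUs that are in the xarray but not yet fully created.
	 */
	kvm_for_each_plane_vcpu(i, vcpu, plane)
		n++;

	return n;
}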
include/linux/kvm_host.h | 29 +++++++++++++++++++++--------
virt/kvm/kvm_main.c | 22 +++++++++++++++-------
2 files changed, 36 insertions(+), 15 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0db27814294f..0a91b556767e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -763,6 +763,7 @@ struct kvm_memslots {
struct kvm_plane {
struct kvm *kvm;
+ struct xarray vcpu_array;
#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
/* Protected by slots_locks (for writes) and RCU (for reads) */
struct xarray mem_attr_array;
@@ -795,7 +796,6 @@ struct kvm {
struct kvm_memslots __memslots[KVM_MAX_NR_ADDRESS_SPACES][2];
/* The current active memslot set for each address space */
struct kvm_memslots __rcu *memslots[KVM_MAX_NR_ADDRESS_SPACES];
- struct xarray vcpu_array;
struct kvm_plane *planes[KVM_MAX_VCPU_PLANES];
@@ -990,20 +990,20 @@ static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
!refcount_read(&kvm->users_count));
}
-static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
+static inline struct kvm_vcpu *kvm_get_plane_vcpu(struct kvm_plane *plane, int i)
{
- struct kvm_vcpu *vcpu = xa_load(&kvm->vcpu_array, i);
+ struct kvm_vcpu *vcpu = xa_load(&plane->vcpu_array, i);
if (vcpu && unlikely(vcpu->plane == -1))
return NULL;
return vcpu;
}
-#define kvm_for_each_vcpu(idx, vcpup, kvm) \
- xa_for_each(&kvm->vcpu_array, idx, vcpup) \
+#define kvm_for_each_plane_vcpu(idx, vcpup, plane_) \
+ xa_for_each(&(plane_)->vcpu_array, idx, vcpup) \
if ((vcpup)->plane == -1) ; else \
-static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
+static inline struct kvm_vcpu *kvm_get_plane_vcpu_by_id(struct kvm_plane *plane, int id)
{
struct kvm_vcpu *vcpu = NULL;
unsigned long i;
@@ -1011,15 +1011,28 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
if (id < 0)
return NULL;
if (id < KVM_MAX_VCPUS)
- vcpu = kvm_get_vcpu(kvm, id);
+ vcpu = kvm_get_plane_vcpu(plane, id);
if (vcpu && vcpu->vcpu_id == id)
return vcpu;
- kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_for_each_plane_vcpu(i, vcpu, plane)
if (vcpu->vcpu_id == id)
return vcpu;
return NULL;
}
+static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
+{
+ return kvm_get_plane_vcpu(kvm->planes[0], i);
+}
+
+#define kvm_for_each_vcpu(idx, vcpup, kvm) \
+ kvm_for_each_plane_vcpu(idx, vcpup, kvm->planes[0])
+
+static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
+{
+ return kvm_get_plane_vcpu_by_id(kvm->planes[0], id);
+}
+
void kvm_destroy_vcpus(struct kvm *kvm);
void vcpu_load(struct kvm_vcpu *vcpu);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index eba02cb7cc57..cd4dfc399cad 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -481,12 +481,19 @@ static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
void kvm_destroy_vcpus(struct kvm *kvm)
{
+ int j;
unsigned long i;
struct kvm_vcpu *vcpu;
- kvm_for_each_vcpu(i, vcpu, kvm) {
- kvm_vcpu_destroy(vcpu);
- xa_erase(&kvm->vcpu_array, i);
+ for (j = ARRAY_SIZE(kvm->planes) - 1; j >= 0; j--) {
+ struct kvm_plane *plane = kvm->planes[j];
+ if (!plane)
+ continue;
+
+ kvm_for_each_plane_vcpu(i, vcpu, plane) {
+ kvm_vcpu_destroy(vcpu);
+ xa_erase(&plane->vcpu_array, i);
+ }
}
atomic_set(&kvm->online_vcpus, 0);
@@ -1110,6 +1117,7 @@ static struct kvm_plane *kvm_create_vm_plane(struct kvm *kvm, unsigned plane_id)
plane->kvm = kvm;
plane->plane = plane_id;
+ xa_init(&plane->vcpu_array);
#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
xa_init(&plane->mem_attr_array);
#endif
@@ -1137,7 +1145,6 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
mutex_init(&kvm->slots_arch_lock);
spin_lock_init(&kvm->mn_invalidate_lock);
rcuwait_init(&kvm->mn_memslots_update_rcuwait);
- xa_init(&kvm->vcpu_array);
INIT_LIST_HEAD(&kvm->gpc_list);
spin_lock_init(&kvm->gpc_lock);
@@ -3930,6 +3937,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
{
int nr_vcpus, start, i, idx, yielded;
struct kvm *kvm = me->kvm;
+ struct kvm_plane *plane = kvm->planes[me->plane];
struct kvm_vcpu *vcpu;
int try = 3;
@@ -3967,7 +3975,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
if (idx == me->vcpu_idx)
continue;
- vcpu = xa_load(&kvm->vcpu_array, idx);
+ vcpu = xa_load(&plane->vcpu_array, idx);
if (!READ_ONCE(vcpu->ready))
continue;
if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu))
@@ -4192,7 +4200,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, unsigned long id)
*/
vcpu->plane = -1;
vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
- r = xa_insert(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, GFP_KERNEL_ACCOUNT);
+ r = xa_insert(&kvm->planes[0]->vcpu_array, vcpu->vcpu_idx, vcpu, GFP_KERNEL_ACCOUNT);
WARN_ON_ONCE(r == -EBUSY);
if (r)
goto unlock_vcpu_destroy;
@@ -4228,7 +4236,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, unsigned long id)
kvm_put_xa_erase:
mutex_unlock(&vcpu->mutex);
kvm_put_kvm_no_destroy(kvm);
- xa_erase(&kvm->vcpu_array, vcpu->vcpu_idx);
+ xa_erase(&kvm->planes[0]->vcpu_array, vcpu->vcpu_idx);
unlock_vcpu_destroy:
mutex_unlock(&kvm->lock);
kvm_dirty_ring_free(&vcpu->dirty_ring);
--
2.49.0