Message-Id: <20220427200314.276673-9-mlevitsk@redhat.com>
Date: Wed, 27 Apr 2022 23:03:03 +0300
From: Maxim Levitsky <mlevitsk@...hat.com>
To: kvm@...r.kernel.org
Cc: Wanpeng Li <wanpengli@...cent.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Jani Nikula <jani.nikula@...ux.intel.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Tvrtko Ursulin <tvrtko.ursulin@...ux.intel.com>,
Rodrigo Vivi <rodrigo.vivi@...el.com>,
Zhenyu Wang <zhenyuw@...ux.intel.com>,
Joonas Lahtinen <joonas.lahtinen@...ux.intel.com>,
Tom Lendacky <thomas.lendacky@....com>,
Ingo Molnar <mingo@...hat.com>,
David Airlie <airlied@...ux.ie>,
Thomas Gleixner <tglx@...utronix.de>,
Dave Hansen <dave.hansen@...ux.intel.com>, x86@...nel.org,
intel-gfx@...ts.freedesktop.org,
Sean Christopherson <seanjc@...gle.com>,
Daniel Vetter <daniel@...ll.ch>,
Borislav Petkov <bp@...en8.de>, Joerg Roedel <joro@...tes.org>,
linux-kernel@...r.kernel.org, Jim Mattson <jmattson@...gle.com>,
Zhi Wang <zhi.a.wang@...el.com>,
Brijesh Singh <brijesh.singh@....com>,
"H. Peter Anvin" <hpa@...or.com>,
intel-gvt-dev@...ts.freedesktop.org,
dri-devel@...ts.freedesktop.org,
Maxim Levitsky <mlevitsk@...hat.com>
Subject: [RFC PATCH v3 08/19] KVM: x86: SVM: move avic state to separate struct
Move the AVIC-related fields of struct kvm_svm into their own
struct kvm_svm_avic. This makes the code a bit easier to read
once nested AVIC support is added.

No functional change intended.
Signed-off-by: Maxim Levitsky <mlevitsk@...hat.com>
---
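(Illustrative note, not part of the patch) With the AVIC fields wrapped
in the embedded struct kvm_svm_avic, the per-VM hash table now links
avic->hnode, and lookups recover the enclosing struct kvm_svm with
container_of(), as avic_ga_log_notifier() does below. A minimal sketch
of that pattern; the avic_to_kvm_svm() helper name is hypothetical and
used here only for illustration:

static inline struct kvm_svm *avic_to_kvm_svm(struct kvm_svm_avic *avic)
{
	/*
	 * The AVIC state is embedded in struct kvm_svm, so container_of()
	 * recovers the owning VM structure from a hash table entry.
	 */
	return container_of(avic, struct kvm_svm, avic);
}

The AVIC tables are then reached through the embedded struct, e.g.
kvm_svm->avic.physical_id_table_page instead of the old
kvm_svm->avic_physical_id_table_page.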
arch/x86/kvm/svm/avic.c | 51 +++++++++++++++++++++++------------------
arch/x86/kvm/svm/svm.h | 14 ++++++-----
2 files changed, 37 insertions(+), 28 deletions(-)
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 1102421668a11..e5cbbb97fbab6 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -69,6 +69,8 @@ int avic_ga_log_notifier(u32 ga_tag)
unsigned long flags;
struct kvm_svm *kvm_svm;
struct kvm_vcpu *vcpu = NULL;
+ struct kvm_svm_avic *avic;
+
u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);
@@ -76,9 +78,13 @@ int avic_ga_log_notifier(u32 ga_tag)
trace_kvm_avic_ga_log(vm_id, vcpu_id);
spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
- hash_for_each_possible(svm_vm_data_hash, kvm_svm, hnode, vm_id) {
- if (kvm_svm->avic_vm_id != vm_id)
+ hash_for_each_possible(svm_vm_data_hash, avic, hnode, vm_id) {
+
+
+ if (avic->vm_id != vm_id)
continue;
+
+ kvm_svm = container_of(avic, struct kvm_svm, avic);
vcpu = kvm_get_vcpu_by_id(&kvm_svm->kvm, vcpu_id);
break;
}
@@ -98,18 +104,18 @@ int avic_ga_log_notifier(u32 ga_tag)
void avic_vm_destroy(struct kvm *kvm)
{
unsigned long flags;
- struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
+ struct kvm_svm_avic *avic = &to_kvm_svm(kvm)->avic;
if (!enable_apicv)
return;
- if (kvm_svm->avic_logical_id_table_page)
- __free_page(kvm_svm->avic_logical_id_table_page);
- if (kvm_svm->avic_physical_id_table_page)
- __free_page(kvm_svm->avic_physical_id_table_page);
+ if (avic->logical_id_table_page)
+ __free_page(avic->logical_id_table_page);
+ if (avic->physical_id_table_page)
+ __free_page(avic->physical_id_table_page);
spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
- hash_del(&kvm_svm->hnode);
+ hash_del(&avic->hnode);
spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
}
@@ -117,10 +123,9 @@ int avic_vm_init(struct kvm *kvm)
{
unsigned long flags;
int err = -ENOMEM;
- struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
- struct kvm_svm *k2;
struct page *p_page;
struct page *l_page;
+ struct kvm_svm_avic *avic = &to_kvm_svm(kvm)->avic;
u32 vm_id;
if (!enable_apicv)
@@ -131,14 +136,14 @@ int avic_vm_init(struct kvm *kvm)
if (!p_page)
goto free_avic;
- kvm_svm->avic_physical_id_table_page = p_page;
+ avic->physical_id_table_page = p_page;
/* Allocating logical APIC ID table (4KB) */
l_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
if (!l_page)
goto free_avic;
- kvm_svm->avic_logical_id_table_page = l_page;
+ avic->logical_id_table_page = l_page;
spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
again:
@@ -149,13 +154,15 @@ int avic_vm_init(struct kvm *kvm)
}
/* Is it still in use? Only possible if wrapped at least once */
if (next_vm_id_wrapped) {
- hash_for_each_possible(svm_vm_data_hash, k2, hnode, vm_id) {
- if (k2->avic_vm_id == vm_id)
+ struct kvm_svm_avic *avic2;
+
+ hash_for_each_possible(svm_vm_data_hash, avic2, hnode, vm_id) {
+ if (avic2->vm_id == vm_id)
goto again;
}
}
- kvm_svm->avic_vm_id = vm_id;
- hash_add(svm_vm_data_hash, &kvm_svm->hnode, kvm_svm->avic_vm_id);
+ avic->vm_id = vm_id;
+ hash_add(svm_vm_data_hash, &avic->hnode, avic->vm_id);
spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
return 0;
@@ -169,8 +176,8 @@ void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb)
{
struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm);
phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
- phys_addr_t lpa = __sme_set(page_to_phys(kvm_svm->avic_logical_id_table_page));
- phys_addr_t ppa = __sme_set(page_to_phys(kvm_svm->avic_physical_id_table_page));
+ phys_addr_t lpa = __sme_set(page_to_phys(kvm_svm->avic.logical_id_table_page));
+ phys_addr_t ppa = __sme_set(page_to_phys(kvm_svm->avic.physical_id_table_page));
vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;
vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
@@ -193,7 +200,7 @@ static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
if (index >= AVIC_MAX_PHYSICAL_ID_COUNT)
return NULL;
- avic_physical_id_table = page_address(kvm_svm->avic_physical_id_table_page);
+ avic_physical_id_table = page_address(kvm_svm->avic.physical_id_table_page);
return &avic_physical_id_table[index];
}
@@ -296,7 +303,7 @@ static int avic_kick_target_vcpus_fast(struct kvm *kvm, struct kvm_lapic *source
int dest_mode = icrl & APIC_DEST_MASK;
int shorthand = icrl & APIC_SHORT_MASK;
struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
- u32 *avic_logical_id_table = page_address(kvm_svm->avic_logical_id_table_page);
+ u32 *avic_logical_id_table = page_address(kvm_svm->avic.logical_id_table_page);
if (shorthand != APIC_DEST_NOSHORT)
return -EINVAL;
@@ -453,7 +460,7 @@ static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
index = (cluster << 2) + apic;
}
- logical_apic_id_table = (u32 *) page_address(kvm_svm->avic_logical_id_table_page);
+ logical_apic_id_table = (u32 *) page_address(kvm_svm->avic.logical_id_table_page);
return &logical_apic_id_table[index];
}
@@ -803,7 +810,7 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
/* Try to enable guest_mode in IRTE */
pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
AVIC_HPA_MASK);
- pi.ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id,
+ pi.ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic.vm_id,
svm->vcpu.vcpu_id);
pi.is_guest_mode = true;
pi.vcpu_data = &vcpu_info;
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 32220a1b0ea20..6fcb164a6ee4a 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -88,15 +88,17 @@ struct kvm_sev_info {
atomic_t migration_in_progress;
};
-struct kvm_svm {
- struct kvm kvm;
- /* Struct members for AVIC */
- u32 avic_vm_id;
- struct page *avic_logical_id_table_page;
- struct page *avic_physical_id_table_page;
+struct kvm_svm_avic {
+ u32 vm_id;
+ struct page *logical_id_table_page;
+ struct page *physical_id_table_page;
struct hlist_node hnode;
+};
+struct kvm_svm {
+ struct kvm kvm;
+ struct kvm_svm_avic avic;
struct kvm_sev_info sev_info;
};
--
2.26.3