Message-ID: <1458281388-14452-3-git-send-email-Suravee.Suthikulpanit@amd.com>
Date: Fri, 18 Mar 2016 01:09:38 -0500
From: Suravee Suthikulpanit <Suravee.Suthikulpanit@....com>
To: <pbonzini@...hat.com>, <rkrcmar@...hat.com>, <joro@...tes.org>,
<bp@...en8.de>, <gleb@...nel.org>, <alex.williamson@...hat.com>
CC: <kvm@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<wei@...hat.com>, <sherry.hurwitz@....com>,
Suravee Suthikulpanit <Suravee.Suthikulpanit@....com>,
Suravee Suthikulpanit <suravee.suthikulpanit@....com>
Subject: [PART1 RFC v3 02/12] KVM: x86: Introducing kvm_x86_ops VM init/uninit hooks
Add function pointers to struct kvm_x86_ops so that the processor-specific
layer can provide hooks for when KVM initializes and un-initializes a VM.
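As a hypothetical illustration (not part of this patch), a vendor module
such as svm.c could wire up the new callbacks as sketched below; the
svm_vm_init()/svm_vm_uninit() names and their bodies are placeholders:

static int svm_vm_init(struct kvm *kvm)
{
	/* Allocate and initialize vendor-specific per-VM state here. */
	return 0;
}

static void svm_vm_uninit(struct kvm *kvm)
{
	/* Release whatever svm_vm_init() set up. */
}

static struct kvm_x86_ops svm_x86_ops = {
	/* ... existing callbacks elided ... */
	.vm_init = svm_vm_init,
	.vm_uninit = svm_vm_uninit,
};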
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@....com>
---
arch/x86/include/asm/kvm_host.h | 3 +++
arch/x86/kvm/x86.c | 10 +++++++++-
virt/kvm/kvm_main.c | 8 ++++----
3 files changed, 16 insertions(+), 5 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 44adbb8..4b0dd0f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -828,6 +828,9 @@ struct kvm_x86_ops {
bool (*cpu_has_high_real_mode_segbase)(void);
void (*cpuid_update)(struct kvm_vcpu *vcpu);
+ int (*vm_init)(struct kvm *kvm);
+ void (*vm_uninit)(struct kvm *kvm);
+
/* Create, but do not attach this VCPU */
struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
void (*vcpu_free)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 429c3f5..4d2961d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7700,6 +7700,8 @@ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
+ int ret = 0;
+
if (type)
return -EINVAL;
@@ -7724,7 +7726,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
- return 0;
+ if (kvm_x86_ops->vm_init)
+ ret = kvm_x86_ops->vm_init(kvm);
+
+ return ret;
}
static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
@@ -7751,6 +7756,9 @@ static void kvm_free_vcpus(struct kvm *kvm)
kvm_for_each_vcpu(i, vcpu, kvm)
kvm_arch_vcpu_free(vcpu);
+ if (kvm_x86_ops->vm_uninit)
+ kvm_x86_ops->vm_uninit(kvm);
+
mutex_lock(&kvm->lock);
for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
kvm->vcpus[i] = NULL;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1ca0258..5460325 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -536,10 +536,6 @@ static struct kvm *kvm_create_vm(unsigned long type)
if (!kvm)
return ERR_PTR(-ENOMEM);
- r = kvm_arch_init_vm(kvm, type);
- if (r)
- goto out_err_no_disable;
-
r = hardware_enable_all();
if (r)
goto out_err_no_disable;
@@ -578,6 +574,10 @@ static struct kvm *kvm_create_vm(unsigned long type)
atomic_set(&kvm->users_count, 1);
INIT_LIST_HEAD(&kvm->devices);
+ r = kvm_arch_init_vm(kvm, type);
+ if (r)
+ goto out_err;
+
r = kvm_init_mmu_notifier(kvm);
if (r)
goto out_err;
--
1.9.1