Message-Id: <20230228070057.3687180-3-zhaotianrui@loongson.cn>
Date: Tue, 28 Feb 2023 15:00:29 +0800
From: Tianrui Zhao <zhaotianrui@...ngson.cn>
To: Paolo Bonzini <pbonzini@...hat.com>
Cc: Huacai Chen <chenhuacai@...nel.org>,
WANG Xuerui <kernel@...0n.name>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
loongarch@...ts.linux.dev, linux-kernel@...r.kernel.org,
kvm@...r.kernel.org, Jens Axboe <axboe@...nel.dk>,
Mark Brown <broonie@...nel.org>,
Alex Deucher <alexander.deucher@....com>,
Oliver Upton <oliver.upton@...ux.dev>, maobibo@...ngson.cn,
Xi Ruoyao <xry111@...111.site>
Subject: [PATCH v3 02/29] LoongArch: KVM: Implement kvm module related interface
Implement the LoongArch KVM module init and module exit interfaces,
using the kvm context to save the VPID info and the vcpu world switch
interface pointers.
Signed-off-by: Tianrui Zhao <zhaotianrui@...ngson.cn>
---
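Note for readers of this patch in isolation: struct kvm_context and
struct kvm_world_switch are defined in asm/kvm_host.h elsewhere in this
series, not in this patch. A minimal sketch of the fields main.c relies
on might look like the following; only the field names are taken from
the code below, the types shown are assumptions:

/* Illustrative sketch only -- not the definitions from asm/kvm_host.h. */
struct kvm_vcpu;        /* from <linux/kvm_host.h> */

struct kvm_context {
        unsigned long vpid_cache;       /* next VPID to hand out on this CPU */
        struct kvm_vcpu *last_vcpu;     /* vCPU that ran last on this CPU */
};

struct kvm_world_switch {
        void *guest_eentry;     /* copy of the guest exception entry code */
        void *enter_guest;      /* copy of the enter-guest trampoline */
        int page_order;         /* allocation order of the backing pages */
};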
arch/loongarch/kvm/main.c | 104 ++++++++++++++++++++++++++++++++++++++
1 file changed, 104 insertions(+)
create mode 100644 arch/loongarch/kvm/main.c
diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c
new file mode 100644
index 000000000000..f72634ea2027
--- /dev/null
+++ b/arch/loongarch/kvm/main.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_host.h>
+#include <asm/cacheflush.h>
+
+static struct kvm_context __percpu *vmcs;
+struct kvm_world_switch *kvm_loongarch_ops;
+unsigned long vpid_mask;
+
+static int kvm_loongarch_env_init(void)
+{
+ struct kvm_context *context;
+ int cpu, order;
+ void *addr;
+
+ vmcs = alloc_percpu(struct kvm_context);
+ if (!vmcs) {
+ pr_err("kvm: failed to allocate percpu kvm_context\n");
+ return -ENOMEM;
+ }
+
+ kvm_loongarch_ops = kzalloc(sizeof(*kvm_loongarch_ops), GFP_KERNEL);
+ if (!kvm_loongarch_ops) {
+ free_percpu(vmcs);
+ return -ENOMEM;
+ }
+
+ /*
+ * The world switch code would run into trouble if a page fault
+ * reentry happened, since the pgd register is shared between the
+ * root kernel and the kvm hypervisor. The world switch entry must
+ * therefore live in an unmapped area and cannot be in a TLB-mapped
+ * area. Once hardware page table walking is supported, or separate
+ * pgd registers exist for the root kernel and the kvm hypervisor,
+ * this copying of the world switch code will no longer be needed.
+ */
+
+ order = get_order(kvm_vector_size + kvm_enter_guest_size);
+ addr = (void *)__get_free_pages(GFP_KERNEL, order);
+ if (!addr) {
+ kfree(kvm_loongarch_ops);
+ free_percpu(vmcs);
+ return -ENOMEM;
+ }
+
+ memcpy(addr, kvm_vector_entry, kvm_vector_size);
+ memcpy(addr + kvm_vector_size, kvm_enter_guest, kvm_enter_guest_size);
+ flush_icache_range((unsigned long)addr, (unsigned long)addr +
+ kvm_vector_size + kvm_enter_guest_size);
+ kvm_loongarch_ops->guest_eentry = addr;
+ kvm_loongarch_ops->enter_guest = addr + kvm_vector_size;
+ kvm_loongarch_ops->page_order = order;
+
+ vpid_mask = read_csr_gstat();
+ vpid_mask = (vpid_mask & CSR_GSTAT_GIDBIT) >> CSR_GSTAT_GIDBIT_SHIFT;
+ if (vpid_mask)
+ vpid_mask = GENMASK(vpid_mask - 1, 0);
+
+ for_each_possible_cpu(cpu) {
+ context = per_cpu_ptr(vmcs, cpu);
+ context->vpid_cache = vpid_mask + 1;
+ context->last_vcpu = NULL;
+ }
+
+ _kvm_init_fault();
+
+ return 0;
+}
+
+static void kvm_loongarch_env_exit(void)
+{
+ free_pages((unsigned long)kvm_loongarch_ops->guest_eentry, kvm_loongarch_ops->page_order);
+ kfree(kvm_loongarch_ops);
+ free_percpu(vmcs);
+}
+
+static int kvm_loongarch_init(void)
+{
+ int r;
+
+ if (!cpu_has_lvz)
+ return -ENODEV;
+
+ r = kvm_loongarch_env_init();
+ if (r)
+ return r;
+
+ return kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+}
+
+static void kvm_loongarch_exit(void)
+{
+ kvm_exit();
+ kvm_loongarch_env_exit();
+}
+
+module_init(kvm_loongarch_init);
+module_exit(kvm_loongarch_exit);
--
2.31.1