Message-Id: <20231225125847.2778638-10-guoren@kernel.org>
Date: Mon, 25 Dec 2023 07:58:42 -0500
From: guoren@...nel.org
To: paul.walmsley@...ive.com,
palmer@...belt.com,
guoren@...nel.org,
panqinglin2020@...as.ac.cn,
bjorn@...osinc.com,
conor.dooley@...rochip.com,
leobras@...hat.com,
peterz@...radead.org,
anup@...infault.org,
keescook@...omium.org,
wuwei2016@...as.ac.cn,
xiaoguang.xing@...hgo.com,
chao.wei@...hgo.com,
unicorn_wang@...look.com,
uwu@...nowy.me,
jszhang@...nel.org,
wefu@...hat.com,
atishp@...shpatra.org
Cc: linux-riscv@...ts.infradead.org,
linux-kernel@...r.kernel.org,
kvm@...r.kernel.org,
virtualization@...ts.linux-foundation.org,
Guo Ren <guoren@...ux.alibaba.com>
Subject: [PATCH V12 09/14] RISC-V: paravirt: Add pvqspinlock KVM backend
From: Guo Ren <guoren@...ux.alibaba.com>
Add the files and functions needed to support the SBI PVLOCK (paravirt
qspinlock kick_cpu) extension. Implement kvm_sbi_ext_pvlock_kick_cpu(),
which only needs to call kvm_vcpu_kick() to bring the target vcpu out of
the halt state. No IRQ is raised and no other request is made; it is a
pure vcpu kick.
Reviewed-by: Leonardo Bras <leobras@...hat.com>
Signed-off-by: Guo Ren <guoren@...ux.alibaba.com>
Signed-off-by: Guo Ren <guoren@...nel.org>
---
arch/riscv/include/asm/kvm_vcpu_sbi.h | 1 +
arch/riscv/include/uapi/asm/kvm.h | 1 +
arch/riscv/kvm/Makefile | 1 +
arch/riscv/kvm/vcpu_sbi.c | 4 ++
arch/riscv/kvm/vcpu_sbi_pvlock.c | 57 +++++++++++++++++++++++++++
5 files changed, 64 insertions(+)
create mode 100644 arch/riscv/kvm/vcpu_sbi_pvlock.c
diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h
index 6a453f7f8b56..a051e4875542 100644
--- a/arch/riscv/include/asm/kvm_vcpu_sbi.h
+++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h
@@ -76,6 +76,7 @@ extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_dbcn;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pvlock;
#ifdef CONFIG_RISCV_PMU_SBI
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu;
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
index 60d3b21dead7..24bbada1a9fb 100644
--- a/arch/riscv/include/uapi/asm/kvm.h
+++ b/arch/riscv/include/uapi/asm/kvm.h
@@ -157,6 +157,7 @@ enum KVM_RISCV_SBI_EXT_ID {
KVM_RISCV_SBI_EXT_EXPERIMENTAL,
KVM_RISCV_SBI_EXT_VENDOR,
KVM_RISCV_SBI_EXT_DBCN,
+ KVM_RISCV_SBI_EXT_PVLOCK,
KVM_RISCV_SBI_EXT_MAX,
};
diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile
index 4c2067fc59fc..6112750a3a0c 100644
--- a/arch/riscv/kvm/Makefile
+++ b/arch/riscv/kvm/Makefile
@@ -26,6 +26,7 @@ kvm-$(CONFIG_RISCV_SBI_V01) += vcpu_sbi_v01.o
kvm-y += vcpu_sbi_base.o
kvm-y += vcpu_sbi_replace.o
kvm-y += vcpu_sbi_hsm.o
+kvm-y += vcpu_sbi_pvlock.o
kvm-y += vcpu_timer.o
kvm-$(CONFIG_RISCV_PMU_SBI) += vcpu_pmu.o vcpu_sbi_pmu.o
kvm-y += aia.o
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
index a04ff98085d9..7078bd57806b 100644
--- a/arch/riscv/kvm/vcpu_sbi.c
+++ b/arch/riscv/kvm/vcpu_sbi.c
@@ -78,6 +78,10 @@ static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
.ext_idx = KVM_RISCV_SBI_EXT_VENDOR,
.ext_ptr = &vcpu_sbi_ext_vendor,
},
+ {
+ .ext_idx = KVM_RISCV_SBI_EXT_PVLOCK,
+ .ext_ptr = &vcpu_sbi_ext_pvlock,
+ },
};
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
diff --git a/arch/riscv/kvm/vcpu_sbi_pvlock.c b/arch/riscv/kvm/vcpu_sbi_pvlock.c
new file mode 100644
index 000000000000..914fc58aedfe
--- /dev/null
+++ b/arch/riscv/kvm/vcpu_sbi_pvlock.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c), 2023 Alibaba Cloud
+ *
+ * Authors:
+ * Guo Ren <guoren@...ux.alibaba.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <asm/sbi.h>
+#include <asm/kvm_vcpu_sbi.h>
+
+static int kvm_sbi_ext_pvlock_kick_cpu(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_vcpu *target;
+
+ target = kvm_get_vcpu_by_id(kvm, cp->a0);
+ if (!target)
+ return SBI_ERR_INVALID_PARAM;
+
+ kvm_vcpu_kick(target);
+
+ if (READ_ONCE(target->ready))
+ kvm_vcpu_yield_to(target);
+
+ return SBI_SUCCESS;
+}
+
+static int kvm_sbi_ext_pvlock_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_vcpu_sbi_return *retdata)
+{
+ int ret = 0;
+ struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+ unsigned long funcid = cp->a6;
+
+ switch (funcid) {
+ case SBI_EXT_PVLOCK_KICK_CPU:
+ ret = kvm_sbi_ext_pvlock_kick_cpu(vcpu);
+ break;
+ default:
+ ret = SBI_ERR_NOT_SUPPORTED;
+ }
+
+ retdata->err_val = ret;
+
+ return 0;
+}
+
+const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pvlock = {
+ .extid_start = SBI_EXT_PVLOCK,
+ .extid_end = SBI_EXT_PVLOCK,
+ .handler = kvm_sbi_ext_pvlock_handler,
+};
--
2.40.1