Message-ID: <20260120080013.2153519-5-anup.patel@oss.qualcomm.com>
Date: Tue, 20 Jan 2026 13:29:50 +0530
From: Anup Patel <anup.patel@....qualcomm.com>
To: Paolo Bonzini <pbonzini@...hat.com>, Atish Patra <atish.patra@...ux.dev>
Cc: Palmer Dabbelt <palmer@...belt.com>, Paul Walmsley <pjw@...nel.org>,
Alexandre Ghiti <alex@...ti.fr>, Shuah Khan <shuah@...nel.org>,
Anup Patel <anup@...infault.org>,
Andrew Jones <andrew.jones@....qualcomm.com>,
kvm-riscv@...ts.infradead.org, kvm@...r.kernel.org,
linux-riscv@...ts.infradead.org, linux-kernel@...r.kernel.org,
linux-kselftest@...r.kernel.org,
Anup Patel <anup.patel@....qualcomm.com>
Subject: [PATCH 04/27] RISC-V: KVM: Introduce common kvm_riscv_isa_check_host()
Rename kvm_riscv_vcpu_isa_check_host() to kvm_riscv_isa_check_host() and
use it as a common helper across KVM RISC-V for checking whether an ISA
extension is supported by the host. Unlike riscv_isa_extension_available(),
the new helper returns 0 on success and -ENOENT otherwise, so the sense of
the checks at the call sites is inverted.
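For illustration only, below is a minimal standalone sketch of the wrapper
pattern this patch introduces: a two-argument helper plus a token-pasting
convenience macro that passes NULL when the caller does not need the
resolved extension, with 0 meaning "supported". The extension IDs and the
mock host table are made up for the sketch and are not the kernel's
definitions.

/*
 * Standalone sketch of the __helper + token-pasting macro pattern.
 * EXT_* IDs and host_has[] are illustrative, not kernel code.
 */
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum { EXT_SSTC, EXT_SSAIA, EXT_ZICBOM, EXT_MAX };

/* Pretend only Sstc and Ssaia are present on this host. */
static const bool host_has[EXT_MAX] = {
	[EXT_SSTC]  = true,
	[EXT_SSAIA] = true,
};

/* Returns 0 if the host supports the extension, -ENOENT otherwise;
 * optionally reports the resolved extension ID via @out. */
static int __isa_check_host(unsigned long ext, unsigned long *out)
{
	if (ext >= EXT_MAX || !host_has[ext])
		return -ENOENT;
	if (out)
		*out = ext;
	return 0;
}

/* Convenience form mirroring kvm_riscv_isa_check_host(ext). */
#define isa_check_host(ext) __isa_check_host(EXT_##ext, NULL)

int main(void)
{
	/* Note the inverted sense vs. a boolean "available" helper:
	 * 0 means supported, so success is tested with '!'. */
	if (!isa_check_host(SSTC))
		printf("Sstc available\n");
	if (isa_check_host(ZICBOM))
		printf("Zicbom not available\n");
	return 0;
}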
Signed-off-by: Anup Patel <anup.patel@....qualcomm.com>
---
arch/riscv/include/asm/kvm_host.h | 4 ++++
arch/riscv/kvm/aia_device.c | 2 +-
arch/riscv/kvm/vcpu_fp.c | 8 +++----
arch/riscv/kvm/vcpu_onereg.c | 38 ++++++++++++++++---------------
arch/riscv/kvm/vcpu_pmu.c | 2 +-
arch/riscv/kvm/vcpu_timer.c | 2 +-
arch/riscv/kvm/vcpu_vector.c | 4 ++--
7 files changed, 33 insertions(+), 27 deletions(-)
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 24585304c02b..47a350c25555 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -308,6 +308,10 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch);
+int __kvm_riscv_isa_check_host(unsigned long kvm_ext, unsigned long *guest_ext);
+#define kvm_riscv_isa_check_host(ext) \
+ __kvm_riscv_isa_check_host(KVM_RISCV_ISA_EXT_##ext, NULL)
+
void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu);
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
diff --git a/arch/riscv/kvm/aia_device.c b/arch/riscv/kvm/aia_device.c
index bed4d2c8c44c..4cecab9bf102 100644
--- a/arch/riscv/kvm/aia_device.c
+++ b/arch/riscv/kvm/aia_device.c
@@ -23,7 +23,7 @@ static int aia_create(struct kvm_device *dev, u32 type)
if (irqchip_in_kernel(kvm))
return -EEXIST;
- if (!riscv_isa_extension_available(NULL, SSAIA))
+ if (kvm_riscv_isa_check_host(SSAIA))
return -ENODEV;
ret = -EBUSY;
diff --git a/arch/riscv/kvm/vcpu_fp.c b/arch/riscv/kvm/vcpu_fp.c
index 030904d82b58..32ab5938a2ec 100644
--- a/arch/riscv/kvm/vcpu_fp.c
+++ b/arch/riscv/kvm/vcpu_fp.c
@@ -59,17 +59,17 @@ void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,
void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx)
{
/* No need to check host sstatus as it can be modified outside */
- if (riscv_isa_extension_available(NULL, d))
+ if (!kvm_riscv_isa_check_host(D))
__kvm_riscv_fp_d_save(cntx);
- else if (riscv_isa_extension_available(NULL, f))
+ else if (!kvm_riscv_isa_check_host(F))
__kvm_riscv_fp_f_save(cntx);
}
void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx)
{
- if (riscv_isa_extension_available(NULL, d))
+ if (!kvm_riscv_isa_check_host(D))
__kvm_riscv_fp_d_restore(cntx);
- else if (riscv_isa_extension_available(NULL, f))
+ else if (!kvm_riscv_isa_check_host(F))
__kvm_riscv_fp_f_restore(cntx);
}
#endif
diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c
index 6dab4deed86d..f0f8c293d950 100644
--- a/arch/riscv/kvm/vcpu_onereg.c
+++ b/arch/riscv/kvm/vcpu_onereg.c
@@ -119,7 +119,7 @@ static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
return KVM_RISCV_ISA_EXT_MAX;
}
-static int kvm_riscv_vcpu_isa_check_host(unsigned long kvm_ext, unsigned long *guest_ext)
+int __kvm_riscv_isa_check_host(unsigned long kvm_ext, unsigned long *base_ext)
{
unsigned long host_ext;
@@ -127,8 +127,7 @@ static int kvm_riscv_vcpu_isa_check_host(unsigned long kvm_ext, unsigned long *g
kvm_ext >= ARRAY_SIZE(kvm_isa_ext_arr))
return -ENOENT;
- *guest_ext = kvm_isa_ext_arr[kvm_ext];
- switch (*guest_ext) {
+ switch (kvm_isa_ext_arr[kvm_ext]) {
case RISCV_ISA_EXT_SMNPM:
/*
* Pointer masking effective in (H)S-mode is provided by the
@@ -139,13 +138,16 @@ static int kvm_riscv_vcpu_isa_check_host(unsigned long kvm_ext, unsigned long *g
host_ext = RISCV_ISA_EXT_SSNPM;
break;
default:
- host_ext = *guest_ext;
+ host_ext = kvm_isa_ext_arr[kvm_ext];
break;
}
if (!__riscv_isa_extension_available(NULL, host_ext))
return -ENOENT;
+ if (base_ext)
+ *base_ext = kvm_isa_ext_arr[kvm_ext];
+
return 0;
}
@@ -156,7 +158,7 @@ static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
return false;
case KVM_RISCV_ISA_EXT_SSCOFPMF:
/* Sscofpmf depends on interrupt filtering defined in ssaia */
- return __riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSAIA);
+ return !kvm_riscv_isa_check_host(SSAIA);
case KVM_RISCV_ISA_EXT_SVADU:
/*
* The henvcfg.ADUE is read-only zero if menvcfg.ADUE is zero.
@@ -263,7 +265,7 @@ void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu)
unsigned long guest_ext, i;
for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
- if (kvm_riscv_vcpu_isa_check_host(i, &guest_ext))
+ if (__kvm_riscv_isa_check_host(i, &guest_ext))
continue;
if (kvm_riscv_vcpu_isa_enable_allowed(i))
set_bit(guest_ext, vcpu->arch.isa);
@@ -288,17 +290,17 @@ static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
break;
case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
- if (!riscv_isa_extension_available(NULL, ZICBOM))
+ if (kvm_riscv_isa_check_host(ZICBOM))
return -ENOENT;
reg_val = riscv_cbom_block_size;
break;
case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
- if (!riscv_isa_extension_available(NULL, ZICBOZ))
+ if (kvm_riscv_isa_check_host(ZICBOZ))
return -ENOENT;
reg_val = riscv_cboz_block_size;
break;
case KVM_REG_RISCV_CONFIG_REG(zicbop_block_size):
- if (!riscv_isa_extension_available(NULL, ZICBOP))
+ if (kvm_riscv_isa_check_host(ZICBOP))
return -ENOENT;
reg_val = riscv_cbop_block_size;
break;
@@ -382,19 +384,19 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
}
break;
case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
- if (!riscv_isa_extension_available(NULL, ZICBOM))
+ if (kvm_riscv_isa_check_host(ZICBOM))
return -ENOENT;
if (reg_val != riscv_cbom_block_size)
return -EINVAL;
break;
case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
- if (!riscv_isa_extension_available(NULL, ZICBOZ))
+ if (kvm_riscv_isa_check_host(ZICBOZ))
return -ENOENT;
if (reg_val != riscv_cboz_block_size)
return -EINVAL;
break;
case KVM_REG_RISCV_CONFIG_REG(zicbop_block_size):
- if (!riscv_isa_extension_available(NULL, ZICBOP))
+ if (kvm_riscv_isa_check_host(ZICBOP))
return -ENOENT;
if (reg_val != riscv_cbop_block_size)
return -EINVAL;
@@ -660,7 +662,7 @@ static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
unsigned long guest_ext;
int ret;
- ret = kvm_riscv_vcpu_isa_check_host(reg_num, &guest_ext);
+ ret = __kvm_riscv_isa_check_host(reg_num, &guest_ext);
if (ret)
return ret;
@@ -678,7 +680,7 @@ static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu,
unsigned long guest_ext;
int ret;
- ret = kvm_riscv_vcpu_isa_check_host(reg_num, &guest_ext);
+ ret = __kvm_riscv_isa_check_host(reg_num, &guest_ext);
if (ret)
return ret;
@@ -837,13 +839,13 @@ static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
* was not available.
*/
if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
- !riscv_isa_extension_available(NULL, ZICBOM))
+ kvm_riscv_isa_check_host(ZICBOM))
continue;
else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
- !riscv_isa_extension_available(NULL, ZICBOZ))
+ kvm_riscv_isa_check_host(ZICBOZ))
continue;
else if (i == KVM_REG_RISCV_CONFIG_REG(zicbop_block_size) &&
- !riscv_isa_extension_available(NULL, ZICBOP))
+ kvm_riscv_isa_check_host(ZICBOP))
continue;
size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
@@ -1064,7 +1066,7 @@ static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;
- if (kvm_riscv_vcpu_isa_check_host(i, &guest_ext))
+ if (__kvm_riscv_isa_check_host(i, &guest_ext))
continue;
if (uindices) {
diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c
index 4d8d5e9aa53d..9759143c1785 100644
--- a/arch/riscv/kvm/vcpu_pmu.c
+++ b/arch/riscv/kvm/vcpu_pmu.c
@@ -819,7 +819,7 @@ void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu)
* filtering is available in the host. Otherwise, guest will always count
* events while the execution is in hypervisor mode.
*/
- if (!riscv_isa_extension_available(NULL, SSCOFPMF))
+ if (kvm_riscv_isa_check_host(SSCOFPMF))
return;
ret = riscv_pmu_get_hpm_info(&hpm_width, &num_hw_ctrs);
diff --git a/arch/riscv/kvm/vcpu_timer.c b/arch/riscv/kvm/vcpu_timer.c
index f36247e4c783..cac4f3a5f213 100644
--- a/arch/riscv/kvm/vcpu_timer.c
+++ b/arch/riscv/kvm/vcpu_timer.c
@@ -253,7 +253,7 @@ int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu)
t->next_set = false;
/* Enable sstc for every vcpu if available in hardware */
- if (riscv_isa_extension_available(NULL, SSTC)) {
+ if (!kvm_riscv_isa_check_host(SSTC)) {
t->sstc_enabled = true;
hrtimer_setup(&t->hrt, kvm_riscv_vcpu_vstimer_expired, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
diff --git a/arch/riscv/kvm/vcpu_vector.c b/arch/riscv/kvm/vcpu_vector.c
index 05f3cc2d8e31..8c7315a96b9e 100644
--- a/arch/riscv/kvm/vcpu_vector.c
+++ b/arch/riscv/kvm/vcpu_vector.c
@@ -63,13 +63,13 @@ void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx)
{
/* No need to check host sstatus as it can be modified outside */
- if (riscv_isa_extension_available(NULL, v))
+ if (!kvm_riscv_isa_check_host(V))
__kvm_riscv_vector_save(cntx);
}
void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx)
{
- if (riscv_isa_extension_available(NULL, v))
+ if (!kvm_riscv_isa_check_host(V))
__kvm_riscv_vector_restore(cntx);
}
--
2.43.0