Message-ID: <377a678c-16e4-4e85-8831-421e59e9bf6e@redhat.com>
Date: Fri, 2 May 2025 09:41:25 +1000
From: Gavin Shan <gshan@...hat.com>
To: Steven Price <steven.price@....com>, kvm@...r.kernel.org,
kvmarm@...ts.linux.dev
Cc: Jean-Philippe Brucker <jean-philippe@...aro.org>,
Catalin Marinas <catalin.marinas@....com>, Marc Zyngier <maz@...nel.org>,
Will Deacon <will@...nel.org>, James Morse <james.morse@....com>,
Oliver Upton <oliver.upton@...ux.dev>,
Suzuki K Poulose <suzuki.poulose@....com>, Zenghui Yu
<yuzenghui@...wei.com>, linux-arm-kernel@...ts.infradead.org,
linux-kernel@...r.kernel.org, Joey Gouly <joey.gouly@....com>,
Alexandru Elisei <alexandru.elisei@....com>,
Christoffer Dall <christoffer.dall@....com>, Fuad Tabba <tabba@...gle.com>,
linux-coco@...ts.linux.dev,
Ganapatrao Kulkarni <gankulkarni@...amperecomputing.com>,
Shanker Donthineni <sdonthineni@...dia.com>, Alper Gun
<alpergun@...gle.com>, "Aneesh Kumar K . V" <aneesh.kumar@...nel.org>
Subject: Re: [PATCH v8 40/43] arm64: RME: Provide accurate register list
On 4/16/25 11:42 PM, Steven Price wrote:
> From: Jean-Philippe Brucker <jean-philippe@...aro.org>
>
> Userspace can set a few registers with KVM_SET_ONE_REG (9 GP registers
> at runtime, and 3 system registers during initialization). Update the
> register list returned by KVM_GET_REG_LIST.
>
> Signed-off-by: Jean-Philippe Brucker <jean-philippe@...aro.org>
> Signed-off-by: Steven Price <steven.price@....com>
> ---
> Changes since v7:
> * Reworked on top of upstream changes.
> ---
> arch/arm64/kvm/guest.c | 19 ++++++++++++++-----
> arch/arm64/kvm/hypercalls.c | 4 ++--
> arch/arm64/kvm/sys_regs.c | 28 ++++++++++++++++++++++------
> 3 files changed, 38 insertions(+), 13 deletions(-)
>
With the nitpicks below addressed:
Reviewed-by: Gavin Shan <gshan@...hat.com>
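
For reference, the list being adjusted here is the one userspace walks with
KVM_GET_REG_LIST before accessing individual registers with KVM_GET_ONE_REG /
KVM_SET_ONE_REG. A minimal sketch of that enumeration, assuming a vcpu fd
obtained from KVM_CREATE_VCPU and a throwaway dump_reg_list() helper (error
handling trimmed):

  #include <linux/kvm.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <sys/ioctl.h>

  static void dump_reg_list(int vcpu_fd)
  {
          struct kvm_reg_list probe = { .n = 0 };
          struct kvm_reg_list *list;

          /* First call fails with E2BIG and fills in the required count */
          ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

          list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
          list->n = probe.n;
          if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) == 0) {
                  for (__u64 i = 0; i < list->n; i++)
                          printf("0x%llx\n", (unsigned long long)list->reg[i]);
          }
          free(list);
  }

Each returned ID can then be passed back via struct kvm_one_reg to
KVM_GET_ONE_REG/KVM_SET_ONE_REG, so for realm VMs the IDs that aren't
accessible should indeed disappear from this list, which is what the hunks
below do.
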
> diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
> index 1288920fc73d..e9bb7647aa65 100644
> --- a/arch/arm64/kvm/guest.c
> +++ b/arch/arm64/kvm/guest.c
> @@ -618,8 +618,6 @@ static const u64 timer_reg_list[] = {
> KVM_REG_ARM_PTIMER_CVAL,
> };
>
> -#define NUM_TIMER_REGS ARRAY_SIZE(timer_reg_list)
> -
> static bool is_timer_reg(u64 index)
> {
> switch (index) {
> @@ -634,9 +632,14 @@ static bool is_timer_reg(u64 index)
> return false;
> }
>
> +static unsigned long num_timer_regs(struct kvm_vcpu *vcpu)
> +{
> + return kvm_is_realm(vcpu->kvm) ? 0 : ARRAY_SIZE(timer_reg_list);
> +}
> +
nit: this helper could be declared inline:

  static inline unsigned long num_timer_regs(struct kvm_vcpu *vcpu)
> static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
> {
> - for (int i = 0; i < NUM_TIMER_REGS; i++) {
> + for (int i = 0; i < num_timer_regs(vcpu); i++) {
> if (put_user(timer_reg_list[i], uindices))
> return -EFAULT;
> uindices++;
nit: 'i' would better be 'unsigned long' to match num_timer_regs()'s return type:

  for (unsigned long i = 0; ...)
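
Putting both nits together, the loop would look something like this (just a
sketch of the suggested shape, based on the hunk above):

  static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
  {
          /* unsigned long matches num_timer_regs()'s return type */
          for (unsigned long i = 0; i < num_timer_regs(vcpu); i++) {
                  if (put_user(timer_reg_list[i], uindices))
                          return -EFAULT;
                  uindices++;
          }

          return 0;
  }
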
> @@ -674,6 +677,9 @@ static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
> if (!vcpu_has_sve(vcpu) || !kvm_arm_vcpu_sve_finalized(vcpu))
> return 0;
>
> + if (kvm_is_realm(vcpu->kvm))
> + return 1; /* KVM_REG_ARM64_SVE_VLS */
> +
> return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
> + 1; /* KVM_REG_ARM64_SVE_VLS */
> }
> @@ -701,6 +707,9 @@ static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
> return -EFAULT;
> ++num_regs;
>
> + if (kvm_is_realm(vcpu->kvm))
> + return num_regs;
> +
> for (i = 0; i < slices; i++) {
> for (n = 0; n < SVE_NUM_ZREGS; n++) {
> reg = KVM_REG_ARM64_SVE_ZREG(n, i);
> @@ -739,7 +748,7 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
> res += num_sve_regs(vcpu);
> res += kvm_arm_num_sys_reg_descs(vcpu);
> res += kvm_arm_get_fw_num_regs(vcpu);
> - res += NUM_TIMER_REGS;
> + res += num_timer_regs(vcpu);
>
> return res;
> }
> @@ -773,7 +782,7 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
> ret = copy_timer_indices(vcpu, uindices);
> if (ret < 0)
> return ret;
> - uindices += NUM_TIMER_REGS;
> + uindices += num_timer_regs(vcpu);
>
> return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
> }
> diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
> index 569941eeb3fe..fb2bba0c7e86 100644
> --- a/arch/arm64/kvm/hypercalls.c
> +++ b/arch/arm64/kvm/hypercalls.c
> @@ -412,14 +412,14 @@ void kvm_arm_teardown_hypercalls(struct kvm *kvm)
>
> int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
> {
> - return ARRAY_SIZE(kvm_arm_fw_reg_ids);
> + return kvm_is_realm(vcpu->kvm) ? 0 : ARRAY_SIZE(kvm_arm_fw_reg_ids);
> }
>
> int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
> {
> int i;
>
> - for (i = 0; i < ARRAY_SIZE(kvm_arm_fw_reg_ids); i++) {
> + for (i = 0; i < kvm_arm_get_fw_num_regs(vcpu); i++) {
> if (put_user(kvm_arm_fw_reg_ids[i], uindices++))
> return -EFAULT;
> }
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index a53a06700867..7d384016d1ba 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -4960,18 +4960,18 @@ int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
> sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
> }
>
> -static unsigned int num_demux_regs(void)
> +static unsigned int num_demux_regs(struct kvm_vcpu *vcpu)
> {
> - return CSSELR_MAX;
> + return kvm_is_realm(vcpu->kvm) ? 0 : CSSELR_MAX;
> }
>
nit: same as above, this helper could be declared inline:

  static inline unsigned int num_demux_regs(struct kvm_vcpu *vcpu)
> -static int write_demux_regids(u64 __user *uindices)
> +static int write_demux_regids(struct kvm_vcpu *vcpu, u64 __user *uindices)
> {
> u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
> unsigned int i;
>
> val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
> - for (i = 0; i < CSSELR_MAX; i++) {
> + for (i = 0; i < num_demux_regs(vcpu); i++) {
> if (put_user(val | i, uindices))
> return -EFAULT;
> uindices++;
> @@ -5002,11 +5002,27 @@ static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
> return true;
> }
>
> +static bool kvm_realm_sys_reg_hidden_user(const struct kvm_vcpu *vcpu, u64 reg)
> +{
> + if (!kvm_is_realm(vcpu->kvm))
> + return false;
> +
> + switch (reg) {
> + case SYS_ID_AA64DFR0_EL1:
> + case SYS_PMCR_EL0:
> + return false;
> + }
> + return true;
> +}
> +
nit: this helper could also be declared inline:

  static inline bool kvm_realm_sys_reg_hidden_user(const struct kvm_vcpu *vcpu, u64 reg)
> static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
> const struct sys_reg_desc *rd,
> u64 __user **uind,
> unsigned int *total)
> {
> + if (kvm_realm_sys_reg_hidden_user(vcpu, reg_to_encoding(rd)))
> + return 0;
> +
> /*
> * Ignore registers we trap but don't save,
> * and for which no custom user accessor is provided.
> @@ -5044,7 +5060,7 @@ static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
>
> unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
> {
> - return num_demux_regs()
> + return num_demux_regs(vcpu)
> + walk_sys_regs(vcpu, (u64 __user *)NULL);
> }
>
> @@ -5057,7 +5073,7 @@ int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
> return err;
> uindices += err;
>
> - return write_demux_regids(uindices);
> + return write_demux_regids(vcpu, uindices);
> }
>
> #define KVM_ARM_FEATURE_ID_RANGE_INDEX(r) \
Thanks,
Gavin