Message-ID: <CA+EHjTx8hu=WEu54Bt82PtcsOdQHaVkw9ivQSSC9=SV7bf1Zvg@mail.gmail.com>
Date: Thu, 8 Jan 2026 14:09:50 +0000
From: Fuad Tabba <tabba@...gle.com>
To: Mark Brown <broonie@...nel.org>
Cc: Marc Zyngier <maz@...nel.org>, Joey Gouly <joey.gouly@....com>,
Catalin Marinas <catalin.marinas@....com>, Suzuki K Poulose <suzuki.poulose@....com>,
Will Deacon <will@...nel.org>, Paolo Bonzini <pbonzini@...hat.com>, Jonathan Corbet <corbet@....net>,
Shuah Khan <shuah@...nel.org>, Oliver Upton <oupton@...nel.org>, Dave Martin <Dave.Martin@....com>,
Mark Rutland <mark.rutland@....com>, Ben Horgan <ben.horgan@....com>,
linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.linux.dev,
linux-kernel@...r.kernel.org, kvm@...r.kernel.org, linux-doc@...r.kernel.org,
linux-kselftest@...r.kernel.org, Peter Maydell <peter.maydell@...aro.org>,
Eric Auger <eric.auger@...hat.com>
Subject: Re: [PATCH v9 08/30] KVM: arm64: Move SVE state access macros after
feature test macros
On Tue, 23 Dec 2025 at 01:22, Mark Brown <broonie@...nel.org> wrote:
>
> In preparation for SME support, move the macros used to access SVE state
> after the feature test macros; we will need to test for SME subfeatures to
> determine the size of the SME state.
>
> Signed-off-by: Mark Brown <broonie@...nel.org>
Reviewed-by: Fuad Tabba <tabba@...gle.com>
Cheers,
/fuad
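
For anyone following along, the ordering matters because the SME
equivalent of sve_state_size_from_vl() will need the feature test
macros to already be visible at that point in kvm_host.h. A minimal
sketch of the kind of thing that implies (vcpu_has_sme2() and
arch.sme_max_vl are hypothetical placeholders here, not the names this
series actually adds; the ZA/ZT sizing macros are the existing ones
from the sigcontext.h uapi header):

/*
 * Illustrative only: the SME state size depends on which subfeatures
 * the vcpu has, so a macro like this must come after the feature test
 * macros.
 */
#define vcpu_sme_state_size(vcpu) ({					\
	size_t __size = 0;						\
	unsigned int __vq;						\
									\
	if (!WARN_ON(!sve_vl_valid((vcpu)->arch.sme_max_vl))) {	\
		__vq = sve_vq_from_vl((vcpu)->arch.sme_max_vl);	\
		/* ZA is an SVL x SVL byte array */			\
		__size = ZA_SIG_REGS_SIZE(__vq);			\
		/* ZT0 only exists with SME2 */			\
		if (vcpu_has_sme2(vcpu))				\
			__size += ZT_SIG_REGS_SIZE(1);			\
	}								\
									\
	__size;							\
})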
> ---
> arch/arm64/include/asm/kvm_host.h | 50 +++++++++++++++++++--------------------
> 1 file changed, 25 insertions(+), 25 deletions(-)
>
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index ac7f970c7883..e6d25db10a6b 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -1048,31 +1048,6 @@ struct kvm_vcpu_arch {
> #define NESTED_SERROR_PENDING __vcpu_single_flag(sflags, BIT(8))
>
>
> -/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
> -#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) + \
> - sve_ffr_offset((vcpu)->arch.sve_max_vl))
> -
> -#define vcpu_sve_max_vq(vcpu) sve_vq_from_vl((vcpu)->arch.sve_max_vl)
> -
> -#define vcpu_sve_zcr_elx(vcpu) \
> - (unlikely(is_hyp_ctxt(vcpu)) ? ZCR_EL2 : ZCR_EL1)
> -
> -#define sve_state_size_from_vl(sve_max_vl) ({ \
> - size_t __size_ret; \
> - unsigned int __vq; \
> - \
> - if (WARN_ON(!sve_vl_valid(sve_max_vl))) { \
> - __size_ret = 0; \
> - } else { \
> - __vq = sve_vq_from_vl(sve_max_vl); \
> - __size_ret = SVE_SIG_REGS_SIZE(__vq); \
> - } \
> - \
> - __size_ret; \
> -})
> -
> -#define vcpu_sve_state_size(vcpu) sve_state_size_from_vl((vcpu)->arch.sve_max_vl)
> -
> #define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
> KVM_GUESTDBG_USE_SW_BP | \
> KVM_GUESTDBG_USE_HW | \
> @@ -1108,6 +1083,31 @@ struct kvm_vcpu_arch {
>
> #define vcpu_gp_regs(v) (&(v)->arch.ctxt.regs)
>
> +/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
> +#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) + \
> + sve_ffr_offset((vcpu)->arch.sve_max_vl))
> +
> +#define vcpu_sve_max_vq(vcpu) sve_vq_from_vl((vcpu)->arch.sve_max_vl)
> +
> +#define vcpu_sve_zcr_elx(vcpu) \
> + (unlikely(is_hyp_ctxt(vcpu)) ? ZCR_EL2 : ZCR_EL1)
> +
> +#define sve_state_size_from_vl(sve_max_vl) ({ \
> + size_t __size_ret; \
> + unsigned int __vq; \
> + \
> + if (WARN_ON(!sve_vl_valid(sve_max_vl))) { \
> + __size_ret = 0; \
> + } else { \
> + __vq = sve_vq_from_vl(sve_max_vl); \
> + __size_ret = SVE_SIG_REGS_SIZE(__vq); \
> + } \
> + \
> + __size_ret; \
> +})
> +
> +#define vcpu_sve_state_size(vcpu) sve_state_size_from_vl((vcpu)->arch.sve_max_vl)
> +
> /*
> * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
> * memory backed version of a register, and not the one most recently
>
> --
> 2.47.3
>
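As a usage note, vcpu_sve_state_size() is what sizes the per-vcpu SVE
register buffer when the vcpu is finalized; roughly (a sketch modelled
on kvm_vcpu_finalize_sve() in arch/arm64/kvm/reset.c):

	size_t reg_sz = vcpu_sve_state_size(vcpu);
	void *buf;

	if (!reg_sz)		/* sve_max_vl failed validation */
		return -EINVAL;

	buf = kzalloc(reg_sz, GFP_KERNEL_ACCOUNT);
	if (!buf)
		return -ENOMEM;

	vcpu->arch.sve_state = buf;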