Date:   Thu, 16 Nov 2023 15:19:23 +0800
From:   WANG Xuerui <kernel@...0n.name>
To:     Tianrui Zhao <zhaotianrui@...ngson.cn>,
        linux-kernel@...r.kernel.org, kvm@...r.kernel.org
Cc:     Paolo Bonzini <pbonzini@...hat.com>,
        Huacai Chen <chenhuacai@...nel.org>,
        Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
        loongarch@...ts.linux.dev, Jens Axboe <axboe@...nel.dk>,
        Mark Brown <broonie@...nel.org>,
        Alex Deucher <alexander.deucher@....com>,
        Oliver Upton <oliver.upton@...ux.dev>, maobibo@...ngson.cn,
        Xi Ruoyao <xry111@...111.site>
Subject: Re: [PATCH v1 2/2] LoongArch: KVM: Add lasx support

On 11/15/23 17:19, Tianrui Zhao wrote:
> This patch adds LASX support to LoongArch KVM. LASX is the
> LoongArch 256-bit vector instruction set extension.
> When the guest uses a LASX instruction while LASX is disabled, a
> LASX exception is taken to KVM. KVM then enables LASX, restores the
> guest's vector registers, and returns to the guest to continue
> running.
>
> Signed-off-by: Tianrui Zhao <zhaotianrui@...ngson.cn>
> ---
>   arch/loongarch/include/asm/kvm_host.h |  6 ++++
>   arch/loongarch/include/asm/kvm_vcpu.h | 10 +++++++
>   arch/loongarch/kernel/fpu.S           |  1 +
>   arch/loongarch/kvm/exit.c             | 18 +++++++++++
>   arch/loongarch/kvm/switch.S           | 16 ++++++++++
>   arch/loongarch/kvm/trace.h            |  4 ++-
>   arch/loongarch/kvm/vcpu.c             | 43 ++++++++++++++++++++++++++-
>   7 files changed, 96 insertions(+), 2 deletions(-)
>
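To make the flow in the commit message concrete (LASX-disabled exception,
enable LASX, restore vector registers, resume), here is a rough sketch of
what the exit-path handler added in arch/loongarch/kvm/exit.c might look
like, by analogy with the existing LSX handling; only kvm_guest_has_lasx()
and kvm_own_lasx() below are taken from this patch, everything else is
illustrative:

/*
 * Illustrative sketch, not quoted from the patch: the guest's first
 * LASX instruction traps to KVM with a "LASX disabled" exception.  If
 * the guest is permitted to use LASX per its CPUCFG, KVM takes
 * ownership of the 256-bit vector context and re-enters the guest.
 */
static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
{
	if (!kvm_guest_has_lasx(&vcpu->arch))
		return RESUME_HOST;	/* sketch: a real handler would likely
					 * inject a guest exception instead */

	kvm_own_lasx(vcpu);		/* enable LASX, restore vector registers */

	return RESUME_GUEST;
}
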
> diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
> index 6c65c25169..4c05b5eca0 100644
> --- a/arch/loongarch/include/asm/kvm_host.h
> +++ b/arch/loongarch/include/asm/kvm_host.h
> @@ -95,6 +95,7 @@ enum emulation_result {
>   #define KVM_LARCH_SWCSR_LATEST	(0x1 << 1)
>   #define KVM_LARCH_HWCSR_USABLE	(0x1 << 2)
>   #define KVM_LARCH_LSX		(0x1 << 3)
> +#define KVM_LARCH_LASX		(0x1 << 4)
>   
>   struct kvm_vcpu_arch {
>   	/*
> @@ -181,6 +182,11 @@ static inline bool kvm_guest_has_lsx(struct kvm_vcpu_arch *arch)
>   	return arch->cpucfg[2] & CPUCFG2_LSX;
>   }
>   
> +static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch)
> +{
> +	return arch->cpucfg[2] & CPUCFG2_LASX;
> +}
> +
>   /* Debug: dump vcpu state */
>   int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
>   
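As an aside on how the new KVM_LARCH_LASX ownership bit and
kvm_guest_has_lasx() are meant to work together, a minimal sketch of what
kvm_own_lasx() plausibly does, modelled on the existing FPU/LSX ownership
helpers (the aux_inuse field, the KVM_LARCH_FPU bit, set_csr_euen() and the
CSR_EUEN_* names are assumptions here, not quoted from this patch):

/*
 * Sketch only: enable the LASX unit for the guest, reload its 256-bit
 * vector state, and record in the ownership bits that the hardware now
 * holds guest context, so the exit path knows what must be saved.
 */
void kvm_own_lasx(struct kvm_vcpu *vcpu)
{
	preempt_disable();

	/* LASX implies LSX and FP, so enable all three units. */
	set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
	kvm_restore_lasx(&vcpu->arch.fpu);

	vcpu->arch.aux_inuse |= KVM_LARCH_FPU | KVM_LARCH_LSX | KVM_LARCH_LASX;

	preempt_enable();
}
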
> diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
> index c629771e12..4f87f16018 100644
> --- a/arch/loongarch/include/asm/kvm_vcpu.h
> +++ b/arch/loongarch/include/asm/kvm_vcpu.h
> @@ -67,6 +67,16 @@ static inline void kvm_restore_lsx(struct loongarch_fpu *fpu) { }
>   static inline void kvm_restore_lsx_upper(struct loongarch_fpu *fpu) { }
>   #endif
>   
> +#ifdef CONFIG_CPU_HAS_LASX
> +void kvm_own_lasx(struct kvm_vcpu *vcpu);
> +void kvm_save_lasx(struct loongarch_fpu *fpu);
> +void kvm_restore_lasx(struct loongarch_fpu *fpu);
> +#else
> +static inline void kvm_own_lasx(struct kvm_vcpu *vcpu) { }
> +static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { }
> +static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
> +#endif
> +
>   void kvm_acquire_timer(struct kvm_vcpu *vcpu);
>   void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
>   void kvm_reset_timer(struct kvm_vcpu *vcpu);
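The value of the CONFIG_CPU_HAS_LASX=n stub inlines above is that callers
need no #ifdefs of their own. A hedged sketch of how the context-losing
path can then fold LASX in (kvm_lose_fpu(), kvm_save_lsx()/kvm_save_fpu()
and aux_inuse are assumptions modelled on the existing FPU handling):

/*
 * Sketch only: when the vcpu gives up the hardware FP/vector unit,
 * save the widest state the guest actually owns and clear the
 * ownership bits.  With the stubs above this compiles unchanged even
 * when the kernel is built without LASX support.
 */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();

	if (vcpu->arch.aux_inuse & KVM_LARCH_LASX)
		kvm_save_lasx(&vcpu->arch.fpu);
	else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX)
		kvm_save_lsx(&vcpu->arch.fpu);
	else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU)
		kvm_save_fpu(&vcpu->arch.fpu);

	vcpu->arch.aux_inuse &= ~(KVM_LARCH_FPU | KVM_LARCH_LSX | KVM_LARCH_LASX);
	/* Disabling the CSR.EUEN enable bits is omitted in this sketch. */

	preempt_enable();
}
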
> diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S
> index d53ab10f46..f4524fe866 100644
> --- a/arch/loongarch/kernel/fpu.S
> +++ b/arch/loongarch/kernel/fpu.S
> @@ -384,6 +384,7 @@ SYM_FUNC_START(_restore_lasx_upper)
>   	lasx_restore_all_upper a0 t0 t1
>   	jr	ra
>   SYM_FUNC_END(_restore_lasx_upper)
> +EXPORT_SYMBOL(_restore_lasx_upper)

Why the added export? It doesn't seem necessary, given that the
previous patch doesn't add a similar export for _restore_lsx_upper.
(Or, if it's truly needed, it should probably become
EXPORT_SYMBOL_GPL.)
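
For reference, the GPL-only form suggested above -- assuming the export
turns out to be needed at all -- would just be:

-EXPORT_SYMBOL(_restore_lasx_upper)
+EXPORT_SYMBOL_GPL(_restore_lasx_upper)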

-- 
WANG "xen0n" Xuerui

Linux/LoongArch mailing list: https://lore.kernel.org/loongarch/
