[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <c4907689ab89cf783d000f88f0c1c123ac97b26a.camel@redhat.com>
Date: Wed, 11 May 2022 14:34:19 +0300
From: Maxim Levitsky <mlevitsk@...hat.com>
To: Vitaly Kuznetsov <vkuznets@...hat.com>, kvm@...r.kernel.org,
Paolo Bonzini <pbonzini@...hat.com>
Cc: Sean Christopherson <seanjc@...gle.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Michael Kelley <mikelley@...rosoft.com>,
Siddharth Chandrasekaran <sidcha@...zon.de>,
linux-hyperv@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v3 23/34] KVM: selftests: Better XMM read/write helpers
On Thu, 2022-04-14 at 15:20 +0200, Vitaly Kuznetsov wrote:
> set_xmm()/get_xmm() helpers are fairly useless as they only access 64 bits
> of the 128-bit registers. Moreover, these helpers are not used. Borrow
> _kvm_read_sse_reg()/_kvm_write_sse_reg() from KVM, limiting them to
> XMM0-XMM7 for now.
>
> Signed-off-by: Vitaly Kuznetsov <vkuznets@...hat.com>
> ---
> .../selftests/kvm/include/x86_64/processor.h | 70 ++++++++++---------
> 1 file changed, 36 insertions(+), 34 deletions(-)
>
> diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
> index 37db341d4cc5..9ad7602a257b 100644
> --- a/tools/testing/selftests/kvm/include/x86_64/processor.h
> +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
> @@ -296,71 +296,73 @@ static inline void cpuid(uint32_t *eax, uint32_t *ebx,
> : "memory");
> }
>
> -#define SET_XMM(__var, __xmm) \
> - asm volatile("movq %0, %%"#__xmm : : "r"(__var) : #__xmm)
> +typedef u32 __attribute__((vector_size(16))) sse128_t;
> +#define __sse128_u union { sse128_t vec; u64 as_u64[2]; u32 as_u32[4]; }
> +#define sse128_lo(x) ({ __sse128_u t; t.vec = x; t.as_u64[0]; })
> +#define sse128_hi(x) ({ __sse128_u t; t.vec = x; t.as_u64[1]; })
>
> -static inline void set_xmm(int n, unsigned long val)
> +static inline void read_sse_reg(int reg, sse128_t *data)
> {
> - switch (n) {
> + switch (reg) {
> case 0:
> - SET_XMM(val, xmm0);
> + asm("movdqa %%xmm0, %0" : "=m"(*data));
> break;
> case 1:
> - SET_XMM(val, xmm1);
> + asm("movdqa %%xmm1, %0" : "=m"(*data));
> break;
> case 2:
> - SET_XMM(val, xmm2);
> + asm("movdqa %%xmm2, %0" : "=m"(*data));
> break;
> case 3:
> - SET_XMM(val, xmm3);
> + asm("movdqa %%xmm3, %0" : "=m"(*data));
> break;
> case 4:
> - SET_XMM(val, xmm4);
> + asm("movdqa %%xmm4, %0" : "=m"(*data));
> break;
> case 5:
> - SET_XMM(val, xmm5);
> + asm("movdqa %%xmm5, %0" : "=m"(*data));
> break;
> case 6:
> - SET_XMM(val, xmm6);
> + asm("movdqa %%xmm6, %0" : "=m"(*data));
> break;
> case 7:
> - SET_XMM(val, xmm7);
> + asm("movdqa %%xmm7, %0" : "=m"(*data));
> break;
> + default:
> + BUG();
> }
> }
>
> -#define GET_XMM(__xmm) \
> -({ \
> - unsigned long __val; \
> - asm volatile("movq %%"#__xmm", %0" : "=r"(__val)); \
> - __val; \
> -})
> -
> -static inline unsigned long get_xmm(int n)
> +static inline void write_sse_reg(int reg, const sse128_t *data)
> {
> - assert(n >= 0 && n <= 7);
> -
> - switch (n) {
> + switch (reg) {
> case 0:
> - return GET_XMM(xmm0);
> + asm("movdqa %0, %%xmm0" : : "m"(*data));
> + break;
> case 1:
> - return GET_XMM(xmm1);
> + asm("movdqa %0, %%xmm1" : : "m"(*data));
> + break;
> case 2:
> - return GET_XMM(xmm2);
> + asm("movdqa %0, %%xmm2" : : "m"(*data));
> + break;
> case 3:
> - return GET_XMM(xmm3);
> + asm("movdqa %0, %%xmm3" : : "m"(*data));
> + break;
> case 4:
> - return GET_XMM(xmm4);
> + asm("movdqa %0, %%xmm4" : : "m"(*data));
> + break;
> case 5:
> - return GET_XMM(xmm5);
> + asm("movdqa %0, %%xmm5" : : "m"(*data));
> + break;
> case 6:
> - return GET_XMM(xmm6);
> + asm("movdqa %0, %%xmm6" : : "m"(*data));
> + break;
> case 7:
> - return GET_XMM(xmm7);
> + asm("movdqa %0, %%xmm7" : : "m"(*data));
> + break;
> + default:
> + BUG();
> }
> -
> - /* never reached */
> - return 0;
> }
>
> static inline void cpu_relax(void)
Reviewed-by: Maxim Levitsky <mlevitsk@...hat.com>
Best regards,
Maxim Levitsky
Powered by blists - more mailing lists