Message-ID: <afadfe85-413b-44fc-afa6-5770413a0488@intel.com>
Date: Mon, 10 Nov 2025 20:54:12 +0800
From: Xiaoyao Li <xiaoyao.li@...el.com>
To: Binbin Wu <binbin.wu@...ux.intel.com>, seanjc@...gle.com, pbonzini@...hat.com
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org, chao.gao@...el.com
Subject: Re: [PATCH v2] KVM: x86: Add a helper to dedup loading guest/host XCR0 and XSS
On 11/10/2025 1:05 PM, Binbin Wu wrote:
> Add and use a helper, kvm_load_xfeatures(), to dedup the code that loads
> guest/host xfeatures.
>
> Opportunistically return early if X86_CR4_OSXSAVE is not set to reduce
> indentation.
>
> No functional change intended.
>
> Suggested-by: Chao Gao <chao.gao@...el.com>
> Reviewed-by: Chao Gao <chao.gao@...el.com>
> Signed-off-by: Binbin Wu <binbin.wu@...ux.intel.com>
Reviewed-by: Xiaoyao Li <xiaoyao.li@...el.com>
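
For anyone skimming the thread, the pattern is simply: one helper, a bool
picks the guest or host value, and the "!= kvm_host" checks still skip the
hardware writes when guest and host state are identical. A standalone,
purely illustrative userspace sketch (names here are made up, not the
kernel code quoted below):

#include <stdbool.h>
#include <stdio.h>

struct xstate {
	unsigned long xcr0;
	unsigned long xss;
};

/* Value currently loaded in the (pretend) hardware registers. */
static struct xstate hw;

static void load_xfeatures(const struct xstate *guest,
			   const struct xstate *host, bool load_guest)
{
	const struct xstate *next = load_guest ? guest : host;

	/* Only touch "hardware" when guest and host actually differ. */
	if (guest->xcr0 != host->xcr0)
		hw.xcr0 = next->xcr0;

	if (guest->xss != host->xss)
		hw.xss = next->xss;
}

int main(void)
{
	struct xstate guest = { .xcr0 = 0x7, .xss = 0x100 };
	struct xstate host  = { .xcr0 = 0x3, .xss = 0x0 };

	hw = host;
	load_xfeatures(&guest, &host, true);	/* entering the guest */
	printf("guest: xcr0=%#lx xss=%#lx\n", hw.xcr0, hw.xss);

	load_xfeatures(&guest, &host, false);	/* back to the host */
	printf("host:  xcr0=%#lx xss=%#lx\n", hw.xcr0, hw.xss);
	return 0;
}
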
> ---
> v2:
> - Pass a bool to distinguish guest/host. [Chao, Xiaoyao]
> - Fix a typo in the short log. [Chao]
> - Opportunistically return early if X86_CR4_OSXSAVE is not set to reduce
> indentation.
>
> v1:
> - https://lore.kernel.org/kvm/20251106101138.2756175-1-binbin.wu@linux.intel.com
> ---
> arch/x86/kvm/x86.c | 33 ++++++++++-----------------------
> 1 file changed, 10 insertions(+), 23 deletions(-)
>
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 9c2e28028c2b..2c521902e2c6 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -1219,34 +1219,21 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
>  }
>  EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_lmsw);
>
> -static void kvm_load_guest_xfeatures(struct kvm_vcpu *vcpu)
> +static void kvm_load_xfeatures(struct kvm_vcpu *vcpu, bool load_guest)
>  {
>  	if (vcpu->arch.guest_state_protected)
>  		return;
>
> -	if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
> -		if (vcpu->arch.xcr0 != kvm_host.xcr0)
> -			xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
> -
> -		if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
> -		    vcpu->arch.ia32_xss != kvm_host.xss)
> -			wrmsrq(MSR_IA32_XSS, vcpu->arch.ia32_xss);
> -	}
> -}
> -
> -static void kvm_load_host_xfeatures(struct kvm_vcpu *vcpu)
> -{
> -	if (vcpu->arch.guest_state_protected)
> +	if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE))
>  		return;
>
> -	if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
> -		if (vcpu->arch.xcr0 != kvm_host.xcr0)
> -			xsetbv(XCR_XFEATURE_ENABLED_MASK, kvm_host.xcr0);
> +	if (vcpu->arch.xcr0 != kvm_host.xcr0)
> +		xsetbv(XCR_XFEATURE_ENABLED_MASK,
> +		       load_guest ? vcpu->arch.xcr0 : kvm_host.xcr0);
>
> -		if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
> -		    vcpu->arch.ia32_xss != kvm_host.xss)
> -			wrmsrq(MSR_IA32_XSS, kvm_host.xss);
> -	}
> +	if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
> +	    vcpu->arch.ia32_xss != kvm_host.xss)
> +		wrmsrq(MSR_IA32_XSS, load_guest ? vcpu->arch.ia32_xss : kvm_host.xss);
>  }
>
>  static void kvm_load_guest_pkru(struct kvm_vcpu *vcpu)
> @@ -11333,7 +11320,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
>  	if (vcpu->arch.guest_fpu.xfd_err)
>  		wrmsrq(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
>
> -	kvm_load_guest_xfeatures(vcpu);
> +	kvm_load_xfeatures(vcpu, true);
>
>  	if (unlikely(vcpu->arch.switch_db_regs &&
>  		     !(vcpu->arch.switch_db_regs & KVM_DEBUGREG_AUTO_SWITCH))) {
> @@ -11429,7 +11416,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
>  	vcpu->mode = OUTSIDE_GUEST_MODE;
>  	smp_wmb();
>
> -	kvm_load_host_xfeatures(vcpu);
> +	kvm_load_xfeatures(vcpu, false);
>
>  	/*
>  	 * Sync xfd before calling handle_exit_irqoff() which may
>
> base-commit: 9052f4f6c539ea1fb7b282a34e6bb33154ce0b63