Message-ID: <27f345bd-2f2b-4e40-8601-d2bd7c12ce5e@linux.intel.com>
Date: Wed, 11 Jun 2025 15:09:05 +0800
From: Binbin Wu <binbin.wu@...ux.intel.com>
To: Sean Christopherson <seanjc@...gle.com>
Cc: Paolo Bonzini <pbonzini@...hat.com>, kvm@...r.kernel.org,
 linux-kernel@...r.kernel.org, Chao Gao <chao.gao@...el.com>,
 Borislav Petkov <bp@...en8.de>, Xin Li <xin@...or.com>,
 Dapeng Mi <dapeng1.mi@...ux.intel.com>,
 Francesco Lavra <francescolavra.fl@...il.com>,
 Manali Shukla <Manali.Shukla@....com>
Subject: Re: [PATCH v2 20/32] KVM: x86: Rename msr_filter_changed() =>
 recalc_msr_intercepts()



On 6/11/2025 6:57 AM, Sean Christopherson wrote:
> Rename msr_filter_changed() to recalc_msr_intercepts() and drop the
> trampoline wrappers now that both SVM and VMX use a filter-agnostic recalc
> helper to react to the new userspace filter.
>
> No functional change intended.
>
> Reviewed-by: Xin Li (Intel) <xin@...or.com>
> Signed-off-by: Sean Christopherson <seanjc@...gle.com>

Reviewed-by: Binbin Wu <binbin.wu@...ux.intel.com>
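
For anyone skimming the diff: the "trampoline wrapper" being dropped is a
vendor hook that did nothing but forward to the recalc helper. A minimal
before/after sketch, paraphrasing the SVM hunks below rather than the exact
kernel context:

	/* Before: the kvm_x86_ops hook pointed at a one-line trampoline. */
	static void svm_msr_filter_changed(struct kvm_vcpu *vcpu)
	{
		svm_recalc_msr_intercepts(vcpu);
	}
	/* ... .msr_filter_changed = svm_msr_filter_changed, ... */

	/* After: the renamed hook points straight at the helper. */
	/* ... .recalc_msr_intercepts = svm_recalc_msr_intercepts, ... */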

> ---
>   arch/x86/include/asm/kvm-x86-ops.h | 2 +-
>   arch/x86/include/asm/kvm_host.h    | 2 +-
>   arch/x86/kvm/svm/svm.c             | 8 +-------
>   arch/x86/kvm/vmx/main.c            | 6 +++---
>   arch/x86/kvm/vmx/vmx.c             | 7 +------
>   arch/x86/kvm/vmx/x86_ops.h         | 2 +-
>   arch/x86/kvm/x86.c                 | 8 +++++++-
>   7 files changed, 15 insertions(+), 20 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
> index 8d50e3e0a19b..19a6735d6dd8 100644
> --- a/arch/x86/include/asm/kvm-x86-ops.h
> +++ b/arch/x86/include/asm/kvm-x86-ops.h
> @@ -139,7 +139,7 @@ KVM_X86_OP(check_emulate_instruction)
>   KVM_X86_OP(apic_init_signal_blocked)
>   KVM_X86_OP_OPTIONAL(enable_l2_tlb_flush)
>   KVM_X86_OP_OPTIONAL(migrate_timers)
> -KVM_X86_OP(msr_filter_changed)
> +KVM_X86_OP(recalc_msr_intercepts)
>   KVM_X86_OP(complete_emulated_msr)
>   KVM_X86_OP(vcpu_deliver_sipi_vector)
>   KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 330cdcbed1a6..89a626e5b80f 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1885,7 +1885,7 @@ struct kvm_x86_ops {
>   	int (*enable_l2_tlb_flush)(struct kvm_vcpu *vcpu);
>   
>   	void (*migrate_timers)(struct kvm_vcpu *vcpu);
> -	void (*msr_filter_changed)(struct kvm_vcpu *vcpu);
> +	void (*recalc_msr_intercepts)(struct kvm_vcpu *vcpu);
>   	int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err);
>   
>   	void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector);
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index de3d59c71229..710bc5f965dc 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -896,11 +896,6 @@ static void svm_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
>   	 */
>   }
>   
> -static void svm_msr_filter_changed(struct kvm_vcpu *vcpu)
> -{
> -	svm_recalc_msr_intercepts(vcpu);
> -}
> -
>   void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
>   {
>   	to_vmcb->save.dbgctl		= from_vmcb->save.dbgctl;
> @@ -929,7 +924,6 @@ static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
>   	struct vcpu_svm *svm = to_svm(vcpu);
>   
>   	KVM_BUG_ON(sev_es_guest(vcpu->kvm), vcpu->kvm);
> -
>   	svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
>   	svm_recalc_lbr_msr_intercepts(vcpu);
>   
> @@ -5227,7 +5221,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
>   
>   	.apic_init_signal_blocked = svm_apic_init_signal_blocked,
>   
> -	.msr_filter_changed = svm_msr_filter_changed,
> +	.recalc_msr_intercepts = svm_recalc_msr_intercepts,
>   	.complete_emulated_msr = svm_complete_emulated_msr,
>   
>   	.vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
> diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
> index d1e02e567b57..b3c58731a2f5 100644
> --- a/arch/x86/kvm/vmx/main.c
> +++ b/arch/x86/kvm/vmx/main.c
> @@ -220,7 +220,7 @@ static int vt_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>   	return vmx_get_msr(vcpu, msr_info);
>   }
>   
> -static void vt_msr_filter_changed(struct kvm_vcpu *vcpu)
> +static void vt_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
>   {
>   	/*
>   	 * TDX doesn't allow VMM to configure interception of MSR accesses.
> @@ -231,7 +231,7 @@ static void vt_msr_filter_changed(struct kvm_vcpu *vcpu)
>   	if (is_td_vcpu(vcpu))
>   		return;
>   
> -	vmx_msr_filter_changed(vcpu);
> +	vmx_recalc_msr_intercepts(vcpu);
>   }
>   
>   static int vt_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
> @@ -1034,7 +1034,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
>   	.apic_init_signal_blocked = vt_op(apic_init_signal_blocked),
>   	.migrate_timers = vmx_migrate_timers,
>   
> -	.msr_filter_changed = vt_op(msr_filter_changed),
> +	.recalc_msr_intercepts = vt_op(recalc_msr_intercepts),
>   	.complete_emulated_msr = vt_op(complete_emulated_msr),
>   
>   	.vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index ce7a1c07e402..bdff81f8288d 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -4074,7 +4074,7 @@ void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu)
>   	}
>   }
>   
> -static void vmx_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
> +void vmx_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
>   {
>   	if (!cpu_has_vmx_msr_bitmap())
>   		return;
> @@ -4123,11 +4123,6 @@ static void vmx_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
>   	 */
>   }
>   
> -void vmx_msr_filter_changed(struct kvm_vcpu *vcpu)
> -{
> -	vmx_recalc_msr_intercepts(vcpu);
> -}
> -
>   static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
>   						int vector)
>   {
> diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
> index b4596f651232..34c6e683e321 100644
> --- a/arch/x86/kvm/vmx/x86_ops.h
> +++ b/arch/x86/kvm/vmx/x86_ops.h
> @@ -52,7 +52,7 @@ void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
>   			   int trig_mode, int vector);
>   void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu);
>   bool vmx_has_emulated_msr(struct kvm *kvm, u32 index);
> -void vmx_msr_filter_changed(struct kvm_vcpu *vcpu);
> +void vmx_recalc_msr_intercepts(struct kvm_vcpu *vcpu);
>   void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
>   void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
>   int vmx_get_feature_msr(u32 msr, u64 *data);
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index dd34a2ec854c..cc9a01b6dbc8 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -10926,8 +10926,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
>   			kvm_vcpu_update_apicv(vcpu);
>   		if (kvm_check_request(KVM_REQ_APF_READY, vcpu))
>   			kvm_check_async_pf_completion(vcpu);
> +
> +		/*
> +		 * Recalc MSR intercepts as userspace may want to intercept
> +		 * accesses to MSRs that KVM would otherwise pass through to
> +		 * the guest.
> +		 */
>   		if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu))
> -			kvm_x86_call(msr_filter_changed)(vcpu);
> +			kvm_x86_call(recalc_msr_intercepts)(vcpu);
>   
>   		if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu))
>   			kvm_x86_call(update_cpu_dirty_logging)(vcpu);
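
Not part of the rename itself, but for context: KVM_REQ_MSR_FILTER_CHANGED is
raised on every vCPU when userspace installs or replaces an MSR filter, so the
next guest entry ends up in the renamed recalc_msr_intercepts() hook. A rough
userspace-side sketch of what triggers it (illustrative only; assumes a VM fd
with MSR filtering support and uses the uapi names from <linux/kvm.h>):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/*
	 * Deny guest writes to a single MSR.  With the default-allow policy,
	 * a cleared bitmap bit denies the operations named in .flags, so KVM
	 * can no longer pass the MSR through and must recalculate the
	 * intercept bitmaps (the request handled in the hunk above).
	 */
	static int deny_write_to_msr(int vm_fd, __u32 msr_index)
	{
		__u8 bitmap[1] = { 0x00 };	/* bit 0 == msr_index, 0 == deny */
		struct kvm_msr_filter filter = {
			.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
			.ranges[0] = {
				.flags  = KVM_MSR_FILTER_WRITE,
				.nmsrs  = 1,
				.base   = msr_index,
				.bitmap = bitmap,
			},
		};

		return ioctl(vm_fd, KVM_X86_SET_MSR_FILTER, &filter);
	}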

