Message-ID: <06607e2f-f785-4d62-9bdd-9874aa665341@linux.intel.com>
Date: Wed, 6 Aug 2025 15:25:08 +0800
From: "Mi, Dapeng" <dapeng1.mi@...ux.intel.com>
To: Sean Christopherson <seanjc@...gle.com>,
 Paolo Bonzini <pbonzini@...hat.com>
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org, Xin Li
 <xin@...or.com>, Sandipan Das <sandipan.das@....com>
Subject: Re: [PATCH 10/18] KVM: x86/pmu: Add wrappers for counting emulated
 instructions/branches


On 8/6/2025 3:05 AM, Sean Christopherson wrote:
> Add wrappers for triggering instruction retired and branch retired PMU
> events in anticipation of reworking the internal mechanisms to track
> which PMCs need to be evaluated, e.g. to avoid having to walk and check
> every PMC.
>
> Opportunistically bury "struct kvm_pmu_emulated_event_selectors" in pmu.c.
>
> No functional change intended.
>
> Signed-off-by: Sean Christopherson <seanjc@...gle.com>
> ---
>  arch/x86/kvm/pmu.c        | 22 ++++++++++++++++++----
>  arch/x86/kvm/pmu.h        |  9 ++-------
>  arch/x86/kvm/vmx/nested.c |  2 +-
>  arch/x86/kvm/x86.c        |  6 +++---
>  4 files changed, 24 insertions(+), 15 deletions(-)
>
> diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
> index eb17d90916ea..e1911b366c43 100644
> --- a/arch/x86/kvm/pmu.c
> +++ b/arch/x86/kvm/pmu.c
> @@ -29,8 +29,11 @@
>  struct x86_pmu_capability __read_mostly kvm_pmu_cap;
>  EXPORT_SYMBOL_GPL(kvm_pmu_cap);
>  
> -struct kvm_pmu_emulated_event_selectors __read_mostly kvm_pmu_eventsel;
> -EXPORT_SYMBOL_GPL(kvm_pmu_eventsel);
> +struct kvm_pmu_emulated_event_selectors {
> +	u64 INSTRUCTIONS_RETIRED;
> +	u64 BRANCH_INSTRUCTIONS_RETIRED;
> +};
> +static struct kvm_pmu_emulated_event_selectors __read_mostly kvm_pmu_eventsel;
>  
>  /* Precise Distribution of Instructions Retired (PDIR) */
>  static const struct x86_cpu_id vmx_pebs_pdir_cpu[] = {
> @@ -907,7 +910,7 @@ static inline bool cpl_is_matched(struct kvm_pmc *pmc)
>  							 select_user;
>  }
>  
> -void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel)
> +static void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel)
>  {
>  	DECLARE_BITMAP(bitmap, X86_PMC_IDX_MAX);
>  	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
> @@ -944,7 +947,18 @@ void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel)
>  		kvm_pmu_incr_counter(pmc);
>  	}
>  }
> -EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event);
> +
> +void kvm_pmu_instruction_retired(struct kvm_vcpu *vcpu)
> +{
> +	kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.INSTRUCTIONS_RETIRED);
> +}
> +EXPORT_SYMBOL_GPL(kvm_pmu_instruction_retired);
> +
> +void kvm_pmu_branch_retired(struct kvm_vcpu *vcpu)
> +{
> +	kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED);
> +}
> +EXPORT_SYMBOL_GPL(kvm_pmu_branch_retired);
>  
>  static bool is_masked_filter_valid(const struct kvm_x86_pmu_event_filter *filter)
>  {
> diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
> index 13477066eb40..740af816af37 100644
> --- a/arch/x86/kvm/pmu.h
> +++ b/arch/x86/kvm/pmu.h
> @@ -23,11 +23,6 @@
>  
>  #define KVM_FIXED_PMC_BASE_IDX INTEL_PMC_IDX_FIXED
>  
> -struct kvm_pmu_emulated_event_selectors {
> -	u64 INSTRUCTIONS_RETIRED;
> -	u64 BRANCH_INSTRUCTIONS_RETIRED;
> -};
> -
>  struct kvm_pmu_ops {
>  	struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
>  		unsigned int idx, u64 *mask);
> @@ -178,7 +173,6 @@ static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
>  }
>  
>  extern struct x86_pmu_capability kvm_pmu_cap;
> -extern struct kvm_pmu_emulated_event_selectors kvm_pmu_eventsel;
>  
>  void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops);
>  
> @@ -227,7 +221,8 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu);
>  void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
>  void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
>  int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
> -void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel);
> +void kvm_pmu_instruction_retired(struct kvm_vcpu *vcpu);
> +void kvm_pmu_branch_retired(struct kvm_vcpu *vcpu);
>  
>  bool is_vmware_backdoor_pmc(u32 pmc_idx);
>  
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index b8ea1969113d..db2fd4eedc90 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -3690,7 +3690,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
>  		return 1;
>  	}
>  
> -	kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED);
> +	kvm_pmu_branch_retired(vcpu);
>  
>  	if (CC(evmptrld_status == EVMPTRLD_VMFAIL))
>  		return nested_vmx_failInvalid(vcpu);
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index a4441f036929..f2b2eaaec6f8 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -8824,7 +8824,7 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
>  	if (unlikely(!r))
>  		return 0;
>  
> -	kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.INSTRUCTIONS_RETIRED);
> +	kvm_pmu_instruction_retired(vcpu);
>  
>  	/*
>  	 * rflags is the old, "raw" value of the flags.  The new value has
> @@ -9158,9 +9158,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
>  		 */
>  		if (!ctxt->have_exception ||
>  		    exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
> -			kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.INSTRUCTIONS_RETIRED);
> +			kvm_pmu_instruction_retired(vcpu);
>  			if (ctxt->is_branch)
> -				kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED);
> +				kvm_pmu_branch_retired(vcpu);
>  			kvm_rip_write(vcpu, ctxt->eip);
>  			if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
>  				r = kvm_vcpu_do_singlestep(vcpu);

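Just to confirm my understanding: with kvm_pmu_eventsel now private to
pmu.c, every call site reduces to the two wrappers, e.g. the emulation
path above ends up as:

	/* Count the emulated instruction, plus a branch event if applicable. */
	kvm_pmu_instruction_retired(vcpu);
	if (ctxt->is_branch)
		kvm_pmu_branch_retired(vcpu);

so the anticipated rework of how eligible PMCs are tracked should be
contained entirely within pmu.c.
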
LGTM.

Reviewed-by: Dapeng Mi <dapeng1.mi@...ux.intel.com>