Message-ID: <20251205123940.GY2528459@noisy.programming.kicks-ass.net>
Date: Fri, 5 Dec 2025 13:39:40 +0100
From: Peter Zijlstra <peterz@...radead.org>
To: Dapeng Mi <dapeng1.mi@...ux.intel.com>
Cc: Ingo Molnar <mingo@...hat.com>,
	Arnaldo Carvalho de Melo <acme@...nel.org>,
	Namhyung Kim <namhyung@...nel.org>,
	Thomas Gleixner <tglx@...utronix.de>,
	Dave Hansen <dave.hansen@...ux.intel.com>,
	Ian Rogers <irogers@...gle.com>,
	Adrian Hunter <adrian.hunter@...el.com>,
	Jiri Olsa <jolsa@...nel.org>,
	Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
	Andi Kleen <ak@...ux.intel.com>,
	Eranian Stephane <eranian@...gle.com>,
	Mark Rutland <mark.rutland@....com>, broonie@...nel.org,
	Ravi Bangoria <ravi.bangoria@....com>, linux-kernel@...r.kernel.org,
	linux-perf-users@...r.kernel.org, Zide Chen <zide.chen@...el.com>,
	Falcon Thomas <thomas.falcon@...el.com>,
	Dapeng Mi <dapeng1.mi@...el.com>, Xudong Hao <xudong.hao@...el.com>
Subject: Re: [Patch v5 16/19] perf/x86: Activate back-to-back NMI detection
 for arch-PEBS induced NMIs

On Wed, Dec 03, 2025 at 02:54:57PM +0800, Dapeng Mi wrote:
> When two or more identical PEBS events with the same sampling period are
> programmed on a mix of PDIST and non-PDIST counters, multiple
> back-to-back NMIs can be triggered.

This is a hardware defect -- albeit a fairly common one.
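
The usual mitigation, sketched below purely for illustration (this is
not the kernel's exact code, and drain_events() is a hypothetical
stand-in for the real drain path), keys off the 'handled' count the PMI
handler returns: if the previous PMI handled more than one event, the
next otherwise-unclaimed NMI is assumed to be the latched back-to-back
NMI and is swallowed rather than reported as unknown.

	static int prev_handled;	/* events handled by the previous PMI */

	static int drain_events(void);	/* hypothetical: number of events drained */

	static int pmu_nmi_handler(void)
	{
		int handled = drain_events();

		if (handled) {
			prev_handled = handled;
			return handled;		/* NMI claimed */
		}

		/*
		 * Nothing pending, but the previous PMI handled more than
		 * one event: hardware may have latched a second, now
		 * spurious NMI. Swallow it instead of reporting an
		 * unknown NMI.
		 */
		if (prev_handled > 1) {
			prev_handled = 0;
			return 1;
		}

		return 0;			/* genuinely unknown NMI */
	}

This is why the drain functions growing a meaningful return value
matters: it is what feeds that count.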


> diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
> index da48bcde8fce..a130d3f14844 100644
> --- a/arch/x86/events/intel/core.c
> +++ b/arch/x86/events/intel/core.c
> @@ -3351,8 +3351,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
>  	 */
>  	if (__test_and_clear_bit(GLOBAL_STATUS_ARCH_PEBS_THRESHOLD_BIT,
>  				 (unsigned long *)&status)) {
> -		handled++;
> -		static_call(x86_pmu_drain_pebs)(regs, &data);
> +		handled += static_call(x86_pmu_drain_pebs)(regs, &data);
>  
>  		if (cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS] &&
>  		    is_pebs_counter_event_group(cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS]))

Note that the old code would unconditionally do handled++, while the new code:

> diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
> index a01c72c03bd6..c7cdcd585574 100644
> --- a/arch/x86/events/intel/ds.c
> +++ b/arch/x86/events/intel/ds.c
> @@ -2759,7 +2759,7 @@ __intel_pmu_pebs_events(struct perf_event *event,
>  	__intel_pmu_pebs_last_event(event, iregs, regs, data, at, count, setup_sample);
>  }
>  
> -static void intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sample_data *data)
> +static int intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sample_data *data)
>  {
>  	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
>  	struct debug_store *ds = cpuc->ds;
> @@ -2768,7 +2768,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sample_
>  	int n;
>  
>  	if (!x86_pmu.pebs_active)
> -		return;
> +		return 0;
>  
>  	at  = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
>  	top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;
> @@ -2779,22 +2779,24 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sample_
>  	ds->pebs_index = ds->pebs_buffer_base;
>  
>  	if (!test_bit(0, cpuc->active_mask))
> -		return;
> +		return 0;
>  
>  	WARN_ON_ONCE(!event);
>  
>  	if (!event->attr.precise_ip)
> -		return;
> +		return 0;
>  
>  	n = top - at;
>  	if (n <= 0) {
>  		if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
>  			intel_pmu_save_and_restart_reload(event, 0);
> -		return;
> +		return 0;
>  	}
>  
>  	__intel_pmu_pebs_events(event, iregs, data, at, top, 0, n,
>  				setup_pebs_fixed_sample_data);
> +
> +	return 0;
>  }
>  
>  static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, u64 mask)
> @@ -2817,7 +2819,7 @@ static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, u64
>  	}
>  }
>  
> -static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_data *data)
> +static int intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_data *data)
>  {
>  	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
>  	struct debug_store *ds = cpuc->ds;
> @@ -2830,7 +2832,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d
>  	u64 mask;
>  
>  	if (!x86_pmu.pebs_active)
> -		return;
> +		return 0;
>  
>  	base = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
>  	top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;
> @@ -2846,7 +2848,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d
>  
>  	if (unlikely(base >= top)) {
>  		intel_pmu_pebs_event_update_no_drain(cpuc, mask);
> -		return;
> +		return 0;
>  	}
>  
>  	for (at = base; at < top; at += x86_pmu.pebs_record_size) {
> @@ -2931,6 +2933,8 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d
>  						setup_pebs_fixed_sample_data);
>  		}
>  	}
> +
> +	return 0;
>  }
>  
>  static __always_inline void
> @@ -2984,7 +2988,7 @@ __intel_pmu_handle_last_pebs_record(struct pt_regs *iregs,
>  
>  }
>  
> -static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_data *data)
> +static int intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_data *data)
>  {
>  	short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
>  	void *last[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS];
> @@ -2997,7 +3001,7 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d
>  	u64 mask;
>  
>  	if (!x86_pmu.pebs_active)
> -		return;
> +		return 0;
>  
>  	base = (struct pebs_basic *)(unsigned long)ds->pebs_buffer_base;
>  	top = (struct pebs_basic *)(unsigned long)ds->pebs_index;
> @@ -3010,7 +3014,7 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d
>  
>  	if (unlikely(base >= top)) {
>  		intel_pmu_pebs_event_update_no_drain(cpuc, mask);
> -		return;
> +		return 0;
>  	}
>  
>  	if (!iregs)
> @@ -3032,9 +3036,11 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d
>  
>  	__intel_pmu_handle_last_pebs_record(iregs, regs, data, mask, counts, last,
>  					    setup_pebs_adaptive_sample_data);
> +
> +	return 0;
>  }

will now do handled += 0 for all of these legacy drain paths, which is a
change in behaviour; a sketch of the difference follows.
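
Purely as an illustration of the point above (my paraphrase, not part
of the patch):

	/* Before: the threshold bit alone counted as one handled event. */
	handled++;
	static_call(x86_pmu_drain_pebs)(regs, &data);	/* return value ignored */

	/* After: the legacy drain functions quoted above all 'return 0',
	 * so on these paths 'handled' is no longer incremented at all. */
	handled += static_call(x86_pmu_drain_pebs)(regs, &data);

Also: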

> -static void intel_pmu_drain_arch_pebs(struct pt_regs *iregs,
> +static int intel_pmu_drain_arch_pebs(struct pt_regs *iregs,
>  				      struct perf_sample_data *data)
>  {
>  	short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
> @@ -3044,13 +3050,14 @@ static void intel_pmu_drain_arch_pebs(struct pt_regs *iregs,
>  	struct x86_perf_regs perf_regs;
>  	struct pt_regs *regs = &perf_regs.regs;
>  	void *base, *at, *top;
> +	u64 events_bitmap = 0;
>  	u64 mask;
>  
>  	rdmsrq(MSR_IA32_PEBS_INDEX, index.whole);
>  
>  	if (unlikely(!index.wr)) {
>  		intel_pmu_pebs_event_update_no_drain(cpuc, X86_PMC_IDX_MAX);
> -		return;
> +		return 0;
>  	}
>  
>  	base = cpuc->pebs_vaddr;
> @@ -3089,6 +3096,7 @@ static void intel_pmu_drain_arch_pebs(struct pt_regs *iregs,
>  
>  		basic = at + sizeof(struct arch_pebs_header);
>  		pebs_status = mask & basic->applicable_counters;
> +		events_bitmap |= pebs_status;
>  		__intel_pmu_handle_pebs_record(iregs, regs, data, at,
>  					       pebs_status, counts, last,
>  					       setup_arch_pebs_sample_data);
> @@ -3108,6 +3116,8 @@ static void intel_pmu_drain_arch_pebs(struct pt_regs *iregs,
>  	__intel_pmu_handle_last_pebs_record(iregs, regs, data, mask,
>  					    counts, last,
>  					    setup_arch_pebs_sample_data);
> +
	/*
	 * Comment that explains the arch pebs defect goes here.
	 */
> +	return hweight64(events_bitmap);
>  }
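
For reference (my illustration, not part of the patch): hweight64() is
the kernel's 64-bit population count, so the arch-PEBS drain path above
reports one handled event per distinct counter that produced a record:

	u64 events_bitmap = BIT_ULL(0) | BIT_ULL(2);	/* counters 0 and 2 fired */
	int handled = hweight64(events_bitmap);		/* handled == 2 */

That count is what lets the NMI code judge whether a follow-up NMI is a
plausible back-to-back occurrence.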
