Message-ID: <20161216080745.GA6109@krava>
Date:   Fri, 16 Dec 2016 09:07:45 +0100
From:   Jiri Olsa <jolsa@...hat.com>
To:     Peter Zijlstra <peterz@...radead.org>
Cc:     kbuild-all@...org, Peter Zijlstra <peterz@...radead.org>,
        kbuild test robot <lkp@...el.com>,
        Andi Kleen <andi@...stfloor.org>,
        lkml <linux-kernel@...r.kernel.org>,
        Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
        Vince Weaver <vince@...ter.net>, Ingo Molnar <mingo@...nel.org>
Subject: [PATCHv3] perf/x86/intel: Account interrupts for PEBS errors

On Fri, Dec 16, 2016 at 07:41:04AM +0800, kbuild test robot wrote:
> Hi Jiri,
> 
> [auto build test WARNING on tip/perf/core]
> [also build test WARNING on v4.9 next-20161215]
> [if your patch is applied to the wrong git tree, please drop us a note to help improve the system]
> 
> url:    https://github.com/0day-ci/linux/commits/Jiri-Olsa/perf-x86-intel-Account-interrupts-for-PEBS-errors/20161216-042644
> config: x86_64-randconfig-a0-12160640 (attached as .config)
> compiler: gcc-4.4 (Debian 4.4.7-8) 4.4.7
> reproduce:
>         # save the attached .config to linux build tree
>         make ARCH=x86_64 
> 
> All warnings (new ones prefixed by >>):
> 
>    kernel/events/core.c: In function '__perf_event_overflow':
> >> kernel/events/core.c:7086: warning: unused variable 'hwc'
>    kernel/events/core.o: warning: objtool: perf_try_init_event()+0x43: function has unreachable instruction
> 
> vim +/hwc +7086 kernel/events/core.c
> 
>   7070	}
>   7071	
>   7072	int perf_event_account_interrupt(struct perf_event *event)
>   7073	{
>   7074		return __perf_event_account_interrupt(event, 1);
>   7075	}
>   7076	
>   7077	/*
>   7078	 * Generic event overflow handling, sampling.
>   7079	 */
>   7080	
>   7081	static int __perf_event_overflow(struct perf_event *event,
>   7082					   int throttle, struct perf_sample_data *data,
>   7083					   struct pt_regs *regs)
>   7084	{
>   7085		int events = atomic_read(&event->event_limit);
> > 7086		struct hw_perf_event *hwc = &event->hw;
>   7087		int ret = 0;
>   7088	

ugh, I guess I'm too used to the perf tool's build failing on unused variables :-\

v3 attached

thanks,
jirka


---
It's possible to set up PEBS events and get only errors and not
a single sample, like on SNB-X (model 45) and IVB-EP (model 62),
via 2 perf commands running simultaneously:

    taskset -c 1 ./perf record -c 4 -e branches:pp -j any -C 10

This leads to a soft lockup, because the error path of
intel_pmu_drain_pebs_nhm() does not account event->hw.interrupts
for error PEBS interrupts, so the event is never stopped when it
gets over the max_samples_per_tick limit:

  NMI watchdog: BUG: soft lockup - CPU#22 stuck for 22s! [perf_fuzzer:5816]
  ...
  task: ffff880273148000 task.stack: ffffc90002d58000
  RIP: 0010:[<ffffffff81159232>]  [<ffffffff81159232>] smp_call_function_single+0xe2/0x140
  RSP: 0018:ffffc90002d5bd60  EFLAGS: 00000202
  ...
  Call Trace:
   ? trace_hardirqs_on_caller+0xf5/0x1b0
   ? perf_cgroup_attach+0x70/0x70
   perf_install_in_context+0x199/0x1b0
   ? ctx_resched+0x90/0x90
   SYSC_perf_event_open+0x641/0xf90
   SyS_perf_event_open+0x9/0x10
   do_syscall_64+0x6c/0x1f0
   entry_SYSCALL64_slow_path+0x25/0x25
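
To make the accounting gap concrete, here's a standalone sketch
(userspace C with made-up types and a made-up limit, not the kernel
code) of the per-tick interrupt accounting the error path was
skipping:

    #include <stdio.h>

    /* stand-in for the kernel's per-tick sample limit */
    #define MAX_SAMPLES_PER_TICK 4

    struct hw_event {
            unsigned long interrupts;       /* PMIs seen this tick */
    };

    /*
     * Every PMI has to bump the count; returns 1 once the event
     * crosses the limit and the caller has to stop (throttle) it.
     * Skipping this on the PEBS error path means an error storm
     * never hits the limit, hence the soft lockup above.
     */
    static int account_interrupt(struct hw_event *hwc)
    {
            return ++hwc->interrupts >= MAX_SAMPLES_PER_TICK;
    }

    int main(void)
    {
            struct hw_event hwc = { 0 };
            int pmi;

            for (pmi = 1; pmi <= 6; pmi++) {
                    if (account_interrupt(&hwc)) {
                            printf("PMI %d: over the limit, stop the event\n", pmi);
                            break;
                    }
                    printf("PMI %d: accounted\n", pmi);
            }
            return 0;
    }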

Add perf_event_account_interrupt(), which does the event's interrupt
and frequency checks, and call it from the drain_pebs error path.

Keep the pending_kill and pending_wakeup logic only in the
__perf_event_overflow() path, because it makes sense only when
there is actual data to deliver.

Signed-off-by: Jiri Olsa <jolsa@...nel.org>
---
 arch/x86/events/intel/ds.c |  6 +++++-
 include/linux/perf_event.h |  1 +
 kernel/events/core.c       | 46 ++++++++++++++++++++++++++++++----------------
 3 files changed, 36 insertions(+), 17 deletions(-)

diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index be202390bbd3..9dfeeeca0ea8 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1389,9 +1389,13 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 			continue;
 
 		/* log dropped samples number */
-		if (error[bit])
+		if (error[bit]) {
 			perf_log_lost_samples(event, error[bit]);
 
+			if (perf_event_account_interrupt(event))
+				x86_pmu_stop(event, 0);
+		}
+
 		if (counts[bit]) {
 			__intel_pmu_pebs_event(event, iregs, base,
 					       top, bit, counts[bit]);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 4741ecdb9817..78ed8105e64d 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1259,6 +1259,7 @@ extern void perf_event_disable(struct perf_event *event);
 extern void perf_event_disable_local(struct perf_event *event);
 extern void perf_event_disable_inatomic(struct perf_event *event);
 extern void perf_event_task_tick(void);
+extern int perf_event_account_interrupt(struct perf_event *event);
 #else /* !CONFIG_PERF_EVENTS: */
 static inline void *
 perf_aux_output_begin(struct perf_output_handle *handle,
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 02c8421f8c01..bb24d5abc40a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7034,25 +7034,11 @@ static void perf_log_itrace_start(struct perf_event *event)
 	perf_output_end(&handle);
 }
 
-/*
- * Generic event overflow handling, sampling.
- */
-
-static int __perf_event_overflow(struct perf_event *event,
-				   int throttle, struct perf_sample_data *data,
-				   struct pt_regs *regs)
+static int __perf_event_account_interrupt(struct perf_event *event, int throttle)
 {
-	int events = atomic_read(&event->event_limit);
 	struct hw_perf_event *hwc = &event->hw;
-	u64 seq;
 	int ret = 0;
-
-	/*
-	 * Non-sampling counters might still use the PMI to fold short
-	 * hardware counters, ignore those.
-	 */
-	if (unlikely(!is_sampling_event(event)))
-		return 0;
+	u64 seq;
 
 	seq = __this_cpu_read(perf_throttled_seq);
 	if (seq != hwc->interrupts_seq) {
@@ -7080,6 +7066,34 @@ static int __perf_event_overflow(struct perf_event *event,
 			perf_adjust_period(event, delta, hwc->last_period, true);
 	}
 
+	return ret;
+}
+
+int perf_event_account_interrupt(struct perf_event *event)
+{
+	return __perf_event_account_interrupt(event, 1);
+}
+
+/*
+ * Generic event overflow handling, sampling.
+ */
+
+static int __perf_event_overflow(struct perf_event *event,
+				   int throttle, struct perf_sample_data *data,
+				   struct pt_regs *regs)
+{
+	int events = atomic_read(&event->event_limit);
+	int ret = 0;
+
+	/*
+	 * Non-sampling counters might still use the PMI to fold short
+	 * hardware counters, ignore those.
+	 */
+	if (unlikely(!is_sampling_event(event)))
+		return 0;
+
+	ret = __perf_event_account_interrupt(event, throttle);
+
 	/*
 	 * XXX event_limit might not quite work as expected on inherited
 	 * events
-- 
2.7.4
