Date:   Thu,  8 Dec 2016 16:27:11 -0500
From:   kan.liang@...el.com
To:     peterz@...radead.org, mingo@...hat.com, acme@...nel.org,
        linux-kernel@...r.kernel.org
Cc:     alexander.shishkin@...ux.intel.com, tglx@...utronix.de,
        namhyung@...nel.org, jolsa@...nel.org, adrian.hunter@...el.com,
        wangnan0@...wei.com, mark.rutland@....com, andi@...stfloor.org,
        Kan Liang <kan.liang@...el.com>
Subject: [PATCH V3 3/6] perf/x86: implement overhead stat for x86 pmu

From: Kan Liang <kan.liang@...el.com>

In STAT_START, reset the overhead counters for each possible cpuctx of the
event's pmu.
In STAT_DONE, generate overhead information for each possible cpuctx, then
reset the overhead counters.
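
For context, a minimal userspace sketch of how this callback might be
driven. It assumes the PERF_EVENT_IOC_STAT ioctl and the attr.overhead
bit introduced earlier in this series (neither is in mainline headers),
so it only builds against a tree carrying the full series:

	#include <linux/perf_event.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		struct perf_event_attr attr;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_CPU_CYCLES;
		attr.overhead = 1;	/* new attribute bit from this series */

		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0)
			return 1;

		/* STAT_START: x86_pmu_stat() zeroes each cpuctx's counters */
		ioctl(fd, PERF_EVENT_IOC_STAT, PERF_IOC_FLAG_STAT_START);

		/* ... workload under measurement ... */

		/* STAT_DONE: log overhead for each cpu and type, then reset */
		ioctl(fd, PERF_EVENT_IOC_STAT, PERF_IOC_FLAG_STAT_DONE);

		close(fd);
		return 0;
	}

Without attr.overhead set, or with a flag outside STAT_START/STAT_DONE,
x86_pmu_stat() below returns -EINVAL.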

Signed-off-by: Kan Liang <kan.liang@...el.com>
---
 arch/x86/events/core.c | 37 +++++++++++++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 6e395c9..09ab36a 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2198,6 +2198,40 @@ static void x86_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
 		x86_pmu.sched_task(ctx, sched_in);
 }
 
+static int x86_pmu_stat(struct perf_event *event, u32 flag)
+{
+	struct perf_cpu_context *cpuctx;
+	struct pmu *pmu = event->pmu;
+	int cpu, i;
+
+	if (!(flag & (PERF_IOC_FLAG_STAT_START | PERF_IOC_FLAG_STAT_DONE)))
+		return -EINVAL;
+
+	if (!event->attr.overhead)
+		return -EINVAL;
+
+	if (flag & PERF_IOC_FLAG_STAT_DONE) {
+		for_each_possible_cpu(cpu) {
+			cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+
+			for (i = 0; i < PERF_OVERHEAD_MAX; i++) {
+				if (!cpuctx->overhead[i].nr)
+					continue;
+				perf_log_overhead(event, i, cpu,
+						  cpuctx->overhead[i].nr,
+						  cpuctx->overhead[i].time);
+			}
+		}
+	}
+
+	for_each_possible_cpu(cpu) {
+		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+		memset(cpuctx->overhead, 0, PERF_OVERHEAD_MAX * sizeof(struct perf_overhead_entry));
+	}
+
+	return 0;
+}
+
 void perf_check_microcode(void)
 {
 	if (x86_pmu.check_microcode)
@@ -2228,6 +2262,9 @@ static struct pmu pmu = {
 
 	.event_idx		= x86_pmu_event_idx,
 	.sched_task		= x86_pmu_sched_task,
+
+	.stat			= x86_pmu_stat,
+
 	.task_ctx_size          = sizeof(struct x86_perf_task_context),
 };
 
-- 
2.4.3
