Message-ID: <1454056197-5893-3-git-send-email-ray.huang@amd.com>
Date: Fri, 29 Jan 2016 16:29:57 +0800
From: Huang Rui <ray.huang@....com>
To: Borislav Petkov <bp@...e.de>,
Peter Zijlstra <peterz@...radead.org>,
"Ingo Molnar" <mingo@...nel.org>,
Andy Lutomirski <luto@...capital.net>,
"Thomas Gleixner" <tglx@...utronix.de>,
Robert Richter <rric@...nel.org>,
Jacob Shin <jacob.w.shin@...il.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
"Kan Liang" <kan.liang@...el.com>
CC: <linux-kernel@...r.kernel.org>, <spg_linux_kernel@....com>,
<x86@...nel.org>,
Suravee Suthikulpanit <suravee.suthikulpanit@....com>,
Aravind Gopalakrishnan <Aravind.Gopalakrishnan@....com>,
Borislav Petkov <bp@...en8.de>,
Fengguang Wu <fengguang.wu@...el.com>,
Huang Rui <ray.huang@....com>
Subject: [PATCH 2/2] perf/x86/msr: Add AMD instructions retired performance counter
AMD Zeppelin (Family 17h, Model 00h) introduces an instructions
retired performance counter, indicated by CPUID.8000_0008H:EBX[1].
A dedicated Instructions Retired register (MSR 0xC000_00E9)
increments once for every instruction retired.
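For illustration only (not part of this patch), a minimal user-space
sketch along these lines can check the CPUID bit and read the raw
counter through the msr driver; the /dev/cpu path, root privilege and
loaded msr module are assumptions, not requirements introduced by this
change:

  /*
   * Hypothetical sketch: verify CPUID.8000_0008H:EBX[1] and read the
   * Instructions Retired MSR on CPU0 via the msr character device.
   * Requires the msr module to be loaded and root privileges.
   */
  #include <cpuid.h>
  #include <fcntl.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <unistd.h>

  #define MSR_F17H_IRPERF 0xc00000e9UL

  int main(void)
  {
  	unsigned int eax, ebx, ecx, edx;
  	uint64_t count;
  	int fd;

  	/* CPUID.8000_0008H:EBX[1] advertises the IRPERF counter. */
  	if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx) ||
  	    !(ebx & (1 << 1))) {
  		fprintf(stderr, "IRPERF not supported\n");
  		return 1;
  	}

  	/* The msr driver maps the MSR address to the file offset. */
  	fd = open("/dev/cpu/0/msr", O_RDONLY);
  	if (fd < 0 ||
  	    pread(fd, &count, sizeof(count), MSR_F17H_IRPERF) != sizeof(count)) {
  		perror("rdmsr");
  		return 1;
  	}

  	printf("instructions retired on CPU0: %llu\n",
  	       (unsigned long long)count);
  	close(fd);
  	return 0;
  }

With this patch applied, the same count should also be readable
through the msr PMU added below, e.g.:

  perf stat -e msr/irperf/ -a sleep 1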
Signed-off-by: Huang Rui <ray.huang@....com>
---
arch/x86/include/asm/cpufeature.h | 1 +
arch/x86/include/asm/msr-index.h | 3 +++
arch/x86/kernel/cpu/perf_event_msr.c | 30 +++++++++++++++++++-----------
3 files changed, 23 insertions(+), 11 deletions(-)
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 1f9d682..a0ec7cc 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -259,6 +259,7 @@
/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */
+#define X86_FEATURE_IRPERF (13*32+1) /* Instructions Retired Count */
/*
* BUG word(s)
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 0e873e3..982dcda 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -308,6 +308,9 @@
#define MSR_AMD64_IBSOPDATA4 0xc001103d
#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
+/* Fam 17h MSRs */
+#define MSR_F17H_IRPERF 0xc00000e9
+
/* Fam 16h MSRs */
#define MSR_F16H_L2I_PERF_CTL 0xc0010230
#define MSR_F16H_L2I_PERF_CTR 0xc0010231
diff --git a/arch/x86/kernel/cpu/perf_event_msr.c b/arch/x86/kernel/cpu/perf_event_msr.c
index 6f6772f..7111400 100644
--- a/arch/x86/kernel/cpu/perf_event_msr.c
+++ b/arch/x86/kernel/cpu/perf_event_msr.c
@@ -7,6 +7,7 @@ enum perf_msr_id {
PERF_MSR_PPERF = 3,
PERF_MSR_SMI = 4,
PERF_MSR_PTSC = 5,
+ PERF_MSR_IRPERF = 6,
PERF_MSR_EVENT_MAX,
};
@@ -21,6 +22,11 @@ static bool test_ptsc(int idx)
return boot_cpu_has(X86_FEATURE_PTSC);
}
+static bool test_irperf(int idx)
+{
+ return boot_cpu_has(X86_FEATURE_IRPERF);
+}
+
static bool test_intel(int idx)
{
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
@@ -75,20 +81,22 @@ struct perf_msr {
bool (*test)(int idx);
};
-PMU_EVENT_ATTR_STRING(tsc, evattr_tsc, "event=0x00");
-PMU_EVENT_ATTR_STRING(aperf, evattr_aperf, "event=0x01");
-PMU_EVENT_ATTR_STRING(mperf, evattr_mperf, "event=0x02");
-PMU_EVENT_ATTR_STRING(pperf, evattr_pperf, "event=0x03");
-PMU_EVENT_ATTR_STRING(smi, evattr_smi, "event=0x04");
-PMU_EVENT_ATTR_STRING(ptsc, evattr_ptsc, "event=0x05");
+PMU_EVENT_ATTR_STRING(tsc, evattr_tsc, "event=0x00");
+PMU_EVENT_ATTR_STRING(aperf, evattr_aperf, "event=0x01");
+PMU_EVENT_ATTR_STRING(mperf, evattr_mperf, "event=0x02");
+PMU_EVENT_ATTR_STRING(pperf, evattr_pperf, "event=0x03");
+PMU_EVENT_ATTR_STRING(smi, evattr_smi, "event=0x04");
+PMU_EVENT_ATTR_STRING(ptsc, evattr_ptsc, "event=0x05");
+PMU_EVENT_ATTR_STRING(irperf, evattr_irperf, "event=0x06");
static struct perf_msr msr[] = {
- [PERF_MSR_TSC] = { 0, &evattr_tsc, NULL, },
- [PERF_MSR_APERF] = { MSR_IA32_APERF, &evattr_aperf, test_aperfmperf, },
- [PERF_MSR_MPERF] = { MSR_IA32_MPERF, &evattr_mperf, test_aperfmperf, },
- [PERF_MSR_PPERF] = { MSR_PPERF, &evattr_pperf, test_intel, },
- [PERF_MSR_SMI] = { MSR_SMI_COUNT, &evattr_smi, test_intel, },
+ [PERF_MSR_TSC] = { 0, &evattr_tsc, NULL, },
+ [PERF_MSR_APERF] = { MSR_IA32_APERF, &evattr_aperf, test_aperfmperf, },
+ [PERF_MSR_MPERF] = { MSR_IA32_MPERF, &evattr_mperf, test_aperfmperf, },
+ [PERF_MSR_PPERF] = { MSR_PPERF, &evattr_pperf, test_intel, },
+ [PERF_MSR_SMI] = { MSR_SMI_COUNT, &evattr_smi, test_intel, },
[PERF_MSR_PTSC] = { MSR_F15H_PTSC, &evattr_ptsc, test_ptsc, },
+ [PERF_MSR_IRPERF] = { MSR_F17H_IRPERF, &evattr_irperf, test_irperf, },
};
static struct attribute *events_attrs[PERF_MSR_EVENT_MAX + 1] = {
--
1.9.1