[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20110506091950.GA5081@elte.hu>
Date: Fri, 6 May 2011 11:19:50 +0200
From: Ingo Molnar <mingo@...e.hu>
To: Lin Ming <ming.m.lin@...el.com>
Cc: Peter Zijlstra <a.p.zijlstra@...llo.nl>,
linux-kernel <linux-kernel@...r.kernel.org>,
Mike Galbraith <efault@....de>,
Arnaldo Carvalho de Melo <acme@...hat.com>,
Frédéric Weisbecker <fweisbec@...il.com>,
Steven Rostedt <rostedt@...dmis.org>
Subject: [PATCH] perf events, x86: Implement Sandybridge last-level cache
events
Btw., there's another missing Intel SandyBridge related perf events feature as
well which was not implemented with the Intel offcore bits.
Peter did a raw first cut - entirely untested, see it below. Would you be
interested in testing it on Intel SandyBridge hw and sending (the working
version) to lkml with your Signed-off-by if the events look good to you in
some real tests (i.e. are counting real LL cache events)?
Thanks,
Ingo
------------------->
Subject: perf events, x86: Implement Sandybridge last-level cache events
From: Peter Zijlstra <peterz@...radead.org>
Date: Fri May 06 11:15:30 CEST 2011
Signed-off-by: Ingo Molnar <mingo@...e.hu>
---
arch/x86/kernel/cpu/perf_event_intel.c | 68 +++++++++++++++++++++++++++++++++
1 file changed, 68 insertions(+)
Index: tip/arch/x86/kernel/cpu/perf_event_intel.c
===================================================================
--- tip.orig/arch/x86/kernel/cpu/perf_event_intel.c
+++ tip/arch/x86/kernel/cpu/perf_event_intel.c
@@ -150,6 +150,72 @@ static u64 intel_pmu_event_map(int hw_ev
return intel_perfmon_event_map[hw_event];
}
+/*
+ * Sandy Bridge MSR_OFFCORE_RESPONSE bits;
+ * See IA32 SDM Vol 3B 30.8.5
+ *
+ * Bits 32..37 sit above the low 32 bits, so every flag must be a
+ * 64-bit constant: (1 << 32) shifts past the width of int (undefined
+ * behaviour) and (1 << 31) overflows signed int, hence 1ULL throughout.
+ */
+
+#define SNB_DMND_DATA_RD	(1ULL << 0)
+#define SNB_DMND_RFO		(1ULL << 1)
+#define SNB_DMND_IFETCH		(1ULL << 2)
+#define SNB_DMND_WB		(1ULL << 3)
+#define SNB_PF_DATA_RD		(1ULL << 4)
+#define SNB_PF_DATA_RFO		(1ULL << 5)
+#define SNB_PF_IFETCH		(1ULL << 6)
+#define SNB_PF_LLC_DATA_RD	(1ULL << 7)
+#define SNB_PF_LLC_RFO		(1ULL << 8)
+#define SNB_PF_LLC_IFETCH	(1ULL << 9)
+#define SNB_BUS_LOCKS		(1ULL << 10)
+#define SNB_STRM_ST		(1ULL << 11)
+			/* hole */
+#define SNB_OFFCORE_OTHER	(1ULL << 15)
+#define SNB_COMMON		(1ULL << 16)
+#define SNB_NO_SUPP		(1ULL << 17)
+#define SNB_LLC_HITM		(1ULL << 18)
+#define SNB_LLC_HITE		(1ULL << 19)
+#define SNB_LLC_HITS		(1ULL << 20)
+#define SNB_LLC_HITF		(1ULL << 21)
+#define SNB_LOCAL		(1ULL << 22)
+#define SNB_REMOTE		(0xffULL << 23)
+#define SNB_SNP_NONE		(1ULL << 31)
+#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
+#define SNB_SNP_MISS		(1ULL << 33)
+#define SNB_SNP_NO_FWD		(1ULL << 34)
+#define SNB_SNP_FWD		(1ULL << 35)
+#define SNB_HITM		(1ULL << 36)
+#define SNB_NON_DRAM		(1ULL << 37)
+
+/* request-type selections for the generic LL events */
+#define SNB_DMND_READ		(SNB_DMND_DATA_RD)
+#define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_DMND_WB|SNB_STRM_ST)
+#define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_DATA_RFO)
+
+/* accept any snoop response */
+#define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
+				 SNB_SNP_MISS|SNB_SNP_NO_FWD| \
+				 SNB_SNP_FWD|SNB_HITM)
+
+/*
+ * NOTE(review): the original draft left these four as the empty
+ * placeholder "()", which cannot compile once expanded below.
+ * Filled in from SDM Vol 3B 30.8.5 (supplier-info bits 17-30,
+ * snoop-info bits 31-37) -- TODO: verify the resulting counts
+ * against real LL cache events on SandyBridge hardware.
+ */
+#define SNB_L3_HIT		(SNB_NO_SUPP|SNB_LLC_HITM|SNB_LLC_HITE| \
+				 SNB_LLC_HITS|SNB_LLC_HITF|SNB_SNP_ANY)
+#define SNB_L3_MISS		(SNB_ALL_DRAM|SNB_NON_DRAM)
+#define SNB_L3_ACCESS		(SNB_L3_HIT|SNB_L3_MISS)
+
+#define SNB_ALL_DRAM		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
+#define SNB_REMOTE_DRAM		(SNB_REMOTE|SNB_SNP_ANY)
+
+/*
+ * MSR_OFFCORE_RESPONSE encodings backing the generic LL-cache events.
+ * Only the C(LL) slot is populated; every other cache level stays
+ * zero-initialized (no extra register needed there).
+ *
+ * NOTE(review): OP_PREFETCH uses SNB_DMND_PREFETCH, i.e. data-rd/rfo
+ * prefetches only -- instruction prefetch (SNB_PF_IFETCH) and the
+ * LLC-prefetch request bits are not counted; confirm that is intended.
+ */
+static __initconst const u64 snb_hw_cache_extra_regs
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(LL ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
+ [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_L3_MISS,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
+ [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_L3_MISS,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
+ [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
+ },
+ }
+};
+
static __initconst const u64 snb_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -1497,6 +1563,8 @@ static __init int intel_pmu_init(void)
case 42: /* SandyBridge */
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
+ sizeof(hw_cache_extra_regs));
intel_pmu_lbr_init_nhm();
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
Powered by blists - more mailing lists