Message-ID: <CABPqkBRsgT0J45AGgerMReyitHx+UUSs3dT0AYzpiMm_Ncw8EQ@mail.gmail.com>
Date: Fri, 1 Feb 2013 18:22:37 +0100
From: Stephane Eranian <eranian@...gle.com>
To: Andi Kleen <andi@...stfloor.org>
Cc: Ingo Molnar <mingo@...nel.org>,
LKML <linux-kernel@...r.kernel.org>,
Peter Zijlstra <a.p.zijlstra@...llo.nl>,
Andrew Morton <akpm@...ux-foundation.org>,
Arnaldo Carvalho de Melo <acme@...hat.com>,
Jiri Olsa <jolsa@...hat.com>,
Namhyung Kim <namhyung@...nel.org>,
Andi Kleen <ak@...ux.intel.com>
Subject: Re: [PATCH 02/12] perf, x86: Basic Haswell PMU support v3
On Thu, Jan 31, 2013 at 11:51 PM, Andi Kleen <andi@...stfloor.org> wrote:
> From: Andi Kleen <ak@...ux.intel.com>
>
> Add basic Haswell PMU support.
>
> Similar to SandyBridge, but has a few new events. Further
> differences are handled in follow-on patches.
>
> There are some new counter flags that need to be prevented
> from being set on fixed counters.
>
> Contains fixes from Stephane Eranian
>
> v2: Folded TSX bits into standard FIXED_EVENT_CONSTRAINTS
> v3: Use SNB LBR init code. Comment fix (Stephane Eranian)
> Signed-off-by: Andi Kleen <ak@...ux.intel.com>
Looks OK to me, just a minor typo in a comment, no big deal.
Reviewed-by: Stephane Eranian <eranian@...gle.com>
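
For anyone skimming the patch: the fixed counters are protected because the
two TSX bits become part of the constraint's cmask, so a config with bit 32
or 33 set no longer matches a fixed-counter constraint code. Below is a
minimal user-space sketch of that matching rule, not the kernel code itself;
the X86_RAW_EVENT_MASK value used here is illustrative only.

#include <stdio.h>
#include <stdint.h>

#define HSW_INTX                (1ULL << 32)
#define HSW_INTX_CHECKPOINTED   (1ULL << 33)
#define X86_RAW_EVENT_MASK      0xFFFFFFULL   /* illustrative value only */
#define FIXED_EVENT_FLAGS       (X86_RAW_EVENT_MASK | HSW_INTX | HSW_INTX_CHECKPOINTED)

/* same idea as EVENT_CONSTRAINT(): the config must equal the code under cmask */
static int constraint_matches(uint64_t config, uint64_t code, uint64_t cmask)
{
	return (config & cmask) == code;
}

int main(void)
{
	uint64_t inst_retired      = 0x00c0;             /* INST_RETIRED.ANY */
	uint64_t inst_retired_intx = 0x00c0 | HSW_INTX;  /* same event, intx qualifier */

	/* matches the fixed-counter constraint -> prints 1 */
	printf("plain event: %d\n",
	       constraint_matches(inst_retired, 0x00c0, FIXED_EVENT_FLAGS));

	/* bit 32 is covered by the cmask but not by the code -> prints 0 */
	printf("intx event:  %d\n",
	       constraint_matches(inst_retired_intx, 0x00c0, FIXED_EVENT_FLAGS));
	return 0;
}

The plain event still matches the fixed-counter constraint, while the
intx-qualified one falls through and has to be scheduled on a generic counter.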
> ---
> arch/x86/include/asm/perf_event.h | 3 +++
> arch/x86/kernel/cpu/perf_event.h | 5 ++++-
> arch/x86/kernel/cpu/perf_event_intel.c | 31 ++++++++++++++++++++++++++++++-
> 3 files changed, 37 insertions(+), 2 deletions(-)
>
> diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
> index 4fabcdf..4003bb6 100644
> --- a/arch/x86/include/asm/perf_event.h
> +++ b/arch/x86/include/asm/perf_event.h
> @@ -29,6 +29,9 @@
> #define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23)
> #define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL
>
> +#define HSW_INTX (1ULL << 32)
> +#define HSW_INTX_CHECKPOINTED (1ULL << 33)
> +
> #define AMD_PERFMON_EVENTSEL_GUESTONLY (1ULL << 40)
> #define AMD_PERFMON_EVENTSEL_HOSTONLY (1ULL << 41)
>
> diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
> index 115c1ea..8941899 100644
> --- a/arch/x86/kernel/cpu/perf_event.h
> +++ b/arch/x86/kernel/cpu/perf_event.h
> @@ -219,11 +219,14 @@ struct cpu_hw_events {
> * - inv
> * - edge
> * - cnt-mask
> + * - intx
> + * - intx_cp
> * The other filters are supported by fixed counters.
> * The any-thread option is supported starting with v3.
> */
> +#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_INTX|HSW_INTX_CHECKPOINTED)
> #define FIXED_EVENT_CONSTRAINT(c, n) \
> - EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)
> + EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
>
> /*
> * Constraint on the Event code + UMask
> diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
> index 93b9e11..78045e5 100644
> --- a/arch/x86/kernel/cpu/perf_event_intel.c
> +++ b/arch/x86/kernel/cpu/perf_event_intel.c
> @@ -101,7 +101,7 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
> FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
> FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
> FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
> - INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
> + INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */
Minor tidbit: you fixed the comment for SNB but not for HSW!
> INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
> INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
> EVENT_CONSTRAINT_END
> @@ -133,6 +133,17 @@ static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
> EVENT_EXTRA_END
> };
>
> +static struct event_constraint intel_hsw_event_constraints[] =
> +{
> + FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
> + FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
> + FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
> + INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
That's where it needs to be fixed too.
> + INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
> + INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
> + EVENT_CONSTRAINT_END
> +};
> +
> static u64 intel_pmu_event_map(int hw_event)
> {
> return intel_perfmon_event_map[hw_event];
> @@ -2107,6 +2118,24 @@ __init int intel_pmu_init(void)
> break;
>
>
> + case 60: /* Haswell Client */
> + case 70:
> + case 71:
> + memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
> + sizeof(hw_cache_event_ids));
> +
> + intel_pmu_lbr_init_snb();
> +
> + x86_pmu.event_constraints = intel_hsw_event_constraints;
> +
> + x86_pmu.extra_regs = intel_snb_extra_regs;
> + /* all extra regs are per-cpu when HT is on */
> + x86_pmu.er_flags |= ERF_HAS_RSP_1;
> + x86_pmu.er_flags |= ERF_NO_HT_SHARING;
> +
> + pr_cont("Haswell events, ");
> + break;
> +
> default:
> switch (x86_pmu.version) {
> case 1:
> --
> 1.7.7.6
>