Message-ID: <YEoST/CMrA99gPZB@hirez.programming.kicks-ass.net>
Date: Thu, 11 Mar 2021 13:51:27 +0100
From: Peter Zijlstra <peterz@...radead.org>
To: kan.liang@...ux.intel.com
Cc: mingo@...nel.org, linux-kernel@...r.kernel.org, acme@...nel.org,
tglx@...utronix.de, bp@...en8.de, namhyung@...nel.org,
jolsa@...hat.com, ak@...ux.intel.com, yao.jin@...ux.intel.com,
alexander.shishkin@...ux.intel.com, adrian.hunter@...el.com
Subject: Re: [PATCH V2 20/25] perf/x86/intel: Add Alder Lake Hybrid support
On Wed, Mar 10, 2021 at 08:37:56AM -0800, kan.liang@...ux.intel.com wrote:
> @@ -4059,6 +4099,34 @@ tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
> return c;
> }
>
> +static struct event_constraint *
> +adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
> +			  struct perf_event *event)
> +{
> +	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
> +
> +	if (pmu->cpu_type == INTEL_HYBRID_TYPE_CORE)
> +		return spr_get_event_constraints(cpuc, idx, event);
> +	else if (pmu->cpu_type == INTEL_HYBRID_TYPE_ATOM)
> +		return tnt_get_event_constraints(cpuc, idx, event);
> +
> +	WARN_ON(1);
> +	return &emptyconstraint;
> +}
> +
> +static int adl_hw_config(struct perf_event *event)
> +{
> +	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
> +
> +	if (pmu->cpu_type == INTEL_HYBRID_TYPE_CORE)
> +		return hsw_hw_config(event);
> +	else if (pmu->cpu_type == INTEL_HYBRID_TYPE_ATOM)
> +		return intel_pmu_hw_config(event);
> +
> +	WARN_ON(1);
> +	return -EOPNOTSUPP;
> +}
> +
> /*
> * Broadwell:
> *
> @@ -5266,6 +5342,84 @@ static const struct attribute_group *attr_update[] = {
> NULL,
> };
>
> +EVENT_ATTR_STR_HYBRID(slots, slots_hybrid, "event=0x00,umask=0x4", INTEL_HYBRID_TYPE_CORE);
> +EVENT_ATTR_STR_HYBRID(topdown-retiring, td_retiring_hybrid, "event=0xc2,umask=0x0;event=0x00,umask=0x80", INTEL_HYBRID_TYPE_ATOM | INTEL_HYBRID_TYPE_CORE);
> +EVENT_ATTR_STR_HYBRID(topdown-bad-spec, td_bad_spec_hybrid, "event=0x73,umask=0x0;event=0x00,umask=0x81", INTEL_HYBRID_TYPE_ATOM | INTEL_HYBRID_TYPE_CORE);
> +EVENT_ATTR_STR_HYBRID(topdown-fe-bound, td_fe_bound_hybrid, "event=0x71,umask=0x0;event=0x00,umask=0x82", INTEL_HYBRID_TYPE_ATOM | INTEL_HYBRID_TYPE_CORE);
> +EVENT_ATTR_STR_HYBRID(topdown-be-bound, td_be_bound_hybrid, "event=0x74,umask=0x0;event=0x00,umask=0x83", INTEL_HYBRID_TYPE_ATOM | INTEL_HYBRID_TYPE_CORE);
> +EVENT_ATTR_STR_HYBRID(topdown-heavy-ops, td_heavy_ops_hybrid, "event=0x00,umask=0x84", INTEL_HYBRID_TYPE_CORE);
> +EVENT_ATTR_STR_HYBRID(topdown-br-mispredict, td_br_mispredict_hybrid, "event=0x00,umask=0x85", INTEL_HYBRID_TYPE_CORE);
> +EVENT_ATTR_STR_HYBRID(topdown-fetch-lat, td_fetch_lat_hybrid, "event=0x00,umask=0x86", INTEL_HYBRID_TYPE_CORE);
> +EVENT_ATTR_STR_HYBRID(topdown-mem-bound, td_mem_bound_hybrid, "event=0x00,umask=0x87", INTEL_HYBRID_TYPE_CORE);
> +
> +static struct attribute *adl_hybrid_events_attrs[] = {
> +	EVENT_PTR(slots_hybrid),
> +	EVENT_PTR(td_retiring_hybrid),
> +	EVENT_PTR(td_bad_spec_hybrid),
> +	EVENT_PTR(td_fe_bound_hybrid),
> +	EVENT_PTR(td_be_bound_hybrid),
> +	EVENT_PTR(td_heavy_ops_hybrid),
> +	EVENT_PTR(td_br_mispredict_hybrid),
> +	EVENT_PTR(td_fetch_lat_hybrid),
> +	EVENT_PTR(td_mem_bound_hybrid),
> +	NULL,
> +};
> +
> +/* Must be in IDX order */
> +EVENT_ATTR_STR_HYBRID(mem-loads, mem_ld_adl_hybrid, "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3", INTEL_HYBRID_TYPE_ATOM | INTEL_HYBRID_TYPE_CORE);
> +EVENT_ATTR_STR_HYBRID(mem-stores, mem_st_adl_hybrid, "event=0xd0,umask=0x6;event=0xcd,umask=0x2", INTEL_HYBRID_TYPE_ATOM | INTEL_HYBRID_TYPE_CORE);
> +EVENT_ATTR_STR_HYBRID(mem-loads-aux, mem_ld_aux_hybrid, "event=0x03,umask=0x82", INTEL_HYBRID_TYPE_CORE);
> +
> +static struct attribute *adl_hybrid_mem_attrs[] = {
> +	EVENT_PTR(mem_ld_adl_hybrid),
> +	EVENT_PTR(mem_st_adl_hybrid),
> +	EVENT_PTR(mem_ld_aux_hybrid),
> +	NULL,
> +};
> +
> +EVENT_ATTR_STR_HYBRID(tx-start, tx_start_adl_hybrid, "event=0xc9,umask=0x1", INTEL_HYBRID_TYPE_CORE);
> +EVENT_ATTR_STR_HYBRID(tx-commit, tx_commit_adl_hybrid, "event=0xc9,umask=0x2", INTEL_HYBRID_TYPE_CORE);
> +EVENT_ATTR_STR_HYBRID(tx-abort, tx_abort_adl_hybrid, "event=0xc9,umask=0x4", INTEL_HYBRID_TYPE_CORE);
> +EVENT_ATTR_STR_HYBRID(tx-conflict, tx_conflict_adl_hybrid, "event=0x54,umask=0x1", INTEL_HYBRID_TYPE_CORE);
> +EVENT_ATTR_STR_HYBRID(cycles-t, cycles_t_adl_hybrid, "event=0x3c,in_tx=1", INTEL_HYBRID_TYPE_CORE);
> +EVENT_ATTR_STR_HYBRID(cycles-ct, cycles_ct_adl_hybrid, "event=0x3c,in_tx=1,in_tx_cp=1", INTEL_HYBRID_TYPE_CORE);
> +EVENT_ATTR_STR_HYBRID(tx-capacity-read, tx_capacity_read_adl_hybrid, "event=0x54,umask=0x80", INTEL_HYBRID_TYPE_CORE);
> +EVENT_ATTR_STR_HYBRID(tx-capacity-write, tx_capacity_write_adl_hybrid, "event=0x54,umask=0x2", INTEL_HYBRID_TYPE_CORE);
> +
> +static struct attribute *adl_hybrid_tsx_attrs[] = {
> +	EVENT_PTR(tx_start_adl_hybrid),
> +	EVENT_PTR(tx_abort_adl_hybrid),
> +	EVENT_PTR(tx_commit_adl_hybrid),
> +	EVENT_PTR(tx_capacity_read_adl_hybrid),
> +	EVENT_PTR(tx_capacity_write_adl_hybrid),
> +	EVENT_PTR(tx_conflict_adl_hybrid),
> +	EVENT_PTR(cycles_t_adl_hybrid),
> +	EVENT_PTR(cycles_ct_adl_hybrid),
> +	NULL,
> +};
> +
> +FORMAT_ATTR_HYBRID(in_tx, INTEL_HYBRID_TYPE_CORE);
> +FORMAT_ATTR_HYBRID(in_tx_cp, INTEL_HYBRID_TYPE_CORE);
> +FORMAT_ATTR_HYBRID(offcore_rsp, INTEL_HYBRID_TYPE_CORE | INTEL_HYBRID_TYPE_ATOM);
> +FORMAT_ATTR_HYBRID(ldlat, INTEL_HYBRID_TYPE_CORE | INTEL_HYBRID_TYPE_ATOM);
> +FORMAT_ATTR_HYBRID(frontend, INTEL_HYBRID_TYPE_CORE);
This really could do with something like:
enum {
	BIGGIE = 0x40,
	smalls = 0x20,
	B_I_G  = BIGGIE | smalls,
};
s/INTEL_HYBRID_TYPE_ATOM | INTEL_HYBRID_TYPE_CORE/B_I_G/g
s/INTEL_HYBRID_TYPE_CORE/BIGGIE/g
s/INTEL_HYBRID_TYPE_ATOM/smalls/g
Also, many of those ATTR thingies could do with some horizontal alignment;
they're unreadable gibberish.
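Just to illustrate (not part of the patch; event strings copied from above,
names from the enum suggested here), the first few would then read something
like:

/* illustrative only: same attributes as in the patch, renamed and aligned */
EVENT_ATTR_STR_HYBRID(slots,             slots_hybrid,        "event=0x00,umask=0x4",                        BIGGIE);
EVENT_ATTR_STR_HYBRID(topdown-retiring,  td_retiring_hybrid,  "event=0xc2,umask=0x0;event=0x00,umask=0x80",  B_I_G);
EVENT_ATTR_STR_HYBRID(topdown-bad-spec,  td_bad_spec_hybrid,  "event=0x73,umask=0x0;event=0x00,umask=0x81",  B_I_G);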