lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <YEoFitWfyKOkx61M@hirez.programming.kicks-ass.net>
Date:   Thu, 11 Mar 2021 12:56:58 +0100
From:   Peter Zijlstra <peterz@...radead.org>
To:     kan.liang@...ux.intel.com
Cc:     mingo@...nel.org, linux-kernel@...r.kernel.org, acme@...nel.org,
        tglx@...utronix.de, bp@...en8.de, namhyung@...nel.org,
        jolsa@...hat.com, ak@...ux.intel.com, yao.jin@...ux.intel.com,
        alexander.shishkin@...ux.intel.com, adrian.hunter@...el.com
Subject: Re: [PATCH V2 16/25] perf/x86: Register hybrid PMUs

On Wed, Mar 10, 2021 at 08:37:52AM -0800, kan.liang@...ux.intel.com wrote:
> +static void init_hybrid_pmu(int cpu)
> +{
> +	unsigned int fixed_mask, unused_eax, unused_ebx, unused_edx;
> +	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
> +	u8 cpu_type = get_hybrid_cpu_type(cpu);
> +	struct x86_hybrid_pmu *pmu = NULL;
> +	struct perf_cpu_context *cpuctx;
> +	int i;
> +
> +	for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
> +		if (x86_pmu.hybrid_pmu[i].cpu_type == cpu_type) {
> +			pmu = &x86_pmu.hybrid_pmu[i];
> +			break;
> +		}
> +	}
> +	if (WARN_ON_ONCE(!pmu))
> +		return;
> +
> +	cpuc->pmu = &pmu->pmu;
> +
> +	/* Only register PMU for the first CPU */
> +	if (!cpumask_empty(&pmu->supported_cpus)) {
> +		cpumask_set_cpu(cpu, &pmu->supported_cpus);
> +		goto end;
> +	}
> +
> +	if (!check_hw_exists(&pmu->pmu, pmu->num_counters, pmu->num_counters_fixed))
> +		return;
> +
> +	if ((pmu->pmu.type == -1) &&
> +	    perf_pmu_register(&pmu->pmu, pmu->name, x86_get_hybrid_pmu_type(pmu->cpu_type)))
> +		return;
> +
> +	/*
> +	 * Except for ECX, other fields have been stored in the x86 struct
> +	 * at boot time.
> +	 */
> +	cpuid(10, &unused_eax, &unused_ebx, &fixed_mask, &unused_edx);
> +
> +	intel_pmu_check_num_counters(&pmu->num_counters,
> +				     &pmu->num_counters_fixed,
> +				     &pmu->intel_ctrl,
> +				     (u64)fixed_mask);
> +
> +	pr_info("%s PMU driver: ", pmu->name);
> +
> +	if (pmu->intel_cap.perf_metrics) {
> +		pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
> +		pmu->intel_ctrl |= INTEL_PMC_MSK_FIXED_SLOTS;
> +	}
> +
> +	if (pmu->intel_cap.pebs_output_pt_available) {
> +		pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
> +		pr_cont("PEBS-via-PT ");
> +	}
> +
> +	intel_pmu_check_event_constraints(pmu->event_constraints,
> +					  pmu->num_counters,
> +					  pmu->num_counters_fixed,
> +					  pmu->intel_ctrl);
> +
> +	intel_pmu_check_extra_regs(pmu->extra_regs);
> +
> +	pr_cont("\n");
> +
> +	x86_pmu_show_pmu_cap(pmu->num_counters, pmu->num_counters_fixed,
> +			     pmu->intel_ctrl);
> +
> +	cpumask_set_cpu(cpu, &pmu->supported_cpus);
> +end:
> +	/*
> +	 * The cpuctx of all CPUs are allocated when registering the
> +	 * boot CPU's PMU. At that time, the PMU for other hybrid CPUs
> +	 * is not registered yet. The boot CPU's PMU was
> +	 * unconditionally assigned to each cpuctx->ctx.pmu.
> +	 * Update the cpuctx->ctx.pmu when the PMU for other hybrid
> +	 * CPUs is known.
> +	 */
> +	cpuctx = per_cpu_ptr(pmu->pmu.pmu_cpu_context, cpu);
> +	cpuctx->ctx.pmu = &pmu->pmu;
> +}
> +
>  static void intel_pmu_cpu_starting(int cpu)
>  {
>  	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
>  	int core_id = topology_core_id(cpu);
>  	int i;
>  
> +	if (is_hybrid())
> +		init_hybrid_pmu(cpu);
> +
>  	init_debug_store_on_cpu(cpu);
>  	/*
>  	 * Deal with CPUs that don't clear their LBRs on power-up.

This is buggered. CPU_STARTING is the initial, IRQs-disabled part of
hotplug, but you're calling perf_pmu_register(), which does mutex_lock()
— a mutex is a sleeping lock and must not be taken with IRQs disabled.

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ