Message-ID: <20180528113319.GD3452@worktop.programming.kicks-ass.net>
Date:   Mon, 28 May 2018 13:33:19 +0200
From:   Peter Zijlstra <peterz@...radead.org>
To:     Song Liu <songliubraving@...com>
Cc:     linux-kernel@...r.kernel.org, kernel-team@...com, tj@...nel.org,
        jolsa@...nel.org
Subject: Re: [RFC 2/2] perf: Sharing PMU counters across compatible events

On Fri, May 04, 2018 at 04:11:02PM -0700, Song Liu wrote:
> +static void add_event_to_dup_event_list(struct perf_event *event,
> +					struct perf_cpu_context *cpuctx)
> +{
> +	int i;
> +
> +	for (i = 0; i < cpuctx->dup_event_count; ++i)
> +		if (memcmp(&event->attr,
> +			   &cpuctx->dup_event_list[i].first->attr,
> +			   sizeof(event->attr)) == 0) {
> +			event->dup_id = i;
> +			return;
> +		}
> +	i = cpuctx->dup_event_count++;
> +	cpuctx->dup_event_list[i].first = event;
> +	cpuctx->dup_event_list[i].master = NULL;
> +	INIT_LIST_HEAD(&cpuctx->dup_event_list[i].active_dup);
> +	event->dup_id = i;
> +	INIT_LIST_HEAD(&event->dup_sibling_entry);
> +}
> +
> +static int add_group_to_dup_event_list(struct perf_event *event, void *data)
> +{
> +	struct sched_in_data *sid = data;
> +	struct perf_event *sibling;
> +
> +	add_event_to_dup_event_list(event, sid->cpuctx);
> +	for_each_sibling_event(sibling, event)
> +		add_event_to_dup_event_list(sibling, sid->cpuctx);
> +
> +	return 0;
> +}
> +
> +static void rebuild_event_dup_list(struct perf_cpu_context *cpuctx)
> +{
> +	int dup_count = cpuctx->ctx.nr_events;
> +	struct perf_event_context *ctx = cpuctx->task_ctx;
> +	struct sched_in_data sid = {
> +		.ctx = ctx,
> +		.cpuctx = cpuctx,
> +		.can_add_hw = 1,
> +	};
> +
> +	if (ctx)
> +		dup_count += ctx->nr_events;
> +
> +	kfree(cpuctx->dup_event_list);
> +	cpuctx->dup_event_count = 0;
> +
> +	cpuctx->dup_event_list =
> +		kzalloc(sizeof(struct perf_event_dup) * dup_count, GFP_ATOMIC);
> +	if (!cpuctx->dup_event_list)
> +		return;
> +
> +	visit_groups_merge(&cpuctx->ctx.pinned_groups, smp_processor_id(),
> +			   add_group_to_dup_event_list, &sid);
> +	visit_groups_merge(&cpuctx->ctx.flexible_groups, smp_processor_id(),
> +			   add_group_to_dup_event_list, &sid);
> +	if (ctx) {
> +		visit_groups_merge(&ctx->pinned_groups, smp_processor_id(),
> +				   add_group_to_dup_event_list, &sid);
> +		visit_groups_merge(&ctx->flexible_groups, smp_processor_id(),
> +				   add_group_to_dup_event_list, &sid);
> +	}
> +}

Oooh, wait a second, this isn't O(n), this looks like O(n^2).

We do that linear search in add_event_to_dup_event_list() for every single
event we add, so rebuild_event_dup_list() ends up quadratic in the number
of events... that's not good.
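For illustration, a minimal sketch of one way to avoid the per-event scan:
key the lookup on a hash of the attr bytes so each event is only compared
against entries in its own bucket, making the rebuild roughly O(n). Note
this is not from the patch; the hash_node field, DUP_HASH_BITS, dup_hash
table, find_dup() and insert_dup() below are all hypothetical, only jhash()
and the hashtable helpers are the kernel's existing generic ones.

	#include <linux/jhash.h>
	#include <linux/hashtable.h>

	#define DUP_HASH_BITS	6	/* 64 buckets; tunable */

	/* Hypothetical layout -- the patch's struct has no hash_node. */
	struct perf_event_dup {
		struct perf_event	*first;
		struct hlist_node	hash_node;
	};

	static DEFINE_HASHTABLE(dup_hash, DUP_HASH_BITS);

	static struct perf_event_dup *find_dup(struct perf_event_attr *attr)
	{
		struct perf_event_dup *dup;
		u32 key = jhash(attr, sizeof(*attr), 0);

		/*
		 * Only walk entries whose attr hashed to the same bucket,
		 * instead of memcmp()ing against every entry added so far.
		 */
		hash_for_each_possible(dup_hash, dup, hash_node, key)
			if (!memcmp(&dup->first->attr, attr, sizeof(*attr)))
				return dup;
		return NULL;
	}

	static void insert_dup(struct perf_event_dup *dup)
	{
		hash_add(dup_hash, &dup->hash_node,
			 jhash(&dup->first->attr, sizeof(dup->first->attr), 0));
	}

With something along those lines, adding an event would be one hash lookup
plus (at most) one insert, instead of a scan over all dup_event_count
entries.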
