Date:   Tue, 30 Apr 2019 10:56:40 +0200
From:   Peter Zijlstra <peterz@...radead.org>
To:     kan.liang@...ux.intel.com
Cc:     tglx@...utronix.de, mingo@...hat.com, linux-kernel@...r.kernel.org,
        eranian@...gle.com, tj@...nel.org, ak@...ux.intel.com
Subject: Re: [PATCH 1/4] perf: Fix system-wide events miscounting during
 cgroup monitoring

On Mon, Apr 29, 2019 at 07:44:02AM -0700, kan.liang@...ux.intel.com wrote:

> diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
> index e47ef76..039e2f2 100644
> --- a/include/linux/perf_event.h
> +++ b/include/linux/perf_event.h
> @@ -795,6 +795,7 @@ struct perf_cpu_context {
>  #ifdef CONFIG_CGROUP_PERF
>  	struct perf_cgroup		*cgrp;
>  	struct list_head		cgrp_cpuctx_entry;
> +	unsigned int			cgrp_switch		:1;

If you're not adding more bits, why not just keep it an int?
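
(Illustrative only, nothing below is from the thread: with a single flag the
bitfield saves no space, since the member still occupies a full 'unsigned int'
allocation unit, so the suggestion amounts to the second declaration.)

	/* as posted in the patch */
	unsigned int			cgrp_switch		:1;

	/* what the review suggests: drop the bitfield, keep a plain int */
	int				cgrp_switch;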

>  #endif
>  
>  	struct list_head		sched_cb_entry;
> diff --git a/kernel/events/core.c b/kernel/events/core.c
> index dc7dead..388dd42 100644
> --- a/kernel/events/core.c
> +++ b/kernel/events/core.c
> @@ -809,6 +809,7 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
>  
>  		perf_ctx_lock(cpuctx, cpuctx->task_ctx);
>  		perf_pmu_disable(cpuctx->ctx.pmu);
> +		cpuctx->cgrp_switch = true;
>  
>  		if (mode & PERF_CGROUP_SWOUT) {
>  			cpu_ctx_sched_out(cpuctx, EVENT_ALL);
> @@ -832,6 +833,7 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
>  							     &cpuctx->ctx);
>  			cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
>  		}
> +		cpuctx->cgrp_switch = false;
>  		perf_pmu_enable(cpuctx->ctx.pmu);
>  		perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
>  	}

That is a bit of a hack...

> @@ -2944,13 +2946,25 @@ static void ctx_sched_out(struct perf_event_context *ctx,
>  
>  	perf_pmu_disable(ctx->pmu);
>  	if (is_active & EVENT_PINNED) {
> -		list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list)
> +		list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list) {
> +#ifdef CONFIG_CGROUP_PERF
> +			/* Don't sched system-wide event when cgroup context switch */
> +			if (cpuctx->cgrp_switch && !event->cgrp)
> +				continue;
> +#endif
>  			group_sched_out(event, cpuctx, ctx);
> +		}
>  	}

This works by accident, however..

>  
>  	if (is_active & EVENT_FLEXIBLE) {
> -		list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list)
> +		list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list) {
> +#ifdef CONFIG_CGROUP_PERF
> +			/* Don't sched system-wide event when cgroup context switch */
> +			if (cpuctx->cgrp_switch && !event->cgrp)
> +				continue;
> +#endif
>  			group_sched_out(event, cpuctx, ctx);
> +		}
>  	}
>  	perf_pmu_enable(ctx->pmu);
>  }

this one is just wrong afaict.

Suppose the new cgroup has pinned events, which we then cannot schedule
because you left the !cgroup flexible events scheduled in.
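
(A toy illustration of that scenario, entirely hypothetical and not kernel
code: if all counters are still occupied by the !cgroup flexible events that
were skipped on sched-out, the incoming cgroup's pinned event has nowhere to
go, and a pinned event that fails to schedule is an error, it cannot simply
wait its turn.)

	#include <stdio.h>

	#define NR_COUNTERS 2

	int main(void)
	{
		/* counters left busy because sched-out skipped the
		 * !cgroup flexible events during the cgroup switch */
		int used = NR_COUNTERS;

		/* the incoming cgroup's pinned event now tries to schedule */
		if (used >= NR_COUNTERS)
			printf("pinned cgroup event: no free counter (%d/%d busy)\n",
			       used, NR_COUNTERS);
		else
			printf("pinned cgroup event scheduled\n");

		return 0;
	}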
