Message-ID: <CAM9d7ciukm4RAH+44YWhZRummKzk1HTbnZ0Sc4Xd5ZyCo=x0xQ@mail.gmail.com>
Date: Fri, 4 Dec 2020 16:14:33 +0900
From: Namhyung Kim <namhyung@...nel.org>
To: Peter Zijlstra <peterz@...radead.org>
Cc: Kan Liang <kan.liang@...ux.intel.com>,
Ingo Molnar <mingo@...nel.org>,
linux-kernel <linux-kernel@...r.kernel.org>,
Stephane Eranian <eranian@...gle.com>,
Ian Rogers <irogers@...gle.com>,
Gabriel Marin <gmx@...gle.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Jiri Olsa <jolsa@...hat.com>, Andi Kleen <ak@...ux.intel.com>,
Benjamin Herrenschmidt <benh@...nel.crashing.org>,
Paul Mackerras <paulus@...ba.org>,
Michael Ellerman <mpe@...erman.id.au>
Subject: Re: [PATCH V2 3/3] perf: Optimize sched_task() in a context switch
Hi Peter,
On Wed, Dec 2, 2020 at 2:29 AM Peter Zijlstra <peterz@...radead.org> wrote:
>
> On Mon, Nov 30, 2020 at 11:38:42AM -0800, kan.liang@...ux.intel.com wrote:
> > From: Kan Liang <kan.liang@...ux.intel.com>
> >
> > Some calls to sched_task() in a context switch can be avoided. For
> > example, large PEBS only requires flushing the buffer in context switch
> > out. The current code still invokes the sched_task() for large PEBS in
> > context switch in.
>
> I still hate this one, how's something like this then?
> Which I still don't really like.. but at least it's simpler.
>
> (completely untested, may contain spurious edits, might ICE the
> compiler and set your pets on fire if it doesn't)
I've tested this version... and it worked well, except that it lost the optimization.. :)
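To recap what the optimization in the changelog above buys: a PMU feature
like large PEBS only needs work in one switch direction (a buffer flush on
switch-out), so the callback can be skipped entirely in the other direction.
A minimal standalone sketch of that idea in plain C (hypothetical names,
not the kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	struct toy_pmu {
		bool need_cb_out;	/* e.g. large PEBS: flush buffer on sched-out */
		bool need_cb_in;	/* e.g. LBR: restore call stack on sched-in */
	};

	static void toy_sched_task(const struct toy_pmu *pmu, bool sched_in)
	{
		/* Only do work for the direction that actually needs it. */
		if (sched_in ? pmu->need_cb_in : pmu->need_cb_out)
			printf("sched_task(%s): doing PMU work\n",
			       sched_in ? "in" : "out");
	}

	int main(void)
	{
		struct toy_pmu pebs = { .need_cb_out = true, .need_cb_in = false };

		toy_sched_task(&pebs, false);	/* switch out: flush runs */
		toy_sched_task(&pebs, true);	/* switch in: skipped */
		return 0;
	}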
[SNIP]
> +static void context_sched_task(struct perf_cpu_context *cpuctx,
> +			       struct perf_event_context *ctx,
> +			       bool sched_in)
> +{
> +	struct pmu *pmu = ctx->pmu;
> +
> +	if (cpuctx->sched_cb_dir[sched_in] && pmu->sched_task)
> +		pmu->sched_task(ctx, false);
applied: s/false/sched_in/
> +}
> +
> static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
> 					 struct task_struct *next)
> {
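A side note on the sched_cb_dir[sched_in] test above: in C a bool index
converts to 0 or 1, so a two-element array gives one counter per switch
direction. A reduced sketch of the pattern (only the field name
sched_cb_dir is taken from the patch; everything else is made up):

	#include <stdbool.h>

	struct toy_cpuctx {
		int sched_cb_dir[2];	/* [0]=switch-out users, [1]=switch-in users */
	};

	/* An event registers interest in one switch direction. */
	static void toy_sched_cb_inc(struct toy_cpuctx *cpuctx, bool sched_in)
	{
		cpuctx->sched_cb_dir[sched_in]++;
	}

	/* The context-switch path asks: does anyone care about this direction? */
	static bool toy_need_cb(const struct toy_cpuctx *cpuctx, bool sched_in)
	{
		return cpuctx->sched_cb_dir[sched_in] != 0;
	}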
> @@ -3424,9 +3433,7 @@ static void perf_event_context_sched_out
> 			WRITE_ONCE(next_ctx->task, task);
>
> 			perf_pmu_disable(pmu);
> -
> -			if (cpuctx->sched_cb_usage && pmu->sched_task)
> -				pmu->sched_task(ctx, false);
> +			context_sched_task(cpuctx, ctx, false);
>
> 			/*
> 			 * PMU specific parts of task perf context can require
> @@ -3465,8 +3472,7 @@ static void perf_event_context_sched_out
> 		raw_spin_lock(&ctx->lock);
> 		perf_pmu_disable(pmu);
>
> -		if (cpuctx->sched_cb_usage && pmu->sched_task)
> -			pmu->sched_task(ctx, false);
> +		context_sched_task(cpuctx, ctx, false);
> 		task_ctx_sched_out(cpuctx, ctx, EVENT_ALL);
>
> 		perf_pmu_enable(pmu);
[SNIP]
> @@ -3563,8 +3582,7 @@ void __perf_event_task_sched_out(struct
> {
> 	int ctxn;
>
> -	if (__this_cpu_read(perf_sched_cb_usage))
> -		perf_pmu_sched_task(task, next, false);
> +	perf_pmu_sched_task(task, next, false);
I think this change is the reason: it now calls perf_pmu_sched_task()
unconditionally, without checking the counter. And that function handles
the per-cpu events.
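For comparison, the guard that was dropped worked like the sketch below
(simplified to plain C: a global stands in for the per-CPU
perf_sched_cb_usage counter, and the callback walk is elided). The point
of the check is that a task switch with no registered per-cpu callbacks
costs one read and one branch instead of a call into the PMU bookkeeping:

	#include <stdbool.h>

	/* Simplified sketch, not kernel code: sched_cb_usage stands in
	 * for the per-CPU perf_sched_cb_usage counter. */
	static int sched_cb_usage;

	static void perf_pmu_sched_task_slow(bool sched_in)
	{
		(void)sched_in;
		/* ... walk the registered callbacks, disable/enable the PMU ... */
	}

	static void task_sched_out(void)
	{
		if (sched_cb_usage)	/* the dropped guard: cheap early-out */
			perf_pmu_sched_task_slow(false);
	}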
>
> 	if (atomic_read(&nr_switch_events))
> 		perf_event_switch(task, next, false);
> @@ -3828,8 +3846,7 @@ static void perf_event_context_sched_in(
> 		cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
> 	perf_event_sched_in(cpuctx, ctx, task);
>
> -	if (cpuctx->sched_cb_usage && pmu->sched_task)
> -		pmu->sched_task(cpuctx->task_ctx, true);
> +	context_sched_task(cpuctx, ctx, true);
>
> 	perf_pmu_enable(pmu);
>
> @@ -3875,8 +3892,7 @@ void __perf_event_task_sched_in(struct t
> 	if (atomic_read(&nr_switch_events))
> 		perf_event_switch(task, prev, true);
>
> -	if (__this_cpu_read(perf_sched_cb_usage))
> -		perf_pmu_sched_task(prev, task, true);
> +	perf_pmu_sched_task(prev, task, true);
Ditto.
> }
>
> static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
So I made a change like the one below, and it brings the optimization back.
Thanks,
Namhyung
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 9107e7c3ccfb..a30243a9fab5 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3528,6 +3528,9 @@ static void __perf_pmu_sched_task(struct perf_cpu_context *cpuctx, bool sched_in)
 {
 	struct pmu *pmu;
 
+	if (!cpuctx->sched_cb_dir[sched_in])
+		return;
+
 	pmu = cpuctx->ctx.pmu; /* software PMUs will not have sched_task */
 	if (WARN_ON_ONCE(!pmu->sched_task))
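The hunk is cut off above; filled out, the guarded function would look
roughly like this sketch (illustration only, not the exact kernel code:
only the sched_cb_dir field is taken from the patch, and the elided body
stands for disabling the PMU, calling pmu->sched_task(), and re-enabling
it):

	#include <stdbool.h>

	struct cpuctx_sketch {
		int sched_cb_dir[2];	/* per-direction callback users */
	};

	static void pmu_sched_task_sketch(struct cpuctx_sketch *cpuctx,
					  bool sched_in)
	{
		/* The early return restores the optimization: a switch
		 * direction nobody registered for skips the PMU work. */
		if (!cpuctx->sched_cb_dir[sched_in])
			return;

		/* ... perf_pmu_disable(), pmu->sched_task(ctx, sched_in),
		 *     perf_pmu_enable() ... */
	}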