Message-ID: <20190429151225.GC2182@lakrids.cambridge.arm.com>
Date: Mon, 29 Apr 2019 16:12:26 +0100
From: Mark Rutland <mark.rutland@....com>
To: kan.liang@...ux.intel.com
Cc: peterz@...radead.org, tglx@...utronix.de, mingo@...hat.com,
linux-kernel@...r.kernel.org, eranian@...gle.com, tj@...nel.org,
ak@...ux.intel.com
Subject: Re: [PATCH 2/4] perf: Add filter_match() as a parameter for
pinned/flexible_sched_in()
On Mon, Apr 29, 2019 at 07:44:03AM -0700, kan.liang@...ux.intel.com wrote:
> From: Kan Liang <kan.liang@...ux.intel.com>
>
> A fast path will be introduced in the following patches to speed up
> cgroup event sched-in, which only needs a simpler filter_match().
>
> Add filter_match() as a parameter for pinned/flexible_sched_in().
>
> No functional change.
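
For reference, the filter being parameterized here is
event_filter_match(). IIRC it currently reads roughly as below (a
sketch from memory, not a verbatim quote of kernel/events/core.c):

  static inline int
  event_filter_match(struct perf_event *event)
  {
          /* CPU-bound events only match on their target CPU */
          return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
                 perf_cgroup_match(event) && pmu_filter_match(event);
  }

i.e. a cpu check, a cgroup check, and a per-PMU check.
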
I suspect that the cost you're trying to avoid is pmu_filter_match()
iterating over the entire group, which arm relies upon for correct
behaviour on big.LITTLE systems.
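
IIUC, pmu_filter_match() has to walk every sibling in the group, since
a group is only schedulable if all of its members pass the per-PMU
filter. Roughly (again from memory, so a sketch rather than the exact
source):

  static inline int __pmu_filter_match(struct perf_event *event)
  {
          struct pmu *pmu = event->pmu;

          /* PMUs without a filter callback always match */
          return pmu->filter_match ? pmu->filter_match(event) : 1;
  }

  static inline int pmu_filter_match(struct perf_event *event)
  {
          struct perf_event *sibling;

          if (!__pmu_filter_match(event))
                  return 0;

          /* Every sibling must match for the group to go on */
          for_each_sibling_event(sibling, event) {
                  if (!__pmu_filter_match(sibling))
                          return 0;
          }

          return 1;
  }

On big.LITTLE, that per-PMU callback is what prevents (say) a big-core
event from being scheduled on a little core, so a fast path which
bypasses it would break those systems.
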
Is that the case?
Thanks,
Mark.
>
> Signed-off-by: Kan Liang <kan.liang@...ux.intel.com>
> ---
> kernel/events/core.c | 15 +++++++++------
> 1 file changed, 9 insertions(+), 6 deletions(-)
>
> diff --git a/kernel/events/core.c b/kernel/events/core.c
> index 388dd42..782fd86 100644
> --- a/kernel/events/core.c
> +++ b/kernel/events/core.c
> @@ -3251,7 +3251,8 @@ static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
> }
>
> static int visit_groups_merge(struct perf_event_groups *groups, int cpu,
> - int (*func)(struct perf_event *, void *), void *data)
> + int (*func)(struct perf_event *, void *, int (*)(struct perf_event *)),
> + void *data)
> {
> struct perf_event **evt, *evt1, *evt2;
> int ret;
> @@ -3271,7 +3272,7 @@ static int visit_groups_merge(struct perf_event_groups *groups, int cpu,
> evt = &evt2;
> }
>
> - ret = func(*evt, data);
> + ret = func(*evt, data, event_filter_match);
> if (ret)
> return ret;
>
> @@ -3287,7 +3288,8 @@ struct sched_in_data {
> int can_add_hw;
> };
>
> -static int pinned_sched_in(struct perf_event *event, void *data)
> +static int pinned_sched_in(struct perf_event *event, void *data,
> + int (*filter_match)(struct perf_event *))
> {
> struct sched_in_data *sid = data;
>
> @@ -3300,7 +3302,7 @@ static int pinned_sched_in(struct perf_event *event, void *data)
> return 0;
> #endif
>
> - if (!event_filter_match(event))
> + if (!filter_match(event))
> return 0;
>
> if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
> @@ -3318,7 +3320,8 @@ static int pinned_sched_in(struct perf_event *event, void *data)
> return 0;
> }
>
> -static int flexible_sched_in(struct perf_event *event, void *data)
> +static int flexible_sched_in(struct perf_event *event, void *data,
> + int (*filter_match)(struct perf_event *))
> {
> struct sched_in_data *sid = data;
>
> @@ -3331,7 +3334,7 @@ static int flexible_sched_in(struct perf_event *event, void *data)
> return 0;
> #endif
>
> - if (!event_filter_match(event))
> + if (!filter_match(event))
> return 0;
>
> if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
> --
> 2.7.4
>