Message-ID: <20190621082422.GH3436@hirez.programming.kicks-ass.net>
Date:   Fri, 21 Jun 2019 10:24:22 +0200
From:   Peter Zijlstra <peterz@...radead.org>
To:     Ian Rogers <irogers@...gle.com>
Cc:     Ingo Molnar <mingo@...hat.com>,
        Arnaldo Carvalho de Melo <acme@...nel.org>,
        Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
        Jiri Olsa <jolsa@...hat.com>,
        Namhyung Kim <namhyung@...nel.org>,
        linux-kernel@...r.kernel.org,
        Kan Liang <kan.liang@...ux.intel.com>, ak@...ux.intel.com,
        eranian@...gle.com
Subject: Re: [PATCH] perf cgroups: Don't rotate events for cgroups unnecessarily

On Sat, Jun 01, 2019 at 01:27:22AM -0700, Ian Rogers wrote:
> @@ -3325,6 +3331,15 @@ static int flexible_sched_in(struct perf_event *event, void *data)
>  			sid->can_add_hw = 0;
>  	}
>  
> +	/*
> +	 * If the group wasn't scheduled then set that multiplexing is necessary
> +	 * for the context. Note, this won't be set if the event wasn't
> +	 * scheduled due to event_filter_match failing due to the earlier
> +	 * return.
> +	 */
> +	if (event->state == PERF_EVENT_STATE_INACTIVE)
> +		sid->ctx->rotate_necessary = 1;
> +
>  	return 0;
>  }
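
(For reference, rotate_necessary is what the rotation path keys off.
Going from the hunks of your patch that aren't quoted here, the
consumer side would look roughly like the sketch below -- a sketch
only, so the exact form may differ:)

static bool perf_rotate_context(struct perf_cpu_context *cpuctx)
{
	struct perf_event_context *task_ctx = cpuctx->task_ctx;
	int cpu_rotate, task_rotate;

	/*
	 * Sketch, not the literal patch: bail out early unless some
	 * group in either context failed to schedule.
	 */
	cpu_rotate  = cpuctx->ctx.rotate_necessary;
	task_rotate = task_ctx ? task_ctx->rotate_necessary : 0;

	if (!(cpu_rotate || task_rotate))
		return false;

	/* ... perform the actual rotation as before ... */
	return true;
}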

That hunk looked odd, which had me look harder at this function and
resulted in the below. Should we not terminate the context iteration
the moment one flexible thingy fails to schedule?

--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2314,12 +2314,8 @@ group_sched_in(struct perf_event *group_
 		return 0;
 
 	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
-
-	if (event_sched_in(group_event, cpuctx, ctx)) {
-		pmu->cancel_txn(pmu);
-		perf_mux_hrtimer_restart(cpuctx);
-		return -EAGAIN;
-	}
+	if (event_sched_in(group_event, cpuctx, ctx))
+		goto cancel;
 
 	/*
 	 * Schedule in siblings as one group (if any):
@@ -2348,10 +2344,9 @@ group_sched_in(struct perf_event *group_
 	}
 	event_sched_out(group_event, cpuctx, ctx);
 
+cancel:
 	pmu->cancel_txn(pmu);
-
 	perf_mux_hrtimer_restart(cpuctx);
-
 	return -EAGAIN;
 }
 
@@ -3317,6 +3312,7 @@ static int pinned_sched_in(struct perf_e
 static int flexible_sched_in(struct perf_event *event, void *data)
 {
 	struct sched_in_data *sid = data;
+	int ret;
 
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
@@ -3325,21 +3321,15 @@ static int flexible_sched_in(struct perf
 		return 0;
 
 	if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
-		if (!group_sched_in(event, sid->cpuctx, sid->ctx))
-			list_add_tail(&event->active_list, &sid->ctx->flexible_active);
-		else
+		ret = group_sched_in(event, sid->cpuctx, sid->ctx);
+		if (ret) {
 			sid->can_add_hw = 0;
+			sid->ctx->rotate_necessary = 1;
+			return ret;
+		}
+		list_add_tail(&event->active_list, &sid->ctx->flexible_active);
 	}
 
-	/*
-	 * If the group wasn't scheduled then set that multiplexing is necessary
-	 * for the context. Note, this won't be set if the event wasn't
-	 * scheduled due to event_filter_match failing due to the earlier
-	 * return.
-	 */
-	if (event->state == PERF_EVENT_STATE_INACTIVE)
-		sid->ctx->rotate_necessary = 1;
-
 	return 0;
 }
 

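(The early return out of flexible_sched_in() relies on its caller,
visit_groups_merge(), aborting the walk on the first non-zero return
from the callback. Roughly, from memory, so verify against the actual
tree:)

static int visit_groups_merge(struct perf_event_groups *groups, int cpu,
			      int (*func)(struct perf_event *, void *),
			      void *data)
{
	struct perf_event **evt, *evt1, *evt2;
	int ret;

	evt1 = perf_event_groups_first(groups, -1);
	evt2 = perf_event_groups_first(groups, cpu);

	/*
	 * Merge-walk the cpu == -1 and cpu == smp_processor_id()
	 * subtrees in group_index order.
	 */
	while (evt1 || evt2) {
		if (evt1 && evt2) {
			if (evt1->group_index < evt2->group_index)
				evt = &evt1;
			else
				evt = &evt2;
		} else if (evt1) {
			evt = &evt1;
		} else {
			evt = &evt2;
		}

		ret = func(*evt, data);
		if (ret)
			return ret;	/* stop at the first failure */

		*evt = perf_event_groups_next(*evt);
	}

	return 0;
}

So returning -EAGAIN from the first group that fails to schedule skips
the pointless attempts for everything behind it, instead of walking the
whole tree with can_add_hw == 0.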