Message-Id: <20190724223746.153620-2-irogers@google.com>
Date:   Wed, 24 Jul 2019 15:37:40 -0700
From:   Ian Rogers <irogers@...gle.com>
To:     Peter Zijlstra <peterz@...radead.org>,
        Ingo Molnar <mingo@...hat.com>,
        Arnaldo Carvalho de Melo <acme@...nel.org>,
        Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
        Jiri Olsa <jolsa@...hat.com>,
        Namhyung Kim <namhyung@...nel.org>,
        linux-kernel@...r.kernel.org
Cc:     Kan Liang <kan.liang@...ux.intel.com>,
        Stephane Eranian <eranian@...gle.com>,
        Ian Rogers <irogers@...gle.com>
Subject: [PATCH v2 1/7] perf: propagate perf_install_in_context errors up

Currently, __perf_install_in_context() can fail and the error is
silently ignored. Upcoming changes to __perf_install_in_context() may
add new failure modes whose errors need to be propagated up to callers.
Prepare for this by making perf_install_in_context() return an error
code and warning at the existing call sites when installation fails.
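
For illustration, a minimal sketch (hypothetical helper, not part of
this series) of the calling pattern the new return value enables: a
caller can now propagate an installation failure, e.g. a non-zero
return from cpu_function_call() for a CPU-bound event, instead of
losing it:

	/*
	 * Hypothetical caller, assuming only the perf_install_in_context()
	 * signature introduced by this patch.
	 */
	static int install_event_checked(struct perf_event_context *ctx,
					 struct perf_event *event, int cpu)
	{
		int err;

		err = perf_install_in_context(ctx, event, cpu);
		if (err)
			return err;	/* propagate rather than ignore */

		get_ctx(ctx);
		return 0;
	}

In this patch the existing call sites keep their current behaviour and
only WARN_ON_ONCE() on failure; later patches in the series can turn
these warnings into real error handling.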

Signed-off-by: Ian Rogers <irogers@...gle.com>
---
 kernel/events/core.c | 39 ++++++++++++++++++++++++++-------------
 1 file changed, 26 insertions(+), 13 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index eea9d52b010c..84a22a5c88b0 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2561,11 +2561,12 @@ static bool exclusive_event_installable(struct perf_event *event,
  *
  * Very similar to event_function_call, see comment there.
  */
-static void
+static int
 perf_install_in_context(struct perf_event_context *ctx,
 			struct perf_event *event,
 			int cpu)
 {
+	int err;
 	struct task_struct *task = READ_ONCE(ctx->task);
 
 	lockdep_assert_held(&ctx->mutex);
@@ -2582,15 +2583,15 @@ perf_install_in_context(struct perf_event_context *ctx,
 	smp_store_release(&event->ctx, ctx);
 
 	if (!task) {
-		cpu_function_call(cpu, __perf_install_in_context, event);
-		return;
+		err = cpu_function_call(cpu, __perf_install_in_context, event);
+		return err;
 	}
 
 	/*
 	 * Should not happen, we validate the ctx is still alive before calling.
 	 */
 	if (WARN_ON_ONCE(task == TASK_TOMBSTONE))
-		return;
+		return 0;
 
 	/*
 	 * Installing events is tricky because we cannot rely on ctx->is_active
@@ -2624,9 +2625,11 @@ perf_install_in_context(struct perf_event_context *ctx,
 	 */
 	smp_mb();
 again:
-	if (!task_function_call(task, __perf_install_in_context, event))
-		return;
+	err = task_function_call(task, __perf_install_in_context, event);
+	if (!err)
+		return 0;
 
+	WARN_ON_ONCE(err != -ESRCH);
 	raw_spin_lock_irq(&ctx->lock);
 	task = ctx->task;
 	if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) {
@@ -2636,7 +2639,7 @@ perf_install_in_context(struct perf_event_context *ctx,
 		 * against perf_event_exit_task_context().
 		 */
 		raw_spin_unlock_irq(&ctx->lock);
-		return;
+		return 0;
 	}
 	/*
 	 * If the task is not running, ctx->lock will avoid it becoming so,
@@ -2648,6 +2651,7 @@ perf_install_in_context(struct perf_event_context *ctx,
 	}
 	add_event_to_ctx(event, ctx);
 	raw_spin_unlock_irq(&ctx->lock);
+	return 0;
 }
 
 /*
@@ -11130,7 +11134,9 @@ SYSCALL_DEFINE5(perf_event_open,
 		 */
 		for_each_sibling_event(sibling, group_leader) {
 			perf_event__state_init(sibling);
-			perf_install_in_context(ctx, sibling, sibling->cpu);
+			err = perf_install_in_context(ctx, sibling,
+						      sibling->cpu);
+			WARN_ON_ONCE(err);
 			get_ctx(ctx);
 		}
 
@@ -11140,7 +11146,9 @@ SYSCALL_DEFINE5(perf_event_open,
 		 * startup state, ready to be add into new context.
 		 */
 		perf_event__state_init(group_leader);
-		perf_install_in_context(ctx, group_leader, group_leader->cpu);
+		err = perf_install_in_context(ctx, group_leader,
+					      group_leader->cpu);
+		WARN_ON_ONCE(err);
 		get_ctx(ctx);
 	}
 
@@ -11155,7 +11163,8 @@ SYSCALL_DEFINE5(perf_event_open,
 
 	event->owner = current;
 
-	perf_install_in_context(ctx, event, event->cpu);
+	err = perf_install_in_context(ctx, event, event->cpu);
+	WARN_ON_ONCE(err);
 	perf_unpin_context(ctx);
 
 	if (move_group)
@@ -11274,7 +11283,8 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 		goto err_unlock;
 	}
 
-	perf_install_in_context(ctx, event, cpu);
+	err = perf_install_in_context(ctx, event, cpu);
+	WARN_ON_ONCE(err);
 	perf_unpin_context(ctx);
 	mutex_unlock(&ctx->mutex);
 
@@ -11297,6 +11307,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
 	struct perf_event_context *dst_ctx;
 	struct perf_event *event, *tmp;
 	LIST_HEAD(events);
+	int err;
 
 	src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
 	dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
@@ -11335,7 +11346,8 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
 		if (event->state >= PERF_EVENT_STATE_OFF)
 			event->state = PERF_EVENT_STATE_INACTIVE;
 		account_event_cpu(event, dst_cpu);
-		perf_install_in_context(dst_ctx, event, dst_cpu);
+		err = perf_install_in_context(dst_ctx, event, dst_cpu);
+		WARN_ON_ONCE(err);
 		get_ctx(dst_ctx);
 	}
 
@@ -11348,7 +11360,8 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
 		if (event->state >= PERF_EVENT_STATE_OFF)
 			event->state = PERF_EVENT_STATE_INACTIVE;
 		account_event_cpu(event, dst_cpu);
-		perf_install_in_context(dst_ctx, event, dst_cpu);
+		err = perf_install_in_context(dst_ctx, event, dst_cpu);
+		WARN_ON_ONCE(err);
 		get_ctx(dst_ctx);
 	}
 	mutex_unlock(&dst_ctx->mutex);
-- 
2.22.0.709.g102302147b-goog
