Date:   Wed, 24 May 2017 11:30:18 -0700
From:   "Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>
To:     Thomas Gleixner <tglx@...utronix.de>
Cc:     LKML <linux-kernel@...r.kernel.org>,
        Peter Zijlstra <peterz@...radead.org>,
        Ingo Molnar <mingo@...nel.org>,
        Steven Rostedt <rostedt@...dmis.org>,
        Sebastian Siewior <bigeasy@...utronix.de>,
        Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
        Masami Hiramatsu <mhiramat@...nel.org>
Subject: Re: [patch V3 23/32] perf/tracing/cpuhotplug: Fix locking order

On Wed, May 24, 2017 at 10:15:34AM +0200, Thomas Gleixner wrote:
> perf, tracing, kprobes and jump_labels have a gazillion ways to create
> dependency lock chains. Some of those involve nested invocations of
> get_online_cpus().
> 
> The conversion of the hotplug locking to a percpu rwsem requires avoiding
> such nested calls. sys_perf_event_open() protects most of the syscall logic
> against cpu hotplug. This causes nested calls and lock inversions versus
> ftrace and kprobes in various interesting ways.
> 
> It's impossible to move the hotplug locking to the outer end of all call
> chains in the involved facilities, so the hotplug protection in
> sys_perf_event_open() needs to be solved differently.
> 
> Introduce 'pmus_mutex' which protects a perf private online cpumask. This
> mutex is taken when the mask is updated in the cpu hotplug callbacks and
> can be taken in sys_perf_event_open() to protect the swhash setup/teardown
> code and when the final judgement about a valid event has to be made.
> 
> [ tglx: Produced changelog and fixed the swhash interaction ]
> 
> Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
> Cc: Steven Rostedt <rostedt@...dmis.org>
> Cc: Mathieu Desnoyers <mathieu.desnoyers@...icios.com>
> Cc: Ingo Molnar <mingo@...nel.org>
> Cc: Thomas Gleixner <tglx@...utronix.de>
> Cc: Masami Hiramatsu <mhiramat@...nel.org>
> Cc: "Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>
> Signed-off-by: Thomas Gleixner <tglx@...utronix.de>

One question below about use of cpus_read_lock().

							Thanx, Paul

> ---
>  include/linux/perf_event.h |    2 
>  kernel/events/core.c       |  106 ++++++++++++++++++++++++++++++++-------------
>  2 files changed, 78 insertions(+), 30 deletions(-)
> 
> --- a/include/linux/perf_event.h
> +++ b/include/linux/perf_event.h
> @@ -801,6 +801,8 @@ struct perf_cpu_context {
> 
>  	struct list_head		sched_cb_entry;
>  	int				sched_cb_usage;
> +
> +	int				online;
>  };
> 
>  struct perf_output_handle {
> --- a/kernel/events/core.c
> +++ b/kernel/events/core.c
> @@ -389,6 +389,7 @@ static atomic_t nr_switch_events __read_
>  static LIST_HEAD(pmus);
>  static DEFINE_MUTEX(pmus_lock);
>  static struct srcu_struct pmus_srcu;
> +static cpumask_var_t perf_online_mask;
> 
>  /*
>   * perf event paranoia level:
> @@ -3812,14 +3813,6 @@ find_get_context(struct pmu *pmu, struct
>  		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
>  			return ERR_PTR(-EACCES);
> 
> -		/*
> -		 * We could be clever and allow to attach a event to an
> -		 * offline CPU and activate it when the CPU comes up, but
> -		 * that's for later.
> -		 */
> -		if (!cpu_online(cpu))
> -			return ERR_PTR(-ENODEV);
> -
>  		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
>  		ctx = &cpuctx->ctx;
>  		get_ctx(ctx);
> @@ -7703,7 +7696,8 @@ static int swevent_hlist_get_cpu(int cpu
>  	int err = 0;
> 
>  	mutex_lock(&swhash->hlist_mutex);
> -	if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
> +	if (!swevent_hlist_deref(swhash) &&
> +	    cpumask_test_cpu(cpu, perf_online_mask)) {
>  		struct swevent_hlist *hlist;
> 
>  		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
> @@ -7724,7 +7718,7 @@ static int swevent_hlist_get(void)
>  {
>  	int err, cpu, failed_cpu;
> 
> -	get_online_cpus();
> +	mutex_lock(&pmus_lock);
>  	for_each_possible_cpu(cpu) {
>  		err = swevent_hlist_get_cpu(cpu);
>  		if (err) {
> @@ -7732,8 +7726,7 @@ static int swevent_hlist_get(void)
>  			goto fail;
>  		}
>  	}
> -	put_online_cpus();
> -
> +	mutex_unlock(&pmus_lock);
>  	return 0;
>  fail:
>  	for_each_possible_cpu(cpu) {
> @@ -7741,8 +7734,7 @@ static int swevent_hlist_get(void)
>  			break;
>  		swevent_hlist_put_cpu(cpu);
>  	}
> -
> -	put_online_cpus();
> +	mutex_unlock(&pmus_lock);
>  	return err;
>  }
> 
> @@ -8920,7 +8912,7 @@ perf_event_mux_interval_ms_store(struct
>  	pmu->hrtimer_interval_ms = timer;
> 
>  	/* update all cpuctx for this PMU */
> -	get_online_cpus();
> +	cpus_read_lock();

OK, I'll bite...

Why is this piece using cpus_read_lock() instead of pmus_lock?

My guess is for the benefit of the cpu_function_call() below, but if
the code instead cycled through the perf_online_mask, wouldn't any
CPU selected be guaranteed to be online?

Or is there some reason that it would be necessary to specially handle
CPUs that perf does not consider to be active, but that are still at
least partway online?
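
For concreteness, the alternative I have in mind would look something like
the following (untested sketch, assuming the perf_online_mask/pmus_lock
scheme described in the changelog, and that holding pmus_lock is enough to
keep a CPU set in that mask usable for cpu_function_call()):

	/* Illustrative only -- not part of the patch under review. */
	mutex_lock(&pmus_lock);
	for_each_cpu(cpu, perf_online_mask) {
		struct perf_cpu_context *cpuctx;

		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
		/* ... update cpuctx->hrtimer_interval as the current loop does ... */
		cpu_function_call(cpu,
			(remote_function_f)perf_mux_hrtimer_restart, cpuctx);
	}
	mutex_unlock(&pmus_lock);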

>  	for_each_online_cpu(cpu) {
>  		struct perf_cpu_context *cpuctx;
>  		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
> @@ -8929,7 +8921,7 @@ perf_event_mux_interval_ms_store(struct
>  		cpu_function_call(cpu,
>  			(remote_function_f)perf_mux_hrtimer_restart, cpuctx);
>  	}
> -	put_online_cpus();
> +	cpus_read_unlock();
>  	mutex_unlock(&mux_interval_mutex);
> 
>  	return count;
> @@ -9059,6 +9051,7 @@ int perf_pmu_register(struct pmu *pmu, c
>  		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
>  		lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
>  		cpuctx->ctx.pmu = pmu;
> +		cpuctx->online = cpumask_test_cpu(cpu, perf_online_mask);
> 
>  		__perf_mux_hrtimer_init(cpuctx, cpu);
>  	}
> @@ -9882,12 +9875,10 @@ SYSCALL_DEFINE5(perf_event_open,
>  		goto err_task;
>  	}
> 
> -	get_online_cpus();
> -
>  	if (task) {
>  		err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
>  		if (err)
> -			goto err_cpus;
> +			goto err_cred;
> 
>  		/*
>  		 * Reuse ptrace permission checks for now.
> @@ -10073,6 +10064,23 @@ SYSCALL_DEFINE5(perf_event_open,
>  		goto err_locked;
>  	}
> 
> +	if (!task) {
> +		/*
> +		 * Check if the @cpu we're creating an event for is online.
> +		 *
> +		 * We use the perf_cpu_context::ctx::mutex to serialize against
> +		 * the hotplug notifiers. See perf_event_{init,exit}_cpu().
> +		 */
> +		struct perf_cpu_context *cpuctx =
> +			container_of(ctx, struct perf_cpu_context, ctx);
> +
> +		if (!cpuctx->online) {
> +			err = -ENODEV;
> +			goto err_locked;
> +		}
> +	}
> +
> +
>  	/*
>  	 * Must be under the same ctx::mutex as perf_install_in_context(),
>  	 * because we need to serialize with concurrent event creation.
> @@ -10162,8 +10170,6 @@ SYSCALL_DEFINE5(perf_event_open,
>  		put_task_struct(task);
>  	}
> 
> -	put_online_cpus();
> -
>  	mutex_lock(&current->perf_event_mutex);
>  	list_add_tail(&event->owner_entry, &current->perf_event_list);
>  	mutex_unlock(&current->perf_event_mutex);
> @@ -10197,8 +10203,6 @@ SYSCALL_DEFINE5(perf_event_open,
>  err_cred:
>  	if (task)
>  		mutex_unlock(&task->signal->cred_guard_mutex);
> -err_cpus:
> -	put_online_cpus();
>  err_task:
>  	if (task)
>  		put_task_struct(task);
> @@ -10253,6 +10257,21 @@ perf_event_create_kernel_counter(struct
>  		goto err_unlock;
>  	}
> 
> +	if (!task) {
> +		/*
> +		 * Check if the @cpu we're creating an event for is online.
> +		 *
> +		 * We use the perf_cpu_context::ctx::mutex to serialize against
> +		 * the hotplug notifiers. See perf_event_{init,exit}_cpu().
> +		 */
> +		struct perf_cpu_context *cpuctx =
> +			container_of(ctx, struct perf_cpu_context, ctx);
> +		if (!cpuctx->online) {
> +			err = -ENODEV;
> +			goto err_unlock;
> +		}
> +	}
> +
>  	if (!exclusive_event_installable(event, ctx)) {
>  		err = -EBUSY;
>  		goto err_unlock;
> @@ -10920,6 +10939,8 @@ static void __init perf_event_init_all_c
>  	struct swevent_htable *swhash;
>  	int cpu;
> 
> +	zalloc_cpumask_var(&perf_online_mask, GFP_KERNEL);
> +
>  	for_each_possible_cpu(cpu) {
>  		swhash = &per_cpu(swevent_htable, cpu);
>  		mutex_init(&swhash->hlist_mutex);
> @@ -10935,7 +10956,7 @@ static void __init perf_event_init_all_c
>  	}
>  }
> 
> -int perf_event_init_cpu(unsigned int cpu)
> +void perf_swevent_init_cpu(unsigned int cpu)
>  {
>  	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
> 
> @@ -10948,7 +10969,6 @@ int perf_event_init_cpu(unsigned int cpu
>  		rcu_assign_pointer(swhash->swevent_hlist, hlist);
>  	}
>  	mutex_unlock(&swhash->hlist_mutex);
> -	return 0;
>  }
> 
>  #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
> @@ -10966,19 +10986,22 @@ static void __perf_event_exit_context(vo
> 
>  static void perf_event_exit_cpu_context(int cpu)
>  {
> +	struct perf_cpu_context *cpuctx;
>  	struct perf_event_context *ctx;
>  	struct pmu *pmu;
> -	int idx;
> 
> -	idx = srcu_read_lock(&pmus_srcu);
> -	list_for_each_entry_rcu(pmu, &pmus, entry) {
> -		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
> +	mutex_lock(&pmus_lock);
> +	list_for_each_entry(pmu, &pmus, entry) {
> +		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
> +		ctx = &cpuctx->ctx;
> 
>  		mutex_lock(&ctx->mutex);
>  		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
> +		cpuctx->online = 0;
>  		mutex_unlock(&ctx->mutex);
>  	}
> -	srcu_read_unlock(&pmus_srcu, idx);
> +	cpumask_clear_cpu(cpu, perf_online_mask);
> +	mutex_unlock(&pmus_lock);
>  }
>  #else
> 
> @@ -10986,6 +11009,29 @@ static void perf_event_exit_cpu_context(
> 
>  #endif
> 
> +int perf_event_init_cpu(unsigned int cpu)
> +{
> +	struct perf_cpu_context *cpuctx;
> +	struct perf_event_context *ctx;
> +	struct pmu *pmu;
> +
> +	perf_swevent_init_cpu(cpu);
> +
> +	mutex_lock(&pmus_lock);
> +	cpumask_set_cpu(cpu, perf_online_mask);
> +	list_for_each_entry(pmu, &pmus, entry) {
> +		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
> +		ctx = &cpuctx->ctx;
> +
> +		mutex_lock(&ctx->mutex);
> +		cpuctx->online = 1;
> +		mutex_unlock(&ctx->mutex);
> +	}
> +	mutex_unlock(&pmus_lock);
> +
> +	return 0;
> +}
> +
>  int perf_event_exit_cpu(unsigned int cpu)
>  {
>  	perf_event_exit_cpu_context(cpu);
> 
> 
