Message-ID: <Zp5bpLJHlYsZinGj@localhost.localdomain>
Date: Mon, 22 Jul 2024 15:16:20 +0200
From: Frederic Weisbecker <frederic@...nel.org>
To: linux-kernel@...r.kernel.org
Cc: linux-tip-commits@...r.kernel.org,
	Anna-Maria Behnsen <anna-maria@...utronix.de>,
	Thomas Gleixner <tglx@...utronix.de>, x86@...nel.org
Subject: Re: [tip: timers/urgent] timers/migration: Move hierarchy setup into
 cpuhotplug prepare callback

On Fri, Jul 19, 2024 at 06:06:12PM -0000, tip-bot2 for Anna-Maria Behnsen wrote:
> @@ -1661,80 +1728,39 @@ static int tmigr_add_cpu(unsigned int cpu)
>  	return ret;
>  }
>  
> -static int tmigr_cpu_online(unsigned int cpu)
> -{
> -	struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
> -	int ret;
> -
> -	/* First online attempt? Initialize CPU data */
> -	if (!tmc->tmgroup) {
> -		raw_spin_lock_init(&tmc->lock);
> -
> -		ret = tmigr_add_cpu(cpu);
> -		if (ret < 0)
> -			return ret;
> -
> -		if (tmc->childmask == 0)
> -			return -EINVAL;
> -
> -		timerqueue_init(&tmc->cpuevt.nextevt);
> -		tmc->cpuevt.nextevt.expires = KTIME_MAX;
> -		tmc->cpuevt.ignore = true;
> -		tmc->cpuevt.cpu = cpu;
> -
> -		tmc->remote = false;
> -		WRITE_ONCE(tmc->wakeup, KTIME_MAX);
> -	}
> -	raw_spin_lock_irq(&tmc->lock);
> -	trace_tmigr_cpu_online(tmc);
> -	tmc->idle = timer_base_is_idle();
> -	if (!tmc->idle)
> -		__tmigr_cpu_activate(tmc);
> -	tmc->online = true;
> -	raw_spin_unlock_irq(&tmc->lock);
> -	return 0;
> -}
> -
> -/*
> - * tmigr_trigger_active() - trigger a CPU to become active again
> - *
> - * This function is executed on a CPU which is part of cpu_online_mask, when the
> - * last active CPU in the hierarchy is offlining. With this, it is ensured that
> - * the other CPU is active and takes over the migrator duty.
> - */
> -static long tmigr_trigger_active(void *unused)
> +static int tmigr_cpu_prepare(unsigned int cpu)
>  {
> -	struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
> -
> -	WARN_ON_ONCE(!tmc->online || tmc->idle);
> +	struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu);
> +	int ret = 0;
>  
> -	return 0;
> -}
> +	/*
> +	 * The target CPU must never do the prepare work. Otherwise it may
> +	 * spuriously activate the old top level group inside the new one
> +	 * (nevertheless whether old top level group is active or not) and/or
> +	 * release an uninitialized childmask.
> +	 */
> +	WARN_ON_ONCE(cpu == raw_smp_processor_id());

Silly me! Who else but the boot CPU can do the prepare work for the
boot CPU?
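
To make the failure mode concrete, here is a minimal userspace model
(not kernel code; setup_state(), prepare() and the single-online-CPU
table are illustrative stand-ins) of how registering a prepare-stage
hotplug state at early boot ends up invoking the callback on the boot
CPU for the boot CPU itself, so the assertion necessarily fires:

#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 4

/* At early boot only the boot CPU (CPU 0) is online. */
static bool cpu_online[NR_CPUS] = { true, false, false, false };

/* The CPU the registration code runs on, i.e. the boot CPU. */
static const unsigned int current_cpu = 0;

/* Stand-in for tmigr_cpu_prepare() with the questionable assertion. */
static int prepare(unsigned int cpu)
{
	if (cpu == current_cpu)
		printf("WARN: CPU%u runs its own prepare work\n", cpu);
	return 0;
}

/*
 * Stand-in for hotplug state registration: the prepare callback is
 * invoked, on the current CPU, for every CPU that is already online.
 * At early boot that set is exactly { boot CPU }, so cpu equals the
 * current CPU and the warning triggers spuriously.
 */
static void setup_state(int (*cb)(unsigned int))
{
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (cpu_online[cpu])
			cb(cpu);
	}
}

int main(void)
{
	setup_state(prepare);	/* models registration at early boot */
	return 0;
}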

This should be something like:

diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index fae04950487f..8d57f7686bb0 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -1673,6 +1673,15 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
 
 		lvllist = &tmigr_level_list[top];
 		if (group->num_children == 1 && list_is_singular(lvllist)) {
+			/*
+			 * The target CPU must never do the prepare work, except
+			 * on early boot when the boot CPU is the target. Otherwise
+			 * it may spuriously activate the old top level group inside
+			 * the new one (regardless of whether the old top level
+			 * group is active or not) and/or release an uninitialized
+			 * childmask.
+			 */
+			WARN_ON_ONCE(cpu == raw_smp_processor_id());
+
 			lvllist = &tmigr_level_list[top - 1];
 			list_for_each_entry(child, lvllist, list) {
 				if (child->parent)
@@ -1705,14 +1714,6 @@ static int tmigr_cpu_prepare(unsigned int cpu)
 	struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu);
 	int ret = 0;
 
-	/*
-	 * The target CPU must never do the prepare work. Otherwise it may
-	 * spuriously activate the old top level group inside the new one
-	 * (nevertheless whether old top level group is active or not) and/or
-	 * release an uninitialized childmask.
-	 */
-	WARN_ON_ONCE(cpu == raw_smp_processor_id());
-
 	/* Not first online attempt? */
 	if (tmc->tmgroup)
 		return ret;


