Message-ID: <20120906112739.13320.53090.stgit@kvmdev>
Date:	Thu, 06 Sep 2012 20:27:40 +0900
From:	Tomoki Sekiyama <tomoki.sekiyama.qu@...achi.com>
To:	kvm@...r.kernel.org
Cc:	linux-kernel@...r.kernel.org, x86@...nel.org,
	yrl.pp-manager.tt@...achi.com,
	Tomoki Sekiyama <tomoki.sekiyama.qu@...achi.com>,
	Avi Kivity <avi@...hat.com>,
	Marcelo Tosatti <mtosatti@...hat.com>,
	Thomas Gleixner <tglx@...utronix.de>,
	Ingo Molnar <mingo@...hat.com>,
	"H. Peter Anvin" <hpa@...or.com>
Subject: [RFC v2 PATCH 04/21] x86: Avoid RCU warnings on slave CPUs

Initialize RCU-related variables to avoid warnings about RCU usage while
slave CPUs are running the specified functions. Also notify the RCU
subsystem before a slave CPU enters the idle state.

Signed-off-by: Tomoki Sekiyama <tomoki.sekiyama.qu@...achi.com>
Cc: Avi Kivity <avi@...hat.com>
Cc: Marcelo Tosatti <mtosatti@...hat.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: "H. Peter Anvin" <hpa@...or.com>
---

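Note for reviewers reading this patch in isolation: register_slave_cpu_notifier(),
slave_cpu_notify() and the CPU_SLAVE_* events used below are introduced by
earlier patches in this series, not here. As a rough sketch only (an assumption
about the plumbing, not the actual code from the series), they can be thought
of as an ordinary raw notifier chain, mirroring the pattern used by cpu_notify():

#include <linux/notifier.h>

/*
 * Hypothetical sketch: the real definitions live in the CONFIG_SLAVE_CPU
 * patches earlier in this series.
 */
static RAW_NOTIFIER_HEAD(slave_cpu_chain);

int register_slave_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&slave_cpu_chain, nb);
}

int slave_cpu_notify(unsigned long action, int cpu)
{
	int ret;

	/* Deliver the event (e.g. CPU_SLAVE_UP_PREPARE) to registered callbacks */
	ret = raw_notifier_call_chain(&slave_cpu_chain, action,
				      (void *)(long)cpu);

	return notifier_to_errno(ret);
}

With that in mind, registering rcu_slave_nb in rcu_init() below is what routes
the CPU_SLAVE_UP_PREPARE / CPU_SLAVE_DYING / CPU_SLAVE_DEAD events into
rcu_cpu_notify(), so slave CPUs get the same RCU setup and teardown as
ordinary hotplugged CPUs.
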
 arch/x86/kernel/smpboot.c |    4 ++++
 kernel/rcutree.c          |   14 ++++++++++++++
 2 files changed, 18 insertions(+), 0 deletions(-)

diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index e8cfe377..45dfc1d 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -382,6 +382,8 @@ notrace static void __cpuinit start_slave_cpu(void *unused)
 		f = per_cpu(slave_cpu_func, cpu);
 		per_cpu(slave_cpu_func, cpu).func = NULL;
 
+		rcu_note_context_switch(cpu);
+
 		if (!f.func) {
 			native_safe_halt();
 			continue;
@@ -1005,6 +1007,8 @@ int __cpuinit slave_cpu_up(unsigned int cpu)
 	if (IS_ERR(idle))
 		return PTR_ERR(idle);
 
+	slave_cpu_notify(CPU_SLAVE_UP_PREPARE, cpu);
+
 	ret = __native_cpu_up(cpu, idle, 1);
 
 	cpu_maps_update_done();
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index f280e54..31a7c8c 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -2589,6 +2589,9 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
+#ifdef CONFIG_SLAVE_CPU
+	case CPU_SLAVE_UP_PREPARE:
+#endif
 		rcu_prepare_cpu(cpu);
 		rcu_prepare_kthreads(cpu);
 		break;
@@ -2603,6 +2606,9 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
 		break;
 	case CPU_DYING:
 	case CPU_DYING_FROZEN:
+#ifdef CONFIG_SLAVE_CPU
+	case CPU_SLAVE_DYING:
+#endif
 		/*
 		 * The whole machine is "stopped" except this CPU, so we can
 		 * touch any data without introducing corruption. We send the
@@ -2616,6 +2622,9 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
 	case CPU_DEAD_FROZEN:
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
+#ifdef CONFIG_SLAVE_CPU
+	case CPU_SLAVE_DEAD:
+#endif
 		for_each_rcu_flavor(rsp)
 			rcu_cleanup_dead_cpu(cpu, rsp);
 		break;
@@ -2797,6 +2806,10 @@ static void __init rcu_init_geometry(void)
 	rcu_num_nodes -= n;
 }
 
+static struct notifier_block __cpuinitdata rcu_slave_nb = {
+	.notifier_call = rcu_cpu_notify,
+};
+
 void __init rcu_init(void)
 {
 	int cpu;
@@ -2814,6 +2827,7 @@ void __init rcu_init(void)
 	 * or the scheduler are operational.
 	 */
 	cpu_notifier(rcu_cpu_notify, 0);
+	register_slave_cpu_notifier(&rcu_slave_nb);
 	for_each_online_cpu(cpu)
 		rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
 	check_cpu_stall_init();

