lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20120601091148.31979.53116.stgit@srivatsabhat.in.ibm.com>
Date:	Fri, 01 Jun 2012 14:41:56 +0530
From:	"Srivatsa S. Bhat" <srivatsa.bhat@...ux.vnet.ibm.com>
To:	tglx@...utronix.de, peterz@...radead.org,
	paulmck@...ux.vnet.ibm.com
Cc:	rusty@...tcorp.com.au, mingo@...nel.org, yong.zhang0@...il.com,
	akpm@...ux-foundation.org, vatsa@...ux.vnet.ibm.com, rjw@...k.pl,
	linux-arch@...r.kernel.org, linux-kernel@...r.kernel.org,
	srivatsa.bhat@...ux.vnet.ibm.com, nikunj@...ux.vnet.ibm.com,
	"Nikunj A. Dadhania" <nikunj@...ux.vnet.ibm.com>,
	Thomas Gleixner <tglx@...utronix.de>,
	Ingo Molnar <mingo@...hat.com>,
	"H. Peter Anvin" <hpa@...or.com>, x86@...nel.org,
	Suresh Siddha <suresh.b.siddha@...el.com>,
	Joerg Roedel <joerg.roedel@....com>,
	Yinghai Lu <yinghai@...nel.org>,
	Naga Chumbalkar <nagananda.chumbalkar@...com>,
	Don Zickus <dzickus@...hat.com>,
	Paul Gortmaker <paul.gortmaker@...driver.com>,
	Peter Zijlstra <a.p.zijlstra@...llo.nl>
Subject: [PATCH 07/27] x86, smpboot: Use generic SMP booting infrastructure

From: Nikunj A. Dadhania <nikunj@...ux.vnet.ibm.com>

Convert x86 to use the generic framework to boot secondary CPUs.

Notes:
1. x86 manipulates the cpu_online_mask under vector_lock. So, while
converting over to the generic smp booting code, override arch_vector_lock()
and arch_vector_unlock() to lock_vector_lock() and unlock_vector_lock()
respectively.

2. In smp_callin(), we allow the master to continue as soon as the physical
booting of the secondary processor is done. That is, we don't wait till the
CPU_STARTING notifications are sent.

Implications:
 - This does not alter the order in which the notifications are sent (i.e.,
   still CPU_STARTING is followed by CPU_ONLINE) because the master waits till
   the new cpu is set in the cpu_online_mask before returning to generic code.

 - This approach is better for two reasons:
   a. It makes more sense: the master has a timeout for waiting on the
      cpu_callin_map - which means we should report back as soon as possible.
      The whole idea of having a timeout is to estimate the maximum time that
      could be taken for physical booting. This approach separates out the
      physical booting vs running CPU hotplug callbacks and reports back to
      the master as soon as physical booting is done.

   b. Because we send out CPU_STARTING notifications *after* reporting to the
      master, we don't risk the chance of the master wrongly concluding a boot
      failure if we happen to add more callbacks to the CPU_STARTING
      notification.

Signed-off-by: Nikunj A. Dadhania <nikunj@...ux.vnet.ibm.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: "H. Peter Anvin" <hpa@...or.com>
Cc: x86@...nel.org
Cc: Suresh Siddha <suresh.b.siddha@...el.com>
Cc: Joerg Roedel <joerg.roedel@....com>
Cc: Yinghai Lu <yinghai@...nel.org>
Cc: Naga Chumbalkar <nagananda.chumbalkar@...com>
Cc: Don Zickus <dzickus@...hat.com>
Cc: Paul Gortmaker <paul.gortmaker@...driver.com>
Cc: Peter Zijlstra <a.p.zijlstra@...llo.nl>
Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@...ux.vnet.ibm.com>
---

 arch/x86/include/asm/smp.h     |    3 +++
 arch/x86/kernel/apic/io_apic.c |   15 +++++++++++++++
 arch/x86/kernel/smp.c          |    4 ++++
 arch/x86/kernel/smpboot.c      |   39 ++++++++++++++-------------------------
 4 files changed, 36 insertions(+), 25 deletions(-)

diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index ac1f3eb..b081b90 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -176,6 +176,9 @@ void cpu_disable_common(void);
 void native_smp_prepare_boot_cpu(void);
 void native_smp_prepare_cpus(unsigned int max_cpus);
 void native_smp_cpus_done(unsigned int max_cpus);
+void native_cpu_pre_starting(void *arg);
+void native_cpu_pre_online(void *arg);
+void native_cpu_post_online(void *arg);
 int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
 int native_cpu_disable(void);
 void native_cpu_die(unsigned int cpu);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index ac96561..a7d0037 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1084,6 +1084,21 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
 }
 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
 
+/*
+ * We need to hold vector_lock while manipulating cpu_online_mask so that the
+ * set of online cpus does not change while we are assigning vectors to cpus.
+ * Holding this lock ensures we don't half assign or remove an irq from a cpu.
+ */
+void arch_vector_lock(void)
+{
+	lock_vector_lock();
+}
+
+void arch_vector_unlock(void)
+{
+	unlock_vector_lock();
+}
+
 void lock_vector_lock(void)
 {
 	/* Used to the online set of cpus does not change
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 48d2b7d..4a9748e 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -293,6 +293,10 @@ struct smp_ops smp_ops = {
 	.stop_other_cpus	= native_stop_other_cpus,
 	.smp_send_reschedule	= native_smp_send_reschedule,
 
+	.cpu_pre_starting	= native_cpu_pre_starting,
+	.cpu_pre_online		= native_cpu_pre_online,
+	.cpu_post_online	= native_cpu_post_online,
+
 	.cpu_up			= native_cpu_up,
 	.cpu_die		= native_cpu_die,
 	.cpu_disable		= native_cpu_disable,
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 269bc1f..202be43 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -189,7 +189,7 @@ static void __cpuinit smp_callin(void)
 	/*
 	 * Need to setup vector mappings before we enable interrupts.
 	 */
-	setup_vector_irq(smp_processor_id());
+	setup_vector_irq(cpuid);
 
 	/*
 	 * Save our processor parameters. Note: this information
@@ -211,14 +211,10 @@ static void __cpuinit smp_callin(void)
 	 * This must be done before setting cpu_online_mask
 	 * or calling notify_cpu_starting.
 	 */
-	set_cpu_sibling_map(raw_smp_processor_id());
+	set_cpu_sibling_map(cpuid);
 	wmb();
 
-	notify_cpu_starting(cpuid);
-
-	/*
-	 * Allow the master to continue.
-	 */
+	/* Allow the master to continue. */
 	cpumask_set_cpu(cpuid, cpu_callin_mask);
 }
 
@@ -227,6 +223,11 @@ static void __cpuinit smp_callin(void)
  */
 notrace static void __cpuinit start_secondary(void *unused)
 {
+	smpboot_start_secondary(unused);
+}
+
+void __cpuinit native_cpu_pre_starting(void *unused)
+{
 	/*
 	 * Don't put *anything* before cpu_init(), SMP booting is too
 	 * fragile that we want to limit the things done here to the
@@ -234,43 +235,31 @@ notrace static void __cpuinit start_secondary(void *unused)
 	 */
 	cpu_init();
 	x86_cpuinit.early_percpu_clock_init();
-	preempt_disable();
 	smp_callin();
+}
 
+void __cpuinit native_cpu_pre_online(void *unused)
+{
 #ifdef CONFIG_X86_32
 	/* switch away from the initial page table */
 	load_cr3(swapper_pg_dir);
 	__flush_tlb_all();
 #endif
 
-	/* otherwise gcc will move up smp_processor_id before the cpu_init */
-	barrier();
 	/*
 	 * Check TSC synchronization with the BP:
 	 */
 	check_tsc_sync_target();
+}
 
-	/*
-	 * We need to hold vector_lock so there the set of online cpus
-	 * does not change while we are assigning vectors to cpus.  Holding
-	 * this lock ensures we don't half assign or remove an irq from a cpu.
-	 */
-	lock_vector_lock();
-	set_cpu_online(smp_processor_id(), true);
-	unlock_vector_lock();
-	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+void __cpuinit native_cpu_post_online(void *unused)
+{
 	x86_platform.nmi_init();
 
-	/* enable local interrupts */
-	local_irq_enable();
-
 	/* to prevent fake stack check failure in clock setup */
 	boot_init_stack_canary();
 
 	x86_cpuinit.setup_percpu_clockev();
-
-	wmb();
-	cpu_idle();
 }
 
 /*

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ