Date:	Wed, 19 Mar 2008 14:26:00 -0300
From:	Glauber de Oliveira Costa <gcosta@...hat.com>
To:	linux-kernel@...r.kernel.org
Cc:	akpm@...ux-foundation.org, tglx@...utronix.de, mingo@...e.hu,
	ak@...e.de, Glauber Costa <gcosta@...hat.com>
Subject: [PATCH 65/79] integrate start_secondary

From: Glauber Costa <gcosta@...hat.com>

start_secondary() now looks the same on both architectures, so
merge it into smpboot.c. The minor differences go inside
an #ifdef.

Signed-off-by: Glauber Costa <gcosta@...hat.com>
---
 arch/x86/kernel/smpboot.c    |   86 +++++++++++++++++++++++++++++++++++++++++-
 arch/x86/kernel/smpboot_32.c |   75 ------------------------------------
 arch/x86/kernel/smpboot_64.c |   63 ------------------------------
 3 files changed, 85 insertions(+), 139 deletions(-)
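
For illustration only, here is a minimal, self-contained sketch of the
pattern applied below: one shared bring-up routine, with the 64-bit-only
vector setup kept behind an #ifdef. Every helper in the sketch is a
hypothetical stand-in (not the kernel's API), so it builds as a plain
userspace C program; build with e.g. gcc -DCONFIG_X86_64 sketch.c to take
the 64-bit path.

#include <stdio.h>

/* Stand-ins for the real bring-up steps; each is just a placeholder. */
static void cpu_init_stub(void)         { puts("cpu_init"); }
static void smp_callin_stub(void)       { puts("smp_callin"); }
static void set_cpu_online_stub(void)   { puts("cpu set online"); }
#ifdef CONFIG_X86_64
static void setup_vector_irq_stub(void) { puts("64-bit only: per-cpu vector setup"); }
#endif

/* One routine shared by both architectures. */
static void start_secondary_sketch(void)
{
	cpu_init_stub();		/* common path */
	smp_callin_stub();
#ifdef CONFIG_X86_64
	setup_vector_irq_stub();	/* the "minor difference" lives in the ifdef */
#endif
	set_cpu_online_stub();
}

int main(void)
{
	start_secondary_sketch();
	return 0;
}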

diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 69c1796..a36ae27 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -17,6 +17,7 @@
 #include <asm/tlbflush.h>
 #include <asm/mtrr.h>
 #include <asm/nmi.h>
+#include <asm/vmi.h>
 #include <linux/mc146818rtc.h>
 
 #include <mach_apic.h>
@@ -229,6 +230,90 @@ void __cpuinit smp_callin(void)
 	cpu_set(cpuid, cpu_callin_map);
 }
 
+/*
+ * Activate a secondary processor.
+ */
+void __cpuinit start_secondary(void *unused)
+{
+	/*
+	 * Don't put *anything* before cpu_init(), SMP booting is too
+	 * fragile that we want to limit the things done here to the
+	 * most necessary things.
+	 */
+#ifdef CONFIG_VMI
+	vmi_bringup();
+#endif
+	cpu_init();
+	preempt_disable();
+	smp_callin();
+
+	/* otherwise gcc will move up smp_processor_id before the cpu_init */
+	barrier();
+	/*
+	 * Check TSC synchronization with the BP:
+	 */
+	check_tsc_sync_target();
+
+	if (nmi_watchdog == NMI_IO_APIC) {
+		disable_8259A_irq(0);
+		enable_NMI_through_LVT0();
+		enable_8259A_irq(0);
+	}
+
+	/* This must be done before setting cpu_online_map */
+	set_cpu_sibling_map(raw_smp_processor_id());
+	wmb();
+
+	/*
+	 * We need to hold call_lock, so there is no inconsistency
+	 * between the time smp_call_function() determines number of
+	 * IPI recipients, and the time when the determination is made
+	 * for which cpus receive the IPI. Holding this
+	 * lock helps us to not include this cpu in a currently in progress
+	 * smp_call_function().
+	 */
+	lock_ipi_call_lock();
+#ifdef CONFIG_X86_64
+	spin_lock(&vector_lock);
+
+	/* Setup the per cpu irq handling data structures */
+	__setup_vector_irq(smp_processor_id());
+	/*
+	 * Allow the master to continue.
+	 */
+	spin_unlock(&vector_lock);
+#endif
+	cpu_set(smp_processor_id(), cpu_online_map);
+	unlock_ipi_call_lock();
+	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+
+	setup_secondary_clock();
+
+	wmb();
+	cpu_idle();
+}
+
+#ifdef CONFIG_X86_32
+/*
+ * Everything has been set up for the secondary
+ * CPUs - they just need to reload everything
+ * from the task structure
+ * This function must not return.
+ */
+void __devinit initialize_secondary(void)
+{
+	/*
+	 * We don't actually need to load the full TSS,
+	 * basically just the stack pointer and the ip.
+	 */
+
+	asm volatile(
+		"movl %0,%%esp\n\t"
+		"jmp *%1"
+		:
+		:"m" (current->thread.sp), "m" (current->thread.ip));
+}
+#endif
 
 static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
 {
@@ -533,7 +618,6 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
 }
 #endif	/* WAKE_SECONDARY_VIA_NMI */
 
-extern void start_secondary(void *unused);
 #ifdef WAKE_SECONDARY_VIA_INIT
 static int __devinit
 wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index e82eeb2..77b045c 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -80,81 +80,6 @@ extern void unmap_cpu_to_logical_apicid(int cpu);
 /* State of each CPU. */
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
-extern void smp_callin(void);
-
-/*
- * Activate a secondary processor.
- */
-void __cpuinit start_secondary(void *unused)
-{
-	/*
-	 * Don't put *anything* before cpu_init(), SMP booting is too
-	 * fragile that we want to limit the things done here to the
-	 * most necessary things.
-	 */
-#ifdef CONFIG_VMI
-	vmi_bringup();
-#endif
-	cpu_init();
-	preempt_disable();
-	smp_callin();
-
-	/* otherwise gcc will move up smp_processor_id before the cpu_init */
-	barrier();
-	/*
-	 * Check TSC synchronization with the BP:
-	 */
-	check_tsc_sync_target();
-
-	if (nmi_watchdog == NMI_IO_APIC) {
-		disable_8259A_irq(0);
-		enable_NMI_through_LVT0();
-		enable_8259A_irq(0);
-	}
-
-	/* This must be done before setting cpu_online_map */
-	set_cpu_sibling_map(raw_smp_processor_id());
-	wmb();
-
-	/*
-	 * We need to hold call_lock, so there is no inconsistency
-	 * between the time smp_call_function() determines number of
-	 * IPI recipients, and the time when the determination is made
-	 * for which cpus receive the IPI. Holding this
-	 * lock helps us to not include this cpu in a currently in progress
-	 * smp_call_function().
-	 */
-	lock_ipi_call_lock();
-	cpu_set(smp_processor_id(), cpu_online_map);
-	unlock_ipi_call_lock();
-	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
-
-	setup_secondary_clock();
-
-	wmb();
-	cpu_idle();
-}
-
-/*
- * Everything has been set up for the secondary
- * CPUs - they just need to reload everything
- * from the task structure
- * This function must not return.
- */
-void __devinit initialize_secondary(void)
-{
-	/*
-	 * We don't actually need to load the full TSS,
-	 * basically just the stack pointer and the ip.
-	 */
-
-	asm volatile(
-		"movl %0,%%esp\n\t"
-		"jmp *%1"
-		:
-		:"m" (current->thread.sp),"m" (current->thread.ip));
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 void cpu_exit_clear(void)
 {
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index 71f13b1..60cd8cf 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -71,69 +71,6 @@ int smp_threads_ready;
 /* State of each CPU */
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
-extern void smp_callin(void);
-/*
- * Setup code on secondary processor (after comming out of the trampoline)
- */
-void __cpuinit start_secondary(void)
-{
-	/*
-	 * Dont put anything before smp_callin(), SMP
-	 * booting is too fragile that we want to limit the
-	 * things done here to the most necessary things.
-	 */
-	cpu_init();
-	preempt_disable();
-	smp_callin();
-
-	/* otherwise gcc will move up the smp_processor_id before the cpu_init */
-	barrier();
-
-	/*
-  	 * Check TSC sync first:
- 	 */
-	check_tsc_sync_target();
-
-	if (nmi_watchdog == NMI_IO_APIC) {
-		disable_8259A_irq(0);
-		enable_NMI_through_LVT0();
-		enable_8259A_irq(0);
-	}
-
-	/*
-	 * The sibling maps must be set before turing the online map on for
-	 * this cpu
-	 */
-	set_cpu_sibling_map(smp_processor_id());
-
-	/*
-	 * We need to hold call_lock, so there is no inconsistency
-	 * between the time smp_call_function() determines number of
-	 * IPI recipients, and the time when the determination is made
-	 * for which cpus receive the IPI in genapic_flat.c. Holding this
-	 * lock helps us to not include this cpu in a currently in progress
-	 * smp_call_function().
-	 */
-	lock_ipi_call_lock();
-	spin_lock(&vector_lock);
-
-	/* Setup the per cpu irq handling data structures */
-	__setup_vector_irq(smp_processor_id());
-	/*
-	 * Allow the master to continue.
-	 */
-	spin_unlock(&vector_lock);
-	cpu_set(smp_processor_id(), cpu_online_map);
-	unlock_ipi_call_lock();
-
-	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
-
-	setup_secondary_clock();
-
-	wmb();
-	cpu_idle();
-}
-
 cycles_t cacheflush_time;
 unsigned long cache_decay_ticks;
 
-- 
1.5.0.6
