Subject: i386: fix voyager build

This adds an smp_ops structure for voyager and hooks things up
appropriately.  It is the first baby step towards making the subarch
runtime-switchable.

Signed-off-by: Jeremy Fitzhardinge
Cc: James Bottomley
Cc: Eric W. Biederman

---
 arch/i386/mach-voyager/voyager_smp.c |   73 ++++++++++++++++++++--------------
 1 file changed, 44 insertions(+), 29 deletions(-)

===================================================================
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -28,7 +28,6 @@
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/arch_hooks.h>
-#include <asm/pda.h>
 
 /* TLB state -- visible externally, indexed physically */
 DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 };
@@ -423,7 +422,7 @@ find_smp_config(void)
 	     VOYAGER_SUS_IN_CONTROL_PORT);
 
 	current_thread_info()->cpu = boot_cpu_id;
-	write_pda(cpu_number, boot_cpu_id);
+	x86_write_percpu(cpu_number, boot_cpu_id);
 }
 
 /*
@@ -436,7 +435,7 @@ smp_store_cpu_info(int id)
 
 	*c = boot_cpu_data;
 
-	identify_cpu(c);
+	identify_secondary_cpu(c);
 }
 
 /* set up the trampoline and return the physical address of the code */
@@ -460,7 +459,7 @@ start_secondary(void *unused)
 	/* external functions not defined in the headers */
 	extern void calibrate_delay(void);
 
-	secondary_cpu_init();
+	cpu_init();
 
 	/* OK, we're in the routine */
 	ack_CPI(VIC_CPU_BOOT_CPI);
@@ -580,7 +579,9 @@ do_boot_cpu(__u8 cpu)
 	/* init_tasks (in sched.c) is indexed logically */
 	stack_start.esp = (void *) idle->thread.esp;
 
-	init_gdt(cpu, idle);
+	init_gdt(cpu);
+	per_cpu(current_task, cpu) = idle;
+	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 	irq_ctx_init(cpu);
 
 	/* Note: Don't modify initial ss override */
@@ -884,8 +885,8 @@ smp_invalidate_interrupt(void)
 
 /* This routine is called with a physical cpu mask */
 static void
-flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
-		  unsigned long va)
+voyager_flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
+			  unsigned long va)
 {
 	int stuck = 50000;
 
@@ -937,7 +938,7 @@ flush_tlb_current_task(void)
 	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
 	local_flush_tlb();
 	if (cpu_mask)
-		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+		voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
 
 	preempt_enable();
 }
@@ -959,7 +960,7 @@ flush_tlb_mm (struct mm_struct * mm)
 		leave_mm(smp_processor_id());
 	}
 	if (cpu_mask)
-		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+		voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
 
 	preempt_enable();
 }
@@ -980,7 +981,7 @@ void flush_tlb_page(struct vm_area_struc
 	}
 
 	if (cpu_mask)
-		flush_tlb_others(cpu_mask, mm, va);
+		voyager_flush_tlb_others(cpu_mask, mm, va);
 
 	preempt_enable();
 }
@@ -1076,12 +1077,13 @@ smp_call_function_interrupt(void)
    [RETURNS] 0 on success, else a negative status code. Does not return until
    remote CPUs are nearly ready to execute <<func>> or are or have executed.
 */
-int
-smp_call_function (void (*func) (void *info), void *info, int retry,
-		   int wait)
+static int
+voyager_smp_call_function_mask (cpumask_t cpumask,
+				void (*func) (void *info), void *info,
+				int wait)
 {
 	struct call_data_struct data;
-	__u32 mask = cpus_addr(cpu_online_map)[0];
+	u32 mask = cpumask.bits[0];
 
 	mask &= ~(1 << smp_processor_id());

[... hunks converting the remaining smp_* entry points to static voyager_* functions omitted ...]

 	current_thread_info()->cpu = hard_smp_processor_id();
-	write_pda(cpu_number, hard_smp_processor_id());
-}
+	x86_write_percpu(cpu_number, hard_smp_processor_id());
+}
+
+struct smp_ops smp_ops = {
+	.smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu,
+	.smp_prepare_cpus = voyager_smp_prepare_cpus,
+	.cpu_up = voyager_cpu_up,
+	.smp_cpus_done = voyager_smp_cpus_done,
+
+	.smp_send_stop = voyager_smp_send_stop,
+	.smp_send_reschedule = voyager_smp_send_reschedule,
+	.smp_call_function_mask = voyager_smp_call_function_mask,
+};
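
For reviewers unfamiliar with the smp_ops conversion: after this series,
generic i386 SMP code no longer calls the subarch entry points directly
but dispatches through the function-pointer table initialized above.
Below is a minimal sketch of that interface, assuming the struct smp_ops
declaration from the companion smp_ops patch; the field names are taken
from the initializer above, but the exact prototypes and the inline
wrapper are illustrative assumptions, not quoted code:

	/* Sketch only -- prototypes inferred from the voyager
	 * implementations in this patch, not copied from smp.h. */
	struct smp_ops {
		void (*smp_prepare_boot_cpu)(void);
		void (*smp_prepare_cpus)(unsigned int max_cpus);
		int (*cpu_up)(unsigned int cpu);
		void (*smp_cpus_done)(unsigned int max_cpus);

		void (*smp_send_stop)(void);
		void (*smp_send_reschedule)(int cpu);
		int (*smp_call_function_mask)(cpumask_t mask,
					      void (*func)(void *info),
					      void *info, int wait);
	};

	extern struct smp_ops smp_ops;

	/* Generic code reaches the active subarch through the table: */
	static inline int smp_call_function_mask(cpumask_t mask,
						 void (*func)(void *info),
						 void *info, int wait)
	{
		return smp_ops.smp_call_function_mask(mask, func, info,
						      wait);
	}

With this indirection in place, making the subarch runtime-switchable
reduces to pointing smp_ops at a different initializer at boot.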