Index: kvm/arch/i386/kernel/smp.c
===================================================================
--- kvm.orig/arch/i386/kernel/smp.c	2007-08-24 17:09:46.000000000 +0200
+++ kvm/arch/i386/kernel/smp.c	2007-08-24 17:09:48.000000000 +0200
@@ -705,3 +705,10 @@ struct smp_ops smp_ops = {
 	.smp_send_reschedule = native_smp_send_reschedule,
 	.smp_call_function_mask = native_smp_call_function_mask,
 };
+
+int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
+			   void *info, int wait)
+{
+	return smp_ops.smp_call_function_mask(mask, func, info, wait);
+}
+EXPORT_SYMBOL(smp_call_function_mask);
Index: kvm/include/asm-i386/smp.h
===================================================================
--- kvm.orig/include/asm-i386/smp.h	2007-08-24 17:09:46.000000000 +0200
+++ kvm/include/asm-i386/smp.h	2007-08-24 17:10:34.000000000 +0200
@@ -92,12 +92,9 @@ static inline void smp_send_reschedule(i
 {
 	smp_ops.smp_send_reschedule(cpu);
 }
-static inline int smp_call_function_mask(cpumask_t mask,
-					 void (*func) (void *info), void *info,
-					 int wait)
-{
-	return smp_ops.smp_call_function_mask(mask, func, info, wait);
-}
+extern int smp_call_function_mask(cpumask_t mask,
+				  void (*func) (void *info), void *info,
+				  int wait);
 
 void native_smp_prepare_boot_cpu(void);
 void native_smp_prepare_cpus(unsigned int max_cpus);