Impact: reduce memory usage, use alloc_percpu()

Instead of allocating an nr_cpu_ids-sized array, most places should be
using alloc_percpu().  This avoids wasting space when CPU numbers are
not contiguous.

Signed-off-by: Rusty Russell
Signed-off-by: Mike Travis
---
 arch/x86/kernel/cpu/mcheck/mce_64.c |   11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

--- linux-2.6-for-ingo.orig/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ linux-2.6-for-ingo/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -568,7 +568,7 @@ static void collect_tscs(void *data)
 {
 	unsigned long *cpu_tsc = (unsigned long *)data;
 
-	rdtscll(cpu_tsc[smp_processor_id()]);
+	rdtscll(*per_cpu_ptr(cpu_tsc, smp_processor_id()));
 }
 
 static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
@@ -580,7 +580,7 @@ static ssize_t mce_read(struct file *fil
 	char __user *buf = ubuf;
 	int i, err;
 
-	cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
+	cpu_tsc = alloc_percpu(unsigned long);
 	if (!cpu_tsc)
 		return -ENOMEM;
@@ -590,7 +590,7 @@ static ssize_t mce_read(struct file *fil
 	/* Only supports full reads right now */
 	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
 		mutex_unlock(&mce_read_mutex);
-		kfree(cpu_tsc);
+		free_percpu(cpu_tsc);
 		return -EINVAL;
 	}
@@ -624,7 +624,8 @@ static ssize_t mce_read(struct file *fil
 	on_each_cpu(collect_tscs, cpu_tsc, 1);
 	for (i = next; i < MCE_LOG_LEN; i++) {
 		if (mcelog.entry[i].finished &&
-		    mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
+		    mcelog.entry[i].tsc < *per_cpu_ptr(cpu_tsc,
+						       mcelog.entry[i].cpu)) {
 			err |= copy_to_user(buf, mcelog.entry+i,
 					    sizeof(struct mce));
 			smp_rmb();
@@ -633,7 +634,7 @@ static ssize_t mce_read(struct file *fil
 		}
 	}
 	mutex_unlock(&mce_read_mutex);
-	kfree(cpu_tsc);
+	free_percpu(cpu_tsc);
 	return err ? -EFAULT : buf - ubuf;
 }
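
For anyone doing the same conversion elsewhere, the general shape of the
change is sketched below.  This is only an illustration, not code from this
patch: example_percpu_init() and its locals are made-up names, and the
zeroing loop stands in for whatever per-cpu work the real caller does.

static int example_percpu_init(void)
{
	unsigned long *counts;
	int cpu;

	/* One slot per possible CPU.  Unlike a
	 * kmalloc(nr_cpu_ids * sizeof(unsigned long), GFP_KERNEL) array,
	 * this burns no space on holes in the CPU numbering.
	 */
	counts = alloc_percpu(unsigned long);
	if (!counts)
		return -ENOMEM;

	/* Each CPU's slot is reached via per_cpu_ptr(), never by
	 * indexing the pointer with the CPU number.
	 */
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(counts, cpu) = 0;

	/* Pair alloc_percpu() with free_percpu(), not kfree(). */
	free_percpu(counts);
	return 0;
}

The patch above makes the same three substitutions: kmalloc() of an
nr_cpu_ids array becomes alloc_percpu(), cpu_tsc[cpu] becomes
*per_cpu_ptr(cpu_tsc, cpu), and kfree() becomes free_percpu().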