From: Chirag Jog <chirag@linux.vnet.ibm.com>
To: Jürgen Mell <j.mell@t-online.de>, Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 30 Jul 2008 19:18:42 +0200
Subject: [PATCH] Fix BUG messages

This patch should solve some of the BUG messages. It does two things:

1. Change rt_runtime_lock to be a raw spinlock, since, as the comment
   above it says, it is nested inside the rq lock.

2. Change mnt_writers to be a per-CPU locked variable. This eliminates
   the need for the code path to disable preemption and then take a
   potentially sleeping lock, which is what produced the BUG messages
   (a short illustrative sketch follows the diff below).

Signed-off-by: Chirag
[updated Chirag's patch against 2.6.26.3-rt3]
Signed-off-by: John Kacur

Index: linux-2.6.26.3/fs/namespace.c
===================================================================
--- linux-2.6.26.3.orig/fs/namespace.c
+++ linux-2.6.26.3/fs/namespace.c
@@ -178,13 +178,13 @@ struct mnt_writer {
 	unsigned long count;
 	struct vfsmount *mnt;
 } ____cacheline_aligned_in_smp;
-static DEFINE_PER_CPU(struct mnt_writer, mnt_writers);
+static DEFINE_PER_CPU_LOCKED(struct mnt_writer, mnt_writers);
 
 static int __init init_mnt_writers(void)
 {
 	int cpu;
 	for_each_possible_cpu(cpu) {
-		struct mnt_writer *writer = &per_cpu(mnt_writers, cpu);
+		struct mnt_writer *writer = &per_cpu_var_locked(mnt_writers, cpu);
 		spin_lock_init(&writer->lock);
 		lockdep_set_class(&writer->lock, &writer->lock_class);
 		writer->count = 0;
@@ -199,7 +199,7 @@ static void unlock_mnt_writers(void)
 	struct mnt_writer *cpu_writer;
 
 	for_each_possible_cpu(cpu) {
-		cpu_writer = &per_cpu(mnt_writers, cpu);
+		cpu_writer = &per_cpu_var_locked(mnt_writers, cpu);
 		spin_unlock(&cpu_writer->lock);
 	}
 }
@@ -251,8 +251,8 @@ int mnt_want_write(struct vfsmount *mnt)
 {
 	int ret = 0;
 	struct mnt_writer *cpu_writer;
-
-	cpu_writer = &get_cpu_var(mnt_writers);
+	int cpu = 0;
+	cpu_writer = &get_cpu_var_locked(mnt_writers, &cpu);
 	spin_lock(&cpu_writer->lock);
 	if (__mnt_is_readonly(mnt)) {
 		ret = -EROFS;
@@ -262,7 +262,7 @@ int mnt_want_write(struct vfsmount *mnt)
 	cpu_writer->count++;
 out:
 	spin_unlock(&cpu_writer->lock);
-	put_cpu_var(mnt_writers);
+	put_cpu_var_locked(mnt_writers, cpu);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(mnt_want_write);
@@ -273,7 +273,7 @@ static void lock_mnt_writers(void)
 	struct mnt_writer *cpu_writer;
 
 	for_each_possible_cpu(cpu) {
-		cpu_writer = &per_cpu(mnt_writers, cpu);
+		cpu_writer = &per_cpu_var_locked(mnt_writers, cpu);
 		spin_lock(&cpu_writer->lock);
 		__clear_mnt_count(cpu_writer);
 		cpu_writer->mnt = NULL;
@@ -332,8 +332,8 @@ void mnt_drop_write(struct vfsmount *mnt
 {
 	int must_check_underflow = 0;
 	struct mnt_writer *cpu_writer;
-
-	cpu_writer = &get_cpu_var(mnt_writers);
+	int cpu = 0;
+	cpu_writer = &get_cpu_var_locked(mnt_writers, &cpu);
 	spin_lock(&cpu_writer->lock);
 
 	use_cpu_writer_for_mount(cpu_writer, mnt);
@@ -360,7 +360,7 @@ void mnt_drop_write(struct vfsmount *mnt
 	 * __mnt_writers can underflow. Without it,
 	 * we could theoretically wrap __mnt_writers.
 	 */
-	put_cpu_var(mnt_writers);
+	put_cpu_var_locked(mnt_writers, cpu);
 }
 EXPORT_SYMBOL_GPL(mnt_drop_write);
 
@@ -612,7 +612,7 @@ static inline void __mntput(struct vfsmo
 	 * can come in.
 	 */
 	for_each_possible_cpu(cpu) {
-		struct mnt_writer *cpu_writer = &per_cpu(mnt_writers, cpu);
+		struct mnt_writer *cpu_writer = &per_cpu_var_locked(mnt_writers, cpu);
 		if (cpu_writer->mnt != mnt)
 			continue;
 		spin_lock(&cpu_writer->lock);
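
For readers hitting these warnings, here is a minimal, illustrative sketch (not
part of the patch) of why the old get_cpu_var() pattern trips the PREEMPT_RT
debug check and how the per-CPU locked variant used above avoids it. The struct
and variable names below are made up for the example; DEFINE_PER_CPU_LOCKED and
get_cpu_var_locked()/put_cpu_var_locked() are the -rt per-CPU-locked helpers
already relied on in the diff.

#include <linux/percpu.h>
#include <linux/spinlock.h>

/* Hypothetical example type; stands in for struct mnt_writer.
 * The lock is assumed to be initialized elsewhere with spin_lock_init(). */
struct example_writer {
	spinlock_t lock;
	unsigned long count;
};

/* Old style: plain per-CPU data. */
static DEFINE_PER_CPU(struct example_writer, plain_writers);
/* New style: per-CPU data protected by a per-CPU lock (-rt only). */
static DEFINE_PER_CPU_LOCKED(struct example_writer, locked_writers);

static void old_pattern(void)
{
	struct example_writer *w;

	w = &get_cpu_var(plain_writers);	/* disables preemption */
	spin_lock(&w->lock);			/* may sleep on -rt -> BUG splat */
	w->count++;
	spin_unlock(&w->lock);
	put_cpu_var(plain_writers);		/* re-enables preemption */
}

static void new_pattern(void)
{
	struct example_writer *w;
	int cpu;

	/* Takes the per-CPU lock instead of disabling preemption. */
	w = &get_cpu_var_locked(locked_writers, &cpu);
	spin_lock(&w->lock);			/* sleeping here is now fine */
	w->count++;
	spin_unlock(&w->lock);
	put_cpu_var_locked(locked_writers, cpu);
}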