Message-ID: <20130627195747.29830.13668.stgit@srivatsabhat.in.ibm.com>
Date: Fri, 28 Jun 2013 01:27:47 +0530
From: "Srivatsa S. Bhat" <srivatsa.bhat@...ux.vnet.ibm.com>
To: tglx@...utronix.de, peterz@...radead.org, tj@...nel.org,
oleg@...hat.com, paulmck@...ux.vnet.ibm.com, rusty@...tcorp.com.au,
mingo@...nel.org, akpm@...ux-foundation.org, namhyung@...nel.org,
walken@...gle.com, vincent.guittot@...aro.org,
laijs@...fujitsu.com, David.Laight@...lab.com
Cc: rostedt@...dmis.org, wangyun@...ux.vnet.ibm.com,
xiaoguangrong@...ux.vnet.ibm.com, sbw@....edu, fweisbec@...il.com,
zhong@...ux.vnet.ibm.com, nikunj@...ux.vnet.ibm.com,
srivatsa.bhat@...ux.vnet.ibm.com, linux-pm@...r.kernel.org,
linux-arch@...r.kernel.org, linuxppc-dev@...ts.ozlabs.org,
netdev@...r.kernel.org, linux-kernel@...r.kernel.org,
Richard Henderson <rth@...ddle.net>,
Ivan Kokshaysky <ink@...assic.park.msu.ru>,
Matt Turner <mattst88@...il.com>,
Thomas Gleixner <tglx@...utronix.de>,
linux-alpha@...r.kernel.org,
"Srivatsa S. Bhat" <srivatsa.bhat@...ux.vnet.ibm.com>
Subject: [PATCH v3 31/45] alpha/smp: Use get/put_online_cpus_atomic() to
prevent CPU offline

Once stop_machine() is gone from the CPU offline path, we won't be able
to depend on disabling preemption to prevent CPUs from going offline
from under us.

Use the get/put_online_cpus_atomic() APIs to prevent CPUs from going
offline while these functions run in atomic context.
Cc: Richard Henderson <rth@...ddle.net>
Cc: Ivan Kokshaysky <ink@...assic.park.msu.ru>
Cc: Matt Turner <mattst88@...il.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: linux-alpha@...r.kernel.org
Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@...ux.vnet.ibm.com>
---
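The conversion is the same pattern at every call site in this patch. As
a minimal sketch of that pattern (a hypothetical example for
illustration only, not code from this patch; it assumes the
get/put_online_cpus_atomic() primitives introduced earlier in this
series):

/*
 * Hypothetical example: atomic-context code that must not race with
 * CPU offline switches from preempt_disable()/preempt_enable() to
 * get/put_online_cpus_atomic().
 */
#include <linux/cpu.h>
#include <linux/smp.h>

static void example_cross_cpu_op(smp_call_func_t func, void *info)
{
	/*
	 * Was: preempt_disable(); -- no longer enough to keep CPUs
	 * online once stop_machine() is removed from the CPU offline
	 * path.
	 */
	get_online_cpus_atomic();

	/*
	 * The set of online CPUs is now stable: no CPU can go offline
	 * until the matching put below.
	 */
	smp_call_function(func, info, 1);

	put_online_cpus_atomic();	/* Was: preempt_enable(); */
}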
arch/alpha/kernel/smp.c | 19 +++++++++----------
1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 7b60834..e147268 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -497,7 +497,6 @@ smp_cpus_done(unsigned int max_cpus)
 	       ((bogosum + 2500) / (5000/HZ)) % 100);
 }
 
-
 void
 smp_percpu_timer_interrupt(struct pt_regs *regs)
 {
@@ -681,7 +680,7 @@ ipi_flush_tlb_mm(void *x)
 void
 flush_tlb_mm(struct mm_struct *mm)
 {
-	preempt_disable();
+	get_online_cpus_atomic();
 
 	if (mm == current->active_mm) {
 		flush_tlb_current(mm);
@@ -693,7 +692,7 @@ flush_tlb_mm(struct mm_struct *mm)
 				if (mm->context[cpu])
 					mm->context[cpu] = 0;
 			}
-			preempt_enable();
+			put_online_cpus_atomic();
 			return;
 		}
 	}
@@ -702,7 +701,7 @@ flush_tlb_mm(struct mm_struct *mm)
 		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
 	}
 
-	preempt_enable();
+	put_online_cpus_atomic();
 }
 EXPORT_SYMBOL(flush_tlb_mm);
 
@@ -730,7 +729,7 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 	struct flush_tlb_page_struct data;
 	struct mm_struct *mm = vma->vm_mm;
 
-	preempt_disable();
+	get_online_cpus_atomic();
 
 	if (mm == current->active_mm) {
 		flush_tlb_current_page(mm, vma, addr);
@@ -742,7 +741,7 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 				if (mm->context[cpu])
 					mm->context[cpu] = 0;
 			}
-			preempt_enable();
+			put_online_cpus_atomic();
 			return;
 		}
 	}
@@ -755,7 +754,7 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 		printk(KERN_CRIT "flush_tlb_page: timed out\n");
 	}
 
-	preempt_enable();
+	put_online_cpus_atomic();
 }
 EXPORT_SYMBOL(flush_tlb_page);
 
@@ -786,7 +785,7 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 	if ((vma->vm_flags & VM_EXEC) == 0)
 		return;
 
-	preempt_disable();
+	get_online_cpus_atomic();
 
 	if (mm == current->active_mm) {
 		__load_new_mm_context(mm);
@@ -798,7 +797,7 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 				if (mm->context[cpu])
 					mm->context[cpu] = 0;
 			}
-			preempt_enable();
+			put_online_cpus_atomic();
 			return;
 		}
 	}
@@ -807,5 +806,5 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 		printk(KERN_CRIT "flush_icache_page: timed out\n");
 	}
 
-	preempt_enable();
+	put_online_cpus_atomic();
 }
--