Message-ID: <1396855830.28539.10.camel@marge.simpson.net>
Date: Mon, 07 Apr 2014 09:30:30 +0200
From: Mike Galbraith <bitbucket@...ine.de>
To: Sasha Levin <sasha.levin@...cle.com>
Cc: mingo@...nel.org, hpa@...or.com, linux-kernel@...r.kernel.org,
torvalds@...ux-foundation.org, peterz@...radead.org,
mgorman@...e.com, akpm@...ux-foundation.org, tglx@...utronix.de,
linux-tip-commits@...r.kernel.org, Dave Jones <davej@...hat.com>
Subject: Re: [tip:sched/core] sched/numa: Move task_numa_free() to
__put_task_struct()
On Mon, 2014-04-07 at 07:29 +0200, Mike Galbraith wrote:
> I'm not getting it.
>
> I moved task_numa_free() from one interrupts enabled spot to another.
> But, with numa=fake=4 and lockdep enabled, not only does lockdep gripe,
> my little box locks up on splat. Converting to spin_lock/unlock_irq() did
> the expected: it just moved the lockdep gripe to task_numa_fault().
>
>
> > [ 2590.270067] Possible interrupt unsafe locking scenario:
> > [ 2590.270067]
> > [ 2590.270067] CPU0 CPU1
> > [ 2590.270067] ---- ----
> > [ 2590.270067] lock(&(&grp->lock)->rlock);
> > [ 2590.270067] local_irq_disable();
> > [ 2590.270067] lock(&(&new_timer->it_lock)->rlock);
> > [ 2590.270067] lock(&(&grp->lock)->rlock);
> > [ 2590.270067] <Interrupt>
> > [ 2590.270067] lock(&(&new_timer->it_lock)->rlock);
> > [ 2590.270067]
> > [ 2590.270067] *** DEADLOCK ***
>
> Ok, so how did I manage that HARDIRQ-safe -> HARDIRQ-unsafe?
Think I'll turn lockdep off and make context switches take a good long
while after finish_lock_switch()... but meanwhile, this made it happy.
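For the archives, here's roughly what I think lockdep is pairing up. A
minimal sketch of the two paths, condensed from the splat; the timer
teardown details are my reading of it, not a verified call chain:

/*
 * Sketch only, not real code.  grp_lock stands in for numa_group.lock,
 * it_lock for the posix timer's k_itimer.it_lock.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(grp_lock);
static DEFINE_SPINLOCK(it_lock);

/* CPU0: the task_numa_placement() path, interrupts enabled */
static void cpu0_path(void)
{
	spin_lock(&grp_lock);	/* HARDIRQ-unsafe usage: irqs still on */
	/*
	 * <Interrupt> fires here: the handler spins on it_lock, which
	 * CPU1 holds while it spins on grp_lock below.  Deadlock.
	 */
	spin_unlock(&grp_lock);
}

/* CPU1: timer teardown drops the last reference on the task */
static void cpu1_path(void)
{
	spin_lock_irq(&it_lock);	/* HARDIRQ-safe lock, irqs off */
	/* ... __put_task_struct() -> task_numa_free() ... */
	spin_lock(&grp_lock);		/* spins on CPU0's grp_lock */
	spin_unlock(&grp_lock);
	spin_unlock_irq(&it_lock);
}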
Sasha reports that lockdep claims 156654f491dd8d52687a5fbe1637f472a52ce75b made
numa_group.lock interrupt unsafe. While I don't see how that could be, given that
the commit in question merely moved task_numa_free() from one irqs-enabled region
to another, the change below does make both the lockdep gripes and the lockup
upon gripe (with numa=fake=4) go away.
Reported-by: Sasha Levin <sasha.levin@...cle.com>
Not-signed-off-by: Mike Galbraith <bitbucket@...ine.de>
---
kernel/sched/fair.c | 12 +++++++-----
kernel/sched/sched.h | 9 +++++++++
2 files changed, 16 insertions(+), 5 deletions(-)
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1497,7 +1497,7 @@ static void task_numa_placement(struct t
/* If the task is part of a group prevent parallel updates to group stats */
if (p->numa_group) {
group_lock = &p->numa_group->lock;
- spin_lock(group_lock);
+ spin_lock_irq(group_lock);
}
/* Find the node with the highest number of faults */
@@ -1572,7 +1572,7 @@ static void task_numa_placement(struct t
}
}
- spin_unlock(group_lock);
+ spin_unlock_irq(group_lock);
}
/* Preferred node as the node with the most faults */
@@ -1677,7 +1677,8 @@ static void task_numa_group(struct task_
if (!join)
return;
- double_lock(&my_grp->lock, &grp->lock);
+ BUG_ON(irqs_disabled());
+ double_lock_irq(&my_grp->lock, &grp->lock);
for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
my_grp->faults[i] -= p->numa_faults_memory[i];
@@ -1692,6 +1693,7 @@ static void task_numa_group(struct task_
spin_unlock(&my_grp->lock);
spin_unlock(&grp->lock);
+ local_irq_enable();
rcu_assign_pointer(p->numa_group, grp);
@@ -1710,14 +1712,14 @@ void task_numa_free(struct task_struct *
void *numa_faults = p->numa_faults_memory;
if (grp) {
- spin_lock(&grp->lock);
+ spin_lock_irq(&grp->lock);
for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
grp->faults[i] -= p->numa_faults_memory[i];
grp->total_faults -= p->total_numa_faults;
list_del(&p->numa_entry);
grp->nr_tasks--;
- spin_unlock(&grp->lock);
+ spin_unlock_irq(&grp->lock);
rcu_assign_pointer(p->numa_group, NULL);
put_numa_group(grp);
}
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1388,6 +1388,15 @@ static inline void double_lock(spinlock_
spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}
+static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
+{
+ if (l1 > l2)
+ swap(l1, l2);
+
+ spin_lock_irq(l1);
+ spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
+}
+
static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
{
if (l1 > l2)
--
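In case the asymmetric unlock in task_numa_group() above looks odd:
double_lock_irq() orders the two locks by address, same as double_lock(),
and disables interrupts exactly once, so the caller drops both locks with
plain spin_unlock() and re-enables interrupts once afterward.  A
hypothetical usage sketch (merge_groups() is made up for illustration,
it's not in the patch):

/* Illustration only: a caller of the new helper. */
static void merge_groups(struct numa_group *a, struct numa_group *b)
{
	/* Locks taken in address order, interrupts disabled once. */
	double_lock_irq(&a->lock, &b->lock);

	/* ... transfer fault counts from one group to the other ... */

	/* Plain unlocks; one local_irq_enable() undoes the single _irq. */
	spin_unlock(&a->lock);
	spin_unlock(&b->lock);
	local_irq_enable();
}

Plain spin_lock_irq() rather than the irqsave variant is enough here
because these paths always run with interrupts enabled, which is what the
BUG_ON(irqs_disabled()) added above documents.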