Message-ID: <20080519170727.6820.45060.stgit@novell1.haskins.net>
Date: Mon, 19 May 2008 13:07:28 -0400
From: Gregory Haskins <ghaskins@...ell.com>
To: mingo@...e.hu, peterz@...radead.org, tglx@...utronix.de,
rostedt@...dmis.org, linux-rt-users@...r.kernel.org
Cc: linux-kernel@...r.kernel.org, bill.huey@...il.com,
dsingleton@...sta.com, dwalker@...sta.com, npiggin@...e.de,
pavel@....cz, acme@...hat.com, sdietrich@...ell.com,
pmorreale@...ell.com, mkohari@...ell.com, ghaskins@...ell.com
Subject: [PATCH 3/8] sched: make task->oncpu available in all configurations

task->oncpu is currently only maintained when __ARCH_WANT_UNLOCKED_CTXSW
is defined.  Make it available in all SMP configurations, so that later
patches in this series can check whether a task is running without the
need for a function call.
Signed-off-by: Gregory Haskins <ghaskins@...ell.com>
---
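Reviewer note, not part of the change: with ->oncpu unconditionally
available under CONFIG_SMP, task_running() reduces to a single field
read.  One plausible use later in the series is polling a lock owner's
run state inline, along these lines (a sketch only; "lock" and "owner"
are hypothetical names, not taken from this patch):

	/*
	 * Illustrative only: spin while the (hypothetical) lock is
	 * still held by the same owner and that owner is actually
	 * running on a CPU (->oncpu == 1).
	 */
	while (lock->owner == owner && owner->oncpu)
		cpu_relax();
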
 include/linux/sched.h |    2 --
 kernel/sched.c        |   35 ++++++++++++++++++++++++-----------
 2 files changed, 24 insertions(+), 13 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2d38d9e..76a76fd 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1080,10 +1080,8 @@ struct task_struct {
 	int lock_depth;		/* BKL lock depth */
 
 #ifdef CONFIG_SMP
-#ifdef __ARCH_WANT_UNLOCKED_CTXSW
 	int oncpu;
 #endif
-#endif
 
 	int prio, static_prio, normal_prio;
 #ifdef CONFIG_PREEMPT_RCU_BOOST
diff --git a/kernel/sched.c b/kernel/sched.c
index f3e36ee..78e7e8e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -694,18 +694,39 @@ static inline int task_current(struct rq *rq, struct task_struct *p)
 	return rq->curr == p;
 }
 
-#ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline int task_running(struct rq *rq, struct task_struct *p)
 {
+#ifdef CONFIG_SMP
+	return p->oncpu;
+#else
 	return task_current(rq, p);
+#endif
 }
 
+#ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {
+#ifdef CONFIG_SMP
+	/*
+	 * We can optimise this out completely for !SMP, because the
+	 * SMP rebalancing from interrupt is the only thing that cares
+	 * here.
+	 */
+	next->oncpu = 1;
+#endif
 }
 
 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 {
+#ifdef CONFIG_SMP
+	/*
+	 * After ->oncpu is cleared, the task can be moved to a different CPU.
+	 * We must ensure this doesn't happen until the switch is completely
+	 * finished.
+	 */
+	smp_wmb();
+	prev->oncpu = 0;
+#endif
 #ifdef CONFIG_DEBUG_SPINLOCK
 	/* this is a valid case when another task releases the spinlock */
 	rq->lock.owner = current;
@@ -721,14 +742,6 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 }
 
 #else /* __ARCH_WANT_UNLOCKED_CTXSW */
-static inline int task_running(struct rq *rq, struct task_struct *p)
-{
-#ifdef CONFIG_SMP
-	return p->oncpu;
-#else
-	return task_current(rq, p);
-#endif
-}
 
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {
@@ -2077,7 +2090,7 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	if (likely(sched_info_on()))
 		memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP)
 	p->oncpu = 0;
 #endif
 #ifdef CONFIG_PREEMPT
@@ -5679,7 +5692,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 
 	spin_lock_irqsave(&rq->lock, flags);
 	rq->curr = rq->idle = idle;
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP)
 	idle->oncpu = 1;
 #endif
 	spin_unlock_irqrestore(&rq->lock, flags);
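
Reviewer note on the barrier, not part of the patch: the smp_wmb() in
finish_lock_switch() keeps the stores that complete the context switch
ordered before the clearing of ->oncpu, because remote code such as
wait_task_inactive() spins on task_running() and treats ->oncpu == 0 as
permission to act on the task.  Roughly, a sketch of the pairing,
assuming such a reader:

	/* Sketch, switching-out CPU (finish_lock_switch): */
	/* ... stores completing prev's context switch ... */
	smp_wmb();			/* order those stores before the clear */
	prev->oncpu = 0;

	/* Sketch, remote CPU (e.g. wait_task_inactive()): */
	while (task_running(rq, p))	/* reads p->oncpu on SMP */
		cpu_relax();
	/* once the loop exits, p has fully switched out */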
--