Message-Id: <1255691192.7029.13.camel@marge.simson.net>
Date: Fri, 16 Oct 2009 13:06:32 +0200
From: Mike Galbraith <efault@....de>
To: "Zhang, Yanmin" <yanmin_zhang@...ux.intel.com>
Cc: Peter Zijlstra <a.p.zijlstra@...llo.nl>,
LKML <linux-kernel@...r.kernel.org>, Ingo Molnar <mingo@...e.hu>
Subject: Re: hackbench regression with kernel 2.6.32-rc1
On Tue, 2009-10-13 at 11:12 +0800, Zhang, Yanmin wrote:
> NEXT_BUDDY has no help on volanoMark and tbench.
Can you try the patch below please?  It tries to preserve buddy
affinity where possible, and mitigates over-preemption by strengthening
buddies a bit.  It improves vmark here by ~7%.
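
Roughly, the idea is this (a simplified sketch only, with a made-up
pick_buddy() helper; the real change is in the patch below): a buddy
hint is cleared only when it is actually used, so an unpicked buddy
keeps steering subsequent decisions, and buddies only count as cache
hot when the runqueue is busy enough that refusing the migration won't
leave a CPU idle.

/* Simplified sketch of the intent, not the patch itself. */
static struct sched_entity *
pick_buddy(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct sched_entity *buddy = cfs_rq->next ? cfs_rq->next : cfs_rq->last;

	/* Only take the buddy when doing so doesn't grossly violate fairness. */
	if (buddy && wakeup_preempt_entity(buddy, se) < 1) {
		/* Consume the hint only when it is actually used. */
		if (buddy == cfs_rq->next)
			cfs_rq->next = NULL;
		else
			cfs_rq->last = NULL;
		return buddy;
	}
	return se;
}
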
diff --git a/kernel/sched.c b/kernel/sched.c
index 00f9e71..fb025d4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2007,8 +2007,12 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 
 	/*
 	 * Buddy candidates are cache hot:
+	 *
+	 * Do not honor buddies if there may be nothing else to
+	 * prevent us from becoming idle.
 	 */
 	if (sched_feat(CACHE_HOT_BUDDY) &&
+			task_rq(p)->nr_running >= sched_nr_latency &&
 			(&p->se == cfs_rq_of(&p->se)->next ||
 			 &p->se == cfs_rq_of(&p->se)->last))
 		return 1;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c32c3e6..428bf55 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -863,18 +863,20 @@ static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
 	struct sched_entity *se = __pick_next_entity(cfs_rq);
 	struct sched_entity *buddy;
 
-	if (cfs_rq->next) {
+	if (cfs_rq->next && sched_feat(NEXT_BUDDY)) {
 		buddy = cfs_rq->next;
-		cfs_rq->next = NULL;
-		if (wakeup_preempt_entity(buddy, se) < 1)
+		if (wakeup_preempt_entity(buddy, se) < 1) {
+			cfs_rq->next = NULL;
 			return buddy;
+		}
 	}
 
-	if (cfs_rq->last) {
+	if (cfs_rq->last && sched_feat(LAST_BUDDY)) {
 		buddy = cfs_rq->last;
-		cfs_rq->last = NULL;
-		if (wakeup_preempt_entity(buddy, se) < 1)
+		if (wakeup_preempt_entity(buddy, se) < 1) {
+			cfs_rq->last = NULL;
 			return buddy;
+		}
 	}
 
 	return se;
@@ -1600,9 +1602,9 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 	 * Also, during early boot the idle thread is in the fair class, for
 	 * obvious reasons its a bad idea to schedule back to the idle thread.
 	 */
-	if (sched_feat(LAST_BUDDY) && likely(se->on_rq && curr != rq->idle))
+	if (!(wake_flags & WF_FORK) && likely(se->on_rq && curr != rq->idle))
 		set_last_buddy(se);
-	if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK))
+	if (!(wake_flags & WF_FORK))
 		set_next_buddy(pse);
 
 	/*
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/