[<prev] [next>] [day] [month] [year] [list]
Message-ID: <20150508132800.1202fd8c@gandalf.local.home>
Date: Fri, 8 May 2015 13:28:00 -0400
From: Steven Rostedt <rostedt@...dmis.org>
To: pang.xunlei@....com.cn
Cc: Peter Zijlstra <peterz@...radead.org>,
Juri Lelli <juri.lelli@...il.com>,
linux-kernel@...r.kernel.org, linux-kernel-owner@...r.kernel.org,
Ingo Molnar <mingo@...hat.com>,
Xunlei Pang <pang.xunlei@...aro.org>
Subject: Re: [PATCH v2 1/2] sched/rt: Check to push task away when its
affinity is changed
On Tue, 5 May 2015 23:17:30 +0800
pang.xunlei@....com.cn wrote:
> > Or just do the p->{nr_,}cpus_allowed assignments in
> > set_cpus_allowed_rt() and keep it all in the one callback.
>
> Ok, thanks.
>
> How about this?
This is something more like what I had in mind.
>
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index d13fc13..c995a02 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -4768,11 +4768,15 @@ static struct rq *move_queued_task(struct
> task_struct *p, int new_cpu)
>
> void do_set_cpus_allowed(struct task_struct *p, const struct cpumask
> *new_mask)
> {
> + bool updated = false;
> +
> if (p->sched_class->set_cpus_allowed)
> - p->sched_class->set_cpus_allowed(p, new_mask);
> + updated = p->sched_class->set_cpus_allowed(p, new_mask);
>
> - cpumask_copy(&p->cpus_allowed, new_mask);
> - p->nr_cpus_allowed = cpumask_weight(new_mask);
> + if (!updated) {
> + cpumask_copy(&p->cpus_allowed, new_mask);
> + p->nr_cpus_allowed = cpumask_weight(new_mask);
> + }
I'm fine with this if Peter is.
> }
>
> /*
> diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
> index 5e95145..3baffb2 100644
> --- a/kernel/sched/deadline.c
> +++ b/kernel/sched/deadline.c
> @@ -1574,7 +1574,7 @@ static void task_woken_dl(struct rq *rq, struct
> task_struct *p)
> }
> }
>
> -static void set_cpus_allowed_dl(struct task_struct *p,
> +static bool set_cpus_allowed_dl(struct task_struct *p,
> const struct cpumask *new_mask)
> {
> struct rq *rq;
> @@ -1610,7 +1610,7 @@ static void set_cpus_allowed_dl(struct task_struct
> *p,
> * it is on the rq AND it is not throttled).
> */
> if (!on_dl_rq(&p->dl))
> - return;
> + return false;
>
I would think DEADLINE tasks would need the same "feature".
> weight = cpumask_weight(new_mask);
>
> @@ -1619,7 +1619,7 @@ static void set_cpus_allowed_dl(struct task_struct
> *p,
> * can migrate or not.
> */
> if ((p->nr_cpus_allowed > 1) == (weight > 1))
> - return;
> + return false;
>
> /*
> * The process used to be able to migrate OR it can now migrate
> @@ -1636,6 +1636,8 @@ static void set_cpus_allowed_dl(struct task_struct
> *p,
> }
>
> update_dl_migration(&rq->dl);
> +
> + return false;
> }
>
> /* Assumes rq->lock is held */
> diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
> index 8885b65..9e7a4bb 100644
> --- a/kernel/sched/rt.c
> +++ b/kernel/sched/rt.c
> @@ -2241,7 +2241,7 @@ static void task_woken_rt(struct rq *rq, struct
> task_struct *p)
> push_rt_tasks(rq);
> }
>
> -static void set_cpus_allowed_rt(struct task_struct *p,
> +static bool set_cpus_allowed_rt(struct task_struct *p,
> const struct cpumask *new_mask)
> {
> struct rq *rq;
> @@ -2250,18 +2250,18 @@ static void set_cpus_allowed_rt(struct task_struct
> *p,
> BUG_ON(!rt_task(p));
>
> if (!task_on_rq_queued(p))
> - return;
> + return false;
>
> weight = cpumask_weight(new_mask);
>
> + rq = task_rq(p);
> +
> /*
> * Only update if the process changes its state from whether it
> * can migrate or not.
Comment needs updating.
> */
> if ((p->nr_cpus_allowed > 1) == (weight > 1))
> - return;
> -
> - rq = task_rq(p);
> + goto check_push;
>
> /*
> * The process used to be able to migrate OR it can now migrate
> @@ -2278,6 +2278,18 @@ static void set_cpus_allowed_rt(struct task_struct
> *p,
> }
>
> update_rt_migration(&rq->rt);
> +
> +check_push:
> + if (weight > 1 && !task_running(rq, p) &&
> + !cpumask_subset(new_mask, &p->cpus_allowed)) {
> + /* Update new affinity for pushing */
> + cpumask_copy(&p->cpus_allowed, new_mask);
> + p->nr_cpus_allowed = weight;
> + push_rt_tasks(rq);
> + return true;
> + }
> +
> + return false;
> }
>
> /* Assumes rq->lock is held */
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index e0e1299..75f869b 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -1189,7 +1189,8 @@ struct sched_class {
> void (*task_waking) (struct task_struct *task);
> void (*task_woken) (struct rq *this_rq, struct task_struct *task);
>
> - void (*set_cpus_allowed)(struct task_struct *p,
> + /* If p's affinity was updated by it, return true. Otherwise false
> */
/* Return true if p's affinity was updated, false otherwise */
-- Steve
> + bool (*set_cpus_allowed)(struct task_struct *p,
> const struct cpumask *newmask);
>
> void (*rq_online)(struct rq *rq);
>
> --------------------------------------------------------
> ZTE Information Security Notice: The information contained in this mail (and any attachment transmitted herewith) is privileged and confidential and is intended for the exclusive use of the addressee(s). If you are not an intended recipient, any disclosure, reproduction, distribution or other dissemination or use of the information contained is strictly prohibited. If you have received this mail in error, please delete it and notify us immediately.
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
Powered by blists - more mailing lists