[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-Id: <20190522195249.21168-1-vpillai@digitalocean.com>
Date: Wed, 22 May 2019 19:52:49 +0000
From: Vineeth Remanan Pillai <vpillai@...italocean.com>
To: Phil Auld <pauld@...hat.com>
Cc: Vineeth Remanan Pillai <vpillai@...italocean.com>,
Nishanth Aravamudan <naravamudan@...italocean.com>,
Julien Desfossez <jdesfossez@...italocean.com>,
Peter Zijlstra <peterz@...radead.org>,
Aubrey Li <aubrey.intel@...il.com>, mingo@...nel.org,
tglx@...utronix.de, pjt@...gle.com, torvalds@...ux-foundation.org,
linux-kernel@...r.kernel.org, subhra.mazumdar@...cle.com,
fweisbec@...il.com, keescook@...omium.org, kerrnel@...gle.com,
Aaron Lu <aaron.lwe@...il.com>,
Valentin Schneider <valentin.schneider@....com>,
Mel Gorman <mgorman@...hsingularity.net>,
Pawan Gupta <pawan.kumar.gupta@...ux.intel.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Tim Chen <tim.c.chen@...ux.intel.com>
Subject: Re: [RFC PATCH v2 11/17] sched: Basic tracking of matching tasks
> > I do not have a strong opinion on both. Probably a better approach
> > would be to replace both cpu_prio_less/core_prio_less with prio_less
> > which takes the third argument 'bool on_same_rq'?
> >
>
> Fwiw, I find the two names easier to read than a boolean flag. Could still
> be wrapped to a single implementation I suppose.
>
> An enum to control cpu or core would be more readable, but probably overkill...
>
I think we can in fact remove the boolean altogether and still have a single
function to compare the priority. If tasks are on the same CPU, use the task's
vruntime, else do the normalization.
Thanks,
Vineeth
---
-static inline bool __prio_less(struct task_struct *a, struct task_struct *b, bool core_cmp)
+static inline bool prio_less(struct task_struct *a, struct task_struct *b)
{
- u64 vruntime;
int pa = __task_prio(a), pb = __task_prio(b);
@@ -119,25 +105,21 @@ static inline bool __prio_less(struct task_struct *a, struct task_struct *b, boo
if (pa == -1) /* dl_prio() doesn't work because of stop_class above */
return !dl_time_before(a->dl.deadline, b->dl.deadline);
- vruntime = b->se.vruntime;
- if (core_cmp) {
- vruntime -= task_cfs_rq(b)->min_vruntime;
- vruntime += task_cfs_rq(a)->min_vruntime;
- }
- if (pa == MAX_RT_PRIO + MAX_NICE) /* fair */
- return !((s64)(a->se.vruntime - vruntime) <= 0);
+ if (pa == MAX_RT_PRIO + MAX_NICE) { /* fair */
+ u64 vruntime = b->se.vruntime;
- return false;
-}
+ /*
+ * Normalize the vruntime if tasks are in different cpus.
+ */
+ if (task_cpu(a) != task_cpu(b)) {
+ vruntime -= task_cfs_rq(b)->min_vruntime;
+ vruntime += task_cfs_rq(a)->min_vruntime;
+ }
-static inline bool cpu_prio_less(struct task_struct *a, struct task_struct *b)
-{
- return __prio_less(a, b, false);
-}
+ return !((s64)(a->se.vruntime - vruntime) <= 0);
+ }
-static inline bool core_prio_less(struct task_struct *a, struct task_struct *b)
-{
- return __prio_less(a, b, true);
+ return false;
}
static inline bool __sched_core_less(struct task_struct *a, struct task_struct *b)
@@ -149,7 +131,7 @@ static inline bool __sched_core_less(struct task_struct *a, struct task_struct *
return false;
/* flip prio, so high prio is leftmost */
- if (cpu_prio_less(b, a))
+ if (prio_less(b, a))
return true;
return false;
@@ -3621,7 +3603,7 @@ pick_task(struct rq *rq, const struct sched_class *class, struct task_struct *ma
* higher priority than max.
*/
if (max && class_pick->core_cookie &&
- core_prio_less(class_pick, max))
+ prio_less(class_pick, max))
return idle_sched_class.pick_task(rq);
return class_pick;
@@ -3640,8 +3622,8 @@ pick_task(struct rq *rq, const struct sched_class *class, struct task_struct *ma
* the core (so far) and it must be selected, otherwise we must go with
* the cookie pick in order to satisfy the constraint.
*/
- if (cpu_prio_less(cookie_pick, class_pick) &&
- (!max || core_prio_less(max, class_pick)))
+ if (prio_less(cookie_pick, class_pick) &&
+ (!max || prio_less(max, class_pick)))
return class_pick;
return cookie_pick;
Powered by blists - more mailing lists