lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [day] [month] [year] [list]
Message-ID: <CAKns5cVijC_o13H7UM7WS2ckexP2y1aYJviqNcKeCE-y_2mcXQ@mail.gmail.com>
Date:   Mon, 16 Jan 2023 16:58:09 +0800
From:   Rick Yiu <rickyiu@...gle.com>
To:     mingo@...hat.com, peterz@...radead.org, rostedt@...dmis.org
Cc:     linux-kernel@...r.kernel.org, Rick Yiu <rickyiu@...gle.com>,
        Wei Wang <wvw@...gle.com>, Quentin Perret <qperret@...gle.com>
Subject: [PATCH] sched: Pass flags to cpufreq governor for RT tasks

Right now only CFS tasks can pass flags to the cpufreq governor,
but not RT tasks. This limits the ability of the cpufreq governor to
handle RT tasks if it needs to. Passing the flags of RT tasks will
increase the flexibility of the cpufreq governor.

Signed-off-by: Rick Yiu <rickyiu@...gle.com>
---
 kernel/sched/rt.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index ed2a47e4ddae..6cef87b3d946 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -479,7 +479,7 @@ static inline void rt_queue_push_tasks(struct rq *rq)
 }
 #endif /* CONFIG_SMP */

-static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
+static void enqueue_top_rt_rq(struct rt_rq *rt_rq, unsigned int flags);
 static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count);

 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
@@ -584,7 +584,7 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)

  if (rt_rq->rt_nr_running) {
  if (!rt_se)
- enqueue_top_rt_rq(rt_rq);
+ enqueue_top_rt_rq(rt_rq, 0);
  else if (!on_rt_rq(rt_se))
  enqueue_rt_entity(rt_se, 0);

@@ -681,7 +681,7 @@ static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
  if (!rt_rq->rt_nr_running)
  return;

- enqueue_top_rt_rq(rt_rq);
+ enqueue_top_rt_rq(rt_rq, 0);
  resched_curr(rq);
 }

@@ -1102,7 +1102,7 @@ dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count)
 }

 static void
-enqueue_top_rt_rq(struct rt_rq *rt_rq)
+enqueue_top_rt_rq(struct rt_rq *rt_rq, unsigned int flags)
 {
  struct rq *rq = rq_of_rt_rq(rt_rq);

@@ -1120,7 +1120,7 @@ enqueue_top_rt_rq(struct rt_rq *rt_rq)
  }

  /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
- cpufreq_update_util(rq, 0);
+ cpufreq_update_util(rq, flags);
 }

 #if defined CONFIG_SMP
@@ -1508,7 +1508,7 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
  dequeue_rt_stack(rt_se, flags);
  for_each_sched_rt_entity(rt_se)
  __enqueue_rt_entity(rt_se, flags);
- enqueue_top_rt_rq(&rq->rt);
+ enqueue_top_rt_rq(&rq->rt, flags);
 }

 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
@@ -1525,7 +1525,7 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
  if (rt_rq && rt_rq->rt_nr_running)
  __enqueue_rt_entity(rt_se, flags);
  }
- enqueue_top_rt_rq(&rq->rt);
+ enqueue_top_rt_rq(&rq->rt, flags);
 }

 /*
-- 
2.39.0.314.g84b9a713c41-goog

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ