lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Tue, 8 Jun 2021 15:25:54 +0100
From:   Qais Yousef <qais.yousef@....com>
To:     Xuewen Yan <xuewen.yan94@...il.com>
Cc:     Quentin Perret <qperret@...gle.com>,
        Ingo Molnar <mingo@...hat.com>,
        Peter Zijlstra <peterz@...radead.org>,
        Juri Lelli <juri.lelli@...hat.com>,
        Vincent Guittot <vincent.guittot@...aro.org>,
        Dietmar Eggemann <dietmar.eggemann@....com>,
        Steven Rostedt <rostedt@...dmis.org>,
        Benjamin Segall <bsegall@...gle.com>,
        Mel Gorman <mgorman@...e.de>,
        Daniel Bristot de Oliveira <bristot@...hat.com>,
        linux-kernel <linux-kernel@...r.kernel.org>,
        Chunyan Zhang <zhang.lyra@...il.com>,
        Ryan Y <xuewyan@...mail.com>,
        Patrick Bellasi <patrick.bellasi@...bug.net>, tj@...nel.org
Subject: Re: [PATCH] sched/uclamp: Avoid setting cpu.uclamp.min bigger than
 cpu.uclamp.max

On 06/08/21 19:45, Xuewen Yan wrote:
> > Looking at this again now, I better understand what you were trying to say.
> > I got confused that you were still arguing about cgroup inverted
> > cpu.uclamp.min/max, but you're actually talking about something else.
> 
> Generally speaking, this kind of situation basically does not exist,
> but I just wanted to consider all the situations that can occur when
> users use it.

+1

> 
> >
> > It would be a lot easier to not cross talk threads and reply to my patch
> > directly with this remark.
> Sorry for the trouble caused by my unfamiliarity with the mailing
> list, I will pay attention next time :)

Not really a problem, it was just a bit confusing to get the right context :)

> > +       uc_min = task_group(p)->uclamp[UCLAMP_MIN].value;
> > +       uc_max = task_group(p)->uclamp[UCLAMP_MAX].value;
> > +       val = uc_req.value;
> > +       uc_req.value = clamp(val, uc_min, uc_max);
> 
> This is not a good solution, because it just clamps the uc_req.value,
> but the uc_req.bucket_id is not changed.


This is what I actually have now. I did move to using uclamp_se_set().
I also needed to modify uclamp_update_active_tasks() so that both
uclamp_min/max are updated unconditionally.

I still need to sleep on it to make sure I haven't missed something else, but
it looks fine so far.

Thanks!

--
Qais Yousef


--->8---

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9e9a5be35cde..1d2d3e6648a6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1403,38 +1403,28 @@ static void uclamp_sync_util_min_rt_default(void)
 static inline struct uclamp_se
 uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
 {
-       struct uclamp_se uc_req = p->uclamp_req[clamp_id];
+       /* Copy by value as we could modify it */
+       struct uclamp_se uc_eff = p->uclamp_req[clamp_id];
 #ifdef CONFIG_UCLAMP_TASK_GROUP
+       unsigned long tg_min, tg_max, value;

        /*
         * Tasks in autogroups or root task group will be
         * restricted by system defaults.
         */
        if (task_group_is_autogroup(task_group(p)))
-               return uc_req;
+               return uc_eff;
        if (task_group(p) == &root_task_group)
-               return uc_req;
+               return uc_eff;

-       switch (clamp_id) {
-       case UCLAMP_MIN: {
-               struct uclamp_se uc_min = task_group(p)->uclamp[clamp_id];
-               if (uc_req.value < uc_min.value)
-                       return uc_min;
-               break;
-       }
-       case UCLAMP_MAX: {
-               struct uclamp_se uc_max = task_group(p)->uclamp[clamp_id];
-               if (uc_req.value > uc_max.value)
-                       return uc_max;
-               break;
-       }
-       default:
-               WARN_ON_ONCE(1);
-               break;
-       }
+       tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
+       tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
+       value = uc_eff.value;
+       value = clamp(value, tg_min, tg_max);
+       uclamp_se_set(&uc_eff, value, false);
 #endif

-       return uc_req;
+       return uc_eff;
 }
 
 /*
@@ -1661,8 +1651,7 @@ uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
 
 #ifdef CONFIG_UCLAMP_TASK_GROUP
 static inline void
-uclamp_update_active_tasks(struct cgroup_subsys_state *css,
-                          unsigned int clamps)
+uclamp_update_active_tasks(struct cgroup_subsys_state *css)
 {
        enum uclamp_id clamp_id;
        struct css_task_iter it;
@@ -1670,10 +1659,8 @@ uclamp_update_active_tasks(struct cgroup_subsys_state *css,
 
        css_task_iter_start(css, 0, &it);
        while ((p = css_task_iter_next(&it))) {
-               for_each_clamp_id(clamp_id) {
-                       if ((0x1 << clamp_id) & clamps)
-                               uclamp_update_active(p, clamp_id);
-               }
+               for_each_clamp_id(clamp_id)
+                       uclamp_update_active(p, clamp_id);
        }
        css_task_iter_end(&it);
 }
@@ -9626,7 +9613,7 @@ static void cpu_util_update_eff(struct cgroup_subsys_state *css)
                }
 
                /* Immediately update descendants RUNNABLE tasks */
-               uclamp_update_active_tasks(css, clamps);
+               uclamp_update_active_tasks(css);
        }
 }

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ