Message-ID: <20241110025023.664487-2-longman@redhat.com>
Date: Sat, 9 Nov 2024 21:50:21 -0500
From: Waiman Long <longman@...hat.com>
To: Tejun Heo <tj@...nel.org>,
Johannes Weiner <hannes@...xchg.org>,
Michal Koutný <mkoutny@...e.com>
Cc: cgroups@...r.kernel.org,
linux-kernel@...r.kernel.org,
Juri Lelli <juri.lelli@...hat.com>,
Waiman Long <longman@...hat.com>
Subject: [PATCH 1/3] cgroup/cpuset: Revert "Allow suppression of sched domain rebuild in update_cpumasks_hier()"

Revert commit 3ae0b773211e ("cgroup/cpuset: Allow suppression of sched
domain rebuild in update_cpumasks_hier()") so that a following commit
can introduce an alternative way to suppress unnecessary
rebuild_sched_domains_locked() calls in update_cpumasks_hier() and
elsewhere.

Signed-off-by: Waiman Long <longman@...hat.com>
---
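For review convenience, a minimal sketch of the interface change this
revert restores (declarations only, taken from the diff below; function
bodies and the rest of cpuset.c are omitted):

    /* Before the revert: behavior selected by an int flags word. */
    #define HIER_CHECKALL      0x01  /* Check all cpusets with no skipping */
    #define HIER_NO_SD_REBUILD 0x02  /* Don't rebuild sched domains */

    static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
                                     int flags);

    /* After the revert: a single bool requests a full subtree check;
     * suppression of the sched domain rebuild is handled by a
     * different mechanism in the following commit.
     */
    static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
                                     bool force);

With the revert applied, update_sibling_cpumasks() passes force == false
where it used to pass HIER_NO_SD_REBUILD, and the former HIER_CHECKALL
sites in update_cpumask(), update_exclusive_cpumask() and
update_prstate() pass the equivalent condition as a bool
(force = !cpumask_equal(...), or !new_prs in update_prstate()).
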
kernel/cgroup/cpuset.c | 39 ++++++++++++++-------------------------
1 file changed, 14 insertions(+), 25 deletions(-)

diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index a4dd285cdf39..565280193922 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1922,12 +1922,6 @@ static void compute_partition_effective_cpumask(struct cpuset *cs,
rcu_read_unlock();
}

-/*
- * update_cpumasks_hier() flags
- */
-#define HIER_CHECKALL 0x01 /* Check all cpusets with no skipping */
-#define HIER_NO_SD_REBUILD 0x02 /* Don't rebuild sched domains */
-
/*
* update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
* @cs: the cpuset to consider
@@ -1942,7 +1936,7 @@ static void compute_partition_effective_cpumask(struct cpuset *cs,
* Called with cpuset_mutex held
*/
static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
- int flags)
+ bool force)
{
struct cpuset *cp;
struct cgroup_subsys_state *pos_css;
@@ -2007,10 +2001,10 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
* Skip the whole subtree if
* 1) the cpumask remains the same,
* 2) has no partition root state,
- * 3) HIER_CHECKALL flag not set, and
+ * 3) force flag not set, and
* 4) for v2 load balance state same as its parent.
*/
- if (!cp->partition_root_state && !(flags & HIER_CHECKALL) &&
+ if (!cp->partition_root_state && !force &&
cpumask_equal(tmp->new_cpus, cp->effective_cpus) &&
(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
(is_sched_load_balance(parent) == is_sched_load_balance(cp)))) {
@@ -2112,8 +2106,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
}
rcu_read_unlock();

- if (need_rebuild_sched_domains && !(flags & HIER_NO_SD_REBUILD) &&
- !force_sd_rebuild)
+ if (need_rebuild_sched_domains && !force_sd_rebuild)
rebuild_sched_domains_locked();
}
@@ -2141,9 +2134,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
* directly.
*
* The update_cpumasks_hier() function may sleep. So we have to
- * release the RCU read lock before calling it. HIER_NO_SD_REBUILD
- * flag is used to suppress rebuild of sched domains as the callers
- * will take care of that.
+ * release the RCU read lock before calling it.
*/
rcu_read_lock();
cpuset_for_each_child(sibling, pos_css, parent) {
@@ -2159,7 +2150,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
continue;

rcu_read_unlock();
- update_cpumasks_hier(sibling, tmp, HIER_NO_SD_REBUILD);
+ update_cpumasks_hier(sibling, tmp, false);
rcu_read_lock();
css_put(&sibling->css);
}
@@ -2179,7 +2170,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
struct tmpmasks tmp;
struct cpuset *parent = parent_cs(cs);
bool invalidate = false;
- int hier_flags = 0;
+ bool force = false;
int old_prs = cs->partition_root_state;

/* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
@@ -2240,8 +2231,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
* Check all the descendants in update_cpumasks_hier() if
* effective_xcpus is to be changed.
*/
- if (!cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus))
- hier_flags = HIER_CHECKALL;
+ force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus);

retval = validate_change(cs, trialcs);
@@ -2309,7 +2299,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
spin_unlock_irq(&callback_lock);

/* effective_cpus/effective_xcpus will be updated here */
- update_cpumasks_hier(cs, &tmp, hier_flags);
+ update_cpumasks_hier(cs, &tmp, force);

/* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
if (cs->partition_root_state)
@@ -2334,7 +2324,7 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
struct tmpmasks tmp;
struct cpuset *parent = parent_cs(cs);
bool invalidate = false;
- int hier_flags = 0;
+ bool force = false;
int old_prs = cs->partition_root_state;

if (!*buf) {
@@ -2357,8 +2347,7 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
* Check all the descendants in update_cpumasks_hier() if
* effective_xcpus is to be changed.
*/
- if (!cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus))
- hier_flags = HIER_CHECKALL;
+ force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus);

retval = validate_change(cs, trialcs);
if (retval)
@@ -2411,8 +2400,8 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
* of the subtree when it is a valid partition root or effective_xcpus
* is updated.
*/
- if (is_partition_valid(cs) || hier_flags)
- update_cpumasks_hier(cs, &tmp, hier_flags);
+ if (is_partition_valid(cs) || force)
+ update_cpumasks_hier(cs, &tmp, force);

/* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
if (cs->partition_root_state)
@@ -2853,7 +2842,7 @@ static int update_prstate(struct cpuset *cs, int new_prs)
update_unbound_workqueue_cpumask(new_xcpus_state);

/* Force update if switching back to member */
- update_cpumasks_hier(cs, &tmpmask, !new_prs ? HIER_CHECKALL : 0);
+ update_cpumasks_hier(cs, &tmpmask, !new_prs);

/* Update sched domains and load balance flag */
update_partition_sd_lb(cs, old_prs);

--
2.47.0