Message-Id: <20240229021414.508972-3-longman@redhat.com>
Date: Wed, 28 Feb 2024 21:14:14 -0500
From: Waiman Long <longman@...hat.com>
To: Zefan Li <lizefan.x@...edance.com>,
Tejun Heo <tj@...nel.org>,
Johannes Weiner <hannes@...xchg.org>,
Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>,
Mel Gorman <mgorman@...e.de>,
Daniel Bristot de Oliveira <bristot@...hat.com>,
Valentin Schneider <vschneid@...hat.com>,
Frederic Weisbecker <frederic@...nel.org>
Cc: linux-kernel@...r.kernel.org,
cgroups@...r.kernel.org,
Cestmir Kalina <ckalina@...hat.com>,
Costa Shulyupin <cshulyup@...hat.com>,
Waiman Long <longman@...hat.com>
Subject: [PATCH 2/2] cgroup/cpuset: Exclude isolated CPUs from housekeeping CPU masks

Call the newly introduced housekeeping_exlude_isolcpus() function to
exclude isolated CPUs from the selected housekeeping CPU masks. This
is in addition to the exclusion of isolated CPUs from the workqueue
unbound CPU mask.

Right now only the HK_TYPE_TIMER and HK_TYPE_RCU CPU masks are updated,
but more may be added in the future when appropriate.

Signed-off-by: Waiman Long <longman@...hat.com>
---
 kernel/cgroup/cpuset.c | 30 +++++++++++++++++++++++-------
 1 file changed, 23 insertions(+), 7 deletions(-)
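
For readers looking at this patch on its own, the interface of the new
helper is assumed to be roughly as follows. This is only a sketch
inferred from the call site added below (a cpumask plus a bitmask of
HK_TYPE_* flags, returning 0 or a negative errno, with -EOPNOTSUPP
treated as non-fatal); the authoritative prototype is the one
introduced in patch 1/2.

	/*
	 * Sketch only -- parameter names and types are assumptions based
	 * on the call site in this patch, not the definitive interface.
	 *
	 * Exclude the CPUs in @isolcpus from the housekeeping CPU masks
	 * selected by @flags, a bitmask of BIT(HK_TYPE_*) values such as
	 * the HOUSEKEEPING_FLAGS macro defined in this patch.
	 *
	 * Return: 0 on success, -EOPNOTSUPP if the selected housekeeping
	 * masks cannot be updated on this kernel, or another negative
	 * errno on failure.
	 */
	int housekeeping_exlude_isolcpus(const struct cpumask *isolcpus,
					 unsigned long flags);
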
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index ba36c073304a..d2541af71c22 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -214,6 +214,11 @@ static cpumask_var_t isolated_cpus;
 /* List of remote partition root children */
 static struct list_head remote_children;
 
+/*
+ * The set of housekeeping flags to be updated for CPU isolation
+ */
+#define HOUSEKEEPING_FLAGS (BIT(HK_TYPE_TIMER) | BIT(HK_TYPE_RCU))
+
 /*
  * Partition root states:
  *
@@ -1505,7 +1510,15 @@ static bool partition_xcpus_del(int old_prs, struct cpuset *parent,
 	return isolcpus_updated;
 }
 
-static void update_unbound_workqueue_cpumask(bool isolcpus_updated)
+/**
+ * update_isolation_cpumasks - Update external isolation CPU masks
+ * @isolcpus_updated - @true if isolation CPU masks update needed
+ *
+ * The following external CPU masks will be updated if necessary:
+ * - workqueue unbound cpumask
+ * - housekeeping cpumasks
+ */
+static void update_isolation_cpumasks(bool isolcpus_updated)
 {
 	int ret;
 
@@ -1515,7 +1528,10 @@ static void update_unbound_workqueue_cpumask(bool isolcpus_updated)
 		return;
 
 	ret = workqueue_unbound_exclude_cpumask(isolated_cpus);
-	WARN_ON_ONCE(ret < 0);
+	if (WARN_ON_ONCE(ret < 0))
+		return;
+	ret = housekeeping_exlude_isolcpus(isolated_cpus, HOUSEKEEPING_FLAGS);
+	WARN_ON_ONCE((ret < 0) && (ret != -EOPNOTSUPP));
 }
 
 /**
@@ -1609,7 +1625,7 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
 		parent->child_ecpus_count--;
 	}
 	spin_unlock_irq(&callback_lock);
-	update_unbound_workqueue_cpumask(isolcpus_updated);
+	update_isolation_cpumasks(isolcpus_updated);
 
 	/*
 	 * Proprogate changes in top_cpuset's effective_cpus down the hierarchy.
@@ -1645,7 +1661,7 @@ static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
 		cs->prs_err = PERR_INVCPUS;
 	reset_partition_data(cs);
 	spin_unlock_irq(&callback_lock);
-	update_unbound_workqueue_cpumask(isolcpus_updated);
+	update_isolation_cpumasks(isolcpus_updated);
 
 	/*
 	 * Proprogate changes in top_cpuset's effective_cpus down the hierarchy.
@@ -1697,7 +1713,7 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *newmask,
 	if (deleting)
 		isolcpus_updated += partition_xcpus_del(prs, NULL, tmp->delmask);
 	spin_unlock_irq(&callback_lock);
-	update_unbound_workqueue_cpumask(isolcpus_updated);
+	update_isolation_cpumasks(isolcpus_updated);
 
 	/*
 	 * Proprogate changes in top_cpuset's effective_cpus down the hierarchy.
@@ -2067,7 +2083,7 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
 		WARN_ON_ONCE(parent->nr_subparts < 0);
 	}
 	spin_unlock_irq(&callback_lock);
-	update_unbound_workqueue_cpumask(isolcpus_updated);
+	update_isolation_cpumasks(isolcpus_updated);
 
 	if ((old_prs != new_prs) && (cmd == partcmd_update))
 		update_partition_exclusive(cs, new_prs);
@@ -3131,7 +3147,7 @@ static int update_prstate(struct cpuset *cs, int new_prs)
 	else if (new_xcpus_state)
 		partition_xcpus_newstate(old_prs, new_prs, cs->effective_xcpus);
 	spin_unlock_irq(&callback_lock);
-	update_unbound_workqueue_cpumask(new_xcpus_state);
+	update_isolation_cpumasks(new_xcpus_state);
 
 	/* Force update if switching back to member */
 	update_cpumasks_hier(cs, &tmpmask, !new_prs ? HIER_CHECKALL : 0);
--
2.39.3