Message-ID: <20230308131452.383914-3-peternewman@google.com>
Date: Wed, 8 Mar 2023 14:14:51 +0100
From: Peter Newman <peternewman@...gle.com>
To: reinette.chatre@...el.com, fenghua.yu@...el.com
Cc: Babu.Moger@....com, bp@...en8.de, dave.hansen@...ux.intel.com,
eranian@...gle.com, gupasani@...gle.com, hpa@...or.com,
james.morse@....com, linux-kernel@...r.kernel.org,
mingo@...hat.com, skodak@...gle.com, tglx@...utronix.de,
tony.luck@...el.com, x86@...nel.org,
Peter Newman <peternewman@...gle.com>
Subject: [PATCH v4 2/3] x86/resctrl: Parameterize rdt_move_group_tasks() task matching

Allow rdt_move_group_tasks() to be used for new group-scope operations.
This function is currently only used to implement rmdir on a group or to
unmount resctrlfs.

Callers now provide a filtering function to indicate which tasks should
be moved.

No functional change.

Signed-off-by: Peter Newman <peternewman@...gle.com>
---
arch/x86/kernel/cpu/resctrl/rdtgroup.c | 34 +++++++++++++++++++-------
1 file changed, 25 insertions(+), 9 deletions(-)
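
(Aside for reviewers, not part of the patch: below is a standalone
userspace sketch of the callback-filter pattern this change introduces.
The struct layouts, move_group_tasks(), and the task array are
simplified, hypothetical stand-ins, not the kernel implementation; only
the shape of the match-function parameter mirrors the patch.)

/*
 * Userspace sketch of a parameterized task mover: the caller supplies
 * the policy deciding which tasks match, mirroring the new
 * task_match_fn argument of rdt_move_group_tasks().
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct rdtgroup { int closid; int rmid; };
struct task { int closid; int rmid; };

/* Move every task that @match_fn accepts from @from into @to. */
static void move_group_tasks(struct task *tasks, size_t ntasks,
			     struct rdtgroup *from, struct rdtgroup *to,
			     bool (*match_fn)(struct task *, struct rdtgroup *))
{
	for (size_t i = 0; i < ntasks; i++) {
		if (match_fn(&tasks[i], from)) {
			tasks[i].closid = to->closid;
			tasks[i].rmid = to->rmid;
		}
	}
}

/* The rmdir/unmount policy: a NULL @from matches every task. */
static bool rmdir_match(struct task *t, struct rdtgroup *from)
{
	return !from || t->closid == from->closid || t->rmid == from->rmid;
}

int main(void)
{
	struct rdtgroup def = { .closid = 0, .rmid = 0 };
	struct rdtgroup grp = { .closid = 5, .rmid = 7 };
	struct task tasks[] = { { 5, 7 }, { 0, 3 }, { 2, 7 } };

	/* Give any tasks in @grp back to the default group. */
	move_group_tasks(tasks, 3, &grp, &def, rmdir_match);

	for (int i = 0; i < 3; i++)
		printf("task %d: closid=%d rmid=%d\n",
		       i, tasks[i].closid, tasks[i].rmid);
	return 0;
}

Built with a plain C compiler, the example moves only the tasks whose
closid or rmid matches the group being removed; other policies drop in
by passing a different match function.
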
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index c3fb525d52e9..84af23a29612 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -2393,22 +2393,29 @@ static int reset_all_ctrls(struct rdt_resource *r)
}

/*
- * Move tasks from one to the other group. If @from is NULL, then all tasks
- * in the systems are moved unconditionally (used for teardown).
+ * Move tasks from one group to another.
+ *
+ * @from: passed unmodified to task_match_fn() for each task
+ * @to: group providing new config values for matching tasks
+ * @task_match_fn: callback returning true when a task requires update
+ * @mask: output parameter indicating set of CPUs impacted by this
+ *        operation
*
* If @mask is not NULL the cpus on which moved tasks are running are set
* in that mask so the update smp function call is restricted to affected
* cpus.
*/
-static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
- struct cpumask *mask)
+static void rdt_move_group_tasks(struct rdtgroup *from,
+ struct rdtgroup *to,
+ struct cpumask *mask,
+ bool task_match_fn(struct task_struct *,
+ struct rdtgroup *))
{
struct task_struct *p, *t;

read_lock(&tasklist_lock);
for_each_process_thread(p, t) {
- if (!from || is_closid_match(t, from) ||
- is_rmid_match(t, from)) {
+ if (task_match_fn(t, from)) {
WRITE_ONCE(t->closid, to->closid);
WRITE_ONCE(t->rmid, to->mon.rmid);
@@ -2451,6 +2458,15 @@ static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
}
}

+/*
+ * If @from is NULL, then all tasks in the system are moved unconditionally
+ * (used for teardown).
+ */
+static bool rmdir_match(struct task_struct *t, struct rdtgroup *from)
+{
+ return !from || is_closid_match(t, from) || is_rmid_match(t, from);
+}
+
/*
* Forcibly remove all of subdirectories under root.
*/
@@ -2459,7 +2475,7 @@ static void rmdir_all_sub(void)
struct rdtgroup *rdtgrp, *tmp;

/* Move all tasks to the default resource group */
- rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);
+ rdt_move_group_tasks(NULL, &rdtgroup_default, NULL, rmdir_match);

list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
/* Free any child rmids */
@@ -3124,7 +3140,7 @@ static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
int cpu;

/* Give any tasks back to the parent group */
- rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask);
+ rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask, rmdir_match);

/* Update per cpu rmid of the moved CPUs first */
for_each_cpu(cpu, &rdtgrp->cpu_mask)
@@ -3164,7 +3180,7 @@ static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
int cpu;

/* Give any tasks back to the default group */
- rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);
+ rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask, rmdir_match);

/* Give any CPUs back to the default group */
cpumask_or(&rdtgroup_default.cpu_mask,
	   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
--
2.40.0.rc0.216.gc4246ad0f0-goog