Message-ID: <20221103141641.3055981-2-peternewman@google.com>
Date: Thu, 3 Nov 2022 15:16:41 +0100
From: Peter Newman <peternewman@...gle.com>
To: Reinette Chatre <reinette.chatre@...el.com>,
Fenghua Yu <fenghua.yu@...el.com>
Cc: Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
x86@...nel.org, "H. Peter Anvin" <hpa@...or.com>,
linux-kernel@...r.kernel.org, jannh@...gle.com, eranian@...gle.com,
kpsingh@...gle.com, derkling@...gle.com, james.morse@....com,
Peter Newman <peternewman@...gle.com>
Subject: [PATCH 1/1] x86/resctrl: serialize task CLOSID update with task_call_func()

When determining whether tasks need to be interrupted due to a
closid/rmid change, it is possible for the task in question to migrate
or wake up concurrently without observing the updated closid/rmid
values.

This is because stores updating the closid and rmid in the task
structure could reorder with the loads in task_curr() and task_cpu().
Similar reordering also impacts resctrl_sched_in(), where reading the
updated values could reorder with prior stores to t->cpu or rq->curr.
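
For example, the following interleaving (an illustrative trace, not an
observed failure; "move" is __rdtgroup_move_task() storing the new
closid, "switch" is a context switch into the task) leaves a stale
CLOSID in PQR_ASSOC:

  CPU 0 (move)                      CPU 1 (switch in tsk)
  ------------------------------    ------------------------------
  tsk->closid = new_closid
                                    rq->curr = tsk
                                    __resctrl_sched_in():
                                      load tsk->closid (sees old value)
  task_curr(tsk): load rq->curr
    (does not see tsk -> no IPI)

Because neither CPU's load is ordered after the other CPU's store, both
can observe pre-update values, and the task keeps running with the old
CLOSID until its next context switch.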

Instead, use task_call_func() to serialize updates to the closid and
rmid fields in the task_struct with context switch. This also removes
the need for READ_ONCE()/WRITE_ONCE() when accessing the fields.
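
For reference, task_call_func() runs the callback while holding the
task's pi_lock and, for a queued or running task, its rq lock, so the
callback cannot overlap a context switch of that task. A simplified
sketch of that scheduler-side locking (details and state checks elided;
see task_call_func() in kernel/sched/core.c for the real code):

	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
	if (p->on_rq || p->on_cpu)
		/* Taking the rq lock excludes a concurrent switch of p. */
		rq = __task_rq_lock(p, &rf);
	ret = func(p, arg);	/* plain closid/rmid stores are safe here */
	if (rq)
		rq_unlock(rq, &rf);
	raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
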
Signed-off-by: Peter Newman <peternewman@...gle.com>
---
 arch/x86/include/asm/resctrl.h         | 11 ++--
 arch/x86/kernel/cpu/resctrl/rdtgroup.c | 83 +++++++++++++++-----------
 2 files changed, 51 insertions(+), 43 deletions(-)

diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h
index d24b04ebf950..b712106e8f81 100644
--- a/arch/x86/include/asm/resctrl.h
+++ b/arch/x86/include/asm/resctrl.h
@@ -56,22 +56,19 @@ static void __resctrl_sched_in(void)
 	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
 	u32 closid = state->default_closid;
 	u32 rmid = state->default_rmid;
-	u32 tmp;
 
 	/*
 	 * If this task has a closid/rmid assigned, use it.
 	 * Else use the closid/rmid assigned to this cpu.
 	 */
 	if (static_branch_likely(&rdt_alloc_enable_key)) {
-		tmp = READ_ONCE(current->closid);
-		if (tmp)
-			closid = tmp;
+		if (current->closid)
+			closid = current->closid;
 	}
 
 	if (static_branch_likely(&rdt_mon_enable_key)) {
-		tmp = READ_ONCE(current->rmid);
-		if (tmp)
-			rmid = tmp;
+		if (current->rmid)
+			rmid = current->rmid;
 	}
 
 	if (closid != state->cur_closid || rmid != state->cur_rmid) {
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index e5a48f05e787..97cfa841f296 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -538,12 +538,38 @@ static void _update_task_closid_rmid(void *task)
 		resctrl_sched_in();
 }
 
-static void update_task_closid_rmid(struct task_struct *t)
+static int update_locked_task_closid_rmid(struct task_struct *t, void *arg)
 {
-	if (IS_ENABLED(CONFIG_SMP) && task_curr(t))
-		smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1);
-	else
-		_update_task_closid_rmid(t);
+	struct rdtgroup *rdtgrp = arg;
+
+	/*
+	 * We assume task_call_func() has provided the necessary serialization
+	 * with resctrl_sched_in().
+	 */
+	if (rdtgrp->type == RDTCTRL_GROUP) {
+		t->closid = rdtgrp->closid;
+		t->rmid = rdtgrp->mon.rmid;
+	} else if (rdtgrp->type == RDTMON_GROUP) {
+		t->rmid = rdtgrp->mon.rmid;
+	}
+
+	/*
+	 * If the task is current on a CPU, the PQR_ASSOC MSR needs to be
+	 * updated to make the resource group go into effect. If the task is not
+	 * current, the MSR will be updated when the task is scheduled in.
+	 */
+	return task_curr(t);
+}
+
+static bool update_task_closid_rmid(struct task_struct *t,
+				    struct rdtgroup *rdtgrp)
+{
+	/*
+	 * Serialize the closid and rmid update with context switch. If this
+	 * function indicates that the task was running, then it needs to be
+	 * interrupted to install the new closid and rmid.
+	 */
+	return task_call_func(t, update_locked_task_closid_rmid, rdtgrp);
 }
 
 static int __rdtgroup_move_task(struct task_struct *tsk,
@@ -557,39 +583,26 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
 		return 0;
 
 	/*
-	 * Set the task's closid/rmid before the PQR_ASSOC MSR can be
-	 * updated by them.
-	 *
-	 * For ctrl_mon groups, move both closid and rmid.
 	 * For monitor groups, can move the tasks only from
 	 * their parent CTRL group.
 	 */
-
-	if (rdtgrp->type == RDTCTRL_GROUP) {
-		WRITE_ONCE(tsk->closid, rdtgrp->closid);
-		WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
-	} else if (rdtgrp->type == RDTMON_GROUP) {
-		if (rdtgrp->mon.parent->closid == tsk->closid) {
-			WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
-		} else {
-			rdt_last_cmd_puts("Can't move task to different control group\n");
-			return -EINVAL;
-		}
+	if (rdtgrp->type == RDTMON_GROUP &&
+	    rdtgrp->mon.parent->closid != tsk->closid) {
+		rdt_last_cmd_puts("Can't move task to different control group\n");
+		return -EINVAL;
 	}
 
-	/*
-	 * Ensure the task's closid and rmid are written before determining if
-	 * the task is current that will decide if it will be interrupted.
-	 */
-	barrier();
-
-	/*
-	 * By now, the task's closid and rmid are set. If the task is current
-	 * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource
-	 * group go into effect. If the task is not current, the MSR will be
-	 * updated when the task is scheduled in.
-	 */
-	update_task_closid_rmid(tsk);
+	if (update_task_closid_rmid(tsk, rdtgrp) && IS_ENABLED(CONFIG_SMP))
+		/*
+		 * If the task has migrated away from the CPU indicated by
+		 * task_cpu() below, then it has already switched in on the
+		 * new CPU using the updated closid and rmid and the call
+		 * below is unnecessary, but harmless.
+		 */
+		smp_call_function_single(task_cpu(tsk),
+					 _update_task_closid_rmid, tsk, 1);
+	else
+		_update_task_closid_rmid(tsk);
 
 	return 0;
 }
@@ -2398,8 +2411,6 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
 	for_each_process_thread(p, t) {
 		if (!from || is_closid_match(t, from) ||
 		    is_rmid_match(t, from)) {
-			WRITE_ONCE(t->closid, to->closid);
-			WRITE_ONCE(t->rmid, to->mon.rmid);
 
 			/*
 			 * If the task is on a CPU, set the CPU in the mask.
@@ -2408,7 +2419,7 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
 			 * In such a case the function call is pointless, but
 			 * there is no other side effect.
 			 */
-			if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t))
+			if (update_task_closid_rmid(t, to) && mask)
 				cpumask_set_cpu(task_cpu(t), mask);
 		}
 	}
--
2.38.1.273.g43a17bfeac-goog