Message-Id: <1478912558-55514-3-git-send-email-fenghua.yu@intel.com>
Date: Fri, 11 Nov 2016 17:02:38 -0800
From: "Fenghua Yu" <fenghua.yu@...el.com>
To: "Thomas Gleixner" <tglx@...utronix.de>
Cc: "H. Peter Anvin" <h.peter.anvin@...el.com>,
"Ingo Molnar" <mingo@...e.hu>, "Tony Luck" <tony.luck@...el.com>,
"Ravi V Shankar" <ravi.v.shankar@...el.com>,
"Sai Prakhya" <sai.praneeth.prakhya@...el.com>,
"Vikas Shivappa" <vikas.shivappa@...ux.intel.com>,
"linux-kernel" <linux-kernel@...r.kernel.org>,
"x86" <x86@...nel.org>, "Fenghua Yu" <fenghua.yu@...el.com>
Subject: [PATCH 3/3] x86/intel_rdt: Update closid in PQR_ASSOC registers in synchronous mode when changing "cpus"
From: Fenghua Yu <fenghua.yu@...el.com>
When "cpus" is changed in a rdtgroup, the current code doesn't update
PQR_ASSOC registers with new closid in the cpus write operation.
The PQR_ASSOC registers are updated asynchronously later when new
processes are scheduled in on the CPUs.
A process may run on a CPU for long time without being switched to
another process, e.g. high performance computing or real time cases.
Then closid in the PQR_ASSOC register on the CPU is not updated for
long time until a new process is switched in.
This patch updates closid in PQR_ASSOC synchronously when writing
cpus to avoid the above issue.
Signed-off-by: Fenghua Yu <fenghua.yu@...el.com>
---
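Note for reviewers (below the "---" line, so it does not become part of
the commit message): a minimal sketch of the synchronous per-CPU MSR
update pattern the new rdt_update_pqr_assoc() helper follows: pin the
current CPU, update it directly if it is in the mask, and IPI the rest
with wait=1. The names update_msr_on_cpus(), update_msr_on_cpu() and
struct msr_update below are illustrative only, not symbols added by
this patch.

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <asm/msr.h>

struct msr_update {
	u32 msr;
	u32 val;
};

/* Runs on the target CPU (IPI or thread context, preemption disabled). */
static void update_msr_on_cpu(void *info)
{
	struct msr_update *m = info;

	wrmsr(m->msr, m->val, 0);
}

static void update_msr_on_cpus(const struct cpumask *mask, u32 msr, u32 val)
{
	struct msr_update m = { .msr = msr, .val = val };
	int cpu = get_cpu();	/* disable preemption, pin to this CPU */

	/*
	 * smp_call_function_many() skips the calling CPU even when it is
	 * in the mask, so handle the local CPU explicitly first.
	 */
	if (cpumask_test_cpu(cpu, mask))
		update_msr_on_cpu(&m);

	/* IPI the remaining CPUs in the mask and wait for completion. */
	smp_call_function_many(mask, update_msr_on_cpu, &m, 1);

	put_cpu();
}

Passing &m (and, in the patch, &closid) on the stack is safe because
wait=1 blocks the caller until every IPI handler has finished running.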
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 53 ++++++++++++++++++++------------
1 file changed, 34 insertions(+), 19 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index 416b95e..a45f2ba 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -191,6 +191,31 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
return ret;
}
+/*
+ * MSR_IA32_PQR_ASSOC is scoped per logical CPU, so all updates
+ * are always in thread context.
+ */
+static void rdt_update_pqr_assoc_closid(void *v)
+{
+ struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+
+ state->closid = *(int *)v;
+ wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, state->closid);
+}
+
+static void rdt_update_pqr_assoc(const struct cpumask *cpu_mask, int closid)
+{
+ int cpu = get_cpu();
+
+ /* Update PQR_ASSOC MSR on this cpu if it's in cpu_mask. */
+ if (cpumask_test_cpu(cpu, cpu_mask))
+ rdt_update_pqr_assoc_closid(&closid);
+ /* Update PQR_ASSOC MSR on the rest of cpus. */
+ smp_call_function_many(cpu_mask, rdt_update_pqr_assoc_closid,
+ &closid, 1);
+ put_cpu();
+}
+
static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
@@ -238,6 +263,9 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
&rdtgroup_default.cpu_mask, tmpmask);
for_each_cpu(cpu, tmpmask)
per_cpu(cpu_closid, cpu) = 0;
+
+ /* Update PQR_ASSOC registers on the dropped cpus */
+ rdt_update_pqr_assoc(tmpmask, rdtgroup_default.closid);
}
/*
@@ -253,6 +281,9 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
}
for_each_cpu(cpu, tmpmask)
per_cpu(cpu_closid, cpu) = rdtgrp->closid;
+
+ /* Update PQR_ASSOC registers on the added cpus */
+ rdt_update_pqr_assoc(tmpmask, rdtgrp->closid);
}
/* Done pushing/pulling - update this group with new mask */
@@ -783,18 +814,6 @@ static int reset_all_cbms(struct rdt_resource *r)
}
/*
- * MSR_IA32_PQR_ASSOC is scoped per logical CPU, so all updates
- * are always in thread context.
- */
-static void rdt_reset_pqr_assoc_closid(void *v)
-{
- struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
-
- state->closid = 0;
- wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, 0);
-}
-
-/*
* Forcibly remove all of subdirectories under root.
*/
static void rmdir_all_sub(void)
@@ -809,13 +828,9 @@ static void rmdir_all_sub(void)
t->closid = 0;
read_unlock(&tasklist_lock);
- get_cpu();
- /* Reset PQR_ASSOC MSR on this cpu. */
- rdt_reset_pqr_assoc_closid(NULL);
- /* Reset PQR_ASSOC MSR on the rest of cpus. */
- smp_call_function_many(cpu_online_mask, rdt_reset_pqr_assoc_closid,
- NULL, 1);
- put_cpu();
+ /* Reset PQR_ASSOC MSR on all online cpus */
+ rdt_update_pqr_assoc(cpu_online_mask, 0);
+
list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
/* Remove each rdtgroup other than root */
if (rdtgrp == &rdtgroup_default)
--
2.5.0