Message-Id: <1473328647-33116-12-git-send-email-fenghua.yu@intel.com>
Date: Thu, 8 Sep 2016 02:57:05 -0700
From: "Fenghua Yu" <fenghua.yu@...el.com>
To: "Thomas Gleixner" <tglx@...utronix.de>,
"H. Peter Anvin" <h.peter.anvin@...el.com>,
"Ingo Molnar" <mingo@...e.hu>, "Tony Luck" <tony.luck@...el.com>,
"Peter Zijlstra" <peterz@...radead.org>,
"Tejun Heo" <tj@...nel.org>, "Borislav Petkov" <bp@...e.de>,
"Stephane Eranian" <eranian@...gle.com>,
"Marcelo Tosatti" <mtosatti@...hat.com>,
"David Carrillo-Cisneros" <davidcc@...gle.com>,
"Shaohua Li" <shli@...com>,
"Ravi V Shankar" <ravi.v.shankar@...el.com>,
"Vikas Shivappa" <vikas.shivappa@...ux.intel.com>,
"Sai Prakhya" <sai.praneeth.prakhya@...el.com>
Cc: "linux-kernel" <linux-kernel@...r.kernel.org>,
"x86" <x86@...nel.org>, Fenghua Yu <fenghua.yu@...el.com>
Subject: [PATCH v2 11/33] x86/intel_rdt: Hot cpu support for Cache Allocation
From: Vikas Shivappa <vikas.shivappa@...ux.intel.com>
This patch adds hot plug cpu support for Intel Cache Allocation. Support
includes updating the cache bitmask MSRs IA32_L3_QOS_n when a CPU
package comes online or goes offline. There is one IA32_L3_QOS_n MSR per
Class of Service on each CPU package. A new package's MSRs are
synchronized with the values of the existing packages' MSRs. The
software cache for the IA32_PQR_ASSOC MSR is also reset during hot cpu
notifications.
Signed-off-by: Vikas Shivappa <vikas.shivappa@...ux.intel.com>
Signed-off-by: Fenghua Yu <fenghua.yu@...el.com>
Reviewed-by: Tony Luck <tony.luck@...el.com>
---
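A quick note on why the sync is needed (sketch only; the 0xc90 base is
the architectural address of IA32_L3_MASK_0, and CBM_FROM_INDEX() is
assumed to match the definition intel_rdt already uses):

#define IA32_L3_CBM_BASE	0xc90
#define CBM_FROM_INDEX(x)	(IA32_L3_CBM_BASE + (x))

The mask MSRs are package-scoped and their contents are not preserved
while a package is down, so every in-use mask has to be rewritten from
the software cache (cctable) when a package comes back online.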
arch/x86/kernel/cpu/intel_rdt.c | 85 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 85 insertions(+)
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index 9f30492..4537658 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -21,6 +21,7 @@
  */
 #include <linux/slab.h>
 #include <linux/err.h>
+#include <linux/cpu.h>
 #include <linux/sched.h>
 #include <asm/pqr_common.h>
 #include <asm/intel_rdt.h>
@@ -130,6 +131,9 @@ static inline void msr_update_all(int msr, u64 val)
 	on_each_cpu_mask(&rdt_cpumask, msr_cpu_update, &info, 1);
 }
 
+/*
+ * Set only one cpu in rdt_cpumask from each group of cpus that share
+ * the same cache: one cpu per cache domain suffices for MSR updates.
+ */
 static inline bool rdt_cpumask_update(int cpu)
 {
 	cpumask_and(&tmp_cpumask, &rdt_cpumask, topology_core_cpumask(cpu));
@@ -141,6 +145,80 @@ static inline bool rdt_cpumask_update(int cpu)
 	return false;
 }
 
+/*
+ * cbm_update_msrs() - Update all the in-use IA32_L3_MASK_n MSRs,
+ * one per CLOSid, on the current package.
+ */
+static void cbm_update_msrs(void *dummy)
+{
+	int maxid = boot_cpu_data.x86_cache_max_closid;
+	struct rdt_remote_data info;
+	unsigned int i;
+
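+	/* Push each in-use CBM from the software cache into this package's MSRs. */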
+	for (i = 0; i < maxid; i++) {
+		if (cctable[i].clos_refcnt) {
+			info.msr = CBM_FROM_INDEX(i);
+			info.val = cctable[i].cbm;
+			msr_cpu_update(&info);
+		}
+	}
+}
+
+static int intel_rdt_online_cpu(unsigned int cpu)
+{
+	struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
+
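+	/* Reset this cpu's cached IA32_PQR_ASSOC state to CLOSid 0. */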
+	state->closid = 0;
+	mutex_lock(&rdtgroup_mutex);
+	/* A cpu is put into the root rdtgroup when it comes online. */
+	cpumask_set_cpu(cpu, &root_rdtgrp->cpu_mask);
+	per_cpu(cpu_rdtgroup, cpu) = root_rdtgrp;
+	/*
+	 * If this is the first online cpu among the siblings that share
+	 * the same cache, update the CBM MSRs for that cache.
+	 */
+	if (rdt_cpumask_update(cpu))
+		smp_call_function_single(cpu, cbm_update_msrs, NULL, 1);
+	mutex_unlock(&rdtgroup_mutex);
+
+	return 0;
+}
+
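+/* Remove @cpu from the cpu_mask of whichever rdtgroup currently owns it. */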
+static int clear_rdtgroup_cpumask(unsigned int cpu)
+{
+	struct list_head *l;
+	struct rdtgroup *r;
+
+	list_for_each(l, &rdtgroup_lists) {
+		r = list_entry(l, struct rdtgroup, rdtgroup_list);
+		if (cpumask_test_cpu(cpu, &r->cpu_mask)) {
+			cpumask_clear_cpu(cpu, &r->cpu_mask);
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static int intel_rdt_offline_cpu(unsigned int cpu)
+{
+	int i;
+
+	mutex_lock(&rdtgroup_mutex);
+	if (!cpumask_test_and_clear_cpu(cpu, &rdt_cpumask)) {
+		mutex_unlock(&rdtgroup_mutex);
+		return 0;
+	}
+
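+	/*
+	 * This cpu was its package's representative in rdt_cpumask; hand
+	 * that role to another online cpu sharing the same cache, if any.
+	 */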
+	cpumask_and(&tmp_cpumask, topology_core_cpumask(cpu), cpu_online_mask);
+	cpumask_clear_cpu(cpu, &tmp_cpumask);
+	i = cpumask_any(&tmp_cpumask);
+
+	if (i < nr_cpu_ids)
+		cpumask_set_cpu(i, &rdt_cpumask);
+
+	clear_rdtgroup_cpumask(cpu);
+	mutex_unlock(&rdtgroup_mutex);
+
+	return 0;
+}
+
 static int __init intel_rdt_late_init(void)
 {
 	struct cpuinfo_x86 *c = &boot_cpu_data;
@@ -169,6 +247,13 @@ static int __init intel_rdt_late_init(void)
 	for_each_online_cpu(i)
 		rdt_cpumask_update(i);
+
+	err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+					"AP_INTEL_RDT_ONLINE",
+					intel_rdt_online_cpu, intel_rdt_offline_cpu);
+	if (err < 0)
+		goto out_err;
+
 	pr_info("Intel cache allocation enabled\n");
 out_err:
--
2.5.0