Message-Id: <1435260324-18125-9-git-send-email-vikas.shivappa@linux.intel.com>
Date: Thu, 25 Jun 2015 12:25:23 -0700
From: Vikas Shivappa <vikas.shivappa@...ux.intel.com>
To: linux-kernel@...r.kernel.org
Cc: x86@...nel.org, hpa@...or.com, tglx@...utronix.de,
mingo@...nel.org, tj@...nel.org, peterz@...radead.org,
matt.fleming@...el.com, will.auld@...el.com,
kanaka.d.juvva@...el.com, glenn.p.williamson@...el.com,
vikas.shivappa@...ux.intel.com, vikas.shivappa@...el.com
Subject: [PATCH 8/9] x86/intel_rdt: Hot cpu support for Cache Allocation

This patch adds hot cpu support for Intel Cache Allocation. Support
includes updating the cache bitmask MSRs IA32_L3_QOS_n when a new CPU
package comes online. There is one IA32_L3_QOS_n MSR per Class of
Service on each CPU package, and a new package's MSRs are synchronized
with the values already programmed on the existing packages. The
per-CPU software cache of the IA32_PQR_ASSOC MSR is also updated
during CPU hotplug notifications.

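For illustration only, and not part of this patch: a minimal user-space
sketch that reads an IA32_L3_QOS_MASK_n MSR for a given CLOSid on a given
CPU through the msr driver (/dev/cpu/N/msr), which can be used to compare
the masks on a newly onlined package against an already online one. The
0xc90 MSR base is per the Intel SDM; the msr module must be loaded, and
the helper names here are hypothetical, not taken from this series.

/* read_l3_mask.c - hypothetical helper, not part of this patch */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* IA32_L3_QOS_MASK_n live at 0xc90 + n (Intel SDM). */
#define IA32_L3_QOS_MASK_BASE 0xc90

static uint64_t read_l3_mask(int cpu, int closid)
{
	char path[64];
	uint64_t val;
	int fd;

	/* The msr driver uses the file offset as the MSR index. */
	snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror(path);
		exit(1);
	}
	if (pread(fd, &val, sizeof(val),
		  IA32_L3_QOS_MASK_BASE + closid) != sizeof(val)) {
		perror("pread");
		exit(1);
	}
	close(fd);
	return val;
}

int main(int argc, char **argv)
{
	int cpu, closid;

	if (argc < 3) {
		fprintf(stderr, "usage: %s <cpu> <closid>\n", argv[0]);
		return 1;
	}
	cpu = atoi(argv[1]);
	closid = atoi(argv[2]);

	printf("cpu %d closid %d cbm 0x%llx\n", cpu, closid,
	       (unsigned long long)read_l3_mask(cpu, closid));
	return 0;
}

After onlining a CPU on a second package, comparing the output for one
CPU per package for every in-use CLOSid should show identical masks once
this patch is applied.
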
Signed-off-by: Vikas Shivappa <vikas.shivappa@...ux.intel.com>
---
arch/x86/kernel/cpu/intel_rdt.c | 89 ++++++++++++++++++++++++++++++++++++++++-
1 file changed, 87 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index c8bb134..28886be 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -25,6 +25,7 @@
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/spinlock.h>
+#include <linux/cpu.h>
 #include <asm/intel_rdt.h>
 
 /*
@@ -313,13 +314,89 @@ out:
 	return err;
 }
 
-static inline void rdt_cpumask_update(int cpu)
+static inline bool rdt_cpumask_update(int cpu)
 {
 	static cpumask_t tmp;
 
 	cpumask_and(&tmp, &rdt_cpumask, topology_core_cpumask(cpu));
-	if (cpumask_empty(&tmp))
+	if (cpumask_empty(&tmp)) {
 		cpumask_set_cpu(cpu, &rdt_cpumask);
+		return true;
+	}
+
+	return false;
+}
+
+/*
+ * cbm_update_msrs() - Updates all the existing IA32_L3_MASK_n MSRs
+ * which are one per CLOSid except IA32_L3_MASK_0 on the current package.
+ */
+static void cbm_update_msrs(void *info)
+{
+	int maxid = boot_cpu_data.x86_cache_max_closid;
+	unsigned int i;
+
+	/*
+	 * At cpureset, all bits of IA32_L3_MASK_n are set.
+	 * The index starts from one as there is no need
+	 * to update IA32_L3_MASK_0 as it belongs to root cgroup
+	 * whose cache mask is all 1s always.
+	 */
+	for (i = 1; i < maxid; i++) {
+		if (ccmap[i].clos_refcnt)
+			cbm_cpu_update((void *)i);
+	}
+}
+
+static inline void intel_rdt_cpu_start(int cpu)
+{
+	struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
+
+	state->closid = 0;
+	mutex_lock(&rdt_group_mutex);
+	if (rdt_cpumask_update(cpu))
+		smp_call_function_single(cpu, cbm_update_msrs, NULL, 1);
+	mutex_unlock(&rdt_group_mutex);
+}
+
+static void intel_rdt_cpu_exit(unsigned int cpu)
+{
+	static cpumask_t tmp;
+	int i;
+
+	mutex_lock(&rdt_group_mutex);
+	if (!cpumask_test_and_clear_cpu(cpu, &rdt_cpumask)) {
+		mutex_unlock(&rdt_group_mutex);
+		return;
+	}
+
+	cpumask_and(&tmp, topology_core_cpumask(cpu), cpu_online_mask);
+	cpumask_clear_cpu(cpu, &tmp);
+	i = cpumask_any(&tmp);
+
+	if (i < nr_cpu_ids)
+		cpumask_set_cpu(i, &rdt_cpumask);
+	mutex_unlock(&rdt_group_mutex);
+}
+
+static int intel_rdt_cpu_notifier(struct notifier_block *nb,
+				  unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+
+	switch (action) {
+	case CPU_DOWN_FAILED:
+	case CPU_ONLINE:
+		intel_rdt_cpu_start(cpu);
+		break;
+	case CPU_DOWN_PREPARE:
+		intel_rdt_cpu_exit(cpu);
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
 }
 
 static int __init intel_rdt_late_init(void)
@@ -358,8 +435,16 @@ static int __init intel_rdt_late_init(void)
 	ccm->cache_mask = (1ULL << max_cbm_len) - 1;
 	ccm->clos_refcnt = 1;
 
+	cpu_notifier_register_begin();
+
+	mutex_lock(&rdt_group_mutex);
 	for_each_online_cpu(i)
 		rdt_cpumask_update(i);
+	mutex_unlock(&rdt_group_mutex);
+
+	__hotcpu_notifier(intel_rdt_cpu_notifier, 0);
+
+	cpu_notifier_register_done();
 
 	static_key_slow_inc(&rdt_enable_key);
 	pr_info("Intel cache allocation enabled\n");
--
1.9.1