Message-Id: <1476497548-11169-11-git-send-email-fenghua.yu@intel.com>
Date:   Fri, 14 Oct 2016 19:12:20 -0700
From:   "Fenghua Yu" <fenghua.yu@...el.com>
To:     "Thomas Gleixner" <tglx@...utronix.de>
Cc:     "H. Peter Anvin" <h.peter.anvin@...el.com>,
        "Ingo Molnar" <mingo@...e.hu>, "Tony Luck" <tony.luck@...el.com>,
        "Peter Zijlstra" <peterz@...radead.org>,
        "Stephane Eranian" <eranian@...gle.com>,
        "Borislav Petkov" <bp@...e.de>,
        "Dave Hansen" <dave.hansen@...el.com>,
        "Nilay Vaish" <nilayvaish@...il.com>, "Shaohua Li" <shli@...com>,
        "David Carrillo-Cisneros" <davidcc@...gle.com>,
        "Ravi V Shankar" <ravi.v.shankar@...el.com>,
        "Sai Prakhya" <sai.praneeth.prakhya@...el.com>,
        "Vikas Shivappa" <vikas.shivappa@...ux.intel.com>,
        "linux-kernel" <linux-kernel@...r.kernel.org>,
        "x86" <x86@...nel.org>, "Fenghua Yu" <fenghua.yu@...el.com>
Subject: [PATCH v4 10/18] x86/intel_rdt: Build structures for each resource based on cache topology

From: Tony Luck <tony.luck@...el.com>

We use cpu hotplug callbacks to catch each cpu in turn and look at its
cache topology w.r.t. each of the enabled resources. As we discover new
cache domains, we initialize each domain's per-CLOSID bit mask array to
the default (full access) value.

Signed-off-by: Tony Luck <tony.luck@...el.com>
Signed-off-by: Fenghua Yu <fenghua.yu@...el.com>
---
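Note (for reviewers, not part of the commit message): rdt_cbm_update()
is written as an IPI-style callback taking a struct msr_param so that a
later consumer, e.g. a schemata write path, can push updated bit masks
out to the cpus of one domain. A minimal sketch of such a caller, using
the hypothetical helper name update_one_domain(), could look like:

static void update_one_domain(struct rdt_resource *r,
			      struct rdt_domain *d, int closid)
{
	struct msr_param m = {
		.res	= r,
		.low	= closid,
		.high	= closid + 1,	/* rdt_cbm_update() treats high as exclusive */
	};

	/* Run rdt_cbm_update() on one online cpu of the domain and wait. */
	smp_call_function_any(&d->cpu_mask, rdt_cbm_update, &m, 1);
}
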
 arch/x86/include/asm/intel_rdt.h |  31 ++++++++++
 arch/x86/kernel/cpu/intel_rdt.c  | 131 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 162 insertions(+)

diff --git a/arch/x86/include/asm/intel_rdt.h b/arch/x86/include/asm/intel_rdt.h
index 8c61d83..b3df691 100644
--- a/arch/x86/include/asm/intel_rdt.h
+++ b/arch/x86/include/asm/intel_rdt.h
@@ -43,6 +43,35 @@ struct rdt_resource {
 		if (r->enabled)
 
 #define IA32_L3_CBM_BASE	0xc90
+
+/**
+ * struct rdt_domain - group of cpus sharing an RDT resource
+ * @list:	all instances of this resource
+ * @id:		unique id for this instance
+ * @cpu_mask:	which cpus share this resource
+ * @cbm:	array of cache bit masks (indexed by CLOSID)
+ */
+struct rdt_domain {
+	struct list_head	list;
+	int			id;
+	struct cpumask		cpu_mask;
+	u32			*cbm;
+};
+
+/**
+ * struct msr_param - set a range of MSRs from a domain
+ * @res:       The resource to use
+ * @low:       Beginning index from base MSR
+ * @high:      End index (exclusive)
+ */
+struct msr_param {
+	struct rdt_resource	*res;
+	int			low;
+	int			high;
+};
+
+extern struct mutex rdtgroup_mutex;
+
 extern struct rdt_resource rdt_resources_all[];
 
 enum {
@@ -65,4 +94,6 @@ union cpuid_0x10_1_edx {
 	} split;
 	unsigned int full;
 };
+
+void rdt_cbm_update(void *arg);
 #endif /* _ASM_X86_INTEL_RDT_H */
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index 87f9650..0b47ea9 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -26,11 +26,16 @@
 
 #include <linux/slab.h>
 #include <linux/err.h>
+#include <linux/cacheinfo.h>
+#include <linux/cpuhotplug.h>
 
 #include <asm/intel_rdt_common.h>
 #include <asm/intel-family.h>
 #include <asm/intel_rdt.h>
 
+/* Mutex to protect rdtgroup access. */
+DEFINE_MUTEX(rdtgroup_mutex);
+
 #define domain_init(name) LIST_HEAD_INIT(rdt_resources_all[name].domains)
 
 struct rdt_resource rdt_resources_all[] = {
@@ -142,13 +147,139 @@ static inline bool get_rdt_resources(void)
 	return ret;
 }
 
+static int get_cache_id(int cpu, int level)
+{
+	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
+	int i;
+
+	for (i = 0; i < ci->num_leaves; i++)
+		if (ci->info_list[i].level == level)
+			return ci->info_list[i].id;
+	return -1;
+}
+
+void rdt_cbm_update(void *arg)
+{
+	struct msr_param *m = (struct msr_param *)arg;
+	struct rdt_resource *r = m->res;
+	struct rdt_domain *d;
+	struct list_head *l;
+	int i, cpu = smp_processor_id();
+
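+	/* Find the domain that contains this cpu */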
+	list_for_each(l, &r->domains) {
+		d = list_entry(l, struct rdt_domain, list);
+		if (cpumask_test_cpu(cpu, &d->cpu_mask))
+			goto found;
+	}
+	pr_info_once("cpu %d not found in any domain for resource %s\n",
+		     cpu, r->name);
+	return;
+found:
+	for (i = m->low; i < m->high; i++)
+		wrmsrl(r->msr_base + i, d->cbm[i]);
+}
+
+static void update_domain(int cpu, struct rdt_resource *r, int add)
+{
+	struct list_head *l;
+	struct rdt_domain *d;
+	int i, cache_id;
+
+	cache_id = get_cache_id(cpu, r->cache_level);
+
+	if (cache_id == -1) {
+		pr_info_once("Couldn't find cache id for cpu %d\n", cpu);
+		return;
+	}
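+	/* List is kept sorted by cache id; find a match or the insertion point */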
+	list_for_each(l, &r->domains) {
+		d = list_entry(l, struct rdt_domain, list);
+		if (cache_id == d->id)
+			goto found;
+		if (cache_id < d->id)
+			break;
+	}
+	if (!add) {
+		pr_info_once("removed unknown cpu %d\n", cpu);
+		return;
+	}
+	d = kzalloc(sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return;
+
+	d->id = cache_id;
+	d->cbm = kmalloc_array(r->max_closid, sizeof(*d->cbm), GFP_KERNEL);
+	if (!d->cbm) {
+		pr_info("Failed to alloc CBM array for cpu %d\n", cpu);
+		kfree(d);
+		return;
+	}
+	cpumask_set_cpu(cpu, &d->cpu_mask);
+	for (i = 0; i < r->max_closid; i++) {
+		d->cbm[i] = r->max_cbm;
+		wrmsrl(r->msr_base + i, d->cbm[i]);
+	}
+	list_add_tail(&d->list, l);
+	r->num_domains++;
+	return;
+
+found:
+	if (add) {
+		cpumask_set_cpu(cpu, &d->cpu_mask);
+	} else {
+		cpumask_clear_cpu(cpu, &d->cpu_mask);
+		if (cpumask_empty(&d->cpu_mask)) {
+			r->num_domains--;
+			kfree(d->cbm);
+			list_del(&d->list);
+			kfree(d);
+		}
+	}
+}
+
+static int intel_rdt_online_cpu(unsigned int cpu)
+{
+	struct rdt_resource *r;
+	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+
+	mutex_lock(&rdtgroup_mutex);
+	for_each_rdt_resource(r)
+		update_domain(cpu, r, 1);
+	state->closid = 0;
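+	/* IA32_PQR_ASSOC: RMID in the low half, CLOSID in the high half */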
+	wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, 0);
+	mutex_unlock(&rdtgroup_mutex);
+
+	return 0;
+}
+
+static int intel_rdt_offline_cpu(unsigned int cpu)
+{
+	struct rdt_resource *r;
+
+	mutex_lock(&rdtgroup_mutex);
+	for_each_rdt_resource(r)
+		update_domain(cpu, r, 0);
+	mutex_unlock(&rdtgroup_mutex);
+
+	return 0;
+}
+
 static int __init intel_rdt_late_init(void)
 {
 	struct rdt_resource *r;
+	int state;
 
 	if (!get_rdt_resources())
 		return -ENODEV;
 
+	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+				"AP_INTEL_RDT_ONLINE",
+				intel_rdt_online_cpu, intel_rdt_offline_cpu);
+	if (state < 0)
+		return state;
+
 	for_each_rdt_resource(r)
 		pr_info("Intel RDT %s allocation %s detected\n", r->name,
 			r->cdp_capable ? " (with CDP)" : "");
-- 
2.5.0
