Message-Id: <1435100183-21720-11-git-send-email-vikas.shivappa@linux.intel.com>
Date:	Tue, 23 Jun 2015 15:56:23 -0700
From:	Vikas Shivappa <vikas.shivappa@...ux.intel.com>
To:	linux-kernel@...r.kernel.org
Cc:	x86@...nel.org, hpa@...or.com, tglx@...utronix.de,
	mingo@...nel.org, tj@...nel.org, peterz@...radead.org,
	matt.fleming@...el.com, will.auld@...el.com,
	glenn.p.williamson@...el.com, kanaka.d.juvva@...el.com,
	priya.v.autee@...el.com, vikas.shivappa@...ux.intel.com
Subject: [PATCH 10/10] x86/intel_rdt: Intel Haswell Cache Allocation enumeration

Cache Allocation on Haswell (HSW) needs to be enumerated separately
because HSW has no CPUID enumeration support for Cache Allocation, and
the feature is only present on certain HSW SKUs. This patch probes for
the feature on HSW CPUs by writing a CLOSid (Class of service id) into
the high 32 bits of the IA32_PQR_ASSOC MSR and checking whether the
bits stick; a user-space sketch of the probe follows the "---" line
below. The probe is only run after confirming that the CPU is HSW.
Other HSW-specific quirks are:
 - HSW requires the L3 cache bit mask to be at least two bits wide (a
   standalone sketch of this rule follows the diffstat).
 - The maximum number of CLOSids supported is always 4.
 - The maximum number of bits supported in the cache bit mask is always 20.

Signed-off-by: Vikas Shivappa <vikas.shivappa@...ux.intel.com>
---
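
For checking a machine by hand, here is a minimal user-space sketch of
the same probe (not part of the patch). It assumes the msr driver is
loaded ("modprobe msr") and root privileges; the device path, file name,
and error handling are illustrative, and only the MSR number and the
bit-flip logic mirror the kernel code below.

/* hsw_cat_probe.c - build: gcc -o hsw_cat_probe hsw_cat_probe.c */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_PQR_ASSOC 0xc8f	/* same MSR the kernel probe uses */

int main(void)
{
	uint64_t old, probe, readback;
	int fd = open("/dev/cpu/0/msr", O_RDWR);

	if (fd < 0 || pread(fd, &old, 8, MSR_IA32_PQR_ASSOC) != 8) {
		perror("msr");
		return 1;
	}

	/* Flip the lowest CLOSid bit (bit 32) and see whether it sticks. */
	probe = old ^ (1ULL << 32);
	if (pwrite(fd, &probe, 8, MSR_IA32_PQR_ASSOC) != 8 ||
	    pread(fd, &readback, 8, MSR_IA32_PQR_ASSOC) != 8 ||
	    readback != probe) {
		printf("Cache Allocation: not present\n");
		close(fd);
		return 0;
	}

	printf("Cache Allocation: present\n");
	pwrite(fd, &old, 8, MSR_IA32_PQR_ASSOC);	/* restore old CLOSid */
	close(fd);
	return 0;
}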
 arch/x86/kernel/cpu/intel_rdt.c | 62 +++++++++++++++++++++++++++++++++++++++--
 1 file changed, 59 insertions(+), 3 deletions(-)
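
A standalone sketch (not part of the patch) of the CBM rule enforced in
the hunks below: the cache bitmask must be contiguous and, on HSW, at
least min_bitmask_len = 2 bits wide. cbm_ok() is an illustrative
stand-in for the kernel's cbm_is_contiguous() and bitmap helpers, using
compiler builtins in place of kernel bitops.

/* cbm_check.c - build: gcc -o cbm_check cbm_check.c */
#include <stdbool.h>
#include <stdio.h>

#define MAX_CBM_LENGTH 20	/* matches the kernel constant */

static bool cbm_ok(unsigned long cbm, unsigned int min_bits)
{
	cbm &= (1UL << MAX_CBM_LENGTH) - 1;
	if (!cbm || (unsigned int)__builtin_popcountl(cbm) < min_bits)
		return false;

	/*
	 * Shift out trailing zeros; a contiguous run of ones then
	 * becomes 2^n - 1, so masking with (value + 1) leaves nothing.
	 */
	cbm >>= __builtin_ctzl(cbm);
	return (cbm & (cbm + 1)) == 0;
}

int main(void)
{
	printf("0x3 -> %d\n", cbm_ok(0x3, 2));	/* 1: contiguous, 2 bits */
	printf("0x1 -> %d\n", cbm_ok(0x1, 2));	/* 0: too narrow for HSW */
	printf("0xa -> %d\n", cbm_ok(0xa, 2));	/* 0: non-contiguous */
	return 0;
}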

diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index 24db9a0..e15dd96 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -38,6 +38,11 @@ struct intel_rdt rdt_root_group;
 struct static_key __read_mostly rdt_enable_key = STATIC_KEY_INIT_FALSE;
 
 /*
+ * Minimum number of bits required in the cache bitmask.
+ */
+static unsigned int min_bitmask_len = 1;
+
+/*
  * Mask of CPUs for writing CBM values. We only need one CPU per-socket.
  */
 static cpumask_t rdt_cpumask;
@@ -45,6 +50,56 @@ static cpumask_t rdt_cpumask;
 #define rdt_for_each_child(pos_css, parent_ir)		\
 	css_for_each_child((pos_css), &(parent_ir)->css)
 
+/*
+ * cache_alloc_hsw_probe() - Probe for Cache Allocation on Intel Haswell
+ * CPUs, which lack CPUID enumeration support for the feature.
+ *
+ * Probes by writing to the high 32 bits (the CLOSid) of IA32_PQR_ASSOC
+ * and testing whether the bits stick.  On success, hardcode the maximum
+ * CLOSid and maximum bitmask length for HSW.  The minimum cache bitmask
+ * length allowed for HSW is 2 bits.
+ */
+static inline bool cache_alloc_hsw_probe(void)
+{
+	u32 l, h_old, h_new, h_tmp;
+
+	if (rdmsr_safe(MSR_IA32_PQR_ASSOC, &l, &h_old))
+		return false;
+
+	/*
+	 * The CLOSid bits default to 0 if the feature is present; flip bit 0.
+	 */
+	h_tmp = h_old ^ 0x1U;
+	if (wrmsr_safe(MSR_IA32_PQR_ASSOC, l, h_tmp) ||
+	    rdmsr_safe(MSR_IA32_PQR_ASSOC, &l, &h_new))
+		return false;
+
+	if (h_tmp != h_new)
+		return false;
+
+	wrmsr_safe(MSR_IA32_PQR_ASSOC, l, h_old);
+
+	boot_cpu_data.x86_cache_max_closid = 4;
+	boot_cpu_data.x86_cache_max_cbm_len = 20;
+	min_bitmask_len = 2;
+
+	return true;
+}
+
+static inline bool cache_alloc_supported(struct cpuinfo_x86 *c)
+{
+	if (cpu_has(c, X86_FEATURE_CAT_L3))
+		return true;
+
+	/*
+	 * Probe Haswell server CPUs (family 6, model 63) directly.
+	 */
+	if (c->x86 == 0x6 && c->x86_model == 0x3f)
+		return cache_alloc_hsw_probe();
+
+	return false;
+}
+
 static inline void closid_get(u32 closid)
 {
 	struct clos_cbm_map *ccm = &ccmap[closid];
@@ -155,7 +210,7 @@ static inline bool cbm_is_contiguous(unsigned long var)
 	unsigned long maxcbm = MAX_CBM_LENGTH;
 	unsigned long first_bit, zero_bit;
 
-	if (!var)
+	if (bitmap_weight(&var, maxcbm) < min_bitmask_len)
 		return false;
 
 	first_bit = find_next_bit(&var, maxcbm, 0);
@@ -175,7 +230,8 @@ static int cbm_validate(struct intel_rdt *ir, unsigned long cbmvalue)
 	int err = 0;
 
 	if (!cbm_is_contiguous(cbmvalue)) {
-		pr_err("bitmask should have >= 1 bit and be contiguous\n");
+		pr_err("bitmask should have >=%d bits and be contiguous\n",
+			 min_bitmask_len);
 		err = -EINVAL;
 		goto out_err;
 	}
@@ -402,7 +458,7 @@ static int __init intel_rdt_late_init(void)
 	int err = 0, i;
 	size_t sizeb;
 
-	if (!cpu_has(c, X86_FEATURE_CAT_L3)) {
+	if (!cache_alloc_supported(c)) {
 		rdt_root_group.css.ss->disabled = 1;
 		return -ENODEV;
 	}
-- 
1.9.1
