Message-Id: <1430530601-16319-7-git-send-email-vikas.shivappa@linux.intel.com>
Date: Fri, 1 May 2015 18:36:40 -0700
From: Vikas Shivappa <vikas.shivappa@...ux.intel.com>
To: vikas.shivappa@...el.com
Cc: linux-kernel@...r.kernel.org, x86@...nel.org, hpa@...or.com,
tglx@...utronix.de, mingo@...nel.org, tj@...nel.org,
peterz@...radead.org, matt.fleming@...el.com, will.auld@...el.com,
peter.zijlstra@...el.com, h.peter.anvin@...el.com,
kanaka.d.juvva@...el.com, vikas.shivappa@...ux.intel.com
Subject: [PATCH 6/7] x86/intel_rdt: Intel haswell CAT enumeration
CAT (Cache Allocation Technology) on HSW needs to be enumerated
separately: it is only supported on certain HSW SKUs, and HSW has no
CPUID enumeration support for CAT. This patch probes for the feature on
HSW CPUs by writing a CLOSid into the high 32 bits of the
IA32_PQR_ASSOC MSR and checking whether the bits stick. The probe is
only done after confirming that the CPU is HSW.
HSW also requires the L3 cache bitmask to be at least two bits long.
Signed-off-by: Vikas Shivappa <vikas.shivappa@...ux.intel.com>
---
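Illustration only, not part of the patch: a minimal userspace sketch of
the same probe sequence (read IA32_PQR_ASSOC, flip the low CLOSid bit,
read it back, restore), going through the msr driver. It assumes root
privileges and that /dev/cpu/0/msr exists (msr module loaded). On parts
without CAT the write either faults or the bits do not stick, which is
why hsw_probetest() below checks both the safe-write return value and
the read-back value.

#define _XOPEN_SOURCE 700
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_PQR_ASSOC 0xc8f

int main(void)
{
	uint64_t old, test, back;
	int fd = open("/dev/cpu/0/msr", O_RDWR);

	if (fd < 0 || pread(fd, &old, 8, MSR_IA32_PQR_ASSOC) != 8)
		return 1;

	/* Flip the lowest bit of the CLOSid field (bits 63:32). */
	test = old ^ (1ULL << 32);
	if (pwrite(fd, &test, 8, MSR_IA32_PQR_ASSOC) != 8 ||
	    pread(fd, &back, 8, MSR_IA32_PQR_ASSOC) != 8) {
		close(fd);
		return 1;
	}

	/* Restore the original value before reporting. */
	pwrite(fd, &old, 8, MSR_IA32_PQR_ASSOC);
	printf("CLOSid bits %s\n",
	       back == test ? "stick: CAT looks present" : "do not stick: no CAT");
	close(fd);
	return 0;
}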
arch/x86/kernel/cpu/intel_rdt.c | 56 ++++++++++++++++++++++++++++++++++++++---
1 file changed, 53 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index 9da61b2..4c12e5b 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -38,6 +38,11 @@ struct static_key __read_mostly rdt_enable_key = STATIC_KEY_INIT_FALSE;
DEFINE_PER_CPU(unsigned int, x86_cpu_clos);
/*
+ * Minimum bits required in cache bitmasks.
+ */
+static unsigned int min_bitmask_len = 1;
+
+/*
* Mask of CPUs for writing CBM values. We only need one per-socket.
*/
static cpumask_t rdt_cpumask;
@@ -45,11 +50,54 @@ static cpumask_t rdt_cpumask;
#define rdt_for_each_child(pos_css, parent_ir) \
css_for_each_child((pos_css), &(parent_ir)->css)
+/*
+ * hsw_probetest() - Probe test for Intel Haswell CPUs, which have no
+ * CPUID enumeration support for CAT.
+ *
+ * Probes by writing a CLOSid into the high 32 bits of the
+ * IA32_PQR_ASSOC MSR and testing whether the bits stick.
+ * On success, hardcode the max CLOSids and max bitmask length for hsw.
+ * The minimum cache bitmask length allowed for HSW is 2 bits.
+ */
+static inline bool hsw_probetest(void)
+{
+ u32 l, h_old, h_new, h_tmp;
+
+ if (rdmsr_safe(MSR_IA32_PQR_ASSOC, &l, &h_old))
+ return false;
+
+ /*
+ * Default value is always 0 if feature is present.
+ */
+ h_tmp = h_old ^ 0x1U;
+ if (wrmsr_safe(MSR_IA32_PQR_ASSOC, l, h_tmp) ||
+ rdmsr_safe(MSR_IA32_PQR_ASSOC, &l, &h_new))
+ return false;
+
+ if (h_tmp != h_new)
+ return false;
+
+ wrmsr_safe(MSR_IA32_PQR_ASSOC, l, h_old);
+
+ boot_cpu_data.x86_cat_closs = 4;
+ boot_cpu_data.x86_cat_cbmlength = 20;
+ min_bitmask_len = 2;
+
+ return true;
+}
+
static inline bool cat_supported(struct cpuinfo_x86 *c)
{
if (cpu_has(c, X86_FEATURE_CAT_L3))
return true;
+ /*
+ * Probe test for Haswell CPUs.
+ */
+ if (c->x86 == 0x6 && c->x86_model == 0x3f)
+ return hsw_probetest();
+
return false;
}
@@ -153,7 +201,7 @@ static inline bool cbm_is_contiguous(unsigned long var)
unsigned long first_bit, zero_bit;
unsigned long maxcbm = MAX_CBM_LENGTH;
- if (!var)
+ if (bitmap_weight(&var, maxcbm) < min_bitmask_len)
return false;
first_bit = find_next_bit(&var, maxcbm, 0);
@@ -180,7 +228,8 @@ static int validate_cbm(struct intel_rdt *ir, unsigned long cbmvalue)
unsigned long *cbm_tmp;
if (!cbm_is_contiguous(cbmvalue)) {
- pr_err("bitmask should have >= 1 bits and be contiguous\n");
+ pr_err("bitmask should have >=%d bits and be contiguous\n",
+ min_bitmask_len);
return -EINVAL;
}
@@ -236,7 +285,8 @@ static void __cpu_cbm_update(void *info)
}
/*
- * cbm_update_all() - Update the cache bit mask for all packages.
+ * cbm_update_all() - Update the cache bit mask for
+ * all packages.
*/
static inline void cbm_update_all(unsigned int closid)
{
--
1.9.1