Message-ID: <tip-bf926731e1585ccad029ca2fad1444fee082b78d@git.kernel.org>
Date:	Wed, 27 May 2015 03:04:25 -0700
From:	tip-bot for Thomas Gleixner <tipbot@...or.com>
To:	linux-tip-commits@...r.kernel.org
Cc:	tglx@...utronix.de, linux-kernel@...r.kernel.org,
	peterz@...radead.org, torvalds@...ux-foundation.org,
	kanaka.d.juvva@...el.com, vikas.shivappa@...ux.intel.com,
	will.auld@...el.com, hpa@...or.com, matt.fleming@...el.com,
	mingo@...nel.org
Subject: [tip:perf/core] perf/x86/intel/cqm: Add storage for 'closid'
  and clean up 'struct intel_pqr_state'

Commit-ID:  bf926731e1585ccad029ca2fad1444fee082b78d
Gitweb:     http://git.kernel.org/tip/bf926731e1585ccad029ca2fad1444fee082b78d
Author:     Thomas Gleixner <tglx@...utronix.de>
AuthorDate: Tue, 19 May 2015 00:00:58 +0000
Committer:  Ingo Molnar <mingo@...nel.org>
CommitDate: Wed, 27 May 2015 09:17:41 +0200

perf/x86/intel/cqm: Add storage for 'closid' and clean up 'struct intel_pqr_state'

'closid' (CLass Of Service ID) is used for the class-based Cache
Allocation Technology (CAT). Add explicit storage for it to the per-CPU
cache so it can be used later by the CAT support (this requires moving
the per-CPU data).

While at it:

 - Rename the structure to 'intel_pqr_state', which reflects the actual
   purpose of the struct: it caches the values that go into the PQR MSR.

 - Rename 'cnt' to 'rmid_usecnt', which reflects the actual purpose of
   the counter.

 - Document the structure and the struct members.

Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Acked-by: Matt Fleming <matt.fleming@...el.com>
Cc: Kanaka Juvva <kanaka.d.juvva@...el.com>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Vikas Shivappa <vikas.shivappa@...ux.intel.com>
Cc: Will Auld <will.auld@...el.com>
Link: http://lkml.kernel.org/r/20150518235150.240899319@linutronix.de
Signed-off-by: Ingo Molnar <mingo@...nel.org>
---
 arch/x86/kernel/cpu/perf_event_intel_cqm.c | 50 ++++++++++++++++--------------
 1 file changed, 27 insertions(+), 23 deletions(-)
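
A minimal sketch of the access pattern the patch relies on (illustration
only, not part of the diff below): per the new struct documentation, the
upper 32 bits of MSR_IA32_PQR_ASSOC carry the closid and the lower 10 bits
the rmid, and the MSR is always written as a whole, so both cached values
are needed on every update. The helper name pqr_update_rmid() is
hypothetical; the actual patch does the write inline in
intel_cqm_event_start() and intel_cqm_event_stop().

struct intel_pqr_state {
	u32	rmid;		/* cached Resource Monitoring ID */
	u32	closid;		/* cached Class Of Service ID */
	int	rmid_usecnt;	/* users of the current rmid on this CPU */
};

static DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);

/* Hypothetical helper: switch the RMID while preserving the cached closid. */
static void pqr_update_rmid(u32 rmid)
{
	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);

	state->rmid = rmid;
	/* low 32 bits carry the rmid, high 32 bits the closid */
	wrmsr(MSR_IA32_PQR_ASSOC, rmid, state->closid);
}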

diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
index 8241b64..8233b29 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -16,18 +16,32 @@
 static unsigned int cqm_max_rmid = -1;
 static unsigned int cqm_l3_scale; /* supposedly cacheline size */
 
-struct intel_cqm_state {
+/**
+ * struct intel_pqr_state - State cache for the PQR MSR
+ * @rmid:		The cached Resource Monitoring ID
+ * @closid:		The cached Class Of Service ID
+ * @rmid_usecnt:	The usage counter for rmid
+ *
+ * The upper 32 bits of MSR_IA32_PQR_ASSOC contain closid and the
+ * lower 10 bits rmid. The update to MSR_IA32_PQR_ASSOC always
+ * contains both parts, so we need to cache them.
+ *
+ * The cache also helps to avoid pointless updates if the value does
+ * not change.
+ */
+struct intel_pqr_state {
 	u32			rmid;
-	int			cnt;
+	u32			closid;
+	int			rmid_usecnt;
 };
 
 /*
- * The cached intel_cqm_state is strictly per CPU and can never be
+ * The cached intel_pqr_state is strictly per CPU and can never be
  * updated from a remote CPU. Both functions which modify the state
  * (intel_cqm_event_start and intel_cqm_event_stop) are called with
  * interrupts disabled, which is sufficient for the protection.
  */
-static DEFINE_PER_CPU(struct intel_cqm_state, cqm_state);
+static DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
 
 /*
  * Protects cache_cgroups and cqm_rmid_free_lru and cqm_rmid_limbo_lru.
@@ -966,7 +980,7 @@ out:
 
 static void intel_cqm_event_start(struct perf_event *event, int mode)
 {
-	struct intel_cqm_state *state = this_cpu_ptr(&cqm_state);
+	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
 	u32 rmid = event->hw.cqm_rmid;
 
 	if (!(event->hw.cqm_state & PERF_HES_STOPPED))
@@ -974,7 +988,7 @@ static void intel_cqm_event_start(struct perf_event *event, int mode)
 
 	event->hw.cqm_state &= ~PERF_HES_STOPPED;
 
-	if (state->cnt++) {
+	if (state->rmid_usecnt++) {
 		if (!WARN_ON_ONCE(state->rmid != rmid))
 			return;
 	} else {
@@ -982,17 +996,12 @@ static void intel_cqm_event_start(struct perf_event *event, int mode)
 	}
 
 	state->rmid = rmid;
-	/*
-	 * This is actually wrong, as the upper 32 bit MSR contain the
-	 * closid which is used for configuring the Cache Allocation
-	 * Technology component.
-	 */
-	wrmsr(MSR_IA32_PQR_ASSOC, rmid, 0);
+	wrmsr(MSR_IA32_PQR_ASSOC, rmid, state->closid);
 }
 
 static void intel_cqm_event_stop(struct perf_event *event, int mode)
 {
-	struct intel_cqm_state *state = this_cpu_ptr(&cqm_state);
+	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
 
 	if (event->hw.cqm_state & PERF_HES_STOPPED)
 		return;
@@ -1001,15 +1010,9 @@ static void intel_cqm_event_stop(struct perf_event *event, int mode)
 
 	intel_cqm_event_read(event);
 
-	if (!--state->cnt) {
+	if (!--state->rmid_usecnt) {
 		state->rmid = 0;
-		/*
-		 * This is actually wrong, as the upper 32 bit of the
-		 * MSR contain the closid which is used for
-		 * configuring the Cache Allocation Technology
-		 * component.
-		 */
-		wrmsr(MSR_IA32_PQR_ASSOC, 0, 0);
+		wrmsr(MSR_IA32_PQR_ASSOC, 0, state->closid);
 	} else {
 		WARN_ON_ONCE(!state->rmid);
 	}
@@ -1247,11 +1250,12 @@ static inline void cqm_pick_event_reader(int cpu)
 
 static void intel_cqm_cpu_prepare(unsigned int cpu)
 {
-	struct intel_cqm_state *state = &per_cpu(cqm_state, cpu);
+	struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 	state->rmid = 0;
-	state->cnt  = 0;
+	state->closid = 0;
+	state->rmid_usecnt = 0;
 
 	WARN_ON(c->x86_cache_max_rmid != cqm_max_rmid);
 	WARN_ON(c->x86_cache_occ_scale != cqm_l3_scale);
--