Date:	Thu, 23 Jun 2016 10:43:53 +0200
From:	Peter Zijlstra <peterz@...radead.org>
To:	David Carrillo-Cisneros <davidcc@...gle.com>
Cc:	linux-kernel@...r.kernel.org, "x86@...nel.org" <x86@...nel.org>,
	Ingo Molnar <mingo@...hat.com>,
	Andi Kleen <ak@...ux.intel.com>,
	Kan Liang <kan.liang@...el.com>
Subject: Re: [PATCH v02 4/5] perf/x86/intel: MSR_LAST_BRANCH_FROM_x quirk for
 ctx switch

On Tue, Jun 21, 2016 at 11:31:13AM -0700, David Carrillo-Cisneros wrote:
> @@ -297,7 +312,8 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
>  	tos = task_ctx->tos;
>  	for (i = 0; i < tos; i++) {
>  		lbr_idx = (tos - i) & mask;
> -		wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
> +		wrmsrl(x86_pmu.lbr_from + lbr_idx,
> +			lbr_from_signext_quirk_wr(task_ctx->lbr_from[i]));
>  		wrmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
>  		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
>  			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
> @@ -321,7 +337,8 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
>  	tos = intel_pmu_lbr_tos();
>  	for (i = 0; i < tos; i++) {
>  		lbr_idx = (tos - i) & mask;
> -		rdmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
> +		rdmsrl(x86_pmu.lbr_from + lbr_idx, val);
> +		task_ctx->lbr_from[i] = lbr_from_signext_quirk_rd(val);
>  		rdmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
>  		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
>  			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
> @@ -499,6 +516,8 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
>  		int lbr_flags = lbr_desc[lbr_format];
>  
>  		rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
> +		from = lbr_from_signext_quirk_rd(from);
> +
>  		rdmsrl(x86_pmu.lbr_to   + lbr_idx, to);
>  
>  		if (lbr_format == LBR_FORMAT_INFO && need_info) {


I did this on top, pulling the quirk into small rdlbr_*()/wrlbr_*() helpers so the call sites stay readable:


--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -288,12 +288,42 @@ inline u64 lbr_from_signext_quirk_wr(u64
 
 u64 lbr_from_signext_quirk_rd(u64 val)
 {
-	if (static_branch_unlikely(&lbr_from_quirk_key))
+	if (static_branch_unlikely(&lbr_from_quirk_key)) {
 		/*
 		 * Quirk is on when TSX is not enabled. Therefore TSX
 		 * flags must be read as OFF.
 		 */
 		val &= ~(LBR_FROM_FLAG_IN_TX | LBR_FROM_FLAG_ABORT);
+	}
+	return val;
+}
+
+static inline void wrlbr_from(unsigned int idx, u64 val)
+{
+	val = lbr_from_signext_quirk_wr(val);
+	wrmsrl(x86_pmu.lbr_from + idx, val);
+}
+
+static inline void wrlbr_to(unsigned int idx, u64 val)
+{
+	wrmsrl(x86_pmu.lbr_to + idx, val);
+}
+
+static inline u64 rdlbr_from(unsigned int idx)
+{
+	u64 val;
+
+	rdmsrl(x86_pmu.lbr_from + idx, val);
+
+	return lbr_from_signext_quirk_rd(val);
+}
+
+static inline u64 rdlbr_to(unsigned int idx)
+{
+	u64 val;
+
+	rdmsrl(x86_pmu.lbr_to + idx, val);
+
 	return val;
 }
 
@@ -313,9 +343,9 @@ static void __intel_pmu_lbr_restore(stru
 	tos = task_ctx->tos;
 	for (i = 0; i < tos; i++) {
 		lbr_idx = (tos - i) & mask;
-		wrmsrl(x86_pmu.lbr_from + lbr_idx,
-			lbr_from_signext_quirk_wr(task_ctx->lbr_from[i]));
-		wrmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
+		wrlbr_from(lbr_idx, task_ctx->lbr_from[i]);
+		wrlbr_to  (lbr_idx, task_ctx->lbr_to[i]);
+
 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
 			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
 	}
@@ -325,9 +355,9 @@ static void __intel_pmu_lbr_restore(stru
 
 static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
 {
-	int i;
 	unsigned lbr_idx, mask;
-	u64 tos, val;
+	u64 tos;
+	int i;
 
 	if (task_ctx->lbr_callstack_users == 0) {
 		task_ctx->lbr_stack_state = LBR_NONE;
@@ -338,9 +368,8 @@ static void __intel_pmu_lbr_save(struct
 	tos = intel_pmu_lbr_tos();
 	for (i = 0; i < tos; i++) {
 		lbr_idx = (tos - i) & mask;
-		rdmsrl(x86_pmu.lbr_from + lbr_idx, val);
-		task_ctx->lbr_from[i] = lbr_from_signext_quirk_rd(val);
-		rdmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
+		task_ctx->lbr_from[i] = rdlbr_from(lbr_idx);
+		task_ctx->lbr_to[i]   = rdlbr_to(lbr_idx);
 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
 			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
 	}
@@ -516,10 +545,8 @@ static void intel_pmu_lbr_read_64(struct
 		u16 cycles = 0;
 		int lbr_flags = lbr_desc[lbr_format];
 
-		rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
-		from = lbr_from_signext_quirk_rd(from);
-
-		rdmsrl(x86_pmu.lbr_to   + lbr_idx, to);
+		from = rdlbr_from(lbr_idx);
+		to   = rdlbr_to(lbr_idx);
 
 		if (lbr_format == LBR_FORMAT_INFO && need_info) {
 			u64 info;
