Message-Id: <20160601174220.3CDFB90E@viggo.jf.intel.com>
Date:	Wed, 01 Jun 2016 10:42:20 -0700
From:	Dave Hansen <dave@...1.net>
To:	linux-kernel@...r.kernel.org
Cc:	x86@...nel.org, Dave Hansen <dave@...1.net>,
	dave.hansen@...ux.intel.com, mingo@...nel.org
Subject: [PATCH] x86, fpu: add tracepoints to dump FPU state at key points


I posted this a few months ago, and Ingo sounded like he'd be
willing to merge this.  So here goes.

--
From: Dave Hansen <dave.hansen@...ux.intel.com>

I've been carrying this patch around for a bit and it's helped me
solve at least a couple of FPU-related bugs.  In addition to using
it for debugging, I also dragged it out because using AVX (and
AVX2/AVX-512) can have serious power consequences for a modern
core.  It's very important to be able to figure out who is using
it.

It's also insanely useful to go out and see who is using a given
feature, like MPX or Memory Protection Keys.  If you, for
instance, want to find all processes using protection keys, you
can do:

	echo 'xfeatures & 0x200' > filter

Here, 0x200 is the protection keys feature bit.
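
For completeness, a sketch of the whole tracefs workflow this
enables (assuming tracefs is mounted at /sys/kernel/debug/tracing;
the event and field names are the ones added by this patch):

	# Enable one of the new events, e.g. register activation:
	cd /sys/kernel/debug/tracing
	echo 1 > events/x86_fpu/x86_fpu_regs_activated/enable

	# Only report tasks whose xsave state includes protection
	# keys (xfeatures bit 9, i.e. 0x200):
	echo 'xfeatures & 0x200' > events/x86_fpu/x86_fpu_regs_activated/filter

	# Watch the events stream in:
	cat trace_pipe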

Note that this touches the KVM code.  KVM did a CREATE_TRACE_POINTS
and then included a bunch of random headers.  If any one of those
had included other tracepoint headers, it would have defined the
*OTHER* tracepoints.  That's bogus, so move it to the right place.
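
To make the hazard concrete, a minimal sketch of the include
ordering (illustrative, not the literal x86.c contents).
define_trace.h leaves CREATE_TRACE_POINTS defined after processing
a header so that one file can create events from several headers,
which is exactly what bites here:

	#define CREATE_TRACE_POINTS
	#include "trace.h"            /* intended: define KVM's events */
	#include <asm/fpu/internal.h> /* now pulls in <asm/trace/fpu.h>
	                               * and would emit a second copy of
	                               * the x86_fpu tracepoint
	                               * definitions, clashing with the
	                               * ones kernel/fpu/core.c creates */

	/* The fix, as in the x86.c hunk below: include everything else
	 * first, and CREATE_TRACE_POINTS only right before the one
	 * trace header this file owns: */
	#include <asm/fpu/internal.h>
	#define CREATE_TRACE_POINTS
	#include "trace.h"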

Signed-off-by: Dave Hansen <dave.hansen@...ux.intel.com>
Cc: Ingo Molnar <mingo@...nel.org>
---

 b/arch/x86/include/asm/fpu/internal.h |    5 +
 b/arch/x86/include/asm/trace/fpu.h    |  119 ++++++++++++++++++++++++++++++++++
 b/arch/x86/kernel/fpu/core.c          |   18 +++++
 b/arch/x86/kernel/fpu/signal.c        |    3 
 b/arch/x86/kvm/x86.c                  |    6 -
 5 files changed, 148 insertions(+), 3 deletions(-)

diff -puN arch/x86/include/asm/fpu/internal.h~fpu-trace-v1 arch/x86/include/asm/fpu/internal.h
--- a/arch/x86/include/asm/fpu/internal.h~fpu-trace-v1	2016-06-01 10:35:58.216193182 -0700
+++ b/arch/x86/include/asm/fpu/internal.h	2016-06-01 10:35:58.231193860 -0700
@@ -18,6 +18,7 @@
 #include <asm/fpu/api.h>
 #include <asm/fpu/xstate.h>
 #include <asm/cpufeature.h>
+#include <asm/trace/fpu.h>
 
 /*
  * High level FPU state handling functions:
@@ -524,6 +525,7 @@ static inline void __fpregs_deactivate(s
 
 	fpu->fpregs_active = 0;
 	this_cpu_write(fpu_fpregs_owner_ctx, NULL);
+	trace_x86_fpu_regs_deactivated(fpu);
 }
 
 /* Must be paired with a 'clts' (fpregs_activate_hw()) before! */
@@ -533,6 +535,7 @@ static inline void __fpregs_activate(str
 
 	fpu->fpregs_active = 1;
 	this_cpu_write(fpu_fpregs_owner_ctx, fpu);
+	trace_x86_fpu_regs_activated(fpu);
 }
 
 /*
@@ -604,11 +607,13 @@ switch_fpu_prepare(struct fpu *old_fpu,
 
 		/* But leave fpu_fpregs_owner_ctx! */
 		old_fpu->fpregs_active = 0;
+		trace_x86_fpu_regs_deactivated(old_fpu);
 
 		/* Don't change CR0.TS if we just switch! */
 		if (fpu.preload) {
 			new_fpu->counter++;
 			__fpregs_activate(new_fpu);
+			trace_x86_fpu_regs_activated(new_fpu);
 			prefetch(&new_fpu->state);
 		} else {
 			__fpregs_deactivate_hw();
diff -puN arch/x86/include/asm/trace/fpu.h~fpu-trace-v1 arch/x86/include/asm/trace/fpu.h
--- a/arch/x86/include/asm/trace/fpu.h~fpu-trace-v1	2016-06-01 10:35:58.219193317 -0700
+++ b/arch/x86/include/asm/trace/fpu.h	2016-06-01 10:35:58.231193860 -0700
@@ -0,0 +1,119 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM x86_fpu
+
+#if !defined(_TRACE_FPU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FPU_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(x86_fpu,
+	TP_PROTO(struct fpu *fpu),
+	TP_ARGS(fpu),
+
+	TP_STRUCT__entry(
+		__field(struct fpu *, fpu)
+		__field(bool, fpregs_active)
+		__field(bool, fpstate_active)
+		__field(int, counter)
+		__field(u64, xfeatures)
+		__field(u64, xcomp_bv)
+		),
+
+	TP_fast_assign(
+		__entry->fpu		= fpu;
+		__entry->fpregs_active	= fpu->fpregs_active;
+		__entry->fpstate_active	= fpu->fpstate_active;
+		__entry->counter	= fpu->counter;
+		if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
+			__entry->xfeatures = fpu->state.xsave.header.xfeatures;
+			__entry->xcomp_bv  = fpu->state.xsave.header.xcomp_bv;
+		}
+	),
+	TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d counter: %d xfeatures: %llx xcomp_bv: %llx",
+			__entry->fpu,
+			__entry->fpregs_active,
+			__entry->fpstate_active,
+			__entry->counter,
+			__entry->xfeatures,
+			__entry->xcomp_bv
+	)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_state,
+	TP_PROTO(struct fpu *fpu),
+	TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_before_save,
+	TP_PROTO(struct fpu *fpu),
+	TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_after_save,
+	TP_PROTO(struct fpu *fpu),
+	TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_before_restore,
+	TP_PROTO(struct fpu *fpu),
+	TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_after_restore,
+	TP_PROTO(struct fpu *fpu),
+	TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_regs_activated,
+	TP_PROTO(struct fpu *fpu),
+	TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_regs_deactivated,
+	TP_PROTO(struct fpu *fpu),
+	TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_activate_state,
+	TP_PROTO(struct fpu *fpu),
+	TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_deactivate_state,
+	TP_PROTO(struct fpu *fpu),
+	TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_init_state,
+	TP_PROTO(struct fpu *fpu),
+	TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_dropped,
+	TP_PROTO(struct fpu *fpu),
+	TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_copy_src,
+	TP_PROTO(struct fpu *fpu),
+	TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_copy_dst,
+	TP_PROTO(struct fpu *fpu),
+	TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_xstate_check_failed,
+	TP_PROTO(struct fpu *fpu),
+	TP_ARGS(fpu)
+);
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH asm/trace/
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE fpu
+#endif /* _TRACE_FPU_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff -puN arch/x86/kernel/fpu/core.c~fpu-trace-v1 arch/x86/kernel/fpu/core.c
--- a/arch/x86/kernel/fpu/core.c~fpu-trace-v1	2016-06-01 10:35:58.221193408 -0700
+++ b/arch/x86/kernel/fpu/core.c	2016-06-01 10:35:58.232193905 -0700
@@ -12,6 +12,9 @@
 
 #include <linux/hardirq.h>
 
+#define CREATE_TRACE_POINTS
+#include <asm/trace/fpu.h>
+
 /*
  * Represents the initial FPU state. It's mostly (but not completely) zeroes,
  * depending on the FPU hardware format:
@@ -192,6 +195,7 @@ void fpu__save(struct fpu *fpu)
 	WARN_ON_FPU(fpu != &current->thread.fpu);
 
 	preempt_disable();
+	trace_x86_fpu_before_save(fpu);
 	if (fpu->fpregs_active) {
 		if (!copy_fpregs_to_fpstate(fpu)) {
 			if (use_eager_fpu())
@@ -200,6 +204,7 @@ void fpu__save(struct fpu *fpu)
 				fpregs_deactivate(fpu);
 		}
 	}
+	trace_x86_fpu_after_save(fpu);
 	preempt_enable();
 }
 EXPORT_SYMBOL_GPL(fpu__save);
@@ -275,6 +280,9 @@ int fpu__copy(struct fpu *dst_fpu, struc
 	}
 	preempt_enable();
 
+	trace_x86_fpu_copy_src(src_fpu);
+	trace_x86_fpu_copy_dst(dst_fpu);
+
 	return 0;
 }
 
@@ -288,7 +296,9 @@ void fpu__activate_curr(struct fpu *fpu)
 
 	if (!fpu->fpstate_active) {
 		fpstate_init(&fpu->state);
+		trace_x86_fpu_init_state(fpu);
 
+		trace_x86_fpu_activate_state(fpu);
 		/* Safe to do for the current task: */
 		fpu->fpstate_active = 1;
 	}
@@ -314,7 +324,9 @@ void fpu__activate_fpstate_read(struct f
 	} else {
 		if (!fpu->fpstate_active) {
 			fpstate_init(&fpu->state);
+			trace_x86_fpu_init_state(fpu);
 
+			trace_x86_fpu_activate_state(fpu);
 			/* Safe to do for current and for stopped child tasks: */
 			fpu->fpstate_active = 1;
 		}
@@ -347,7 +359,9 @@ void fpu__activate_fpstate_write(struct
 		fpu->last_cpu = -1;
 	} else {
 		fpstate_init(&fpu->state);
+		trace_x86_fpu_init_state(fpu);
 
+		trace_x86_fpu_activate_state(fpu);
 		/* Safe to do for stopped child tasks: */
 		fpu->fpstate_active = 1;
 	}
@@ -432,9 +446,11 @@ void fpu__restore(struct fpu *fpu)
 
 	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
 	kernel_fpu_disable();
+	trace_x86_fpu_before_restore(fpu);
 	fpregs_activate(fpu);
 	copy_kernel_to_fpregs(&fpu->state);
 	fpu->counter++;
+	trace_x86_fpu_after_restore(fpu);
 	kernel_fpu_enable();
 }
 EXPORT_SYMBOL_GPL(fpu__restore);
@@ -463,6 +479,8 @@ void fpu__drop(struct fpu *fpu)
 
 	fpu->fpstate_active = 0;
 
+	trace_x86_fpu_dropped(fpu);
+
 	preempt_enable();
 }
 
diff -puN arch/x86/kernel/fpu/signal.c~fpu-trace-v1 arch/x86/kernel/fpu/signal.c
--- a/arch/x86/kernel/fpu/signal.c~fpu-trace-v1	2016-06-01 10:35:58.223193498 -0700
+++ b/arch/x86/kernel/fpu/signal.c	2016-06-01 10:35:58.233193951 -0700
@@ -10,6 +10,7 @@
 #include <asm/fpu/regset.h>
 
 #include <asm/sigframe.h>
+#include <asm/trace/fpu.h>
 
 static struct _fpx_sw_bytes fx_sw_reserved, fx_sw_reserved_ia32;
 
@@ -282,6 +283,7 @@ static int __fpu__restore_sig(void __use
 			 */
 			state_size = sizeof(struct fxregs_state);
 			fx_only = 1;
+			trace_x86_fpu_xstate_check_failed(fpu);
 		} else {
 			state_size = fx_sw_user.xstate_size;
 			xfeatures = fx_sw_user.xfeatures;
@@ -311,6 +313,7 @@ static int __fpu__restore_sig(void __use
 		if (__copy_from_user(&fpu->state.xsave, buf_fx, state_size) ||
 		    __copy_from_user(&env, buf, sizeof(env))) {
 			fpstate_init(&fpu->state);
+			trace_x86_fpu_init_state(fpu);
 			err = -1;
 		} else {
 			sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
diff -puN arch/x86/kvm/x86.c~fpu-trace-v1 arch/x86/kvm/x86.c
--- a/arch/x86/kvm/x86.c~fpu-trace-v1	2016-06-01 10:35:58.226193634 -0700
+++ b/arch/x86/kvm/x86.c	2016-06-01 10:35:58.237194131 -0700
@@ -55,9 +55,6 @@
 #include <linux/irqbypass.h>
 #include <trace/events/kvm.h>
 
-#define CREATE_TRACE_POINTS
-#include "trace.h"
-
 #include <asm/debugreg.h>
 #include <asm/msr.h>
 #include <asm/desc.h>
@@ -68,6 +65,9 @@
 #include <asm/div64.h>
 #include <asm/irq_remapping.h>
 
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
 #define MAX_IO_MSRS 256
 #define KVM_MAX_MCE_BANKS 32
 #define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)
_
