Date:	Tue,  5 May 2015 19:49:36 +0200
From:	Ingo Molnar <mingo@...nel.org>
To:	linux-kernel@...r.kernel.org
Cc:	Andy Lutomirski <luto@...capital.net>,
	Borislav Petkov <bp@...en8.de>,
	Dave Hansen <dave.hansen@...ux.intel.com>,
	Fenghua Yu <fenghua.yu@...el.com>,
	"H. Peter Anvin" <hpa@...or.com>,
	Linus Torvalds <torvalds@...ux-foundation.org>,
	Oleg Nesterov <oleg@...hat.com>,
	Thomas Gleixner <tglx@...utronix.de>
Subject: [PATCH 084/208] x86/fpu: Rename xsave.header::xstate_bv to 'xfeatures'

'xsave.header::xstate_bv' is a misnomer - what does 'bv' stand for?

It probably comes from the 'XGETBV' instruction name, but I could not
find the origin of that abbreviation in the Intel documentation. It
could mean 'bit vector' - or something else?

But how about - instead of guessing at a weird name - we name the
field in an obvious and descriptive way that tells us exactly what
it does?

So rename it to 'xfeatures', which is a bitmask of the xfeatures
that are in use (fpstate_active) in that context structure.

An eyesore like:

           fpu->state->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;

is now much more readable:

           fpu->state->xsave.header.xfeatures |= XSTATE_FP;

This form is not just infinitely more readable, it is also shorter.
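
For reference, a minimal sketch (not part of this patch, just an
illustration using the names as they exist after the rename; per the
existing comments, XSTATE_FP is bit 0 and XSTATE_SSE is bit 1) of how
the renamed bitmask is typically tested and updated:

           struct xsave_struct *xsave = &fpu->state->xsave;

           /* Bit clear => that component is in its init state, nothing was saved for it: */
           if (!(xsave->header.xfeatures & XSTATE_FP))
                   return 1;

           /* Force FP/SSE to be treated as present in the saved layout: */
           xsave->header.xfeatures |= XSTATE_FPSSE;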

Reviewed-by: Borislav Petkov <bp@...en8.de>
Cc: Andy Lutomirski <luto@...capital.net>
Cc: Dave Hansen <dave.hansen@...ux.intel.com>
Cc: Fenghua Yu <fenghua.yu@...el.com>
Cc: H. Peter Anvin <hpa@...or.com>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Oleg Nesterov <oleg@...hat.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Signed-off-by: Ingo Molnar <mingo@...nel.org>
---
 arch/x86/include/asm/fpu/internal.h    |  2 +-
 arch/x86/include/asm/fpu/types.h       |  2 +-
 arch/x86/include/asm/user.h            |  8 ++++----
 arch/x86/include/uapi/asm/sigcontext.h |  4 ++--
 arch/x86/kernel/fpu/core.c             |  6 +++---
 arch/x86/kernel/fpu/xsave.c            | 52 ++++++++++++++++++++++++++--------------------------
 arch/x86/kvm/x86.c                     |  4 ++--
 7 files changed, 39 insertions(+), 39 deletions(-)

diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 3007df99833e..07c6adc02f68 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -261,7 +261,7 @@ static inline int fpu_save_init(struct fpu *fpu)
 		/*
 		 * xsave header may indicate the init state of the FP.
 		 */
-		if (!(fpu->state->xsave.header.xstate_bv & XSTATE_FP))
+		if (!(fpu->state->xsave.header.xfeatures & XSTATE_FP))
 			return 1;
 	} else if (use_fxsr()) {
 		fpu_fxsave(fpu);
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index 33c0c7b782db..9bd2cd1a19fd 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -100,7 +100,7 @@ struct bndcsr {
 } __packed;
 
 struct xstate_header {
-	u64				xstate_bv;
+	u64				xfeatures;
 	u64				xcomp_bv;
 	u64				reserved[6];
 } __attribute__((packed));
diff --git a/arch/x86/include/asm/user.h b/arch/x86/include/asm/user.h
index fa042410c42c..59a54e869f15 100644
--- a/arch/x86/include/asm/user.h
+++ b/arch/x86/include/asm/user.h
@@ -15,7 +15,7 @@ struct user_ymmh_regs {
 };
 
 struct user_xstate_header {
-	__u64 xstate_bv;
+	__u64 xfeatures;
 	__u64 reserved1[2];
 	__u64 reserved2[5];
 };
@@ -41,11 +41,11 @@ struct user_xstate_header {
  * particular process/thread.
  *
  * Also when the user modifies certain state FP/SSE/etc through the
- * ptrace interface, they must ensure that the header.xstate_bv
+ * ptrace interface, they must ensure that the header.xfeatures
  * bytes[512..519] of the memory layout are updated correspondingly.
  * i.e., for example when FP state is modified to a non-init state,
- * header.xstate_bv's bit 0 must be set to '1', when SSE is modified to
- * non-init state, header.xstate_bv's bit 1 must to be set to '1', etc.
+ * header.xfeatures's bit 0 must be set to '1', when SSE is modified to
+ * non-init state, header.xfeatures's bit 1 must to be set to '1', etc.
  */
 #define USER_XSTATE_FX_SW_WORDS 6
 #define USER_XSTATE_XCR0_WORD	0
diff --git a/arch/x86/include/uapi/asm/sigcontext.h b/arch/x86/include/uapi/asm/sigcontext.h
index 7f850f7b5c45..0e8a973de9ee 100644
--- a/arch/x86/include/uapi/asm/sigcontext.h
+++ b/arch/x86/include/uapi/asm/sigcontext.h
@@ -25,7 +25,7 @@ struct _fpx_sw_bytes {
 	__u32 extended_size;	/* total size of the layout referred by
 				 * fpstate pointer in the sigcontext.
 				 */
-	__u64 xstate_bv;
+	__u64 xfeatures;
 				/* feature bit mask (including fp/sse/extended
 				 * state) that is present in the memory
 				 * layout.
@@ -210,7 +210,7 @@ struct sigcontext {
 #endif /* !__i386__ */
 
 struct _header {
-	__u64 xstate_bv;
+	__u64 xfeatures;
 	__u64 reserved1[2];
 	__u64 reserved2[5];
 };
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index eabf4380366a..c12dd3c0aabb 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -470,7 +470,7 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
 	 * presence of FP and SSE state.
 	 */
 	if (cpu_has_xsave)
-		fpu->state->xsave.header.xstate_bv |= XSTATE_FPSSE;
+		fpu->state->xsave.header.xfeatures |= XSTATE_FPSSE;
 
 	return ret;
 }
@@ -528,7 +528,7 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
 	 * mxcsr reserved bits must be masked to zero for security reasons.
 	 */
 	xsave->i387.mxcsr &= mxcsr_feature_mask;
-	xsave->header.xstate_bv &= xfeatures_mask;
+	xsave->header.xfeatures &= xfeatures_mask;
 	/*
 	 * These bits must be zero.
 	 */
@@ -740,7 +740,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
 	 * presence of FP.
 	 */
 	if (cpu_has_xsave)
-		fpu->state->xsave.header.xstate_bv |= XSTATE_FP;
+		fpu->state->xsave.header.xfeatures |= XSTATE_FP;
 	return ret;
 }
 
diff --git a/arch/x86/kernel/fpu/xsave.c b/arch/x86/kernel/fpu/xsave.c
index 03639fa079b0..467e4635bd29 100644
--- a/arch/x86/kernel/fpu/xsave.c
+++ b/arch/x86/kernel/fpu/xsave.c
@@ -32,7 +32,7 @@ static unsigned int xfeatures_nr;
 /*
  * If a processor implementation discern that a processor state component is
  * in its initialized state it may modify the corresponding bit in the
- * header.xstate_bv as '0', with out modifying the corresponding memory
+ * header.xfeatures as '0', with out modifying the corresponding memory
  * layout in the case of xsaveopt. While presenting the xstate information to
  * the user, we always ensure that the memory layout of a feature will be in
  * the init state if the corresponding header bit is zero. This is to ensure
@@ -43,24 +43,24 @@ void __sanitize_i387_state(struct task_struct *tsk)
 {
 	struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave;
 	int feature_bit = 0x2;
-	u64 xstate_bv;
+	u64 xfeatures;
 
 	if (!fx)
 		return;
 
-	xstate_bv = tsk->thread.fpu.state->xsave.header.xstate_bv;
+	xfeatures = tsk->thread.fpu.state->xsave.header.xfeatures;
 
 	/*
 	 * None of the feature bits are in init state. So nothing else
 	 * to do for us, as the memory layout is up to date.
 	 */
-	if ((xstate_bv & xfeatures_mask) == xfeatures_mask)
+	if ((xfeatures & xfeatures_mask) == xfeatures_mask)
 		return;
 
 	/*
 	 * FP is in init state
 	 */
-	if (!(xstate_bv & XSTATE_FP)) {
+	if (!(xfeatures & XSTATE_FP)) {
 		fx->cwd = 0x37f;
 		fx->swd = 0;
 		fx->twd = 0;
@@ -73,17 +73,17 @@ void __sanitize_i387_state(struct task_struct *tsk)
 	/*
 	 * SSE is in init state
 	 */
-	if (!(xstate_bv & XSTATE_SSE))
+	if (!(xfeatures & XSTATE_SSE))
 		memset(&fx->xmm_space[0], 0, 256);
 
-	xstate_bv = (xfeatures_mask & ~xstate_bv) >> 2;
+	xfeatures = (xfeatures_mask & ~xfeatures) >> 2;
 
 	/*
 	 * Update all the other memory layouts for which the corresponding
 	 * header bit is in the init state.
 	 */
-	while (xstate_bv) {
-		if (xstate_bv & 0x1) {
+	while (xfeatures) {
+		if (xfeatures & 0x1) {
 			int offset = xstate_offsets[feature_bit];
 			int size = xstate_sizes[feature_bit];
 
@@ -92,7 +92,7 @@ void __sanitize_i387_state(struct task_struct *tsk)
 			       size);
 		}
 
-		xstate_bv >>= 1;
+		xfeatures >>= 1;
 		feature_bit++;
 	}
 }
@@ -162,7 +162,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
 {
 	struct xsave_struct __user *x = buf;
 	struct _fpx_sw_bytes *sw_bytes;
-	u32 xstate_bv;
+	u32 xfeatures;
 	int err;
 
 	/* Setup the bytes not touched by the [f]xsave and reserved for SW. */
@@ -175,25 +175,25 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
 	err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
 
 	/*
-	 * Read the xstate_bv which we copied (directly from the cpu or
+	 * Read the xfeatures which we copied (directly from the cpu or
 	 * from the state in task struct) to the user buffers.
 	 */
-	err |= __get_user(xstate_bv, (__u32 *)&x->header.xstate_bv);
+	err |= __get_user(xfeatures, (__u32 *)&x->header.xfeatures);
 
 	/*
 	 * For legacy compatible, we always set FP/SSE bits in the bit
 	 * vector while saving the state to the user context. This will
 	 * enable us capturing any changes(during sigreturn) to
 	 * the FP/SSE bits by the legacy applications which don't touch
-	 * xstate_bv in the xsave header.
+	 * xfeatures in the xsave header.
 	 *
-	 * xsave aware apps can change the xstate_bv in the xsave
+	 * xsave aware apps can change the xfeatures in the xsave
 	 * header as well as change any contents in the memory layout.
 	 * xrestore as part of sigreturn will capture all the changes.
 	 */
-	xstate_bv |= XSTATE_FPSSE;
+	xfeatures |= XSTATE_FPSSE;
 
-	err |= __put_user(xstate_bv, (__u32 *)&x->header.xstate_bv);
+	err |= __put_user(xfeatures, (__u32 *)&x->header.xfeatures);
 
 	return err;
 }
@@ -277,7 +277,7 @@ int save_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 static inline void
 sanitize_restored_xstate(struct task_struct *tsk,
 			 struct user_i387_ia32_struct *ia32_env,
-			 u64 xstate_bv, int fx_only)
+			 u64 xfeatures, int fx_only)
 {
 	struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
 	struct xstate_header *header = &xsave->header;
@@ -291,9 +291,9 @@ sanitize_restored_xstate(struct task_struct *tsk,
 		 * layout and not enabled by the OS.
 		 */
 		if (fx_only)
-			header->xstate_bv = XSTATE_FPSSE;
+			header->xfeatures = XSTATE_FPSSE;
 		else
-			header->xstate_bv &= (xfeatures_mask & xstate_bv);
+			header->xfeatures &= (xfeatures_mask & xfeatures);
 	}
 
 	if (use_fxsr()) {
@@ -335,7 +335,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 	struct task_struct *tsk = current;
 	struct fpu *fpu = &tsk->thread.fpu;
 	int state_size = xstate_size;
-	u64 xstate_bv = 0;
+	u64 xfeatures = 0;
 	int fx_only = 0;
 
 	ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
@@ -369,7 +369,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 			fx_only = 1;
 		} else {
 			state_size = fx_sw_user.xstate_size;
-			xstate_bv = fx_sw_user.xstate_bv;
+			xfeatures = fx_sw_user.xfeatures;
 		}
 	}
 
@@ -398,7 +398,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 			fpstate_init(fpu);
 			err = -1;
 		} else {
-			sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only);
+			sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
 		}
 
 		fpu->fpstate_active = 1;
@@ -415,7 +415,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 		 * state to the registers directly (with exceptions handled).
 		 */
 		user_fpu_begin();
-		if (restore_user_xstate(buf_fx, xstate_bv, fx_only)) {
+		if (restore_user_xstate(buf_fx, xfeatures, fx_only)) {
 			fpu_reset_state(fpu);
 			return -1;
 		}
@@ -441,7 +441,7 @@ static void prepare_fx_sw_frame(void)
 
 	fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
 	fx_sw_reserved.extended_size = size;
-	fx_sw_reserved.xstate_bv = xfeatures_mask;
+	fx_sw_reserved.xfeatures = xfeatures_mask;
 	fx_sw_reserved.xstate_size = xstate_size;
 
 	if (config_enabled(CONFIG_IA32_EMULATION)) {
@@ -576,7 +576,7 @@ static void __init setup_init_fpu_buf(void)
 	if (cpu_has_xsaves) {
 		init_xstate_buf->header.xcomp_bv =
 						(u64)1 << 63 | xfeatures_mask;
-		init_xstate_buf->header.xstate_bv = xfeatures_mask;
+		init_xstate_buf->header.xfeatures = xfeatures_mask;
 	}
 
 	/*
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ac24889c8bc3..0b58b9397098 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3197,7 +3197,7 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
 static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
 {
 	struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave;
-	u64 xstate_bv = xsave->header.xstate_bv;
+	u64 xstate_bv = xsave->header.xfeatures;
 	u64 valid;
 
 	/*
@@ -3243,7 +3243,7 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
 	memcpy(xsave, src, XSAVE_HDR_OFFSET);
 
 	/* Set XSTATE_BV and possibly XCOMP_BV.  */
-	xsave->header.xstate_bv = xstate_bv;
+	xsave->header.xfeatures = xstate_bv;
 	if (cpu_has_xsaves)
 		xsave->header.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED;
 
-- 
2.1.0
