Date:	Tue,  5 May 2015 19:50:27 +0200
From:	Ingo Molnar <mingo@...nel.org>
To:	linux-kernel@...r.kernel.org
Cc:	Andy Lutomirski <luto@...capital.net>,
	Borislav Petkov <bp@...en8.de>,
	Dave Hansen <dave.hansen@...ux.intel.com>,
	Fenghua Yu <fenghua.yu@...el.com>,
	"H. Peter Anvin" <hpa@...or.com>,
	Linus Torvalds <torvalds@...ux-foundation.org>,
	Oleg Nesterov <oleg@...hat.com>,
	Thomas Gleixner <tglx@...utronix.de>
Subject: [PATCH 135/208] x86/fpu: Remove failure paths from fpstate-alloc low level functions

Now that we always allocate the FPU context as part of task_struct, there's
no need for separate allocations - remove them and their primary failure
handling code.

( Note that there are still secondary error codes that have become superfluous;
  those will be removed in separate patches. )

Move the somewhat misplaced setup_xstate_comp() call to the core.
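
As a rough illustration (not kernel code; the names 'fpu_state', 'task_old'
and 'task_new' are invented for this sketch): once the FPU state is a plain
member of the task structure instead of a separately allocated object, there
is no allocation step left that can fail, so the error-return plumbing turns
into dead code:

	#include <stdio.h>

	struct fpu_state { unsigned char regs[512]; };

	/* Before: the state sits behind a pointer, so allocation can fail. */
	struct task_old {
		struct fpu_state *fpstate;	/* allocator may return NULL */
	};

	/* After: the state is embedded, it exists as soon as the task does. */
	struct task_new {
		struct fpu_state fpstate;	/* no allocation, no failure path */
	};

	int main(void)
	{
		struct task_new t = { 0 };

		/* Initialization can simply write into the embedded state: */
		t.fpstate.regs[0] = 0x7f;
		printf("embedded fpstate at %p, no allocation needed\n",
		       (void *)&t.fpstate);
		return 0;
	}

This is also why fpstate_alloc() in the diff below had already degenerated
into a single alignment WARN_ON() plus 'return 0' before being removed
entirely.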

Reviewed-by: Borislav Petkov <bp@...en8.de>
Cc: Andy Lutomirski <luto@...capital.net>
Cc: Dave Hansen <dave.hansen@...ux.intel.com>
Cc: Fenghua Yu <fenghua.yu@...el.com>
Cc: H. Peter Anvin <hpa@...or.com>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Oleg Nesterov <oleg@...hat.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Signed-off-by: Ingo Molnar <mingo@...nel.org>
---
 arch/x86/include/asm/fpu/internal.h |  4 ----
 arch/x86/kernel/fpu/core.c          | 51 ++-------------------------------------------------
 arch/x86/kernel/fpu/init.c          |  1 +
 arch/x86/kernel/process.c           | 10 ----------
 arch/x86/kvm/x86.c                  | 11 -----------
 5 files changed, 3 insertions(+), 74 deletions(-)

diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 4ce830fb3f31..9454f21f0edf 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -558,10 +558,6 @@ static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
 	}
 }
 
-extern void fpstate_cache_init(void);
-
-extern int fpstate_alloc(struct fpu *fpu);
-extern void fpstate_free(struct fpu *fpu);
 extern int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);
 
 static inline unsigned long
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 422cbb4bbe01..7d42a54b5f23 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -226,34 +226,6 @@ void fpstate_init(struct fpu *fpu)
 EXPORT_SYMBOL_GPL(fpstate_init);
 
 /*
- * FPU state allocation:
- */
-static struct kmem_cache *task_xstate_cachep;
-
-void fpstate_cache_init(void)
-{
-	task_xstate_cachep =
-		kmem_cache_create("task_xstate", xstate_size,
-				  __alignof__(union thread_xstate),
-				  SLAB_PANIC | SLAB_NOTRACK, NULL);
-	setup_xstate_comp();
-}
-
-int fpstate_alloc(struct fpu *fpu)
-{
-	/* The CPU requires the FPU state to be aligned to 16 byte boundaries: */
-	WARN_ON((unsigned long)&fpu->state & 15);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(fpstate_alloc);
-
-void fpstate_free(struct fpu *fpu)
-{
-}
-EXPORT_SYMBOL_GPL(fpstate_free);
-
-/*
  * Copy the current task's FPU state to a new task's FPU context.
  *
  * In the 'eager' case we just save to the destination context.
@@ -280,13 +252,9 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 	dst_fpu->fpregs_active = 0;
 	dst_fpu->last_cpu = -1;
 
-	if (src_fpu->fpstate_active) {
-		int err = fpstate_alloc(dst_fpu);
-
-		if (err)
-			return err;
+	if (src_fpu->fpstate_active)
 		fpu_copy(dst_fpu, src_fpu);
-	}
+
 	return 0;
 }
 
@@ -305,13 +273,6 @@ int fpstate_alloc_init(struct fpu *fpu)
 	if (WARN_ON_ONCE(fpu->fpstate_active))
 		return -EINVAL;
 
-	/*
-	 * Memory allocation at the first usage of the FPU and other state.
-	 */
-	ret = fpstate_alloc(fpu);
-	if (ret)
-		return ret;
-
 	fpstate_init(fpu);
 
 	/* Safe to do for the current task: */
@@ -356,13 +317,6 @@ static int fpu__unlazy_stopped(struct fpu *child_fpu)
 		return 0;
 	}
 
-	/*
-	 * Memory allocation at the first usage of the FPU and other state.
-	 */
-	ret = fpstate_alloc(child_fpu);
-	if (ret)
-		return ret;
-
 	fpstate_init(child_fpu);
 
 	/* Safe to do for stopped child tasks: */
@@ -423,7 +377,6 @@ void fpu__clear(struct task_struct *tsk)
 	if (!use_eager_fpu()) {
 		/* FPU state will be reallocated lazily at the first use. */
 		drop_fpu(fpu);
-		fpstate_free(fpu);
 	} else {
 		 if (!fpu->fpstate_active) {
 			/* kthread execs. TODO: cleanup this horror. */
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 7ae5a62918c7..460e7e2c6186 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -265,6 +265,7 @@ void fpu__init_system(struct cpuinfo_x86 *c)
 	fpu__init_system_generic();
 	fpu__init_system_xstate_size_legacy();
 	fpu__init_system_xstate();
+	setup_xstate_comp();
 
 	fpu__init_system_ctx_switch();
 }
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 2bd188501ac9..4b4b16c8e6ee 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -86,16 +86,6 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 	return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
 }
 
-void arch_release_task_struct(struct task_struct *tsk)
-{
-	fpstate_free(&tsk->thread.fpu);
-}
-
-void arch_task_cache_init(void)
-{
-	fpstate_cache_init();
-}
-
 /*
  * Free current thread data structures etc..
  */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8bb0de5bf9c0..68529251e897 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7008,10 +7008,6 @@ int fx_init(struct kvm_vcpu *vcpu)
 {
 	int err;
 
-	err = fpstate_alloc(&vcpu->arch.guest_fpu);
-	if (err)
-		return err;
-
 	fpstate_init(&vcpu->arch.guest_fpu);
 	if (cpu_has_xsaves)
 		vcpu->arch.guest_fpu.state.xsave.header.xcomp_bv =
@@ -7028,11 +7024,6 @@ int fx_init(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(fx_init);
 
-static void fx_free(struct kvm_vcpu *vcpu)
-{
-	fpstate_free(&vcpu->arch.guest_fpu);
-}
-
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 {
 	if (vcpu->guest_fpu_loaded)
@@ -7070,7 +7061,6 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 	kvmclock_reset(vcpu);
 
 	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
-	fx_free(vcpu);
 	kvm_x86_ops->vcpu_free(vcpu);
 }
 
@@ -7126,7 +7116,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 	kvm_mmu_unload(vcpu);
 	vcpu_put(vcpu);
 
-	fx_free(vcpu);
 	kvm_x86_ops->vcpu_free(vcpu);
 }
 
-- 
2.1.0
