Date:   Mon, 23 Jan 2017 01:14:22 -0800
From:   tip-bot for Borislav Petkov <tipbot@...or.com>
To:     linux-tip-commits@...r.kernel.org
Cc:     tglx@...utronix.de, linux-kernel@...r.kernel.org, mingo@...nel.org,
        bp@...e.de, hpa@...or.com
Subject: [tip:x86/mce] x86/microcode/AMD: Remove AP scanning optimization

Commit-ID:  69f5f983001f6d097aac774a9e917f44657f3367
Gitweb:     http://git.kernel.org/tip/69f5f983001f6d097aac774a9e917f44657f3367
Author:     Borislav Petkov <bp@...e.de>
AuthorDate: Fri, 20 Jan 2017 21:29:54 +0100
Committer:  Thomas Gleixner <tglx@...utronix.de>
CommitDate: Mon, 23 Jan 2017 10:02:51 +0100

x86/microcode/AMD: Remove AP scanning optimization

The idea was to not scan the microcode blob on each AP (Application
Processor) during boot and thus save us some milliseconds. However, on
architectures where the microcode engine is shared between threads, this
doesn't work. Here's why:

The microcode on CPU0, i.e., the first thread, gets updated. The
second thread, CPU1, which is also the first AP, walks into
load_ucode_amd_ap(), sees that there is no container cached yet and
goes and scans for the proper blob.

It finds it, and as a last step of apply_microcode_early_amd() it
tries to apply the patch. But that core already has the updated
microcode revision, which it received through CPU0's update, so the
function returns false and we set desc->size = -1 to prevent other
APs from scanning.

However, the next AP, CPU2, has a different microcode engine which
hasn't been updated yet. The desc->size == -1 test prevents it from
scanning the blob anew and we fail to update it.
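
To illustrate, here is a rough, condensed view of the AP path being
removed (see the deleted hunk in the diff below); the sentinel stops
every later AP, not just the sibling thread:

	if (!desc->data) {
		if (desc->size == -1)		/* set by an earlier AP */
			return;			/* CPU2 bails out unpatched */

		__load_ucode_amd(cpuid_1_eax, &cp);
		if (!apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size,
					       false, desc)) {
			desc->data = NULL;
			desc->size = -1;	/* CPU1's apply "failed" */
			return;
		}
	}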

The fix is much more straightforward than it looks: the BSP
(Bootstrap Processor), i.e., CPU0, caches the microcode patch in
amd_ucode_patch. We use that cached patch on each AP and try to apply
it. In the 99.9999% of cases where the cores are homogeneous - *not*
mixed steppings - the application will be successful and we're good
to go.

On the remaining small set of systems we simply rescan the blob, find
the proper patch there (or not, if none is present) and apply it.
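
Condensed, and leaving out the 32-bit __pa_nodebug() fixups, the new
AP path in the diff below then looks roughly like this:

	mc = (struct microcode_amd *)amd_ucode_patch;

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	/* Try the patch the BSP cached, if it is newer than what we run. */
	if (ucode_new_rev && rev < mc->hdr.patch_id)
		if (!__apply_microcode_amd(mc))
			return;

	/* Mixed steppings (or nothing cached): rescan the blob. */
	__load_ucode_amd(cpuid_1_eax, &cp);
	if (cp.data && cp.size)
		apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false);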

Signed-off-by: Borislav Petkov <bp@...e.de>
Reviewed-by: Thomas Gleixner <tglx@...utronix.de>
Link: http://lkml.kernel.org/r/20170120202955.4091-16-bp@alien8.de
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>

---
 arch/x86/kernel/cpu/microcode/amd.c | 78 +++++++++----------------------------
 1 file changed, 18 insertions(+), 60 deletions(-)

diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 2a194e3..5e1b577 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -45,14 +45,14 @@ static struct equiv_cpu_entry *equiv_cpu_table;
  * save from the initrd/builtin before jettisoning its contents. @mc is the
  * microcode patch we found to match.
  */
-static struct cont_desc {
+struct cont_desc {
 	struct microcode_amd *mc;
 	u32		     cpuid_1_eax;
 	u32		     psize;
 	u16		     eq_id;
 	u8		     *data;
 	size_t		     size;
-} cont;
+};
 
 static u32 ucode_new_rev;
 static u8 amd_ucode_patch[PATCH_MAX_SIZE];
@@ -201,8 +201,7 @@ static int __apply_microcode_amd(struct microcode_amd *mc)
  * Returns true if container found (sets @desc), false otherwise.
  */
 static bool
-apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size,
-			  bool save_patch, struct cont_desc *ret_desc)
+apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_patch)
 {
 	struct cont_desc desc = { 0 };
 	u8 (*patch)[PATCH_MAX_SIZE];
@@ -240,9 +239,6 @@ apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size,
 			memcpy(patch, mc, min_t(u32, desc.psize, PATCH_MAX_SIZE));
 	}
 
-	if (ret_desc)
-		*ret_desc = desc;
-
 	return ret;
 }
 
@@ -292,79 +288,41 @@ void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
 	struct cpio_data cp = { };
 
 	__load_ucode_amd(cpuid_1_eax, &cp);
-
 	if (!(cp.data && cp.size))
 		return;
 
-	apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true, NULL);
+	apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true);
 }
 
 void load_ucode_amd_ap(unsigned int cpuid_1_eax)
 {
-	struct equiv_cpu_entry *eq;
 	struct microcode_amd *mc;
-	struct cont_desc *desc;
-	u16 eq_id;
+	struct cpio_data cp;
+	u32 *new_rev, rev, dummy;
 
 	if (IS_ENABLED(CONFIG_X86_32)) {
-		mc   = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
-		desc = (struct cont_desc *)__pa_nodebug(&cont);
+		mc	= (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
+		new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
 	} else {
-		mc   = (struct microcode_amd *)amd_ucode_patch;
-		desc = &cont;
+		mc	= (struct microcode_amd *)amd_ucode_patch;
+		new_rev = &ucode_new_rev;
 	}
 
-	/* First AP hasn't cached it yet, go through the blob. */
-	if (!desc->data) {
-		struct cpio_data cp = { };
-
-		if (desc->size == -1)
-			return;
-
-reget:
-		__load_ucode_amd(cpuid_1_eax, &cp);
-		if (!(cp.data && cp.size)) {
-			/*
-			 * Mark it so that other APs do not scan again for no
-			 * real reason and slow down boot needlessly.
-			 */
-			desc->size = -1;
-			return;
-		}
+	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
 
-		if (!apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false, desc)) {
-			desc->data = NULL;
-			desc->size = -1;
+	/* Check whether we have saved a new patch already: */
+	if (*new_rev && rev < mc->hdr.patch_id) {
+		if (!__apply_microcode_amd(mc)) {
+			*new_rev = mc->hdr.patch_id;
 			return;
 		}
 	}
 
-	eq  = (struct equiv_cpu_entry *)(desc->data + CONTAINER_HDR_SZ);
-
-	eq_id = find_equiv_id(eq, cpuid_1_eax);
-	if (!eq_id)
+	__load_ucode_amd(cpuid_1_eax, &cp);
+	if (!(cp.data && cp.size))
 		return;
 
-	if (eq_id == desc->eq_id) {
-		u32 rev, dummy;
-
-		native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
-
-		mc = (struct microcode_amd *)amd_ucode_patch;
-
-		if (mc && rev < mc->hdr.patch_id) {
-			if (!__apply_microcode_amd(mc))
-				ucode_new_rev = mc->hdr.patch_id;
-		}
-
-	} else {
-
-		/*
-		 * AP has a different equivalence ID than BSP, looks like
-		 * mixed-steppings silicon so go through the ucode blob anew.
-		 */
-		goto reget;
-	}
+	apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false);
 }
 
 static enum ucode_state
