Message-ID: <20250211163648.30531-4-bp@kernel.org>
Date: Tue, 11 Feb 2025 17:36:46 +0100
From: Borislav Petkov <bp@...nel.org>
To: X86 ML <x86@...nel.org>
Cc: LKML <linux-kernel@...r.kernel.org>,
"Borislav Petkov (AMD)" <bp@...en8.de>
Subject: [PATCH 3/5] x86/microcode/AMD: Merge early_apply_microcode() into its single callsite
From: "Borislav Petkov (AMD)" <bp@...en8.de>
early_apply_microcode() has a single callsite - load_ucode_amd_bsp() - so merge it there.

No functional changes.
Signed-off-by: Borislav Petkov (AMD) <bp@...en8.de>
---
arch/x86/kernel/cpu/microcode/amd.c | 60 +++++++++++++----------------
1 file changed, 26 insertions(+), 34 deletions(-)
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index f831c0602994..90f93b3ca9db 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -512,39 +512,6 @@ static bool __apply_microcode_amd(struct microcode_amd *mc, unsigned int psize)
 	return true;
 }
 
-/*
- * Early load occurs before we can vmalloc(). So we look for the microcode
- * patch container file in initrd, traverse equivalent cpu table, look for a
- * matching microcode patch, and update, all in initrd memory in place.
- * When vmalloc() is available for use later -- on 64-bit during first AP load,
- * and on 32-bit during save_microcode_in_initrd() -- we can call
- * load_microcode_amd() to save equivalent cpu table and microcode patches in
- * kernel heap memory.
- *
- * Returns true if container found (sets @desc), false otherwise.
- */
-static bool early_apply_microcode(u32 old_rev, void *ucode, size_t size)
-{
-	struct cont_desc desc = { 0 };
-	struct microcode_amd *mc;
-
-	scan_containers(ucode, size, &desc);
-
-	mc = desc.mc;
-	if (!mc)
-		return false;
-
-	/*
-	 * Allow application of the same revision to pick up SMT-specific
-	 * changes even if the revision of the other SMT thread is already
-	 * up-to-date.
-	 */
-	if (old_rev > mc->hdr.patch_id)
-		return false;
-
-	return __apply_microcode_amd(mc, desc.psize);
-}
-
 static bool get_builtin_microcode(struct cpio_data *cp)
 {
 	char fw_name[36] = "amd-ucode/microcode_amd.bin";
@@ -582,8 +549,19 @@ static bool __init find_blobs_in_containers(struct cpio_data *ret)
 	return found;
 }
 
+/*
+ * Early load occurs before we can vmalloc(). So we look for the microcode
+ * patch container file in initrd, traverse equivalent cpu table, look for a
+ * matching microcode patch, and update, all in initrd memory in place.
+ * When vmalloc() is available for use later -- on 64-bit during first AP load,
+ * and on 32-bit during save_microcode_in_initrd() -- we can call
+ * load_microcode_amd() to save equivalent cpu table and microcode patches in
+ * kernel heap memory.
+ */
 void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_eax)
 {
+	struct cont_desc desc = { };
+	struct microcode_amd *mc;
 	struct cpio_data cp = { };
 	u32 dummy;
 
@@ -597,7 +575,21 @@ void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_
 	if (!find_blobs_in_containers(&cp))
 		return;
 
-	if (early_apply_microcode(ed->old_rev, cp.data, cp.size))
+	scan_containers(cp.data, cp.size, &desc);
+
+	mc = desc.mc;
+	if (!mc)
+		return;
+
+	/*
+	 * Allow application of the same revision to pick up SMT-specific
+	 * changes even if the revision of the other SMT thread is already
+	 * up-to-date.
+	 */
+	if (ed->old_rev > mc->hdr.patch_id)
+		return;
+
+	if (__apply_microcode_amd(mc, desc.psize))
 		native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->new_rev, dummy);
 }
--
2.43.0
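
For reference, here is how the merged load_ucode_amd_bsp() reads with this
patch applied, reconstructed from the hunks above; the unchanged early part
of the function body, between the declarations and the
find_blobs_in_containers() call, is elided:

void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_eax)
{
	struct cont_desc desc = { };
	struct microcode_amd *mc;
	struct cpio_data cp = { };
	u32 dummy;

	/* ... unchanged early part of the function elided ... */

	if (!find_blobs_in_containers(&cp))
		return;

	scan_containers(cp.data, cp.size, &desc);

	mc = desc.mc;
	if (!mc)
		return;

	/*
	 * Allow application of the same revision to pick up SMT-specific
	 * changes even if the revision of the other SMT thread is already
	 * up-to-date.
	 */
	if (ed->old_rev > mc->hdr.patch_id)
		return;

	if (__apply_microcode_amd(mc, desc.psize))
		native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->new_rev, dummy);
}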