Message-ID: <20230812195728.475622148@linutronix.de>
Date:   Sat, 12 Aug 2023 21:59:01 +0200 (CEST)
From:   Thomas Gleixner <tglx@...utronix.de>
To:     LKML <linux-kernel@...r.kernel.org>
Cc:     x86@...nel.org, Borislav Petkov <bp@...en8.de>,
        Ashok Raj <ashok.raj@...el.com>,
        Arjan van de Ven <arjan@...ux.intel.com>,
        Nikolay Borisov <nik.borisov@...e.com>
Subject: [patch V2 16/37] x86/microcode/intel: Switch to kvmalloc()

From: Thomas Gleixner <tglx@...utronix.de>

Microcode blobs are getting larger and might soon reach the kmalloc()
limit. Switch over to kvmalloc().
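As an illustration only (the helper name below is made up and not part of
this patch), the allocation pattern this switches to looks like:

	/*
	 * kvmalloc() attempts a kmalloc() first and transparently falls
	 * back to vmalloc() for large requests; the result must be
	 * released with kvfree(), which handles both cases.
	 */
	static void *alloc_ucode_buf(size_t size)
	{
		return kvmalloc(size, GFP_KERNEL);	/* needs <linux/mm.h> */
	}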

32-bit has to stay with kmalloc() because the early loading code runs
before paging is enabled and therefore needs physically contiguous
memory. Add a sanity check to ensure that.
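For illustration, a minimal sketch of that check (the standalone helper is
hypothetical; the actual check sits in the late-load path below):

	/*
	 * Early loading on 32-bit runs before paging is enabled, so the
	 * saved patch must be physically contiguous. Anything that ended
	 * up in vmalloc space cannot be used there.
	 */
	static bool ucode_buf_usable(const void *mc)	/* hypothetical helper */
	{
		return !(IS_ENABLED(CONFIG_X86_32) && is_vmalloc_addr(mc));
	}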

Signed-off-by: Thomas Gleixner <tglx@...utronix.de>

---
 arch/x86/kernel/cpu/microcode/intel.c |   55 +++++++++++++++++++---------------
 1 file changed, 32 insertions(+), 23 deletions(-)
---
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -14,7 +14,6 @@
 #include <linux/earlycpio.h>
 #include <linux/firmware.h>
 #include <linux/uaccess.h>
-#include <linux/vmalloc.h>
 #include <linux/initrd.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
@@ -243,7 +242,7 @@ EXPORT_SYMBOL_GPL(intel_microcode_sanity
 
 static void update_ucode_pointer(struct microcode_intel *mc)
 {
-	kfree(ucode_patch_va);
+	kvfree(ucode_patch_va);
 
 	/*
 	 * Save the virtual address for early loading on 64bit
@@ -257,13 +256,18 @@ static void update_ucode_pointer(struct
 
 static void save_microcode_patch(struct microcode_intel *patch)
 {
-	struct microcode_intel *mc;
+	unsigned int size = get_totalsize(&patch->hdr);
+	struct microcode_intel *mc = NULL;
+
+	if (IS_ENABLED(CONFIG_X86_64))
+		mc = kvmemdup(patch, size, GFP_KERNEL);
+	else
+		mc = kmemdup(patch, size, GFP_KERNEL);
 
-	mc = kmemdup(patch, get_totalsize(&patch->hdr), GFP_KERNEL);
 	if (mc)
 		update_ucode_pointer(mc);
 	else
-		pr_err("Unable to allocate microcode memory\n");
+		pr_err("Unable to allocate microcode memory size: %u\n", size);
 }
 
 /* Scan CPIO for microcode matching the boot CPUs family, model, stepping */
@@ -610,36 +614,34 @@ static enum ucode_state read_ucode_intel
 
 		if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) {
 			pr_err("error! Truncated or inaccessible header in microcode data file\n");
-			break;
+			goto fail;
 		}
 
 		mc_size = get_totalsize(&mc_header);
 		if (mc_size < sizeof(mc_header)) {
 			pr_err("error! Bad data in microcode data file (totalsize too small)\n");
-			break;
+			goto fail;
 		}
-
 		data_size = mc_size - sizeof(mc_header);
 		if (data_size > iov_iter_count(iter)) {
 			pr_err("error! Bad data in microcode data file (truncated file?)\n");
-			break;
+			goto fail;
 		}
 
 		/* For performance reasons, reuse mc area when possible */
 		if (!mc || mc_size > curr_mc_size) {
-			vfree(mc);
-			mc = vmalloc(mc_size);
+			kvfree(mc);
+			mc = kvmalloc(mc_size, GFP_KERNEL);
 			if (!mc)
-				break;
+				goto fail;
 			curr_mc_size = mc_size;
 		}
 
 		memcpy(mc, &mc_header, sizeof(mc_header));
 		data = mc + sizeof(mc_header);
 		if (!copy_from_iter_full(data, data_size, iter) ||
-		    intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0) {
-			break;
-		}
+		    intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0)
+			goto fail;
 
 		if (cur_rev >= mc_header.rev)
 			continue;
@@ -647,25 +649,32 @@ static enum ucode_state read_ucode_intel
 		if (!intel_find_matching_signature(mc, uci->cpu_sig.sig, uci->cpu_sig.pf))
 			continue;
 
-		vfree(new_mc);
+		kvfree(new_mc);
 		cur_rev = mc_header.rev;
 		new_mc  = mc;
 		new_mc_size = mc_size;
 		mc = NULL;
 	}
 
-	vfree(mc);
+	if (iov_iter_count(iter))
+		goto fail;
 
-	if (iov_iter_count(iter)) {
-		vfree(new_mc);
-		return UCODE_ERROR;
+	if (IS_ENABLED(CONFIG_X86_32) && new_mc && is_vmalloc_addr(new_mc)) {
+		pr_err("Microcode too large for 32-bit mode\n");
+		goto fail;
 	}
 
+	kvfree(mc);
 	if (!new_mc)
 		return UCODE_NFOUND;
 
 	ucode_patch_late = (struct microcode_intel *)new_mc;
 	return UCODE_NEW;
+
+fail:
+	kvfree(mc);
+	kvfree(new_mc);
+	return UCODE_ERROR;
 }
 
 static bool is_blacklisted(unsigned int cpu)
@@ -724,9 +733,9 @@ static enum ucode_state request_microcod
 static void finalize_late_load(int result)
 {
 	if (!result)
-		save_microcode_patch(ucode_patch_late);
-
-	vfree(ucode_patch_late);
+		update_ucode_pointer(ucode_patch_late);
+	else
+		kvfree(ucode_patch_late);
 	ucode_patch_late = NULL;
 }
 
