Message-Id: <20250704152537.55724-6-urezki@gmail.com>
Date: Fri,  4 Jul 2025 17:25:35 +0200
From: "Uladzislau Rezki (Sony)" <urezki@...il.com>
To: linux-mm@...ck.org,
	Andrew Morton <akpm@...ux-foundation.org>
Cc: Michal Hocko <mhocko@...nel.org>,
	LKML <linux-kernel@...r.kernel.org>,
	Baoquan He <bhe@...hat.com>,
	Uladzislau Rezki <urezki@...il.com>
Subject: [RFC 5/7] mm/vmalloc: Defer freeing partly initialized vm_struct

__vmalloc_area_node() may call free_vmap_area() or vfree() on
error paths, both of which can sleep. This becomes problematic
if the function is invoked from an atomic context, such as when
GFP_ATOMIC or GFP_NOWAIT is passed via gfp_mask.

To fix this, unify the error paths and defer the cleanup of partly
initialized vm_struct objects to a workqueue. This ensures that
freeing happens in process context and avoids invalid sleeps in
atomic regions.

Signed-off-by: Uladzislau Rezki (Sony) <urezki@...il.com>
---
 mm/vmalloc.c | 35 ++++++++++++++++++++++++++++++++---
 1 file changed, 32 insertions(+), 3 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 5bac15b09b03..2eaff0575a9e 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3677,6 +3677,36 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
 	return nr_allocated;
 }
 
+static LLIST_HEAD(pending_vm_area_cleanup);
+
+static void cleanup_vm_area_work(struct work_struct *work)
+{
+	struct llist_node *node, *next;
+	struct vm_struct *vm;
+
+	llist_for_each_safe(node, next, llist_del_all(&pending_vm_area_cleanup)) {
+		vm = (void *) node - offsetof(struct vm_struct, next);
+
+		if (!vm->nr_pages)
+			free_vm_area(vm);
+		else
+			vfree(vm->addr);
+	}
+}
+
+static DECLARE_WORK(cleanup_vm_area, cleanup_vm_area_work);
+
+/*
+ * Helper for __vmalloc_area_node() to defer cleanup
+ * of partially initialized vm_struct in error paths.
+ */
+static void
+defer_vm_area_cleanup(struct vm_struct *area)
+{
+	if (llist_add((struct llist_node *) &area->next, &pending_vm_area_cleanup))
+		schedule_work(&cleanup_vm_area);
+}
+
 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 				 pgprot_t prot, unsigned int page_shift,
 				 int node)
@@ -3708,8 +3738,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 		warn_alloc(gfp_mask, NULL,
 			"vmalloc error: size %lu, failed to allocated page array size %lu",
 			nr_small_pages * PAGE_SIZE, array_size);
-		free_vm_area(area);
-		return NULL;
+		goto fail;
 	}
 
 	set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
@@ -3786,7 +3815,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	return area->addr;
 
 fail:
-	vfree(area->addr);
+	defer_vm_area_cleanup(area);
 	return NULL;
 }
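
A note on the pointer arithmetic in cleanup_vm_area_work(): the patch
reuses the pointer-sized vm_struct::next field as an llist_node and
recovers the enclosing vm_struct by subtracting the member's offset,
which is the same arithmetic the kernel's container_of() macro
performs. A standalone userspace illustration (hypothetical types):

	#include <stddef.h>
	#include <stdio.h>

	struct node { struct node *next; };

	struct obj {
		int payload;
		struct node link;	/* embedded, like vm_struct::next */
	};

	/* Open-coded container_of(n, struct obj, link). */
	static struct obj *obj_of(struct node *n)
	{
		return (struct obj *)((char *)n - offsetof(struct obj, link));
	}

	int main(void)
	{
		struct obj o = { .payload = 42 };

		printf("%d\n", obj_of(&o.link)->payload); /* prints 42 */
		return 0;
	}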
 
-- 
2.39.5

