Message-ID: <20200921020007.35803-4-chenjun102@huawei.com>
Date:   Mon, 21 Sep 2020 02:00:05 +0000
From:   Chen Jun <chenjun102@...wei.com>
To:     <linux-kernel@...r.kernel.org>, <linux-mm@...ck.org>
CC:     <catalin.marinas@....com>, <akpm@...ux-foundation.org>,
        <rui.xiang@...wei.com>, <weiyongjun1@...wei.com>
Subject: [PATCH -next 3/5] mm/kmemleak: Add support for percpu memory leak detection

From: Wei Yongjun <weiyongjun1@...wei.com>

Currently, leaks of percpu memory chunks are not reported. This patch
introduces support for detecting them.

Since a __percpu pointer does not point directly to the actual chunks,
this patch creates a kmemleak object for the __percpu pointer itself,
but marks it as a no-scan block: the block is never scanned, and
kmemleak only checks whether the pointer is referenced by other blocks
(see the sketch below).
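
For illustration only (editor's sketch, not part of this patch): a
hypothetical module where the only reference to a __percpu allocation
is lost. The per-CPU copies themselves are tracked with min_count == 0
(scanned but never reported), so before this change such a leak went
unnoticed; with the new no-scan object for the __percpu value itself
(min_count == 1), kmemleak can report that nothing references it.

	#include <linux/percpu.h>
	#include <linux/slab.h>

	struct foo {
		int __percpu *counters;
	};

	static int foo_demo(void)
	{
		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return -ENOMEM;

		/* hooks kmemleak_alloc_percpu() internally */
		f->counters = alloc_percpu(int);
		if (!f->counters) {
			kfree(f);
			return -ENOMEM;
		}

		kfree(f);	/* last reference to f->counters is gone */
		return 0;	/* percpu leak: now reportable by kmemleak */
	}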

Introduce two global variables, min_percpu_addr and max_percpu_addr,
to store the range of valid percpu pointer values, in order to
speed up pointer lookup when scanning blocks.
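
For context (editor's note, not part of the patch): a __percpu value
is effectively an offset-like cookie rather than a directly
dereferenceable address (hence the this_cpu_ptr() conversion in
hex_dump_object() below), which is why it gets its own address range,
separate from the untagged linear-address range used for ordinary
objects:

	int __percpu *p = alloc_percpu(int);
	/* 'p' itself lies in [min_percpu_addr, max_percpu_addr) */

	int *addr0 = per_cpu_ptr(p, 0);
	/* each per-CPU copy lies in [min_addr, max_addr) */

	/*
	 * A word found while scanning is a candidate pointer if it
	 * falls in either range; see is_valid_address() below.
	 */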

Signed-off-by: Wei Yongjun <weiyongjun1@...wei.com>
Signed-off-by: Chen Jun <chenjun102@...wei.com>
---
 mm/kmemleak.c | 71 ++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 59 insertions(+), 12 deletions(-)

diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index c09c6b59eda6..feedb72f06f2 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -170,6 +170,8 @@ struct kmemleak_object {
 #define OBJECT_NO_SCAN		(1 << 2)
 /* flag set to fully scan the object when scan_area allocation failed */
 #define OBJECT_FULL_SCAN	(1 << 3)
+/* flag set to percpu ptr object */
+#define OBJECT_PERCPU		(1 << 4)
 
 #define HEX_PREFIX		"    "
 /* number of bytes to print per line; must be 16 or 32 */
@@ -212,6 +214,9 @@ static int kmemleak_error;
 /* minimum and maximum address that may be valid pointers */
 static unsigned long min_addr = ULONG_MAX;
 static unsigned long max_addr;
+/* minimum and maximum address that may be valid percpu pointers */
+static unsigned long min_percpu_addr = ULONG_MAX;
+static unsigned long max_percpu_addr;
 
 static struct task_struct *scan_thread;
 /* used to avoid reporting of recently allocated objects */
@@ -283,6 +288,9 @@ static void hex_dump_object(struct seq_file *seq,
 	const u8 *ptr = (const u8 *)object->pointer;
 	size_t len;
 
+	if (object->flags & OBJECT_PERCPU)
+		ptr = this_cpu_ptr((void __percpu *)object->pointer);
+
 	/* limit the number of lines to HEX_MAX_LINES */
 	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
 
@@ -563,17 +571,32 @@ static int __save_stack_trace(unsigned long *trace)
 	return stack_trace_save(trace, MAX_TRACE, 2);
 }
 
+static void __update_address_range(struct kmemleak_object *object)
+{
+	unsigned long ptr = object->pointer;
+	size_t size = object->size;
+	unsigned long untagged_ptr;
+
+	if (object->flags & OBJECT_PERCPU) {
+		min_percpu_addr = min(min_percpu_addr, ptr);
+		max_percpu_addr = max(max_percpu_addr, ptr + size);
+	} else {
+		untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
+		min_addr = min(min_addr, untagged_ptr);
+		max_addr = max(max_addr, untagged_ptr + size);
+	}
+}
+
 /*
  * Create the metadata (struct kmemleak_object) corresponding to an allocated
  * memory block and add it to the object_list and object_tree_root.
  */
-static void create_object(unsigned long ptr, size_t size, int min_count,
-			  gfp_t gfp)
+static void __create_object(unsigned long ptr, size_t size, int min_count,
+			    unsigned int obj_flags, gfp_t gfp)
 {
 	unsigned long flags;
 	struct kmemleak_object *object, *parent;
 	struct rb_node **link, *rb_parent;
-	unsigned long untagged_ptr;
 
 	object = mem_pool_alloc(gfp);
 	if (!object) {
@@ -587,7 +610,7 @@ static void create_object(unsigned long ptr, size_t size, int min_count,
 	INIT_HLIST_HEAD(&object->area_list);
 	raw_spin_lock_init(&object->lock);
 	atomic_set(&object->use_count, 1);
-	object->flags = OBJECT_ALLOCATED;
+	object->flags = OBJECT_ALLOCATED | obj_flags;
 	object->pointer = ptr;
 	object->size = size;
 	object->excess_ref = 0;
@@ -619,9 +642,7 @@ static void create_object(unsigned long ptr, size_t size, int min_count,
 
 	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 
-	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
-	min_addr = min(min_addr, untagged_ptr);
-	max_addr = max(max_addr, untagged_ptr + size);
+	__update_address_range(object);
 	link = &object_tree_root.rb_node;
 	rb_parent = NULL;
 	while (*link) {
@@ -651,6 +672,19 @@ static void create_object(unsigned long ptr, size_t size, int min_count,
 	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 }
 
+static void create_object(unsigned long ptr, size_t size, int min_count,
+			  gfp_t gfp)
+{
+	__create_object(ptr, size, min_count, 0, gfp);
+}
+
+static void create_object_percpu(unsigned long ptr, size_t size, int min_count,
+				 gfp_t gfp)
+{
+	__create_object(ptr, size, min_count, OBJECT_PERCPU | OBJECT_NO_SCAN,
+			gfp);
+}
+
 /*
  * Mark the object as not allocated and schedule RCU freeing via put_object().
  */
@@ -912,10 +946,12 @@ void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
 	 * Percpu allocations are only scanned and not reported as leaks
 	 * (min_count is set to 0).
 	 */
-	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
+	if (kmemleak_enabled && ptr && !IS_ERR(ptr)) {
 		for_each_possible_cpu(cpu)
 			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
 				      size, 0, gfp);
+		create_object_percpu((unsigned long)ptr, size, 1, gfp);
+	}
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
 
@@ -991,10 +1027,12 @@ void __ref kmemleak_free_percpu(const void __percpu *ptr)
 
 	pr_debug("%s(0x%p)\n", __func__, ptr);
 
-	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
+	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr)) {
 		for_each_possible_cpu(cpu)
 			delete_object_full((unsigned long)per_cpu_ptr(ptr,
 								      cpu));
+		delete_object_full((unsigned long)ptr);
+	}
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
 
@@ -1224,6 +1262,17 @@ static int scan_should_stop(void)
 	return 0;
 }
 
+static bool is_valid_address(unsigned long ptr)
+{
+	unsigned long untagged_ptr;
+
+	if (ptr >= min_percpu_addr && ptr < max_percpu_addr)
+		return true;
+
+	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
+	return (untagged_ptr >= min_addr && untagged_ptr < max_addr);
+}
+
 /*
  * Scan a memory block (exclusive range) for valid pointers and add those
  * found to the gray list.
@@ -1235,7 +1284,6 @@ static void scan_block(void *_start, void *_end,
 	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
 	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
 	unsigned long flags;
-	unsigned long untagged_ptr;
 
 	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 	for (ptr = start; ptr < end; ptr++) {
@@ -1250,8 +1298,7 @@ static void scan_block(void *_start, void *_end,
 		pointer = *ptr;
 		kasan_enable_current();
 
-		untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
-		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
+		if (!is_valid_address(pointer))
 			continue;
 
 		/*
-- 
2.25.0
