Message-Id: <20220603035415.1243913-2-patrick.wang.shcn@gmail.com>
Date: Fri, 3 Jun 2022 11:54:12 +0800
From: Patrick Wang <patrick.wang.shcn@...il.com>
To: catalin.marinas@....com, akpm@...ux-foundation.org
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
yee.lee@...iatek.com, patrick.wang.shcn@...il.com
Subject: [PATCH v2 1/4] mm: kmemleak: add OBJECT_PHYS flag for objects allocated with physical address

Add the OBJECT_PHYS flag for objects. This flag is used
to identify objects allocated with a physical address.
The create_object() function gains an extra argument to
set this flag.

Suggested-by: Catalin Marinas <catalin.marinas@....com>
Signed-off-by: Patrick Wang <patrick.wang.shcn@...il.com>
---
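Note (not part of the patch): a minimal userspace sketch of how the new
flag composes with the existing ones. track_object() is a hypothetical
stand-in for create_object()'s flag handling, not a kernel API; the
values for OBJECT_NO_SCAN, OBJECT_FULL_SCAN and OBJECT_PHYS come from
the hunk below, and OBJECT_ALLOCATED is (1 << 0) in mm/kmemleak.c.

#include <stdbool.h>
#include <stdio.h>

#define OBJECT_ALLOCATED	(1 << 0)
#define OBJECT_NO_SCAN		(1 << 2)
#define OBJECT_FULL_SCAN	(1 << 3)
#define OBJECT_PHYS		(1 << 4)	/* new in this patch */

/* Mirrors the flag composition create_object() uses after this patch. */
static unsigned int track_object(bool is_phys)
{
	return OBJECT_ALLOCATED | (is_phys ? OBJECT_PHYS : 0);
}

int main(void)
{
	unsigned int virt = track_object(false);
	unsigned int phys = track_object(true);

	/* Callers can later test the bit to tell the two kinds apart. */
	printf("virtual object:  OBJECT_PHYS %s\n",
	       (virt & OBJECT_PHYS) ? "set" : "clear");
	printf("physical object: OBJECT_PHYS %s\n",
	       (phys & OBJECT_PHYS) ? "set" : "clear");
	return 0;
}
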
mm/kmemleak.c | 23 +++++++++++++----------
1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index a182f5ddaf68..1e9e0aa93ae5 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -172,6 +172,8 @@ struct kmemleak_object {
#define OBJECT_NO_SCAN (1 << 2)
/* flag set to fully scan the object when scan_area allocation failed */
#define OBJECT_FULL_SCAN (1 << 3)
+/* flag set for object allocated with physical address */
+#define OBJECT_PHYS (1 << 4)
#define HEX_PREFIX " "
/* number of bytes to print per line; must be 16 or 32 */
@@ -575,7 +577,8 @@ static int __save_stack_trace(unsigned long *trace)
* memory block and add it to the object_list and object_tree_root.
*/
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
- int min_count, gfp_t gfp)
+ int min_count, gfp_t gfp,
+ bool is_phys)
{
unsigned long flags;
struct kmemleak_object *object, *parent;
@@ -595,7 +598,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
INIT_HLIST_HEAD(&object->area_list);
raw_spin_lock_init(&object->lock);
atomic_set(&object->use_count, 1);
- object->flags = OBJECT_ALLOCATED;
+ object->flags = OBJECT_ALLOCATED | (is_phys ? OBJECT_PHYS : 0);
object->pointer = ptr;
object->size = kfence_ksize((void *)ptr) ?: size;
object->excess_ref = 0;
@@ -729,10 +732,10 @@ static void delete_object_part(unsigned long ptr, size_t size)
end = object->pointer + object->size;
if (ptr > start)
create_object(start, ptr - start, object->min_count,
- GFP_KERNEL);
+ GFP_KERNEL, object->flags & OBJECT_PHYS);
if (ptr + size < end)
create_object(ptr + size, end - ptr - size, object->min_count,
- GFP_KERNEL);
+ GFP_KERNEL, object->flags & OBJECT_PHYS);
__delete_object(object);
}
@@ -904,7 +907,7 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
if (kmemleak_enabled && ptr && !IS_ERR(ptr))
- create_object((unsigned long)ptr, size, min_count, gfp);
+ create_object((unsigned long)ptr, size, min_count, gfp, false);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
@@ -931,7 +934,7 @@ void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
if (kmemleak_enabled && ptr && !IS_ERR(ptr))
for_each_possible_cpu(cpu)
create_object((unsigned long)per_cpu_ptr(ptr, cpu),
- size, 0, gfp);
+ size, 0, gfp, false);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
@@ -953,7 +956,7 @@ void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp
* the virtual address of the vmalloc'ed block.
*/
if (kmemleak_enabled) {
- create_object((unsigned long)area->addr, size, 2, gfp);
+ create_object((unsigned long)area->addr, size, 2, gfp, false);
object_set_excess_ref((unsigned long)area,
(unsigned long)area->addr);
}
@@ -1966,14 +1969,14 @@ void __init kmemleak_init(void)
/* register the data/bss sections */
create_object((unsigned long)_sdata, _edata - _sdata,
- KMEMLEAK_GREY, GFP_ATOMIC);
+ KMEMLEAK_GREY, GFP_ATOMIC, false);
create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
- KMEMLEAK_GREY, GFP_ATOMIC);
+ KMEMLEAK_GREY, GFP_ATOMIC, false);
/* only register .data..ro_after_init if not within .data */
if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
create_object((unsigned long)__start_ro_after_init,
__end_ro_after_init - __start_ro_after_init,
- KMEMLEAK_GREY, GFP_ATOMIC);
+ KMEMLEAK_GREY, GFP_ATOMIC, false);
}
/*
--
2.25.1