Message-Id: <20250102065704.647693-1-guoweikang.kernel@gmail.com>
Date: Thu, 2 Jan 2025 14:57:03 +0800
From: Guo Weikang <guoweikang.kernel@...il.com>
To: Catalin Marinas <catalin.marinas@....com>,
Mike Rapoport <rppt@...nel.org>,
Andrew Morton <akpm@...ux-foundation.org>
Cc: Guo Weikang <guoweikang.kernel@...il.com>,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: [PATCH] mm/memmap: Prevent double scanning of memmap by kmemleak
kmemleak explicitly scans the mem_map by traversing the valid struct page
objects. However, memmap_alloc() also adds this memory to the gray object
list, causing it to be scanned twice. Remove memmap_alloc() from the
kmemleak scan list and add a comment to clarify the behavior.
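For reference, kmemleak already walks every valid page during its scan,
roughly as follows (abridged from kmemleak_scan() in mm/kmemleak.c; the
exact code varies by kernel version):

	for_each_populated_zone(zone) {
		unsigned long pfn;

		for (pfn = zone->zone_start_pfn; pfn < zone_end_pfn(zone); pfn++) {
			struct page *page = pfn_to_online_page(pfn);

			if (!page || page_zone(page) != zone)
				continue;
			/* only scan pages that are in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
		}
	}

Registering the memmap as a gray object on top of this only duplicates
the scanning work.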
Link: https://lore.kernel.org/lkml/CAOm6qn=FVeTpH54wGDFMHuCOeYtvoTx30ktnv9-w3Nh8RMofEA@mail.gmail.com/
Signed-off-by: Guo Weikang <guoweikang.kernel@...il.com>
---
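Note for reviewers: memblock passes these flags through the @end argument
of the allocator, so MEMBLOCK_ALLOC_NOLEAKTRACE both skips the kmemleak
registration and gets pumped up to memblock.current_limit just like
MEMBLOCK_ALLOC_ACCESSIBLE. Abridged from mm/memblock.c
(memblock_find_in_range_node() and memblock_alloc_range_nid(); exact
context may differ by tree):

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
	    end == MEMBLOCK_ALLOC_NOLEAKTRACE)
		end = memblock.current_limit;

	...

	/* skip the kmemleak registration for NOLEAKTRACE allocations */
	if (end != MEMBLOCK_ALLOC_NOLEAKTRACE)
		kmemleak_alloc_phys(found, size, 0);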
include/linux/memblock.h | 8 +++++++-
mm/mm_init.c | 8 ++++++--
mm/sparse-vmemmap.c | 20 ++------------------
3 files changed, 15 insertions(+), 21 deletions(-)
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 673d5cae7c81..b0483c534ef7 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -375,7 +375,13 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
}
#endif /* CONFIG_NUMA */
-/* Flags for memblock allocation APIs */
+/*
+ * Flags for memblock allocation APIs.
+ * MEMBLOCK_ALLOC_ANYWHERE and MEMBLOCK_ALLOC_ACCESSIBLE
+ * indicate whether the allocation is limited by memblock.current_limit.
+ * MEMBLOCK_ALLOC_NOLEAKTRACE not only excludes the allocation from
+ * kmemleak scanning but also implies MEMBLOCK_ALLOC_ACCESSIBLE.
+ */
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE 0
#define MEMBLOCK_ALLOC_NOLEAKTRACE 1
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 24b68b425afb..71b58f5f2492 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1580,6 +1580,10 @@ static void __init free_area_init_core(struct pglist_data *pgdat)
}
}
+/*
+ * Kmemleak scans the mem_map by traversing all valid `struct page' objects,
+ * so this memblock allocation does not need to be added to the scan list.
+ */
void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, int nid, bool exact_nid)
{
@@ -1587,11 +1591,11 @@ void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
if (exact_nid)
ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
- MEMBLOCK_ALLOC_ACCESSIBLE,
+ MEMBLOCK_ALLOC_NOLEAKTRACE,
nid);
else
ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
- MEMBLOCK_ALLOC_ACCESSIBLE,
+ MEMBLOCK_ALLOC_NOLEAKTRACE,
nid);
if (ptr && size > 0)
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index cec67c5f37d8..b6ac9b1d4ff7 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -27,25 +27,10 @@
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
-
+#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
-/*
- * Allocate a block of memory to be used to back the virtual memory map
- * or to back the page tables that are used to create the mapping.
- * Uses the main allocators if they are available, else bootmem.
- */
-
-static void * __ref __earlyonly_bootmem_alloc(int node,
- unsigned long size,
- unsigned long align,
- unsigned long goal)
-{
- return memblock_alloc_try_nid_raw(size, align, goal,
- MEMBLOCK_ALLOC_ACCESSIBLE, node);
-}
-
void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
/* If the main allocator is up use that, fallback to bootmem. */
@@ -66,8 +51,7 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
}
return NULL;
} else
- return __earlyonly_bootmem_alloc(node, size, size,
- __pa(MAX_DMA_ADDRESS));
+ return memmap_alloc(size, size, __pa(MAX_DMA_ADDRESS), node, false);
}
static void * __meminit altmap_alloc_block_buf(unsigned long size,
--
2.25.1