Message-ID: <616bf94910b0c77323ea9ccb86571f78ebfd421b.1726571179.git.ritesh.list@gmail.com>
Date: Thu, 19 Sep 2024 08:26:11 +0530
From: "Ritesh Harjani (IBM)" <ritesh.list@...il.com>
To: linuxppc-dev@...ts.ozlabs.org
Cc: Michael Ellerman <mpe@...erman.id.au>,
Nicholas Piggin <npiggin@...il.com>,
Madhavan Srinivasan <maddy@...ux.ibm.com>,
Christophe Leroy <christophe.leroy@...roup.eu>,
Hari Bathini <hbathini@...ux.ibm.com>,
"Aneesh Kumar K . V" <aneesh.kumar@...nel.org>,
Donet Tom <donettom@...ux.vnet.ibm.com>,
Pavithra Prakash <pavrampu@...ux.vnet.ibm.com>,
Nirjhar Roy <nirjhar@...ux.ibm.com>,
LKML <linux-kernel@...r.kernel.org>,
kasan-dev@...glegroups.com,
"Ritesh Harjani (IBM)" <ritesh.list@...il.com>
Subject: [RFC v2 13/13] book3s64/hash: Early detect debug_pagealloc size requirement

Add a hash_supports_debug_pagealloc() helper to detect whether
debug_pagealloc can be supported on hash or not. It checks two things:
that the debug_pagealloc config is enabled, and that the linear map's
hash slot array fits within the rma_size/4 limit.
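
For illustration (numbers are mine, not from this patch), assuming a
64K PAGE_SIZE and a 768MB RMA: debug_pagealloc needs one u8 slot per
linear map page, and that slot array must fit in a quarter of the RMA:

	max_hash_count   = ppc64_rma_size / 4
	                 = 768MB / 4 = 192M slots
	linear_map_count = memblock_end_of_DRAM() >> PAGE_SHIFT
	                 = one slot per 64K page

	/* 192M slots * 64K per page ~= 12TB of coverable memory */

So on such a config debug_pagealloc stays supportable until the linear
map exceeds roughly 12TB.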

This can then be used early during htab_init_page_sizes() when deciding
the linear map page size, depending on whether hash needs to support
either debug_pagealloc or kfence. Detecting the size overflow this
early means an unsupportable debug_pagealloc no longer forces the
linear map down to the default 4K page size.
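
A simplified sketch of the resulting flow (not the exact kernel code;
pick_large_linear_map_psize() is a hypothetical stand-in for the
16M/1M selection logic visible in the second hunk below):

	init_hpte_page_sizes();
	if (!hash_supports_debug_pagealloc() && !kfence_early_init_enabled()) {
		/* neither feature needs 4K granularity: prefer 16M/1M */
		pick_large_linear_map_psize();	/* hypothetical helper */
	}
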
Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@...il.com>
---
arch/powerpc/mm/book3s64/hash_utils.c | 25 +++++++++++++------------
1 file changed, 13 insertions(+), 12 deletions(-)
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index b6da25719e37..3ffc98b3deb1 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -329,25 +329,26 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long idx,
}
#endif

+static inline bool hash_supports_debug_pagealloc(void)
+{
+ unsigned long max_hash_count = ppc64_rma_size / 4;
+ unsigned long linear_map_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
+
+ if (!debug_pagealloc_enabled() || linear_map_count > max_hash_count)
+ return false;
+ return true;
+}
+
#ifdef CONFIG_DEBUG_PAGEALLOC
static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count;
static DEFINE_RAW_SPINLOCK(linear_map_hash_lock);
static void hash_debug_pagealloc_alloc_slots(void)
{
- unsigned long max_hash_count = ppc64_rma_size / 4;
-
- if (!debug_pagealloc_enabled())
- return;
- linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
- if (unlikely(linear_map_hash_count > max_hash_count)) {
- pr_info("linear map size (%llu) greater than 4 times RMA region (%llu). Disabling debug_pagealloc\n",
- ((u64)linear_map_hash_count << PAGE_SHIFT),
- ppc64_rma_size);
- linear_map_hash_count = 0;
+ if (!hash_supports_debug_pagealloc())
return;
- }
+ linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
linear_map_hash_slots = memblock_alloc_try_nid(
linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT,
ppc64_rma_size, NUMA_NO_NODE);
@@ -1076,7 +1077,7 @@ static void __init htab_init_page_sizes(void)
bool aligned = true;
init_hpte_page_sizes();

- if (!debug_pagealloc_enabled() && !kfence_early_init_enabled()) {
+ if (!hash_supports_debug_pagealloc() && !kfence_early_init_enabled()) {
/*
* Pick a size for the linear mapping. Currently, we only
* support 16M, 1M and 4K which is the default
--
2.46.0