Message-ID: <47af6bef68ce0a82da4694174f004d11519e8757.1726571179.git.ritesh.list@gmail.com>
Date: Thu, 19 Sep 2024 08:26:04 +0530
From: "Ritesh Harjani (IBM)" <ritesh.list@...il.com>
To: linuxppc-dev@...ts.ozlabs.org
Cc: Michael Ellerman <mpe@...erman.id.au>,
Nicholas Piggin <npiggin@...il.com>,
Madhavan Srinivasan <maddy@...ux.ibm.com>,
Christophe Leroy <christophe.leroy@...roup.eu>,
Hari Bathini <hbathini@...ux.ibm.com>,
"Aneesh Kumar K . V" <aneesh.kumar@...nel.org>,
Donet Tom <donettom@...ux.vnet.ibm.com>,
Pavithra Prakash <pavrampu@...ux.vnet.ibm.com>,
Nirjhar Roy <nirjhar@...ux.ibm.com>,
LKML <linux-kernel@...r.kernel.org>,
kasan-dev@...glegroups.com,
"Ritesh Harjani (IBM)" <ritesh.list@...il.com>
Subject: [RFC v2 06/13] book3s64/hash: Add hash_debug_pagealloc_alloc_slots() function

This adds a hash_debug_pagealloc_alloc_slots() function instead of open
coding the allocation in htab_initialize(). This is required since we
will later be separating the kfence functionality so that it no longer
depends upon debug_pagealloc. Now that everything required for
debug_pagealloc is under an #ifdef config, bring the
linear_map_hash_slots and linear_map_hash_count variables under the
same config too.

Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@...il.com>
---
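Note: the refactor relies on the usual #ifdef + empty static-inline-stub
pattern, so the caller (htab_initialize()) needs no #ifdef of its own.
Below is a minimal, self-contained userspace sketch of that pattern; the
names mirror the patch but the kernel context (memblock, panic(), page
size) is replaced with stand-ins, so it illustrates the shape only, not
the actual kernel code. Build it with and without
-DCONFIG_DEBUG_PAGEALLOC to see the stub compile away:

/*
 * Userspace sketch of the #ifdef + empty-stub pattern this patch
 * applies. Hypothetical stand-ins, not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

#ifdef CONFIG_DEBUG_PAGEALLOC
static unsigned char *linear_map_hash_slots;
static unsigned long linear_map_hash_count;

static inline void hash_debug_pagealloc_alloc_slots(void)
{
	/* stand-in for memblock_end_of_DRAM() >> PAGE_SHIFT:
	 * one u8 slot per page of the linear mapping */
	linear_map_hash_count = 1024;
	linear_map_hash_slots = calloc(linear_map_hash_count, 1);
	if (!linear_map_hash_slots) {
		/* stand-in for panic() */
		fprintf(stderr, "%s: failed to allocate %lu bytes\n",
			__func__, linear_map_hash_count);
		exit(1);
	}
	printf("allocated %lu slots\n", linear_map_hash_count);
}
#else
/* empty stub: compiles away when the feature is configured out */
static inline void hash_debug_pagealloc_alloc_slots(void) {}
#endif

int main(void)
{
	/* mirrors htab_initialize(): one unconditional call, no #ifdef */
	hash_debug_pagealloc_alloc_slots();
	return 0;
}

With the stub in place, the unconditional call in htab_initialize()
costs nothing when CONFIG_DEBUG_PAGEALLOC is off, which is what lets
the later patches split the kfence handling out cleanly.
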
arch/powerpc/mm/book3s64/hash_utils.c | 29 ++++++++++++++++-----------
1 file changed, 17 insertions(+), 12 deletions(-)
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 6e3860224351..030c120d1399 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -123,8 +123,6 @@ EXPORT_SYMBOL_GPL(mmu_slb_size);
#ifdef CONFIG_PPC_64K_PAGES
int mmu_ci_restrictions;
#endif
-static u8 *linear_map_hash_slots;
-static unsigned long linear_map_hash_count;
struct mmu_hash_ops mmu_hash_ops __ro_after_init;
EXPORT_SYMBOL(mmu_hash_ops);
@@ -274,6 +272,8 @@ void hash__tlbiel_all(unsigned int action)
}
#ifdef CONFIG_DEBUG_PAGEALLOC
+static u8 *linear_map_hash_slots;
+static unsigned long linear_map_hash_count;
static DEFINE_RAW_SPINLOCK(linear_map_hash_lock);
static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
@@ -328,6 +328,19 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
mmu_kernel_ssize, 0);
}
+static inline void hash_debug_pagealloc_alloc_slots(void)
+{
+ if (!debug_pagealloc_enabled())
+ return;
+ linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
+ linear_map_hash_slots = memblock_alloc_try_nid(
+ linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT,
+ ppc64_rma_size, NUMA_NO_NODE);
+ if (!linear_map_hash_slots)
+ panic("%s: Failed to allocate %lu bytes max_addr=%pa\n",
+ __func__, linear_map_hash_count, &ppc64_rma_size);
+}
+
static inline void hash_debug_pagealloc_add_slot(phys_addr_t paddr, int slot)
{
if (!debug_pagealloc_enabled())
@@ -361,6 +374,7 @@ int hash__kernel_map_pages(struct page *page, int numpages,
{
return 0;
}
+static inline void hash_debug_pagealloc_alloc_slots(void) {}
static inline void hash_debug_pagealloc_add_slot(phys_addr_t paddr, int slot) {}
#endif /* CONFIG_DEBUG_PAGEALLOC */
@@ -1223,16 +1237,7 @@ static void __init htab_initialize(void)
prot = pgprot_val(PAGE_KERNEL);
- if (debug_pagealloc_enabled()) {
- linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
- linear_map_hash_slots = memblock_alloc_try_nid(
- linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT,
- ppc64_rma_size, NUMA_NO_NODE);
- if (!linear_map_hash_slots)
- panic("%s: Failed to allocate %lu bytes max_addr=%pa\n",
- __func__, linear_map_hash_count, &ppc64_rma_size);
- }
-
+ hash_debug_pagealloc_alloc_slots();
/* create bolted the linear mapping in the hash table */
for_each_mem_range(i, &base, &end) {
size = end - base;
--
2.46.0