Message-ID: <20240311164638.2015063-3-pasha.tatashin@soleen.com>
Date: Mon, 11 Mar 2024 16:46:26 +0000
From: Pasha Tatashin <pasha.tatashin@...een.com>
To: linux-kernel@...r.kernel.org,
linux-mm@...ck.org,
akpm@...ux-foundation.org,
x86@...nel.org,
bp@...en8.de,
brauner@...nel.org,
bristot@...hat.com,
bsegall@...gle.com,
dave.hansen@...ux.intel.com,
dianders@...omium.org,
dietmar.eggemann@....com,
eric.devolder@...cle.com,
hca@...ux.ibm.com,
hch@...radead.org,
hpa@...or.com,
jacob.jun.pan@...ux.intel.com,
jgg@...pe.ca,
jpoimboe@...nel.org,
jroedel@...e.de,
juri.lelli@...hat.com,
kent.overstreet@...ux.dev,
kinseyho@...gle.com,
kirill.shutemov@...ux.intel.com,
lstoakes@...il.com,
luto@...nel.org,
mgorman@...e.de,
mic@...ikod.net,
michael.christie@...cle.com,
mingo@...hat.com,
mjguzik@...il.com,
mst@...hat.com,
npiggin@...il.com,
peterz@...radead.org,
pmladek@...e.com,
rick.p.edgecombe@...el.com,
rostedt@...dmis.org,
surenb@...gle.com,
tglx@...utronix.de,
urezki@...il.com,
vincent.guittot@...aro.org,
vschneid@...hat.com,
pasha.tatashin@...een.com
Subject: [RFC 02/14] fork: Clean-up ifdef logic around stack allocation
There is an unneeded OR in the ifdefs around the functions that allocate
and free kernel stacks from either the direct map or vmap. Adding dynamic
stack support would complicate this logic even further.
Therefore, clean it up by changing the order of the checks so that the OR
is no longer needed.
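For context, the resulting preprocessor nesting looks roughly like the
sketch below (declarations only, bodies elided; this illustrates the
structure, not the actual kernel/fork.c code. free_thread_stack() is
taken from the hunks below, the allocator counterpart is shown purely
for illustration):

/* After this patch: VMAP_STACK is checked first, on its own. */
#ifdef CONFIG_VMAP_STACK

/* vmalloc-backed stacks; freed stacks are cached to limit TLB flushes */
static int alloc_thread_stack_node(struct task_struct *tsk, int node);
static void free_thread_stack(struct task_struct *tsk);

#else /* !CONFIG_VMAP_STACK */

#if THREAD_SIZE >= PAGE_SIZE

/* stacks come straight from the page allocator (direct map) */
static int alloc_thread_stack_node(struct task_struct *tsk, int node);
static void free_thread_stack(struct task_struct *tsk);

#else /* !(THREAD_SIZE >= PAGE_SIZE) */

/* stacks come from a dedicated kmem_cache (thread_stack_cache) */
static int alloc_thread_stack_node(struct task_struct *tsk, int node);
static void free_thread_stack(struct task_struct *tsk);

#endif /* THREAD_SIZE >= PAGE_SIZE */
#endif /* CONFIG_VMAP_STACK */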
Signed-off-by: Pasha Tatashin <pasha.tatashin@...een.com>
---
kernel/fork.c | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 0d944e92a43f..32600bf2422a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -179,13 +179,7 @@ static inline void free_task_struct(struct task_struct *tsk)
kmem_cache_free(task_struct_cachep, tsk);
}
-/*
- * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
- * kmemcache based allocator.
- */
-# if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)
-
-# ifdef CONFIG_VMAP_STACK
+#ifdef CONFIG_VMAP_STACK
/*
* vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
* flush. Try to minimize the number of calls by caching stacks.
@@ -337,7 +331,13 @@ static void free_thread_stack(struct task_struct *tsk)
tsk->stack_vm_area = NULL;
}
-# else /* !CONFIG_VMAP_STACK */
+#else /* !CONFIG_VMAP_STACK */
+
+/*
+ * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
+ * kmemcache based allocator.
+ */
+#if THREAD_SIZE >= PAGE_SIZE
static void thread_stack_free_rcu(struct rcu_head *rh)
{
@@ -369,8 +369,7 @@ static void free_thread_stack(struct task_struct *tsk)
tsk->stack = NULL;
}
-# endif /* CONFIG_VMAP_STACK */
-# else /* !(THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)) */
+#else /* !(THREAD_SIZE >= PAGE_SIZE) */
static struct kmem_cache *thread_stack_cache;
@@ -409,7 +408,8 @@ void thread_stack_cache_init(void)
BUG_ON(thread_stack_cache == NULL);
}
-# endif /* THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) */
+#endif /* THREAD_SIZE >= PAGE_SIZE */
+#endif /* CONFIG_VMAP_STACK */
/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;
--
2.44.0.278.ge034bb2e1d-goog