[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20240329105610.922675-2-dawei.li@shingroup.cn>
Date: Fri, 29 Mar 2024 18:56:09 +0800
From: Dawei Li <dawei.li@...ngroup.cn>
To: davem@...emloft.net,
edumazet@...gle.com,
kuba@...nel.org,
pabeni@...hat.com,
ioana.ciornei@....com,
wintera@...ux.ibm.com,
twinkler@...ux.ibm.com
Cc: netdev@...r.kernel.org,
linux-kernel@...r.kernel.org,
linux-s390@...r.kernel.org,
Dawei Li <dawei.li@...ngroup.cn>
Subject: [PATCH net-next 1/2] net/iucv: Avoid explicit cpumask var allocation on stack
For a CONFIG_CPUMASK_OFFSTACK=y kernel, explicit allocation of a cpumask
variable on the stack is not recommended since it can cause a potential
stack overflow.
Instead, kernel code should always use the *cpumask_var API(s) to allocate
cpumask variables in a config-neutral way, leaving the allocation strategy
to CONFIG_CPUMASK_OFFSTACK.
Use the *cpumask_var API(s) to address it.
Signed-off-by: Dawei Li <dawei.li@...ngroup.cn>
---
net/iucv/iucv.c | 37 ++++++++++++++++++++++++++-----------
1 file changed, 26 insertions(+), 11 deletions(-)
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index a4ab615ca3e3..b51f46ec32f9 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -520,14 +520,19 @@ static void iucv_setmask_mp(void)
*/
static void iucv_setmask_up(void)
{
- cpumask_t cpumask;
+ cpumask_var_t cpumask;
int cpu;
+ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
+ return;
+
/* Disable all cpu but the first in cpu_irq_cpumask. */
- cpumask_copy(&cpumask, &iucv_irq_cpumask);
- cpumask_clear_cpu(cpumask_first(&iucv_irq_cpumask), &cpumask);
- for_each_cpu(cpu, &cpumask)
+ cpumask_copy(cpumask, &iucv_irq_cpumask);
+ cpumask_clear_cpu(cpumask_first(&iucv_irq_cpumask), cpumask);
+ for_each_cpu(cpu, cpumask)
smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
+
+ free_cpumask_var(cpumask);
}
/*
@@ -628,23 +633,33 @@ static int iucv_cpu_online(unsigned int cpu)
static int iucv_cpu_down_prep(unsigned int cpu)
{
- cpumask_t cpumask;
+ cpumask_var_t cpumask;
+ int ret = 0;
if (!iucv_path_table)
return 0;
- cpumask_copy(&cpumask, &iucv_buffer_cpumask);
- cpumask_clear_cpu(cpu, &cpumask);
- if (cpumask_empty(&cpumask))
+ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpumask_copy(cpumask, &iucv_buffer_cpumask);
+ cpumask_clear_cpu(cpu, cpumask);
+ if (cpumask_empty(cpumask)) {
/* Can't offline last IUCV enabled cpu. */
- return -EINVAL;
+ ret = -EINVAL;
+ goto __free_cpumask;
+ }
iucv_retrieve_cpu(NULL);
if (!cpumask_empty(&iucv_irq_cpumask))
- return 0;
+ goto __free_cpumask;
+
smp_call_function_single(cpumask_first(&iucv_buffer_cpumask),
iucv_allow_cpu, NULL, 1);
- return 0;
+
+__free_cpumask:
+ free_cpumask_var(cpumask);
+ return ret;
}
/**
--
2.27.0
Powered by blists - more mailing lists