Message-Id: <20191006171213.508941626@linuxfoundation.org>
Date: Sun, 6 Oct 2019 19:20:43 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
        stable@...r.kernel.org, Anju T Sudhakar <anju@...ux.vnet.ibm.com>,
        Nicholas Piggin <npiggin@...il.com>,
        "Aneesh Kumar K.V" <aneesh.kumar@...ux.ibm.com>,
        Michael Ellerman <mpe@...erman.id.au>,
        Sasha Levin <sashal@...nel.org>
Subject: [PATCH 5.2 059/137] powerpc/perf: fix imc allocation failure handling

From: Nicholas Piggin <npiggin@...il.com>

[ Upstream commit 10c4bd7cd28e77aeb8cfa65b23cb3c632ede2a49 ]

The alloc_pages_node return value should be tested for failure
before being passed to page_address.
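
To illustrate, this is the pattern the fix applies in each caller
(simplified sketch only, not the patched kernel code; "nid", "size"
and "vbase" stand in for the per-caller variables). The struct page
pointer itself must be tested, because page_address(NULL) is not
guaranteed to return NULL, so the old check on page_address()'s
result could miss a failed allocation:

	struct page *page;

	/* alloc_pages_node() returns NULL on allocation failure. */
	page = alloc_pages_node(nid,
			GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
			__GFP_NOWARN, get_order(size));
	if (!page)
		return -ENOMEM;

	/* Convert only once the page is known to be valid. */
	vbase = page_address(page);
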
Tested-by: Anju T Sudhakar <anju@...ux.vnet.ibm.com>
Signed-off-by: Nicholas Piggin <npiggin@...il.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@...ux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@...erman.id.au>
Link: https://lore.kernel.org/r/20190724084638.24982-3-npiggin@gmail.com
Signed-off-by: Sasha Levin <sashal@...nel.org>
---
 arch/powerpc/perf/imc-pmu.c | 29 ++++++++++++++++++-----------
 1 file changed, 18 insertions(+), 11 deletions(-)

diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index 3bdfc1e320964..2231959c56331 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -570,6 +570,7 @@ static int core_imc_mem_init(int cpu, int size)
 {
 	int nid, rc = 0, core_id = (cpu / threads_per_core);
 	struct imc_mem_info *mem_info;
+	struct page *page;
 
 	/*
 	 * alloc_pages_node() will allocate memory for core in the
@@ -580,11 +581,12 @@ static int core_imc_mem_init(int cpu, int size)
 	mem_info->id = core_id;
 
 	/* We need only vbase for core counters */
-	mem_info->vbase = page_address(alloc_pages_node(nid,
-				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
-				__GFP_NOWARN, get_order(size)));
-	if (!mem_info->vbase)
+	page = alloc_pages_node(nid,
+			GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+			__GFP_NOWARN, get_order(size));
+	if (!page)
 		return -ENOMEM;
+	mem_info->vbase = page_address(page);
 
 	/* Init the mutex */
 	core_imc_refc[core_id].id = core_id;
@@ -839,15 +841,17 @@ static int thread_imc_mem_alloc(int cpu_id, int size)
 	int nid = cpu_to_node(cpu_id);
 
 	if (!local_mem) {
+		struct page *page;
 		/*
 		 * This case could happen only once at start, since we dont
 		 * free the memory in cpu offline path.
 		 */
-		local_mem = page_address(alloc_pages_node(nid,
+		page = alloc_pages_node(nid,
 				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
-				__GFP_NOWARN, get_order(size)));
-		if (!local_mem)
+				__GFP_NOWARN, get_order(size));
+		if (!page)
 			return -ENOMEM;
+		local_mem = page_address(page);
 
 		per_cpu(thread_imc_mem, cpu_id) = local_mem;
 	}
@@ -1085,11 +1089,14 @@ static int trace_imc_mem_alloc(int cpu_id, int size)
 	int core_id = (cpu_id / threads_per_core);
 
 	if (!local_mem) {
-		local_mem = page_address(alloc_pages_node(phys_id,
-				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
-				__GFP_NOWARN, get_order(size)));
-		if (!local_mem)
+		struct page *page;
+
+		page = alloc_pages_node(phys_id,
+			GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+			__GFP_NOWARN, get_order(size));
+		if (!page)
 			return -ENOMEM;
+		local_mem = page_address(page);
 		per_cpu(trace_imc_mem, cpu_id) = local_mem;
 
 		/* Initialise the counters for trace mode */
--
2.20.1