| author | Nicholas Piggin <npiggin@gmail.com> | 2019-07-24 18:46:36 +1000 |
|---|---|---|
| committer | Michael Ellerman <mpe@ellerman.id.au> | 2019-08-20 21:22:20 +1000 |
| commit | 10c4bd7cd28e77aeb8cfa65b23cb3c632ede2a49 | |
| tree | 2d4ed2354a3984f1f308573b84190873aeb40109 /arch/powerpc/perf | |
| parent | 31f210cf42d4b308eacef89b6cb0b1459338b8de | |
powerpc/perf: fix imc allocation failure handling
The alloc_pages_node() return value should be tested for failure before
being passed to page_address(): page_address() on a NULL struct page is
not guaranteed to return NULL, so checking the resulting virtual address
does not reliably detect a failed allocation.
Tested-by: Anju T Sudhakar <anju@linux.vnet.ibm.com>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190724084638.24982-3-npiggin@gmail.com
Diffstat (limited to 'arch/powerpc/perf')
-rw-r--r-- | arch/powerpc/perf/imc-pmu.c | 29 ++++++++++++++++++-----------
1 file changed, 18 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index dea243185ea4..cb50a9e1fd2d 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -577,6 +577,7 @@ static int core_imc_mem_init(int cpu, int size)
 {
 	int nid, rc = 0, core_id = (cpu / threads_per_core);
 	struct imc_mem_info *mem_info;
+	struct page *page;
 
 	/*
 	 * alloc_pages_node() will allocate memory for core in the
@@ -587,11 +588,12 @@ static int core_imc_mem_init(int cpu, int size)
 	mem_info->id = core_id;
 
 	/* We need only vbase for core counters */
-	mem_info->vbase = page_address(alloc_pages_node(nid,
-				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
-				__GFP_NOWARN, get_order(size)));
-	if (!mem_info->vbase)
+	page = alloc_pages_node(nid,
+			GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+			__GFP_NOWARN, get_order(size));
+	if (!page)
 		return -ENOMEM;
+	mem_info->vbase = page_address(page);
 
 	/* Init the mutex */
 	core_imc_refc[core_id].id = core_id;
@@ -849,15 +851,17 @@ static int thread_imc_mem_alloc(int cpu_id, int size)
 	int nid = cpu_to_node(cpu_id);
 
 	if (!local_mem) {
+		struct page *page;
 		/*
 		 * This case could happen only once at start, since we dont
 		 * free the memory in cpu offline path.
 		 */
-		local_mem = page_address(alloc_pages_node(nid,
+		page = alloc_pages_node(nid,
 				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
-				__GFP_NOWARN, get_order(size)));
-		if (!local_mem)
+				__GFP_NOWARN, get_order(size));
+		if (!page)
 			return -ENOMEM;
+		local_mem = page_address(page);
 
 		per_cpu(thread_imc_mem, cpu_id) = local_mem;
 
@@ -1095,11 +1099,14 @@ static int trace_imc_mem_alloc(int cpu_id, int size)
 	int core_id = (cpu_id / threads_per_core);
 
 	if (!local_mem) {
-		local_mem = page_address(alloc_pages_node(phys_id,
-				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
-				__GFP_NOWARN, get_order(size)));
-		if (!local_mem)
+		struct page *page;
+
+		page = alloc_pages_node(phys_id,
+				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+				__GFP_NOWARN, get_order(size));
+		if (!page)
 			return -ENOMEM;
+		local_mem = page_address(page);
 		per_cpu(trace_imc_mem, cpu_id) = local_mem;
 
 		/* Initialise the counters for trace mode */
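The same fix pattern repeats in all three hunks. As a standalone sketch of
the corrected allocation sequence (the helper name imc_alloc_counter_mem is
illustrative only and does not appear in the patch):

/* Hypothetical helper illustrating the corrected pattern; not in the patch. */
#include <linux/gfp.h>
#include <linux/mm.h>

static void *imc_alloc_counter_mem(int nid, int size)
{
	struct page *page;

	/*
	 * alloc_pages_node() returns NULL on failure, so the struct page
	 * pointer must be checked before page_address() is applied to it.
	 */
	page = alloc_pages_node(nid,
			GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
			__GFP_NOWARN, get_order(size));
	if (!page)
		return NULL;	/* callers translate this into -ENOMEM */

	/* Only now is it safe to convert the page to a virtual address. */
	return page_address(page);
}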