author		Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>	2014-03-30 11:03:23 +0530
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2014-04-09 12:53:15 +1000
commit		6e556b471036b751aaa1a1b5a189eff76b1a2d0b (patch)
tree		5bbd8d03d88a9986950961a0d60b7da9b7db3dff /arch/powerpc
parent		fa5c11b790f9e18ec6f69680c4aa728157aef5f5 (diff)
download	linux-6e556b471036b751aaa1a1b5a189eff76b1a2d0b.tar.bz2
powerpc/book3s: Fix mc_recoverable_range buffer overrun issue.
Currently we wrongly allocate the mc_recoverable_range buffer (which holds the recoverable ranges) based on the size of the "mcheck-recoverable-ranges" property. This results in allocating too little memory to hold all of the recoverable range entries from /proc/device-tree/ibm,opal/mcheck-recoverable-ranges.

Fix this by allocating the mc_recoverable_range buffer based on the number of recoverable range entries instead of the device property size. Without this change we under-allocate the buffer and run into memory corruption.

Signed-off-by: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
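To illustrate the sizing arithmetic the patch introduces, here is a minimal stand-alone user-space sketch (not kernel code): the 5-cell-per-entry layout and the three-field struct mirror opal.c, while the sample entry count is made up.

#include <stdint.h>
#include <stdio.h>

/* Mirrors struct mcheck_recoverable_range in opal.c: three u64 fields,
 * i.e. 24 bytes per entry on 64-bit. */
struct mcheck_recoverable_range {
	uint64_t start_addr;
	uint64_t end_addr;
	uint64_t recover_addr;
};

int main(void)
{
	/* Hypothetical property: 4 entries of 5 big-endian 32-bit cells. */
	unsigned long psize = 4 * 5 * sizeof(uint32_t);    /* 80 bytes  */

	/* Number of entries, computed as the patch does. */
	unsigned long nr = psize / (sizeof(uint32_t) * 5); /* 4 entries */

	/* Buffer size actually needed to hold the parsed entries. */
	unsigned long need = nr * sizeof(struct mcheck_recoverable_range);

	printf("property: %lu bytes, entries: %lu, buffer needed: %lu bytes\n",
	       psize, nr, need);
	/* Prints 80 / 4 / 96: each device-tree entry is 20 bytes but each
	 * parsed entry needs 24, so sizing the allocation by the property
	 * size (the old code) falls 4 bytes short per entry. */
	return 0;
}

That 4-byte-per-entry shortfall is why sizing the buffer by the raw property length overruns once the ranges are unpacked into 64-bit fields.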
Diffstat (limited to 'arch/powerpc')
-rw-r--r--   arch/powerpc/platforms/powernv/opal.c   28
1 file changed, 20 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 778a2793e75b..3697772e3759 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -102,13 +102,13 @@ int __init early_init_dt_scan_opal(unsigned long node,
 int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
				   const char *uname, int depth, void *data)
 {
-	unsigned long i, size;
+	unsigned long i, psize, size;
 	const __be32 *prop;

 	if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
 		return 0;

-	prop = of_get_flat_dt_prop(node, "mcheck-recoverable-ranges", &size);
+	prop = of_get_flat_dt_prop(node, "mcheck-recoverable-ranges", &psize);

 	if (!prop)
 		return 1;
@@ -116,6 +116,23 @@ int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
pr_debug("Found machine check recoverable ranges.\n");
/*
+ * Calculate number of available entries.
+ *
+ * Each recoverable address range entry is (start address, len,
+ * recovery address), 2 cells each for start and recovery address,
+ * 1 cell for len, totalling 5 cells per entry.
+ */
+ mc_recoverable_range_len = psize / (sizeof(*prop) * 5);
+
+ /* Sanity check */
+ if (!mc_recoverable_range_len)
+ return 1;
+
+ /* Size required to hold all the entries. */
+ size = mc_recoverable_range_len *
+ sizeof(struct mcheck_recoverable_range);
+
+ /*
* Allocate a buffer to hold the MC recoverable ranges. We would be
* accessing them in real mode, hence it needs to be within
* RMO region.
@@ -124,11 +141,7 @@ int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
 							ppc64_rma_size));
 	memset(mc_recoverable_range, 0, size);

-	/*
-	 * Each recoverable address entry is an (start address,len,
-	 * recover address) pair, * 2 cells each, totalling 4 cells per entry.
-	 */
-	for (i = 0; i < size / (sizeof(*prop) * 5); i++) {
+	for (i = 0; i < mc_recoverable_range_len; i++) {
 		mc_recoverable_range[i].start_addr =
 					of_read_number(prop + (i * 5) + 0, 2);
 		mc_recoverable_range[i].end_addr =
@@ -142,7 +155,6 @@ int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
 				mc_recoverable_range[i].end_addr,
 				mc_recoverable_range[i].recover_addr);
 	}
-	mc_recoverable_range_len = i;
 	return 1;
 }
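For readers unfamiliar with the flattened-device-tree cell encoding, a rough user-space sketch of how one 5-cell entry decodes into a range follows. The cell layout follows the comment added in the patch (2 cells start address, 1 cell len, 2 cells recovery address); the helper is only a stand-in for the kernel's of_read_number(), and the sample values are made up.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* ntohl, htonl */

/* Rough stand-in for of_read_number(prop, n): fold n big-endian
 * 32-bit cells into one 64-bit value. */
static uint64_t read_cells(const uint32_t *cells, int n)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < n; i++)
		v = (v << 32) | ntohl(cells[i]);
	return v;
}

int main(void)
{
	/* One made-up 5-cell entry: start (2 cells), len (1), recover (2). */
	uint32_t prop[5] = {
		htonl(0x0), htonl(0x30000000),	/* start address    */
		htonl(0x00010000),		/* len              */
		htonl(0x0), htonl(0x30010000),	/* recovery address */
	};
	uint64_t start   = read_cells(prop + 0, 2);
	uint64_t len     = read_cells(prop + 2, 1);
	uint64_t recover = read_cells(prop + 3, 2);

	printf("range %#llx..%#llx recovers at %#llx\n",
	       (unsigned long long)start,
	       (unsigned long long)(start + len),
	       (unsigned long long)recover);
	return 0;
}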