From 374d446d25d6271ee615952a3b7f123ba4983c35 Mon Sep 17 00:00:00 2001
From: Laura Abbott
Date: Fri, 13 Jan 2017 22:51:08 +0100
Subject: ARM: 8636/1: Cleanup sanity_check_meminfo

The logic for sanity_check_meminfo has become difficult to follow.
Clean up the code so it's more obvious what it is actually trying to
do. Additionally, meminfo has been removed, so rename the function to
better describe its purpose.

Tested-by: Magnus Lilja
Reviewed-by: Nicolas Pitre
Signed-off-by: Laura Abbott
Signed-off-by: Laura Abbott
Signed-off-by: Russell King
---
 arch/arm/kernel/setup.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 34e3f3c45634..8a8051cf57d1 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -81,7 +81,7 @@ __setup("fpe=", fpe_setup);
 extern void init_default_cache_policy(unsigned long);
 extern void paging_init(const struct machine_desc *desc);
 extern void early_paging_init(const struct machine_desc *);
-extern void sanity_check_meminfo(void);
+extern void adjust_lowmem_bounds(void);
 extern enum reboot_mode reboot_mode;
 extern void setup_dma_zone(const struct machine_desc *desc);
 
@@ -1093,7 +1093,7 @@ void __init setup_arch(char **cmdline_p)
 	setup_dma_zone(mdesc);
 	xen_early_init();
 	efi_init();
-	sanity_check_meminfo();
+	adjust_lowmem_bounds();
 	arm_memblock_init(mdesc);
 
 	early_ioremap_reset();
--
cgit v1.2.3

From 985626564eedc470ce2866e53938303368ad41b7 Mon Sep 17 00:00:00 2001
From: Laura Abbott
Date: Fri, 13 Jan 2017 22:51:45 +0100
Subject: ARM: 8637/1: Adjust memory boundaries after reservations

adjust_lowmem_bounds is responsible for setting up the boundary for
lowmem/highmem. This needs to be set up before memblock reservations
can occur. At the time memblock reservations occur, memory can also be
removed from the system. The lowmem/highmem boundary and end of memory
may be affected by this, but it is currently not recalculated. On some
systems this may be harmless; on others it may result in incorrect
ranges being passed to the main memory allocator. Correct this by
recalculating the lowmem/highmem boundary after all reservations have
been made.

Tested-by: Magnus Lilja
Signed-off-by: Laura Abbott
Signed-off-by: Russell King
---
 arch/arm/kernel/setup.c | 6 ++++++
 arch/arm/mm/mmu.c       | 9 ++++++---
 2 files changed, 12 insertions(+), 3 deletions(-)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 8a8051cf57d1..f4e54503afa9 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -1093,8 +1093,14 @@ void __init setup_arch(char **cmdline_p)
 	setup_dma_zone(mdesc);
 	xen_early_init();
 	efi_init();
+	/*
+	 * Make sure the calculation for lowmem/highmem is set appropriately
+	 * before reserving/allocating any memory
+	 */
 	adjust_lowmem_bounds();
 	arm_memblock_init(mdesc);
+	/* Memory may have been removed so recalculate the bounds. */
+	adjust_lowmem_bounds();
 
 	early_ioremap_reset();
 
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index b8f70a3bb7f8..5cbfd9f86412 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1157,6 +1157,7 @@ void __init adjust_lowmem_bounds(void)
 	phys_addr_t memblock_limit = 0;
 	u64 vmalloc_limit;
 	struct memblock_region *reg;
+	phys_addr_t lowmem_limit = 0;
 
 	/*
 	 * Let's use our own (unoptimized) equivalent of __pa() that is
@@ -1172,14 +1173,14 @@ void __init adjust_lowmem_bounds(void)
 		phys_addr_t block_end = reg->base + reg->size;
 
 		if (reg->base < vmalloc_limit) {
-			if (block_end > arm_lowmem_limit)
+			if (block_end > lowmem_limit)
 				/*
 				 * Compare as u64 to ensure vmalloc_limit does
 				 * not get truncated. block_end should always
 				 * fit in phys_addr_t so there should be no
 				 * issue with assignment.
 				 */
-				arm_lowmem_limit = min_t(u64,
+				lowmem_limit = min_t(u64,
 							 vmalloc_limit,
 							 block_end);
 
@@ -1200,12 +1201,14 @@ void __init adjust_lowmem_bounds(void)
 			if (!IS_ALIGNED(block_start, PMD_SIZE))
 				memblock_limit = block_start;
 			else if (!IS_ALIGNED(block_end, PMD_SIZE))
-				memblock_limit = arm_lowmem_limit;
+				memblock_limit = lowmem_limit;
 			}
 		}
 	}
 
+	arm_lowmem_limit = lowmem_limit;
+
 	high_memory = __va(arm_lowmem_limit - 1) + 1;
 
 	/*
--
cgit v1.2.3
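
The min_t(u64, ...) retained in the loop above matters because phys_addr_t
is only 32 bits wide on non-LPAE configurations, as the in-code comment
notes. A rough standalone sketch of the truncation it guards against
(plain userspace C, a simplified min_t() and hypothetical addresses, not
kernel code):

/*
 * Sketch: why the lowmem comparison is done as u64. With a 32-bit
 * phys_addr_t, a limit sitting at exactly 4GiB would truncate to 0 and
 * every memory block would appear to lie above it.
 */
#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for the kernel's min_t() */
#define min_t(type, x, y) ({		\
	type __x = (x);			\
	type __y = (y);			\
	__x < __y ? __x : __y; })

int main(void)
{
	uint64_t vmalloc_limit = 0x100000000ULL;  /* hypothetical: 4GiB */
	uint32_t block_end = 0xc0000000;          /* hypothetical block end */

	/* comparing as u64 keeps the limit intact: picks 0xc0000000 */
	printf("as u64:     %#llx\n",
	       (unsigned long long)min_t(uint64_t, vmalloc_limit, block_end));

	/* a 32-bit type truncates the limit to 0: picks 0 */
	printf("as 32-bit:  %#llx\n",
	       (unsigned long long)min_t(uint32_t, vmalloc_limit, block_end));

	return 0;
}

Built with gcc, this prints 0xc0000000 for the u64 comparison and 0 for
the 32-bit one, which would be the wrong bound.
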
From 035e787543de709f29b38752251d4724200ec353 Mon Sep 17 00:00:00 2001
From: Florian Fainelli
Date: Thu, 19 Jan 2017 01:26:28 +0100
Subject: ARM: 8644/1: Reduce "CPU: shutdown" message to debug level

Similar to c68b0274fb3c ("ARM: reduce "Booted secondary processor"
message to debug level"), demote the "CPU: shutdown" pr_notice() to a
pr_debug().

Signed-off-by: Florian Fainelli
Signed-off-by: Russell King
---
 arch/arm/kernel/smp.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 7dd14e8395e6..46377c40d056 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -251,7 +251,7 @@ void __cpu_die(unsigned int cpu)
 		pr_err("CPU%u: cpu didn't die\n", cpu);
 		return;
 	}
-	pr_notice("CPU%u: shutdown\n", cpu);
+	pr_debug("CPU%u: shutdown\n", cpu);
 
 	/*
 	 * platform_cpu_kill() is generally expected to do the powering off
--
cgit v1.2.3

From ad475117d2015781789364d599b85c67254680a1 Mon Sep 17 00:00:00 2001
From: Afzal Mohammed
Date: Wed, 1 Feb 2017 13:47:34 +0100
Subject: ARM: 8649/2: nommu: remove Hivecs configuration in asm

Now that the exception base address is handled dynamically for
processors with CP15, remove the Hivecs configuration from assembly.

Signed-off-by: afzal mohammed
Tested-by: Vladimir Murzin
Signed-off-by: Russell King
---
 arch/arm/kernel/head-nommu.S | 5 -----
 1 file changed, 5 deletions(-)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 6b4eb27b8758..2e21e08de747 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -151,11 +151,6 @@ __after_proc_init:
 #endif
 #ifdef CONFIG_CPU_ICACHE_DISABLE
 	bic	r0, r0, #CR_I
-#endif
-#ifdef CONFIG_CPU_HIGH_VECTOR
-	orr	r0, r0, #CR_V
-#else
-	bic	r0, r0, #CR_V
 #endif
 	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
 #elif defined (CONFIG_CPU_V7M)
--
cgit v1.2.3
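
For context on the deleted lines: CR_V is bit 13 of the CP15 control
register (SCTLR). When it is set the exception vectors sit at 0xffff0000
("hivecs"); when clear they sit at 0x00000000. A rough C sketch of the
same toggle, modelled on the kernel's get_cr()/set_cr() cp15 accessors
(illustrative only, ARM-specific inline asm; set_hivecs() is a
hypothetical helper, and a real caller would also issue a barrier after
the write):

#define CR_V	(1 << 13)	/* SCTLR.V: vectors at 0xffff0000 when set */

/* read the CP15 control register (SCTLR) */
static inline unsigned long get_cr(void)
{
	unsigned long val;

	asm volatile("mrc p15, 0, %0, c1, c0, 0" : "=r" (val));
	return val;
}

/* write the CP15 control register (SCTLR) */
static inline void set_cr(unsigned long val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 0" : : "r" (val) : "cc");
}

/* hypothetical helper: place the vectors high (0xffff0000) or low (0x0) */
static void set_hivecs(int enable)
{
	unsigned long cr = get_cr();

	if (enable)
		cr |= CR_V;
	else
		cr &= ~CR_V;
	set_cr(cr);
}

The deleted CONFIG_CPU_HIGH_VECTOR #ifdef made this choice at build time;
per the commit message, it is now handled dynamically for processors with
CP15, so the assembly no longer needs to touch CR_V.
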
From 050d18d1c65113b4558d86d53465ebe1d04910fb Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Mon, 30 Jan 2017 18:29:28 +0100
Subject: ARM: 8650/1: module: handle negative R_ARM_PREL31 addends correctly

According to the spec 'ELF for the ARM Architecture' (IHI 0044E),
addends for R_ARM_PREL31 relocations are 31-bit signed quantities, so we
need to sign extend the value to 32 bits before it can be used as an
offset in the calculation of the relocated value.

We have not been bitten by this because these relocations are usually
emitted against the start of a section, which means the addends never
assume negative values in practice. But it is a bug nonetheless, so fix
it.

Signed-off-by: Ard Biesheuvel
Signed-off-by: Russell King
---
 arch/arm/kernel/module.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 4f14b5ce6535..80254b47dc34 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -155,8 +155,17 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 			break;
 
 		case R_ARM_PREL31:
-			offset = *(u32 *)loc + sym->st_value - loc;
-			*(u32 *)loc = offset & 0x7fffffff;
+			offset = (*(s32 *)loc << 1) >> 1; /* sign extend */
+			offset += sym->st_value - loc;
+			if (offset >= 0x40000000 || offset < -0x40000000) {
+				pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
+				       module->name, relindex, i, symname,
+				       ELF32_R_TYPE(rel->r_info), loc,
+				       sym->st_value);
+				return -ENOEXEC;
+			}
+			*(u32 *)loc &= 0x80000000;
+			*(u32 *)loc |= offset & 0x7fffffff;
 			break;
 
 		case R_ARM_MOVW_ABS_NC:
--
cgit v1.2.3
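
As a standalone illustration of the sign extension above (plain userspace
C with hypothetical values, not kernel code): bit 30 is the sign bit of
the 31-bit addend, and the shift trick relies on the arithmetic right
shift and truncating conversion the kernel assumes from its compiler. A
masking variant is shown alongside for comparison.

#include <stdint.h>
#include <stdio.h>

/* the shift trick from the patch, done here on an unsigned word:
 * push bit 30 up into the sign bit, then arithmetic-shift it back */
static int32_t sext31_shift(uint32_t word)
{
	return ((int32_t)(word << 1)) >> 1;
}

/* same result via explicit masking of the 31-bit addend */
static int32_t sext31_masked(uint32_t word)
{
	uint32_t addend = word & 0x7fffffff;

	if (addend & 0x40000000)	/* negative as a 31-bit quantity */
		addend |= 0x80000000;	/* extend the sign into bit 31 */

	return (int32_t)addend;
}

int main(void)
{
	uint32_t word = 0x7ffffffc;	/* 31-bit addend of -4, bit 31 clear */

	printf("shift trick: %ld\n", (long)sext31_shift(word));
	printf("masked:      %ld\n", (long)sext31_masked(word));

	return 0;
}

Both lines print -4. Note that the new kernel code also preserves bit 31
of the in-place word (the &= 0x80000000 / |= sequence), which the old
assignment simply cleared.
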