author      Will Deacon <will.deacon@arm.com>              2013-02-28 17:48:40 +0100
committer   Russell King <rmk+kernel@arm.linux.org.uk>     2013-03-03 22:54:16 +0000
commit      89c7e4b8bbb3d4fa52df5746a8ad38e610143651 (patch)
tree        f6d3d835f5fca88fd4de01f166d782f16c2fe3c9 /arch/arm
parent      862c588f062fe9339a180cf6429e4df1855c376a (diff)
download    linux-89c7e4b8bbb3d4fa52df5746a8ad38e610143651.tar.bz2
ARM: 7661/1: mm: perform explicit branch predictor maintenance when required
The ARM ARM requires branch predictor maintenance if, for a given ASID, the instructions at a specific virtual address appear to change. From the kernel's point of view, that means:

	- Changing the kernel's view of memory (e.g. switching to the identity map)
	- ASID rollover (since ASIDs will be re-allocated to new tasks)

This patch adds explicit branch predictor maintenance when either of the two conditions above is met.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
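[Editor's note] For context, branch predictor maintenance on ARMv7 is done by writing to the CP15 BPIALL register (or BPIALLIS for the inner-shareable variant), followed by an ISB so that subsequent instruction fetches observe the invalidation. The sketch below is a minimal, illustrative local flush under that assumption; it is not the kernel's actual local_flush_bp_all(), which additionally dispatches on the CPU's TLB/BP feature flags and handles pre-v7 and SMP variants. The function name is hypothetical.

    /*
     * Illustrative sketch only: a local branch predictor flush for ARMv7.
     * The real local_flush_bp_all() in the kernel selects between BPIALL
     * and BPIALLIS (and older maintenance ops) based on CPU capabilities.
     */
    static inline void example_local_flush_bp_all(void)
    {
    	const int zero = 0;

    	/* BPIALL: invalidate all entries in the branch predictor */
    	asm volatile("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "memory");

    	/* ISB: make the invalidation visible to later instruction fetches */
    	asm volatile("isb" : : : "memory");
    }

This is why each call site below pairs local_flush_bp_all() with cpu_switch_mm()/local_flush_tlb_all(): once the translation visible under the current ASID changes, stale branch predictor entries must be discarded as well.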
Diffstat (limited to 'arch/arm')
-rw-r--r--arch/arm/kernel/smp.c1
-rw-r--r--arch/arm/kernel/suspend.c1
-rw-r--r--arch/arm/mm/context.c4
-rw-r--r--arch/arm/mm/idmap.c1
4 files changed, 6 insertions, 1 deletion
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 1bdfd87c8e41..31644f1978d5 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -285,6 +285,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
* switch away from it before attempting any exclusive accesses.
*/
cpu_switch_mm(mm->pgd, mm);
+ local_flush_bp_all();
enter_lazy_tlb(mm, current);
local_flush_tlb_all();
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index 358bca3a995e..c59c97ea8268 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -68,6 +68,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
ret = __cpu_suspend(arg, fn);
if (ret == 0) {
cpu_switch_mm(mm->pgd, mm);
+ local_flush_bp_all();
local_flush_tlb_all();
}
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 44d4ee52f3e2..a5a4b2bc42ba 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -212,8 +212,10 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
atomic64_set(&mm->context.id, asid);
}
- if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
+ if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
+ local_flush_bp_all();
local_flush_tlb_all();
+ }
atomic64_set(&per_cpu(active_asids, cpu), asid);
cpumask_set_cpu(cpu, mm_cpumask(mm));
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 2dffc010cc41..5ee505c937d1 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -141,6 +141,7 @@ void setup_mm_for_reboot(void)
{
/* Switch to the identity mapping. */
cpu_switch_mm(idmap_pgd, &init_mm);
+ local_flush_bp_all();
#ifdef CONFIG_CPU_HAS_ASID
/*