author		Will Deacon <will.deacon@arm.com>	2017-08-10 13:29:06 +0100
committer	Will Deacon <will.deacon@arm.com>	2017-12-11 13:40:29 +0000
commit		85d13c001497b481b4a8cc5d7db243cc44ac2bd8 (patch)
tree		aa9f9db5f6d9b6856c8cbb32bd0c13cdd6cd3430 /arch/arm64/mm/context.c
parent		7655abb953860485940d4de74fb45a8192149bb6 (diff)
download	linux-85d13c001497b481b4a8cc5d7db243cc44ac2bd8.tar.bz2
arm64: mm: Remove pre_ttbr0_update_workaround for Falkor erratum #E1003
The pre_ttbr0_update_workaround hook is called prior to context-switching TTBR0 because Falkor erratum E1003 can cause TLB allocation with the wrong ASID if both the ASID and the base address of the TTBR are updated at the same time. With the ASID sitting safely in TTBR1, we no longer update things atomically, so we can remove the pre_ttbr0_update_workaround macro as it's no longer required.

The erratum infrastructure and documentation is left around for #E1003, as it will be required by the entry trampoline code in a future patch.

Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Laura Abbott <labbott@redhat.com>
Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
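To make the ordering argument concrete, below is a minimal C sketch of the context-switch sequence that the parent commit (7655abb95386, which moved the ASID out of TTBR0 and into TTBR1) makes possible. It is illustrative only: the kernel's real switch path is the cpu_do_switch_mm assembly routine in arch/arm64/mm/proc.S, and the names switch_mm_sketch and TTBR_ASID_MASK_SKETCH are invented for this example.

#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/sysreg.h>

/* The architectural ASID field occupies TTBRx_EL1[63:48]. */
#define TTBR_ASID_SHIFT		48
#define TTBR_ASID_MASK_SKETCH	(0xffffULL << TTBR_ASID_SHIFT)

/*
 * Illustrative sketch, not the kernel's implementation. With TCR_EL1.A1 == 1
 * the active ASID is taken from TTBR1_EL1, so the ASID and the page-table
 * base address are written by two separate system-register updates.
 */
static void switch_mm_sketch(phys_addr_t pgd_phys, u64 asid)
{
	u64 ttbr1 = read_sysreg(ttbr1_el1);

	/* Step 1: install the new ASID via TTBR1_EL1. */
	ttbr1 &= ~TTBR_ASID_MASK_SKETCH;
	ttbr1 |= (asid << TTBR_ASID_SHIFT) & TTBR_ASID_MASK_SKETCH;
	write_sysreg(ttbr1, ttbr1_el1);
	isb();

	/* Step 2: install the new translation table base via TTBR0_EL1. */
	write_sysreg(pgd_phys, ttbr0_el1);
	isb();

	/*
	 * No single register write changes both the ASID and the base
	 * address, so the E1003 pre_ttbr0_update_workaround (temporarily
	 * switching TTBR0 to a reserved ASID) is not needed on this path.
	 */
}

The point the commit message relies on is simply that, with the two fields split across registers, there is no longer a single TTBR write that updates the ASID and the base address at the same time, which is the condition Falkor erratum E1003 is sensitive to.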
Diffstat (limited to 'arch/arm64/mm/context.c')
-rw-r--r--	arch/arm64/mm/context.c	11
1 file changed, 0 insertions, 11 deletions
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 6f4017046323..78a2dc596fee 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -79,13 +79,6 @@ void verify_cpu_asid_bits(void)
 	}
 }
 
-static void set_reserved_asid_bits(void)
-{
-	if (IS_ENABLED(CONFIG_QCOM_FALKOR_ERRATUM_1003) &&
-	    cpus_have_const_cap(ARM64_WORKAROUND_QCOM_FALKOR_E1003))
-		__set_bit(FALKOR_RESERVED_ASID, asid_map);
-}
-
 static void flush_context(unsigned int cpu)
 {
 	int i;
@@ -94,8 +87,6 @@ static void flush_context(unsigned int cpu)
 	/* Update the list of reserved ASIDs and the ASID bitmap. */
 	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
 
-	set_reserved_asid_bits();
-
 	for_each_possible_cpu(i) {
 		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
 		/*
@@ -254,8 +245,6 @@ static int asids_init(void)
 		panic("Failed to allocate bitmap for %lu ASIDs\n",
 		      NUM_USER_ASIDS);
 
-	set_reserved_asid_bits();
-
 	pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
 	return 0;
 }