From d98295d31fc116323e1c161f1a100d76e95cde3c Mon Sep 17 00:00:00 2001
From: Nicholas Piggin
Date: Wed, 2 Sep 2020 00:15:21 +1000
Subject: arm64: use asm-generic/mmu_context.h for no-op implementations

Signed-off-by: Nicholas Piggin
Acked-by: Catalin Marinas
Cc: Will Deacon
Cc: linux-arm-kernel@lists.infradead.org
Signed-off-by: Arnd Bergmann
---
 arch/arm64/include/asm/mmu_context.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 0672236e1aea..81189251db41 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -174,9 +174,9 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp)
  * Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
  * take CPU migration into account.
  */
-#define destroy_context(mm)		do { } while(0)
 void check_and_switch_context(struct mm_struct *mm);
 
+#define init_new_context(tsk, mm) init_new_context(tsk, mm)
 static inline int
 init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
@@ -208,6 +208,7 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
 }
 #endif
 
+#define enter_lazy_tlb enter_lazy_tlb
 static inline void
 enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
@@ -248,15 +249,14 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	update_saved_ttbr0(tsk, next);
 }
 
-#define deactivate_mm(tsk,mm)	do { } while (0)
-#define activate_mm(prev,next)	switch_mm(prev, next, current)
-
 void verify_cpu_asid_bits(void);
 void post_ttbr_update_workaround(void);
 
 unsigned long arm64_mm_context_get(struct mm_struct *mm);
 void arm64_mm_context_put(struct mm_struct *mm);
 
+#include <asm-generic/mmu_context.h>
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* !__ASM_MMU_CONTEXT_H */
-- 
cgit v1.2.3
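
For reference, the conversion relies on the override-by-name convention used by the asm-generic headers: each hook in include/asm-generic/mmu_context.h is wrapped in an #ifndef guard on its own name, so an architecture that needs a real implementation claims the name with "#define hook hook" before including the generic header, and every hook it leaves undefined falls back to a no-op. An abridged sketch of that pattern (illustrative, not a verbatim copy of the generic header):

/*
 * Sketch of the asm-generic fallback pattern (abridged).
 * Each hook is provided only when the architecture has not
 * already claimed the name with "#define hook hook".
 */
#ifndef enter_lazy_tlb
static inline void enter_lazy_tlb(struct mm_struct *mm,
				   struct task_struct *tsk)
{
}
#endif

#ifndef init_new_context
static inline int init_new_context(struct task_struct *tsk,
				    struct mm_struct *mm)
{
	return 0;
}
#endif

#ifndef destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
}
#endif

#ifndef deactivate_mm
static inline void deactivate_mm(struct task_struct *tsk,
				 struct mm_struct *mm)
{
}
#endif

#ifndef activate_mm
static inline void activate_mm(struct mm_struct *prev_mm,
			       struct mm_struct *next_mm)
{
	switch_mm(prev_mm, next_mm, current);
}
#endif

This is why the patch can drop destroy_context(), deactivate_mm() and activate_mm() outright: arm64 needed nothing beyond the generic no-ops (and the generic activate_mm() simply calls switch_mm()), while init_new_context() and enter_lazy_tlb() keep their arm64-specific bodies and only gain the #define that suppresses the generic fallbacks.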