Diffstat (limited to 'arch')
-rw-r--r-- | arch/riscv/include/asm/mmu_context.h | 54
-rw-r--r-- | arch/riscv/mm/Makefile | 1
-rw-r--r-- | arch/riscv/mm/context.c | 69
3 files changed, 72 insertions, 52 deletions
diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
index 98c76c821367..bf4f097a9051 100644
--- a/arch/riscv/include/asm/mmu_context.h
+++ b/arch/riscv/include/asm/mmu_context.h
@@ -20,8 +20,6 @@
 
 #include <linux/mm.h>
 #include <linux/sched.h>
-#include <asm/tlbflush.h>
-#include <asm/cacheflush.h>
 
 static inline void enter_lazy_tlb(struct mm_struct *mm,
 	struct task_struct *task)
@@ -39,56 +37,8 @@ static inline void destroy_context(struct mm_struct *mm)
 {
 }
 
-/*
- * When necessary, performs a deferred icache flush for the given MM context,
- * on the local CPU. RISC-V has no direct mechanism for instruction cache
- * shoot downs, so instead we send an IPI that informs the remote harts they
- * need to flush their local instruction caches. To avoid pathologically slow
- * behavior in a common case (a bunch of single-hart processes on a many-hart
- * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
- * executing a MM context and instead schedule a deferred local instruction
- * cache flush to be performed before execution resumes on each hart. This
- * actually performs that local instruction cache flush, which implicitly only
- * refers to the current hart.
- */
-static inline void flush_icache_deferred(struct mm_struct *mm)
-{
-#ifdef CONFIG_SMP
-	unsigned int cpu = smp_processor_id();
-	cpumask_t *mask = &mm->context.icache_stale_mask;
-
-	if (cpumask_test_cpu(cpu, mask)) {
-		cpumask_clear_cpu(cpu, mask);
-		/*
-		 * Ensure the remote hart's writes are visible to this hart.
-		 * This pairs with a barrier in flush_icache_mm.
-		 */
-		smp_mb();
-		local_flush_icache_all();
-	}
-
-#endif
-}
-
-static inline void switch_mm(struct mm_struct *prev,
-	struct mm_struct *next, struct task_struct *task)
-{
-	if (likely(prev != next)) {
-		/*
-		 * Mark the current MM context as inactive, and the next as
-		 * active. This is at least used by the icache flushing
-		 * routines in order to determine who should
-		 */
-		unsigned int cpu = smp_processor_id();
-
-		cpumask_clear_cpu(cpu, mm_cpumask(prev));
-		cpumask_set_cpu(cpu, mm_cpumask(next));
-
-		csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE);
-		local_flush_tlb_all();
-
-		flush_icache_deferred(next);
-	}
-}
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	struct task_struct *task);
 
 static inline void activate_mm(struct mm_struct *prev,
 	struct mm_struct *next)
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index b68aac701803..0f1295d8731f 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -9,3 +9,4 @@ obj-y += fault.o
 obj-y += extable.o
 obj-y += ioremap.o
 obj-y += cacheflush.o
+obj-y += context.o
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
new file mode 100644
index 000000000000..89ceb3cbe218
--- /dev/null
+++ b/arch/riscv/mm/context.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/mm.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+/*
+ * When necessary, performs a deferred icache flush for the given MM context,
+ * on the local CPU. RISC-V has no direct mechanism for instruction cache
+ * shoot downs, so instead we send an IPI that informs the remote harts they
+ * need to flush their local instruction caches. To avoid pathologically slow
+ * behavior in a common case (a bunch of single-hart processes on a many-hart
+ * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
+ * executing a MM context and instead schedule a deferred local instruction
+ * cache flush to be performed before execution resumes on each hart. This
+ * actually performs that local instruction cache flush, which implicitly only
+ * refers to the current hart.
+ */
+static inline void flush_icache_deferred(struct mm_struct *mm)
+{
+#ifdef CONFIG_SMP
+	unsigned int cpu = smp_processor_id();
+	cpumask_t *mask = &mm->context.icache_stale_mask;
+
+	if (cpumask_test_cpu(cpu, mask)) {
+		cpumask_clear_cpu(cpu, mask);
+		/*
+		 * Ensure the remote hart's writes are visible to this hart.
+		 * This pairs with a barrier in flush_icache_mm.
+		 */
+		smp_mb();
+		local_flush_icache_all();
+	}
+
+#endif
+}
+
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	struct task_struct *task)
+{
+	unsigned int cpu;
+
+	if (unlikely(prev == next))
+		return;
+
+	/*
+	 * Mark the current MM context as inactive, and the next as
+	 * active. This is at least used by the icache flushing
+	 * routines in order to determine who should be flushed.
+	 */
+	cpu = smp_processor_id();
+
+	cpumask_clear_cpu(cpu, mm_cpumask(prev));
+	cpumask_set_cpu(cpu, mm_cpumask(next));
+
+	/*
+	 * Use the old sptbr name instead of using the current satp
+	 * name to support binutils 2.29 which doesn't know about the
+	 * privileged ISA 1.10 yet.
+	 */
+	csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE);
+	local_flush_tlb_all();
+
+	flush_icache_deferred(next);
+}
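
The comment above flush_icache_deferred() describes only the receiving half of the shootdown protocol. For context, here is a minimal, illustrative sketch of the sending half that the "pairs with a barrier in flush_icache_mm" comment refers to. This is not the kernel's verbatim flush_icache_mm(); the handler name ipi_fence_i(), the helper name flush_icache_mm_sketch(), and the exact stale-mask bookkeeping are simplifying assumptions.

#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>

/* Hypothetical IPI handler: a remote hart flushes its own icache. */
static void ipi_fence_i(void *info)
{
	local_flush_icache_all();
}

/*
 * Sketch of the sender side that flush_icache_deferred() pairs with:
 * flush locally, mark every other hart's icache as stale, then IPI
 * only the harts currently executing this mm.
 */
static void flush_icache_mm_sketch(struct mm_struct *mm)
{
	cpumask_t others;
	unsigned int cpu;

	preempt_disable();
	cpu = smp_processor_id();

	/* This hart can simply flush its own icache. */
	local_flush_icache_all();

	/* Every other hart now has a potentially stale icache ... */
	cpumask_setall(&mm->context.icache_stale_mask);
	cpumask_clear_cpu(cpu, &mm->context.icache_stale_mask);

	/*
	 * ... and must see this hart's code writes before it resumes
	 * executing this mm; pairs with the smp_mb() in
	 * flush_icache_deferred().
	 */
	smp_mb();

	/*
	 * IPI only the harts currently running this mm. Harts not in
	 * mm_cpumask() are left alone; they perform the deferred local
	 * flush when switch_mm() next schedules this context on them,
	 * which is what keeps the 'make -j' case cheap.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	on_each_cpu_mask(&others, ipi_fence_i, NULL, 1);

	preempt_enable();
}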
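The value written by csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE) packs the physical page number of the root page table and the translation mode into a single CSR. The sketch below shows how that value decomposes, under the assumption of RV64 with Sv39 translation and 4 KiB pages (field positions per the RISC-V privileged spec v1.10); the SKETCH_-prefixed names are hypothetical stand-ins for the kernel's constants.

/*
 * satp (RV64), per the privileged spec v1.10:
 *
 *  63       60 59        44 43                                    0
 * +-----------+------------+---------------------------------------+
 * |   MODE    |    ASID    |  PPN of the root page table (the pgd) |
 * +-----------+------------+---------------------------------------+
 *
 * MODE = 8 selects Sv39 translation.
 */
#define SKETCH_SATP_MODE_SV39	(8UL << 60)
#define SKETCH_PAGE_SHIFT	12

static unsigned long sketch_make_satp(unsigned long pgd_phys)
{
	/* PPN of the pgd, ASID left at 0, Sv39 mode in the top bits. */
	return (pgd_phys >> SKETCH_PAGE_SHIFT) | SKETCH_SATP_MODE_SV39;
}

Note that this kernel leaves the ASID field zero, which is why switch_mm() must follow the satp write with local_flush_tlb_all(): without address-space tags, stale translations from the previous mm would otherwise survive in the TLB. On the naming question in the comment: sptbr (privileged spec v1.9.1) and satp (v1.10) are two names for the same CSR, number 0x180, so the old spelling changes only which assemblers accept the mnemonic, not the runtime behavior.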