| author | Guo Ren <ren_guo@c-sky.com> | 2019-06-18 20:34:35 +0800 |
|---|---|---|
| committer | Guo Ren <ren_guo@c-sky.com> | 2019-07-19 14:21:36 +0800 |
| commit | 4e562c11664c0e0e84bb8495894b8637acc1c095 (patch) | |
| tree | f0ec771bb3184272cd60db0f530e89361fb0525c /arch | |
| parent | 22d55f02b8922a097cd4be1e2f131dfa7ef65901 (diff) | |
| download | linux-4e562c11664c0e0e84bb8495894b8637acc1c095.tar.bz2 | |
csky: Improve tlb operation with help of asid
There are two generations of TLB operation instructions for C-SKY.
The first generation uses MCR registers and requires more work from
software; the second generation uses dedicated instructions, e.g.:
tlbi.va, tlbi.vas, tlbi.alls
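The difference is easiest to see side by side. Below is a minimal sketch condensed from the diff further down; the register accessors (write_mmu_entryhi(), tlb_probe(), read_mmu_index(), tlb_invalid_indexed()) and the tlbi.vas mnemonic are the ones used in arch/csky/mm/tlb.c, while the helper names are only illustrative:

```c
/*
 * First-generation style: drive the MMU through control registers,
 * probe for the matching entry, then invalidate it by index.
 */
static inline void inv_entry_gen1(unsigned long addr, unsigned long asid)
{
	int idx;

	write_mmu_entryhi(addr | asid);	/* select VPN + ASID via MMU regs */
	tlb_probe();			/* search the jTLB for a match */
	idx = read_mmu_index();
	if (idx >= 0)			/* a negative index means no match */
		tlb_invalid_indexed();
}

/* Second-generation style: one dedicated instruction does the same job. */
static inline void inv_entry_gen2(unsigned long addr, unsigned long asid)
{
	asm volatile("tlbi.vas %0" : : "r"(addr | asid));
}
```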
We implemented the following functions:

- flush_tlb_range (a range of entries)
- flush_tlb_page (one entry)

The functions above use the ASID taken from vma->vm_mm to invalidate
TLB entries, and on the newest generation of C-SKY CPUs they can use
the tlbi.vas instruction.

- flush_tlb_kernel_range
- flush_tlb_one

The functions above do not care about the ASID; they invalidate TLB
entries by VPN only, and on the newest generation of C-SKY CPUs they
can use the tlbi.vaas instruction (the two cases are contrasted in the
sketch after this list).
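On a TLBI-capable CPU, the two groups boil down to whether the ASID is folded into the instruction operand. A rough sketch, condensed from flush_tlb_page() and flush_tlb_one() in the diff below (cpu_asid(), sync_is() and the tlbi.* mnemonics come from the csky code; the wrapper names are illustrative):

```c
/* ASID-qualified: drop only this mm's translation of addr. */
static inline void inv_user_addr(struct vm_area_struct *vma, unsigned long addr)
{
	asm volatile("tlbi.vas %0" : : "r"(addr | cpu_asid(vma->vm_mm)));
	sync_is();
}

/* VPN-only: drop every translation of addr, regardless of ASID. */
static inline void inv_kernel_addr(unsigned long addr)
{
	asm volatile("tlbi.vaas %0" : : "r"(addr));
	sync_is();
}
```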
Signed-off-by: Guo Ren <ren_guo@c-sky.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/csky/mm/tlb.c | 136 |
1 file changed, 132 insertions(+), 4 deletions(-)
diff --git a/arch/csky/mm/tlb.c b/arch/csky/mm/tlb.c
index efae81ce7fbc..eb3ba6c9c927 100644
--- a/arch/csky/mm/tlb.c
+++ b/arch/csky/mm/tlb.c
@@ -10,6 +10,13 @@
 #include <asm/pgtable.h>
 #include <asm/setup.h>
 
+/*
+ * One C-SKY MMU TLB entry contain two PFN/page entry, ie:
+ * 1VPN -> 2PFN
+ */
+#define TLB_ENTRY_SIZE (PAGE_SIZE * 2)
+#define TLB_ENTRY_SIZE_MASK (PAGE_MASK << 1)
+
 void flush_tlb_all(void)
 {
 	tlb_invalid_all();
@@ -17,27 +24,148 @@ void flush_tlb_all(void)
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
+#ifdef CONFIG_CPU_HAS_TLBI
+	asm volatile("tlbi.asids %0"::"r"(cpu_asid(mm)));
+#else
 	tlb_invalid_all();
+#endif
 }
 
+/*
+ * MMU operation regs only could invalid tlb entry in jtlb and we
+ * need change asid field to invalid I-utlb & D-utlb.
+ */
+#ifndef CONFIG_CPU_HAS_TLBI
+#define restore_asid_inv_utlb(oldpid, newpid) \
+do { \
+	if (oldpid == newpid) \
+		write_mmu_entryhi(oldpid + 1); \
+	write_mmu_entryhi(oldpid); \
+} while (0)
+#endif
+
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end)
 {
-	tlb_invalid_all();
+	unsigned long newpid = cpu_asid(vma->vm_mm);
+
+	start &= TLB_ENTRY_SIZE_MASK;
+	end += TLB_ENTRY_SIZE - 1;
+	end &= TLB_ENTRY_SIZE_MASK;
+
+#ifdef CONFIG_CPU_HAS_TLBI
+	while (start < end) {
+		asm volatile("tlbi.vas %0"::"r"(start | newpid));
+		start += 2*PAGE_SIZE;
+	}
+	sync_is();
+#else
+	{
+	unsigned long flags, oldpid;
+
+	local_irq_save(flags);
+	oldpid = read_mmu_entryhi() & ASID_MASK;
+	while (start < end) {
+		int idx;
+
+		write_mmu_entryhi(start | newpid);
+		start += 2*PAGE_SIZE;
+		tlb_probe();
+		idx = read_mmu_index();
+		if (idx >= 0)
+			tlb_invalid_indexed();
+	}
+	restore_asid_inv_utlb(oldpid, newpid);
+	local_irq_restore(flags);
+	}
+#endif
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-	tlb_invalid_all();
+	start &= TLB_ENTRY_SIZE_MASK;
+	end += TLB_ENTRY_SIZE - 1;
+	end &= TLB_ENTRY_SIZE_MASK;
+
+#ifdef CONFIG_CPU_HAS_TLBI
+	while (start < end) {
+		asm volatile("tlbi.vaas %0"::"r"(start));
+		start += 2*PAGE_SIZE;
+	}
+	sync_is();
+#else
+	{
+	unsigned long flags, oldpid;
+
+	local_irq_save(flags);
+	oldpid = read_mmu_entryhi() & ASID_MASK;
+	while (start < end) {
+		int idx;
+
+		write_mmu_entryhi(start | oldpid);
+		start += 2*PAGE_SIZE;
+		tlb_probe();
+		idx = read_mmu_index();
+		if (idx >= 0)
+			tlb_invalid_indexed();
+	}
+	restore_asid_inv_utlb(oldpid, oldpid);
+	local_irq_restore(flags);
+	}
+#endif
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 {
-	tlb_invalid_all();
+	int newpid = cpu_asid(vma->vm_mm);
+
+	addr &= TLB_ENTRY_SIZE_MASK;
+
+#ifdef CONFIG_CPU_HAS_TLBI
+	asm volatile("tlbi.vas %0"::"r"(addr | newpid));
+	sync_is();
+#else
+	{
+	int oldpid, idx;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	oldpid = read_mmu_entryhi() & ASID_MASK;
+	write_mmu_entryhi(addr | newpid);
+	tlb_probe();
+	idx = read_mmu_index();
+	if (idx >= 0)
+		tlb_invalid_indexed();
+
+	restore_asid_inv_utlb(oldpid, newpid);
+	local_irq_restore(flags);
+	}
+#endif
 }
 
 void flush_tlb_one(unsigned long addr)
 {
-	tlb_invalid_all();
+	addr &= TLB_ENTRY_SIZE_MASK;
+
+#ifdef CONFIG_CPU_HAS_TLBI
+	asm volatile("tlbi.vaas %0"::"r"(addr));
+	sync_is();
+#else
+	{
+	int oldpid, idx;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	oldpid = read_mmu_entryhi() & ASID_MASK;
+	write_mmu_entryhi(addr | oldpid);
+	tlb_probe();
+	idx = read_mmu_index();
+	if (idx >= 0)
+		tlb_invalid_indexed();
+
+	restore_asid_inv_utlb(oldpid, oldpid);
+	local_irq_restore(flags);
+	}
+#endif
 }
 EXPORT_SYMBOL(flush_tlb_one);
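The new TLB_ENTRY_SIZE / TLB_ENTRY_SIZE_MASK constants encode the fact that one C-SKY jTLB entry maps a pair of pages (one VPN covers two PFNs), so the range flushes align start and end to an entry boundary and step by two pages per iteration. A small self-contained illustration of that rounding, assuming 4 KiB pages (the constants mirror the diff; the addresses are arbitrary test values):

```c
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define PAGE_MASK		(~(PAGE_SIZE - 1))
#define TLB_ENTRY_SIZE		(PAGE_SIZE * 2)		/* one entry = 2 pages  */
#define TLB_ENTRY_SIZE_MASK	(PAGE_MASK << 1)	/* 8 KiB alignment mask */

int main(void)
{
	unsigned long start = 0x12345678UL, end = 0x12346789UL;

	/* Same rounding the flush loops apply before walking the range. */
	start &= TLB_ENTRY_SIZE_MASK;				/* 0x12344000 */
	end = (end + TLB_ENTRY_SIZE - 1) & TLB_ENTRY_SIZE_MASK;	/* 0x12348000 */

	for (unsigned long va = start; va < end; va += 2 * PAGE_SIZE)
		printf("would invalidate the entry covering 0x%lx-0x%lx\n",
		       va, va + TLB_ENTRY_SIZE - 1);
	return 0;
}
```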