Diffstat (limited to 'arch/riscv/mm')
-rw-r--r--   arch/riscv/mm/Makefile            |  3
-rw-r--r--   arch/riscv/mm/cacheflush.c        | 26
-rw-r--r--   arch/riscv/mm/context.c           |  3
-rw-r--r--   arch/riscv/mm/extable.c           |  4
-rw-r--r--   arch/riscv/mm/fault.c             |  8
-rw-r--r--   arch/riscv/mm/init.c              | 67
-rw-r--r--   arch/riscv/mm/ioremap.c           | 84
-rw-r--r--   arch/riscv/mm/sifive_l2_cache.c   |  2
-rw-r--r--   arch/riscv/mm/tlbflush.c          | 25
9 files changed, 108 insertions(+), 114 deletions(-)
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index 9d9a17335686..3c8b33258457 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -6,9 +6,8 @@ CFLAGS_REMOVE_init.o = -pg
 endif
 
 obj-y += init.o
-obj-y += fault.o
 obj-y += extable.o
-obj-y += ioremap.o
+obj-$(CONFIG_MMU) += fault.o
 obj-y += cacheflush.o
 obj-y += context.o
 obj-y += sifive_l2_cache.o
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 3f15938dec89..8f1900686640 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -10,9 +10,17 @@
 
 #include <asm/sbi.h>
 
+static void ipi_remote_fence_i(void *info)
+{
+	return local_flush_icache_all();
+}
+
 void flush_icache_all(void)
 {
-	sbi_remote_fence_i(NULL);
+	if (IS_ENABLED(CONFIG_RISCV_SBI))
+		sbi_remote_fence_i(NULL);
+	else
+		on_each_cpu(ipi_remote_fence_i, NULL, 1);
 }
 
 /*
@@ -28,7 +36,7 @@ void flush_icache_all(void)
 void flush_icache_mm(struct mm_struct *mm, bool local)
 {
 	unsigned int cpu;
-	cpumask_t others, hmask, *mask;
+	cpumask_t others, *mask;
 
 	preempt_disable();
 
@@ -46,10 +54,7 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
 	 */
 	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
 	local |= cpumask_empty(&others);
-	if (mm != current->active_mm || !local) {
-		riscv_cpuid_to_hartid_mask(&others, &hmask);
-		sbi_remote_fence_i(hmask.bits);
-	} else {
+	if (mm == current->active_mm && local) {
 		/*
 		 * It's assumed that at least one strongly ordered operation is
 		 * performed on this hart between setting a hart's cpumask bit
@@ -59,6 +64,13 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
 		 * with flush_icache_deferred().
 		 */
 		smp_mb();
+	} else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
+		cpumask_t hartid_mask;
+
+		riscv_cpuid_to_hartid_mask(&others, &hartid_mask);
+		sbi_remote_fence_i(cpumask_bits(&hartid_mask));
+	} else {
+		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
 	}
 
 	preempt_enable();
@@ -66,6 +78,7 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
 
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_MMU
 void flush_icache_pte(pte_t pte)
 {
 	struct page *page = pte_page(pte);
@@ -73,3 +86,4 @@ void flush_icache_pte(pte_t pte)
 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 		flush_icache_all();
 }
+#endif /* CONFIG_MMU */
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index beeb5d7f92ea..613ec81a8979 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -7,6 +7,7 @@
 #include <linux/mm.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
 
 /*
  * When necessary, performs a deferred icache flush for the given MM context,
@@ -57,8 +58,10 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	cpumask_clear_cpu(cpu, mm_cpumask(prev));
 	cpumask_set_cpu(cpu, mm_cpumask(next));
 
+#ifdef CONFIG_MMU
 	csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE);
 	local_flush_tlb_all();
+#endif
 
 	flush_icache_deferred(next);
 }
diff --git a/arch/riscv/mm/extable.c b/arch/riscv/mm/extable.c
index 7aed9178d365..2fc729422151 100644
--- a/arch/riscv/mm/extable.c
+++ b/arch/riscv/mm/extable.c
@@ -15,9 +15,9 @@ int fixup_exception(struct pt_regs *regs)
 {
 	const struct exception_table_entry *fixup;
 
-	fixup = search_exception_tables(regs->sepc);
+	fixup = search_exception_tables(regs->epc);
 	if (fixup) {
-		regs->sepc = fixup->fixup;
+		regs->epc = fixup->fixup;
 		return 1;
 	}
 	return 0;
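
The cacheflush.c hunks above turn the unconditional SBI ecall into a compile-time choice: kernels built with CONFIG_RISCV_SBI keep using the firmware's remote fence.i broadcast, while kernels without SBI (the NOMMU/M-mode case) fall back to plain IPIs. A minimal userspace sketch of that dispatch pattern only — have_sbi, sbi_remote_fence_i_call and on_each_cpu_call are hypothetical stand-ins, not the kernel APIs:

    /* Userspace sketch of the IS_ENABLED(CONFIG_RISCV_SBI) dispatch. */
    #include <stdbool.h>
    #include <stdio.h>

    static const bool have_sbi = false;	/* stands in for IS_ENABLED(CONFIG_RISCV_SBI) */

    static void ipi_remote_fence_i(void *info)
    {
    	puts("fence.i executed via per-CPU IPI callback");
    }

    static void sbi_remote_fence_i_call(void)
    {
    	puts("fence.i broadcast through an SBI firmware call");
    }

    static void on_each_cpu_call(void (*fn)(void *), void *info)
    {
    	fn(info);	/* the real on_each_cpu() runs fn on every online CPU */
    }

    static void flush_icache_all(void)
    {
    	if (have_sbi)
    		sbi_remote_fence_i_call();
    	else
    		on_each_cpu_call(ipi_remote_fence_i, NULL);
    }

    int main(void)
    {
    	flush_icache_all();
    	return 0;
    }
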
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 96add1427a75..cf7248e07f43 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -18,6 +18,8 @@
 #include <asm/ptrace.h>
 #include <asm/tlbflush.h>
 
+#include "../kernel/head.h"
+
 /*
  * This routine handles page faults. It determines the address and the
  * problem, and then passes it off to one of the appropriate routines.
@@ -32,8 +34,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 	int code = SEGV_MAPERR;
 	vm_fault_t fault;
 
-	cause = regs->scause;
-	addr = regs->sbadaddr;
+	cause = regs->cause;
+	addr = regs->badaddr;
 	tsk = current;
 	mm = tsk->mm;
 
@@ -51,7 +53,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 		goto vmalloc_fault;
 
 	/* Enable interrupts if they were enabled in the parent context. */
-	if (likely(regs->sstatus & SR_SPIE))
+	if (likely(regs->status & SR_PIE))
 		local_irq_enable();
 
 	/*
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 83f7d12042fb..69f6678db7f3 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -19,11 +19,14 @@
 #include <asm/pgtable.h>
 #include <asm/io.h>
 
+#include "../kernel/head.h"
+
 unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
 							__page_aligned_bss;
 EXPORT_SYMBOL(empty_zero_page);
 
 extern char _start[];
+void *dtb_early_va;
 
 static void __init zone_sizes_init(void)
 {
@@ -38,11 +41,42 @@ static void __init zone_sizes_init(void)
 	free_area_init_nodes(max_zone_pfns);
 }
 
-void setup_zero_page(void)
+static void setup_zero_page(void)
 {
 	memset((void *)empty_zero_page, 0, PAGE_SIZE);
 }
 
+#ifdef CONFIG_DEBUG_VM
+static inline void print_mlk(char *name, unsigned long b, unsigned long t)
+{
+	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld kB)\n", name, b, t,
+		  (((t) - (b)) >> 10));
+}
+
+static inline void print_mlm(char *name, unsigned long b, unsigned long t)
+{
+	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld MB)\n", name, b, t,
+		  (((t) - (b)) >> 20));
+}
+
+static void print_vm_layout(void)
+{
+	pr_notice("Virtual kernel memory layout:\n");
+	print_mlk("fixmap", (unsigned long)FIXADDR_START,
+		  (unsigned long)FIXADDR_TOP);
+	print_mlm("pci io", (unsigned long)PCI_IO_START,
+		  (unsigned long)PCI_IO_END);
+	print_mlm("vmemmap", (unsigned long)VMEMMAP_START,
+		  (unsigned long)VMEMMAP_END);
+	print_mlm("vmalloc", (unsigned long)VMALLOC_START,
+		  (unsigned long)VMALLOC_END);
+	print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
+		  (unsigned long)high_memory);
+}
+#else
+static void print_vm_layout(void) { }
+#endif /* CONFIG_DEBUG_VM */
+
 void __init mem_init(void)
 {
 #ifdef CONFIG_FLATMEM
@@ -53,6 +87,7 @@ void __init mem_init(void)
 	memblock_free_all();
 
 	mem_init_print_info(NULL);
+	print_vm_layout();
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -140,12 +175,12 @@ void __init setup_bootmem(void)
 	}
 }
 
+#ifdef CONFIG_MMU
 unsigned long va_pa_offset;
 EXPORT_SYMBOL(va_pa_offset);
 unsigned long pfn_base;
 EXPORT_SYMBOL(pfn_base);
 
-void *dtb_early_va;
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
 pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
 pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
@@ -271,7 +306,6 @@ static void __init create_pmd_mapping(pmd_t *pmdp,
 #define get_pgd_next_virt(__pa)	get_pmd_virt(__pa)
 #define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
 	create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
-#define PTE_PARENT_SIZE		PMD_SIZE
 #define fixmap_pgd_next		fixmap_pmd
 #else
 #define pgd_next_t		pte_t
@@ -279,7 +313,6 @@ static void __init create_pmd_mapping(pmd_t *pmdp,
 #define get_pgd_next_virt(__pa)	get_pte_virt(__pa)
 #define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
 	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
-#define PTE_PARENT_SIZE		PGDIR_SIZE
 #define fixmap_pgd_next		fixmap_pte
 #endif
 
@@ -312,14 +345,11 @@ static void __init create_pgd_mapping(pgd_t *pgdp,
 
 static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
 {
-	uintptr_t map_size = PAGE_SIZE;
-
-	/* Upgrade to PMD/PGDIR mappings whenever possible */
-	if (!(base & (PTE_PARENT_SIZE - 1)) &&
-	    !(size & (PTE_PARENT_SIZE - 1)))
-		map_size = PTE_PARENT_SIZE;
+	/* Upgrade to PMD_SIZE mappings whenever possible */
+	if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
+		return PAGE_SIZE;
 
-	return map_size;
+	return PMD_SIZE;
 }
 
 /*
@@ -337,8 +367,7 @@ static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
  */
 
 #ifndef __riscv_cmodel_medany
-#error "setup_vm() is called from head.S before relocate so it should "
-	"not use absolute addressing."
+#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
 #endif
 
 asmlinkage void __init setup_vm(uintptr_t dtb_pa)
@@ -448,6 +477,16 @@ static void __init setup_vm_final(void)
 	csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | SATP_MODE);
 	local_flush_tlb_all();
 }
+#else
+asmlinkage void __init setup_vm(uintptr_t dtb_pa)
+{
+	dtb_early_va = (void *)dtb_pa;
+}
+
+static inline void setup_vm_final(void)
+{
+}
+#endif /* CONFIG_MMU */
 
 void __init paging_init(void)
 {
@@ -458,7 +497,7 @@ void __init paging_init(void)
 	zone_sizes_init();
 }
 
-#ifdef CONFIG_SPARSEMEM
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 			       struct vmem_altmap *altmap)
 {
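
The rewritten best_map_size() in the init.c hunks above drops the PTE_PARENT_SIZE macro: it now falls back to 4 KiB pages whenever either the base address or the size is not PMD-aligned, and otherwise uses a PMD-sized mapping. A standalone sketch of that check, with PMD_SIZE hard-coded to the 2 MiB Sv39 value purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PMD_SIZE  (2UL * 1024 * 1024)	/* 2 MiB, illustrative Sv39 value */

    /* Mirrors the simplified best_map_size(): pick the largest size that fits. */
    static uintptr_t best_map_size(uintptr_t base, uintptr_t size)
    {
    	if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
    		return PAGE_SIZE;
    	return PMD_SIZE;
    }

    int main(void)
    {
    	/* Aligned base and size: a 2 MiB mapping can be used. */
    	printf("%#lx\n", (unsigned long)best_map_size(0x80000000UL, 0x200000UL));
    	/* A 4 KiB-offset base forces 4 KiB pages. */
    	printf("%#lx\n", (unsigned long)best_map_size(0x80001000UL, 0x200000UL));
    	return 0;
    }
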
diff --git a/arch/riscv/mm/ioremap.c b/arch/riscv/mm/ioremap.c
deleted file mode 100644
index ac621ddb45c0..000000000000
--- a/arch/riscv/mm/ioremap.c
+++ /dev/null
@@ -1,84 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * (C) Copyright 1995 1996 Linus Torvalds
- * (C) Copyright 2012 Regents of the University of California
- */
-
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/io.h>
-
-#include <asm/pgtable.h>
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- */
-static void __iomem *__ioremap_caller(phys_addr_t addr, size_t size,
-	pgprot_t prot, void *caller)
-{
-	phys_addr_t last_addr;
-	unsigned long offset, vaddr;
-	struct vm_struct *area;
-
-	/* Disallow wrap-around or zero size */
-	last_addr = addr + size - 1;
-	if (!size || last_addr < addr)
-		return NULL;
-
-	/* Page-align mappings */
-	offset = addr & (~PAGE_MASK);
-	addr -= offset;
-	size = PAGE_ALIGN(size + offset);
-
-	area = get_vm_area_caller(size, VM_IOREMAP, caller);
-	if (!area)
-		return NULL;
-	vaddr = (unsigned long)area->addr;
-
-	if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
-		free_vm_area(area);
-		return NULL;
-	}
-
-	return (void __iomem *)(vaddr + offset);
-}
-
-/*
- * ioremap     -   map bus memory into CPU space
- * @offset:    bus address of the memory
- * @size:      size of the resource to map
- *
- * ioremap performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address.
- *
- * Must be freed with iounmap.
- */
-void __iomem *ioremap(phys_addr_t offset, unsigned long size)
-{
-	return __ioremap_caller(offset, size, PAGE_KERNEL,
-		__builtin_return_address(0));
-}
-EXPORT_SYMBOL(ioremap);
-
-
-/**
- * iounmap - Free a IO remapping
- * @addr: virtual address from ioremap_*
- *
- * Caller must ensure there is only one unmapping for the same pointer.
- */
-void iounmap(volatile void __iomem *addr)
-{
-	vunmap((void *)((unsigned long)addr & PAGE_MASK));
-}
-EXPORT_SYMBOL(iounmap);
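
With ioremap.c deleted, the port picks up a shared ioremap implementation from outside arch/riscv/mm; the replacement wiring is not visible in this diff. The alignment arithmetic the deleted __ioremap_caller() performed is still instructive: callers may pass an unaligned bus address, so the page offset is split out, whole pages are mapped, and the offset is added back to the returned pointer. A standalone sketch with a made-up address:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
    	unsigned long addr = 0x10013a04UL;	/* made-up, unaligned bus address */
    	unsigned long size = 0x100UL;

    	unsigned long offset = addr & ~PAGE_MASK; /* 0xa04: offset within the page */
    	addr -= offset;				  /* 0x10013000: page-aligned base */
    	size = PAGE_ALIGN(size + offset);	  /* 0x1000: whole pages to map */

    	printf("map %#lx-%#lx, hand back mapping + %#lx\n",
    	       addr, addr + size, offset);
    	return 0;
    }
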
diff --git a/arch/riscv/mm/sifive_l2_cache.c b/arch/riscv/mm/sifive_l2_cache.c
index 2e637ad71c05..a9ffff3277c7 100644
--- a/arch/riscv/mm/sifive_l2_cache.c
+++ b/arch/riscv/mm/sifive_l2_cache.c
@@ -142,7 +142,7 @@ static irqreturn_t l2_int_handler(int irq, void *device)
 	return IRQ_HANDLED;
 }
 
-int __init sifive_l2_init(void)
+static int __init sifive_l2_init(void)
 {
 	struct device_node *np;
 	struct resource res;
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 24cd33d2c48f..720b443c4528 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -2,6 +2,7 @@
 
 #include <linux/mm.h>
 #include <linux/smp.h>
+#include <linux/sched.h>
 #include <asm/sbi.h>
 
 void flush_tlb_all(void)
@@ -9,13 +10,33 @@ void flush_tlb_all(void)
 	sbi_remote_sfence_vma(NULL, 0, -1);
 }
 
+/*
+ * This function must not be called with cmask being null.
+ * Kernel may panic if cmask is NULL.
+ */
 static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,
 				  unsigned long size)
 {
 	struct cpumask hmask;
+	unsigned int cpuid;
 
-	riscv_cpuid_to_hartid_mask(cmask, &hmask);
-	sbi_remote_sfence_vma(hmask.bits, start, size);
+	if (cpumask_empty(cmask))
+		return;
+
+	cpuid = get_cpu();
+
+	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
+		/* local cpu is the only cpu present in cpumask */
+		if (size <= PAGE_SIZE)
+			local_flush_tlb_page(start);
+		else
+			local_flush_tlb_all();
+	} else {
+		riscv_cpuid_to_hartid_mask(cmask, &hmask);
+		sbi_remote_sfence_vma(cpumask_bits(&hmask), start, size);
+	}
+
+	put_cpu();
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
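
The tlbflush.c rework above skips the SBI round-trip when the current hart is the only one in the cpumask: cpumask_any_but() returning a value >= nr_cpu_ids means no other CPU needs the flush, so a local sfence.vma suffices. A userspace sketch of just that decision, where any_but() is a hypothetical stand-in modelling cpumask_any_but() on a plain bitmask:

    #include <stdio.h>

    /* Stand-in for cpumask_any_but(): first CPU set in @mask other than
     * @cpu, or nr_cpus when there is none. */
    static int any_but(unsigned long mask, int cpu, int nr_cpus)
    {
    	for (int i = 0; i < nr_cpus; i++)
    		if (i != cpu && (mask & (1UL << i)))
    			return i;
    	return nr_cpus;
    }

    int main(void)
    {
    	int nr_cpus = 4, this_cpu = 2;
    	unsigned long only_self = 1UL << 2;		/* mask = {2}   */
    	unsigned long with_peer = (1UL << 2) | 1UL;	/* mask = {0,2} */

    	if (any_but(only_self, this_cpu, nr_cpus) >= nr_cpus)
    		puts("mask = {2}: local sfence.vma is enough");
    	if (any_but(with_peer, this_cpu, nr_cpus) < nr_cpus)
    		puts("mask = {0,2}: remote fence via SBI needed");
    	return 0;
    }
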