author     James Hogan <james.hogan@imgtec.com>    2017-02-09 15:20:06 +0000
committer  James Hogan <james.hogan@imgtec.com>    2017-02-13 18:57:31 +0000
commit     f700a420088624671ed955bf818c17012bb57bca (patch)
tree       edb0148c355db8c8f02b550181a250e94e8c93db  /arch/mips
parent     98e58b01e1671e5784c67972ec1b58a8997f784e (diff)
parent     7170bdc777556dad87f92e6a73a6cc557bf3290e (diff)
Merge tag 'mips_kvm_4.11_1' into mips-for-linux-next
MIPS dependencies for KVM
Miscellaneous MIPS architecture changes depended on by the MIPS KVM
changes in the KVM tree.
- Move pgd_alloc() out of header.
- Exports so KVM can access page table management and TLBEX functions.
- Add return errors to protected cache ops.
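The last point means protected_cache_op()/protected_cachee_op() and their inline wrappers now evaluate to 0 on success or -EFAULT when the cache instruction faults, instead of returning void. A minimal caller sketch (the helper name and surrounding error handling are illustrative, not part of this merge):

```c
#include <asm/r4kcache.h>

/*
 * Hypothetical helper: write back the D-cache line and invalidate the
 * I-cache line covering a user address, propagating -EFAULT if either
 * protected cache op faulted (e.g. the page was unmapped under us).
 */
static int flush_user_insn_line(unsigned long addr)
{
	int err;

	err = protected_writeback_dcache_line(addr);	/* 0 or -EFAULT */
	if (err)
		return err;

	return protected_flush_icache_line(addr);	/* 0 or -EFAULT */
}
```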
Diffstat (limited to 'arch/mips')
-rw-r--r--   arch/mips/include/asm/pgalloc.h   | 16
-rw-r--r--   arch/mips/include/asm/r4kcache.h  | 55
-rw-r--r--   arch/mips/include/asm/tlbex.h     | 26
-rw-r--r--   arch/mips/include/asm/uasm.h      |  5
-rw-r--r--   arch/mips/mm/Makefile             |  2
-rw-r--r--   arch/mips/mm/init.c               |  1
-rw-r--r--   arch/mips/mm/pgtable-64.c         |  2
-rw-r--r--   arch/mips/mm/pgtable.c            | 25
-rw-r--r--   arch/mips/mm/tlbex.c              | 38
9 files changed, 116 insertions, 54 deletions
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index a03e86969f78..a8705f6c8180 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -43,21 +43,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
  * Initialize a new pgd / pmd table with invalid pointers.
  */
 extern void pgd_init(unsigned long page);
-
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
-{
-	pgd_t *ret, *init;
-
-	ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
-	if (ret) {
-		init = pgd_offset(&init_mm, 0UL);
-		pgd_init((unsigned long)ret);
-		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
-		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-	}
-
-	return ret;
-}
+extern pgd_t *pgd_alloc(struct mm_struct *mm);
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index b42b513007a2..7227c158cbf8 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -147,49 +147,64 @@ static inline void flush_scache_line(unsigned long addr)
 }
 
 #define protected_cache_op(op,addr)				\
+({								\
+	int __err = 0;						\
 	__asm__ __volatile__(					\
 	"	.set	push			\n"		\
 	"	.set	noreorder		\n"		\
 	"	.set "MIPS_ISA_ARCH_LEVEL"	\n"		\
-	"1:	cache	%0, (%1)		\n"		\
+	"1:	cache	%1, (%2)		\n"		\
 	"2:	.set	pop			\n"		\
+	"	.section .fixup,\"ax\"		\n"		\
+	"3:	li	%0, %3			\n"		\
+	"	j	2b			\n"		\
+	"	.previous			\n"		\
 	"	.section __ex_table,\"a\"	\n"		\
-	"	"STR(PTR)" 1b, 2b		\n"		\
+	"	"STR(PTR)" 1b, 3b		\n"		\
 	"	.previous"					\
-	:							\
-	: "i" (op), "r" (addr))
+	: "+r" (__err)						\
+	: "i" (op), "r" (addr), "i" (-EFAULT));			\
+	__err;							\
+})
+
 #define protected_cachee_op(op,addr)				\
+({								\
+	int __err = 0;						\
 	__asm__ __volatile__(					\
 	"	.set	push			\n"		\
 	"	.set	noreorder		\n"		\
 	"	.set	mips0			\n"		\
 	"	.set	eva			\n"		\
-	"1:	cachee	%0, (%1)		\n"		\
+	"1:	cachee	%1, (%2)		\n"		\
 	"2:	.set	pop			\n"		\
+	"	.section .fixup,\"ax\"		\n"		\
+	"3:	li	%0, %3			\n"		\
+	"	j	2b			\n"		\
+	"	.previous			\n"		\
 	"	.section __ex_table,\"a\"	\n"		\
-	"	"STR(PTR)" 1b, 2b		\n"		\
+	"	"STR(PTR)" 1b, 3b		\n"		\
 	"	.previous"					\
-	:							\
-	: "i" (op), "r" (addr))
+	: "+r" (__err)						\
+	: "i" (op), "r" (addr), "i" (-EFAULT));			\
+	__err;							\
+})
 
 /*
  * The next two are for badland addresses like signal trampolines.
  */
-static inline void protected_flush_icache_line(unsigned long addr)
+static inline int protected_flush_icache_line(unsigned long addr)
 {
 	switch (boot_cpu_type()) {
 	case CPU_LOONGSON2:
-		protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
-		break;
+		return protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
 
 	default:
 #ifdef CONFIG_EVA
-		protected_cachee_op(Hit_Invalidate_I, addr);
+		return protected_cachee_op(Hit_Invalidate_I, addr);
 #else
-		protected_cache_op(Hit_Invalidate_I, addr);
+		return protected_cache_op(Hit_Invalidate_I, addr);
 #endif
-		break;
 	}
 }
 
@@ -199,21 +214,21 @@ static inline void protected_flush_icache_line(unsigned long addr)
  * caches. We're talking about one cacheline unnecessarily getting invalidated
  * here so the penalty isn't overly hard.
  */
-static inline void protected_writeback_dcache_line(unsigned long addr)
+static inline int protected_writeback_dcache_line(unsigned long addr)
 {
 #ifdef CONFIG_EVA
-	protected_cachee_op(Hit_Writeback_Inv_D, addr);
+	return protected_cachee_op(Hit_Writeback_Inv_D, addr);
 #else
-	protected_cache_op(Hit_Writeback_Inv_D, addr);
+	return protected_cache_op(Hit_Writeback_Inv_D, addr);
 #endif
 }
 
-static inline void protected_writeback_scache_line(unsigned long addr)
+static inline int protected_writeback_scache_line(unsigned long addr)
 {
 #ifdef CONFIG_EVA
-	protected_cachee_op(Hit_Writeback_Inv_SD, addr);
+	return protected_cachee_op(Hit_Writeback_Inv_SD, addr);
 #else
-	protected_cache_op(Hit_Writeback_Inv_SD, addr);
+	return protected_cache_op(Hit_Writeback_Inv_SD, addr);
 #endif
 }
diff --git a/arch/mips/include/asm/tlbex.h b/arch/mips/include/asm/tlbex.h
new file mode 100644
index 000000000000..53050e9dd2c9
--- /dev/null
+++ b/arch/mips/include/asm/tlbex.h
@@ -0,0 +1,26 @@
+#ifndef __ASM_TLBEX_H
+#define __ASM_TLBEX_H
+
+#include <asm/uasm.h>
+
+/*
+ * Write random or indexed TLB entry, and care about the hazards from
+ * the preceding mtc0 and for the following eret.
+ */
+enum tlb_write_entry {
+	tlb_random,
+	tlb_indexed
+};
+
+extern int pgd_reg;
+
+void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+		      unsigned int tmp, unsigned int ptr);
+void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr);
+void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr);
+void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep);
+void build_tlb_write_entry(u32 **p, struct uasm_label **l,
+			   struct uasm_reloc **r,
+			   enum tlb_write_entry wmode);
+
+#endif /* __ASM_TLBEX_H */
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index f7929f65f7ca..e9a9e2ade1d2 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -9,6 +9,9 @@
  * Copyright (C) 2012, 2013 MIPS Technologies, Inc.  All rights reserved.
  */
 
+#ifndef __ASM_UASM_H
+#define __ASM_UASM_H
+
 #include <linux/types.h>
 
 #ifdef CONFIG_EXPORT_UASM
@@ -309,3 +312,5 @@ void uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
 void uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
 		 unsigned int reg2, int lid);
 void uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+
+#endif /* __ASM_UASM_H */
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index b4c64bd3f723..b4cc8811a664 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -4,7 +4,7 @@
 
 obj-y				+= cache.o dma-default.o extable.o fault.o \
 				   gup.o init.o mmap.o page.o page-funcs.o \
-				   tlbex.o tlbex-fault.o tlb-funcs.o
+				   pgtable.o tlbex.o tlbex-fault.o tlb-funcs.o
 
 ifdef CONFIG_CPU_MICROMIPS
 obj-y				+= uasm-micromips.o
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 9d1d54b8e1e8..aa75849c36bc 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -539,6 +539,7 @@ unsigned long pgd_current[NR_CPUS];
 pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
 #ifndef __PAGETABLE_PMD_FOLDED
 pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
+EXPORT_SYMBOL_GPL(invalid_pmd_table);
 #endif
 pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
 EXPORT_SYMBOL(invalid_pte_table);
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index ce4473e7c0d2..0ae7b28b4db5 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -6,6 +6,7 @@
  * Copyright (C) 1999, 2000 by Silicon Graphics
  * Copyright (C) 2003 by Ralf Baechle
  */
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <asm/fixmap.h>
@@ -60,6 +61,7 @@ void pmd_init(unsigned long addr, unsigned long pagetable)
 		p[-1] = pagetable;
 	} while (p != end);
 }
+EXPORT_SYMBOL_GPL(pmd_init);
 #endif
 
 pmd_t mk_pmd(struct page *page, pgprot_t prot)
diff --git a/arch/mips/mm/pgtable.c b/arch/mips/mm/pgtable.c
new file mode 100644
index 000000000000..05560b042d82
--- /dev/null
+++ b/arch/mips/mm/pgtable.c
@@ -0,0 +1,25 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <asm/pgalloc.h>
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	pgd_t *ret, *init;
+
+	ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+	if (ret) {
+		init = pgd_offset(&init_mm, 0UL);
+		pgd_init((unsigned long)ret);
+		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pgd_alloc);
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 87eed65660f5..9bfee8988eaf 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -22,6 +22,7 @@
  */
 
 #include <linux/bug.h>
+#include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/smp.h>
@@ -34,6 +35,7 @@
 #include <asm/war.h>
 #include <asm/uasm.h>
 #include <asm/setup.h>
+#include <asm/tlbex.h>
 
 static int mips_xpa_disabled;
 
@@ -344,7 +346,8 @@ static int allocate_kscratch(void)
 }
 
 static int scratch_reg;
-static int pgd_reg;
+int pgd_reg;
+EXPORT_SYMBOL_GPL(pgd_reg);
 enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
 
 static struct work_registers build_get_work_registers(u32 **p)
@@ -496,15 +499,9 @@ static void __maybe_unused build_tlb_probe_entry(u32 **p)
 	}
 }
 
-/*
- * Write random or indexed TLB entry, and care about the hazards from
- * the preceding mtc0 and for the following eret.
- */
-enum tlb_write_entry { tlb_random, tlb_indexed };
-
-static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
-				  struct uasm_reloc **r,
-				  enum tlb_write_entry wmode)
+void build_tlb_write_entry(u32 **p, struct uasm_label **l,
+			   struct uasm_reloc **r,
+			   enum tlb_write_entry wmode)
 {
 	void(*tlbw)(u32 **) = NULL;
 
@@ -627,6 +624,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 		break;
 	}
 }
+EXPORT_SYMBOL_GPL(build_tlb_write_entry);
 
 static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
 							unsigned int reg)
@@ -781,9 +779,8 @@ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
  * TMP and PTR are scratch.
  * TMP will be clobbered, PTR will hold the pmd entry.
  */
-static void
-build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
-		 unsigned int tmp, unsigned int ptr)
+void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+		      unsigned int tmp, unsigned int ptr)
 {
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 	long pgdc = (long)pgd_current;
@@ -859,6 +856,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
 #endif
 }
+EXPORT_SYMBOL_GPL(build_get_pmde64);
 
 /*
  * BVADDR is the faulting address, PTR is scratch.
@@ -934,8 +932,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
  * TMP and PTR are scratch.
  * TMP will be clobbered, PTR will hold the pgd entry.
  */
-static void __maybe_unused
-build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
+void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 {
 	if (pgd_reg != -1) {
 		/* pgd is in pgd_reg */
@@ -960,6 +957,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
 	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
 }
+EXPORT_SYMBOL_GPL(build_get_pgde32);
 
 #endif /* !CONFIG_64BIT */
 
@@ -989,7 +987,7 @@ static void build_adjust_context(u32 **p, unsigned int ctx)
 	uasm_i_andi(p, ctx, ctx, mask);
 }
 
-static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
+void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 {
 	/*
 	 * Bug workaround for the Nevada. It seems as if under certain
@@ -1013,8 +1011,9 @@ static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 	build_adjust_context(p, tmp);
 	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
 }
+EXPORT_SYMBOL_GPL(build_get_ptep);
 
-static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
+void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 {
 	int pte_off_even = 0;
 	int pte_off_odd = sizeof(pte_t);
@@ -1063,6 +1062,7 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 	UASM_i_MTC0(p, 0, C0_ENTRYLO1);
 	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 }
+EXPORT_SYMBOL_GPL(build_update_entries);
 
 struct mips_huge_tlb_info {
 	int huge_pte;
@@ -1536,7 +1536,9 @@ static void build_loongson3_tlb_refill_handler(void)
 extern u32 handle_tlbl[], handle_tlbl_end[];
 extern u32 handle_tlbs[], handle_tlbs_end[];
 extern u32 handle_tlbm[], handle_tlbm_end[];
-extern u32 tlbmiss_handler_setup_pgd_start[], tlbmiss_handler_setup_pgd[];
+extern u32 tlbmiss_handler_setup_pgd_start[];
+extern u32 tlbmiss_handler_setup_pgd[];
+EXPORT_SYMBOL_GPL(tlbmiss_handler_setup_pgd);
 extern u32 tlbmiss_handler_setup_pgd_end[];
 
 static void build_setup_pgd(void)
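The new <asm/tlbex.h> interface plus the EXPORT_SYMBOL_GPLs above are what let a module such as MIPS KVM assemble its own TLB handler fragments with uasm. A rough sketch of how the exported builders chain together (the wrapper function, buffer sizes, and the choice of k0/k1 as scratch registers are assumptions for illustration, not code from this merge):

```c
#include <asm/tlbex.h>
#include <asm/uasm.h>

/*
 * Illustrative only: emit a refill-style sequence that walks the page
 * tables, loads the even/odd PTE pair into EntryLo0/1 and writes a
 * random TLB entry, using the helpers this merge exports.
 */
static void emit_refill_fragment(u32 *buf)
{
	struct uasm_label labels[8], *l = labels;	/* sizes are arbitrary */
	struct uasm_reloc relocs[8], *r = relocs;
	u32 *p = buf;
	const unsigned int K0 = 26, K1 = 27;		/* $k0/$k1 scratch GPRs */

#ifdef CONFIG_64BIT
	build_get_pmde64(&p, &l, &r, K0, K1);		/* K1 := pmd entry */
#else
	build_get_pgde32(&p, K0, K1);			/* K1 := pgd entry */
#endif
	build_get_ptep(&p, K0, K1);			/* K1 := ptr to PTE pair */
	build_update_entries(&p, K0, K1);		/* fill EntryLo0/EntryLo1 */
	build_tlb_write_entry(&p, &l, &r, tlb_random);	/* tlbwr + hazard care */
}
```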