Diffstat (limited to 'arch/arm/include')
92 files changed, 1571 insertions, 1193 deletions
diff --git a/arch/arm/include/asm/a.out-core.h b/arch/arm/include/asm/a.out-core.h deleted file mode 100644 index 92f10cb5c70c..000000000000 --- a/arch/arm/include/asm/a.out-core.h +++ /dev/null @@ -1,45 +0,0 @@ -/* a.out coredump register dumper - * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. - */ - -#ifndef _ASM_A_OUT_CORE_H -#define _ASM_A_OUT_CORE_H - -#ifdef __KERNEL__ - -#include <linux/user.h> -#include <linux/elfcore.h> - -/* - * fill in the user structure for an a.out core dump - */ -static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) -{ - struct task_struct *tsk = current; - - dump->magic = CMAGIC; - dump->start_code = tsk->mm->start_code; - dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1); - - dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT; - dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT; - dump->u_ssize = 0; - - memset(dump->u_debugreg, 0, sizeof(dump->u_debugreg)); - - if (dump->start_stack < 0x04000000) - dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT; - - dump->regs = *regs; - dump->u_fpvalid = dump_fpu (regs, &dump->u_fp); -} - -#endif /* __KERNEL__ */ -#endif /* _ASM_A_OUT_CORE_H */ diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h index 7c1bfc0aea0c..5665134bfa3e 100644 --- a/arch/arm/include/asm/arch_timer.h +++ b/arch/arm/include/asm/arch_timer.h @@ -17,7 +17,8 @@ int arch_timer_arch_init(void); * nicely work out which register we want, and chuck away the rest of * the code. At least it does so with a recent GCC (4.6.3). 
*/ -static inline void arch_timer_reg_write(const int access, const int reg, u32 val) +static __always_inline +void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val) { if (access == ARCH_TIMER_PHYS_ACCESS) { switch (reg) { @@ -28,9 +29,7 @@ static inline void arch_timer_reg_write(const int access, const int reg, u32 val asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val)); break; } - } - - if (access == ARCH_TIMER_VIRT_ACCESS) { + } else if (access == ARCH_TIMER_VIRT_ACCESS) { switch (reg) { case ARCH_TIMER_REG_CTRL: asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val)); @@ -44,7 +43,8 @@ static inline void arch_timer_reg_write(const int access, const int reg, u32 val isb(); } -static inline u32 arch_timer_reg_read(const int access, const int reg) +static __always_inline +u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg) { u32 val = 0; @@ -57,9 +57,7 @@ static inline u32 arch_timer_reg_read(const int access, const int reg) asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val)); break; } - } - - if (access == ARCH_TIMER_VIRT_ACCESS) { + } else if (access == ARCH_TIMER_VIRT_ACCESS) { switch (reg) { case ARCH_TIMER_REG_CTRL: asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val)); @@ -80,15 +78,6 @@ static inline u32 arch_timer_get_cntfrq(void) return val; } -static inline u64 arch_counter_get_cntpct(void) -{ - u64 cval; - - isb(); - asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval)); - return cval; -} - static inline u64 arch_counter_get_cntvct(void) { u64 cval; @@ -98,7 +87,7 @@ static inline u64 arch_counter_get_cntvct(void) return cval; } -static inline void __cpuinit arch_counter_set_user_access(void) +static inline void arch_counter_set_user_access(void) { u32 cntkctl; diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 05ee9eebad6b..fcc1b5bf6979 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -136,7 +136,11 @@ * assumes FIQs are enabled, and that the processor is in SVC mode. */ .macro save_and_disable_irqs, oldcpsr +#ifdef CONFIG_CPU_V7M + mrs \oldcpsr, primask +#else mrs \oldcpsr, cpsr +#endif disable_irq .endm @@ -150,7 +154,11 @@ * guarantee that this will preserve the flags. */ .macro restore_irqs_notrace, oldcpsr +#ifdef CONFIG_CPU_V7M + msr primask, \oldcpsr +#else msr cpsr_c, \oldcpsr +#endif .endm .macro restore_irqs, oldcpsr @@ -212,9 +220,9 @@ #ifdef CONFIG_SMP #if __LINUX_ARM_ARCH__ >= 7 .ifeqs "\mode","arm" - ALT_SMP(dmb) + ALT_SMP(dmb ish) .else - ALT_SMP(W(dmb)) + ALT_SMP(W(dmb) ish) .endif #elif __LINUX_ARM_ARCH__ == 6 ALT_SMP(mcr p15, 0, r0, c7, c10, 5) @ dmb @@ -229,7 +237,14 @@ #endif .endm -#ifdef CONFIG_THUMB2_KERNEL +#if defined(CONFIG_CPU_V7M) + /* + * setmode is used to assert to be in svc mode during boot. For v7-M + * this is done in __v7m_setup, so setmode can be empty here. 
+ */ + .macro setmode, mode, reg + .endm +#elif defined(CONFIG_THUMB2_KERNEL) .macro setmode, mode, reg mov \reg, #\mode msr cpsr_c, \reg diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h index 8dcd9c702d90..60f15e274e6d 100644 --- a/arch/arm/include/asm/barrier.h +++ b/arch/arm/include/asm/barrier.h @@ -14,27 +14,27 @@ #endif #if __LINUX_ARM_ARCH__ >= 7 -#define isb() __asm__ __volatile__ ("isb" : : : "memory") -#define dsb() __asm__ __volatile__ ("dsb" : : : "memory") -#define dmb() __asm__ __volatile__ ("dmb" : : : "memory") +#define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory") +#define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory") +#define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory") #elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6 -#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ +#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ : : "r" (0) : "memory") -#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ +#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ : : "r" (0) : "memory") -#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \ +#define dmb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \ : : "r" (0) : "memory") #elif defined(CONFIG_CPU_FA526) -#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ +#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ : : "r" (0) : "memory") -#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ +#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ : : "r" (0) : "memory") -#define dmb() __asm__ __volatile__ ("" : : : "memory") +#define dmb(x) __asm__ __volatile__ ("" : : : "memory") #else -#define isb() __asm__ __volatile__ ("" : : : "memory") -#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ +#define isb(x) __asm__ __volatile__ ("" : : : "memory") +#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ : : "r" (0) : "memory") -#define dmb() __asm__ __volatile__ ("" : : : "memory") +#define dmb(x) __asm__ __volatile__ ("" : : : "memory") #endif #ifdef CONFIG_ARCH_HAS_BARRIERS @@ -42,7 +42,7 @@ #elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) #define mb() do { dsb(); outer_sync(); } while (0) #define rmb() dsb() -#define wmb() mb() +#define wmb() do { dsb(st); outer_sync(); } while (0) #else #define mb() barrier() #define rmb() barrier() @@ -54,9 +54,9 @@ #define smp_rmb() barrier() #define smp_wmb() barrier() #else -#define smp_mb() dmb() -#define smp_rmb() dmb() -#define smp_wmb() dmb() +#define smp_mb() dmb(ish) +#define smp_rmb() smp_mb() +#define smp_wmb() dmb(ishst) #endif #define read_barrier_depends() do { } while(0) diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index bff71388e72a..15f2d5bf8875 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -268,8 +268,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr * Harvard caches are synchronised for the user space address range. * This is used for the ARM private sys_cacheflush system call. 
*/ -#define flush_cache_user_range(start,end) \ - __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end)) +#define flush_cache_user_range(s,e) __cpuc_coherent_user_range(s,e) /* * Perform necessary cache operations to ensure that data previously @@ -320,9 +319,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma, } #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE -static inline void flush_kernel_dcache_page(struct page *page) -{ -} +extern void flush_kernel_dcache_page(struct page *); #define flush_dcache_mmap_lock(mapping) \ spin_lock_irq(&(mapping)->tree_lock) @@ -354,7 +351,7 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end) * set_pte_at() called from vmap_pte_range() does not * have a DSB after cleaning the cache line. */ - dsb(); + dsb(ishst); } static inline void flush_cache_vunmap(unsigned long start, unsigned long end) diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h index 1f3262e99d81..6493802f880a 100644 --- a/arch/arm/include/asm/cp15.h +++ b/arch/arm/include/asm/cp15.h @@ -23,6 +23,11 @@ #define CR_RR (1 << 14) /* Round Robin cache replacement */ #define CR_L4 (1 << 15) /* LDR pc can set T bit */ #define CR_DT (1 << 16) +#ifdef CONFIG_MMU +#define CR_HA (1 << 17) /* Hardware management of Access Flag */ +#else +#define CR_BR (1 << 17) /* MPU Background region enable (PMSA) */ +#endif #define CR_IT (1 << 18) #define CR_ST (1 << 19) #define CR_FI (1 << 21) /* Fast interrupt (lower latency mode) */ @@ -61,6 +66,20 @@ static inline void set_cr(unsigned int val) isb(); } +static inline unsigned int get_auxcr(void) +{ + unsigned int val; + asm("mrc p15, 0, %0, c1, c0, 1 @ get AUXCR" : "=r" (val)); + return val; +} + +static inline void set_auxcr(unsigned int val) +{ + asm volatile("mcr p15, 0, %0, c1, c0, 1 @ set AUXCR" + : : "r" (val)); + isb(); +} + #ifndef CONFIG_SMP extern void adjust_cr(unsigned long mask, unsigned long set); #endif diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h index 7652712d1d14..9672e978d50d 100644 --- a/arch/arm/include/asm/cputype.h +++ b/arch/arm/include/asm/cputype.h @@ -8,8 +8,25 @@ #define CPUID_CACHETYPE 1 #define CPUID_TCM 2 #define CPUID_TLBTYPE 3 +#define CPUID_MPUIR 4 #define CPUID_MPIDR 5 +#ifdef CONFIG_CPU_V7M +#define CPUID_EXT_PFR0 0x40 +#define CPUID_EXT_PFR1 0x44 +#define CPUID_EXT_DFR0 0x48 +#define CPUID_EXT_AFR0 0x4c +#define CPUID_EXT_MMFR0 0x50 +#define CPUID_EXT_MMFR1 0x54 +#define CPUID_EXT_MMFR2 0x58 +#define CPUID_EXT_MMFR3 0x5c +#define CPUID_EXT_ISAR0 0x60 +#define CPUID_EXT_ISAR1 0x64 +#define CPUID_EXT_ISAR2 0x68 +#define CPUID_EXT_ISAR3 0x6c +#define CPUID_EXT_ISAR4 0x70 +#define CPUID_EXT_ISAR5 0x74 +#else #define CPUID_EXT_PFR0 "c1, 0" #define CPUID_EXT_PFR1 "c1, 1" #define CPUID_EXT_DFR0 "c1, 2" @@ -24,6 +41,7 @@ #define CPUID_EXT_ISAR3 "c2, 3" #define CPUID_EXT_ISAR4 "c2, 4" #define CPUID_EXT_ISAR5 "c2, 5" +#endif #define MPIDR_SMP_BITMASK (0x3 << 30) #define MPIDR_SMP_VALUE (0x2 << 30) @@ -32,6 +50,8 @@ #define MPIDR_HWID_BITMASK 0xFFFFFF +#define MPIDR_INVALID (~MPIDR_HWID_BITMASK) + #define MPIDR_LEVEL_BITS 8 #define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1) @@ -69,17 +89,38 @@ extern unsigned int processor_id; __val; \ }) +/* + * The memory clobber prevents gcc 4.5 from reordering the mrc before + * any is_smp() tests, which can cause undefined instruction aborts on + * ARM1136 r0 due to the missing extended CP15 registers. 
+ */ #define read_cpuid_ext(ext_reg) \ ({ \ unsigned int __val; \ asm("mrc p15, 0, %0, c0, " ext_reg \ : "=r" (__val) \ : \ - : "cc"); \ + : "memory"); \ __val; \ }) -#else /* ifdef CONFIG_CPU_CP15 */ +#elif defined(CONFIG_CPU_V7M) + +#include <asm/io.h> +#include <asm/v7m.h> + +#define read_cpuid(reg) \ + ({ \ + WARN_ON_ONCE(1); \ + 0; \ + }) + +static inline unsigned int __attribute_const__ read_cpuid_ext(unsigned offset) +{ + return readl(BASEADDR_V7M_SCB + offset); +} + +#else /* ifdef CONFIG_CPU_CP15 / elif defined (CONFIG_CPU_V7M) */ /* * read_cpuid and read_cpuid_ext should only ever be called on machines that @@ -106,7 +147,14 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void) return read_cpuid(CPUID_ID); } -#else /* ifdef CONFIG_CPU_CP15 */ +#elif defined(CONFIG_CPU_V7M) + +static inline unsigned int __attribute_const__ read_cpuid_id(void) +{ + return readl(BASEADDR_V7M_SCB + V7M_SCB_CPUID); +} + +#else /* ifdef CONFIG_CPU_CP15 / elif defined(CONFIG_CPU_V7M) */ static inline unsigned int __attribute_const__ read_cpuid_id(void) { diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h index fe92ccf1d0b0..191ada6e4d2d 100644 --- a/arch/arm/include/asm/div64.h +++ b/arch/arm/include/asm/div64.h @@ -46,7 +46,7 @@ __rem; \ }) -#if __GNUC__ < 4 +#if __GNUC__ < 4 || !defined(CONFIG_AEABI) /* * gcc versions earlier than 4.0 are simply too problematic for the diff --git a/arch/arm/include/asm/dma-contiguous.h b/arch/arm/include/asm/dma-contiguous.h index 3ed37b4d93da..4f8e9e5514b1 100644 --- a/arch/arm/include/asm/dma-contiguous.h +++ b/arch/arm/include/asm/dma-contiguous.h @@ -2,10 +2,9 @@ #define ASMARM_DMA_CONTIGUOUS_H #ifdef __KERNEL__ -#ifdef CONFIG_CMA +#ifdef CONFIG_DMA_CMA #include <linux/types.h> -#include <asm-generic/dma-contiguous.h> void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size); diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index 38050b1c4800..f4b46d39b9cf 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h @@ -19,8 +19,6 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG]; typedef struct user_fp elf_fpregset_t; -#define EM_ARM 40 - #define EF_ARM_EABI_MASK 0xff000000 #define EF_ARM_EABI_UNKNOWN 0x00000000 #define EF_ARM_EABI_VER1 0x01000000 @@ -130,4 +128,10 @@ struct mm_struct; extern unsigned long arch_randomize_brk(struct mm_struct *mm); #define arch_randomize_brk arch_randomize_brk +#ifdef CONFIG_MMU +#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 +struct linux_binprm; +int arch_setup_additional_pages(struct linux_binprm *, int); +#endif + #endif diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h index ea289e1435e7..c81adc08b3fb 100644 --- a/arch/arm/include/asm/glue-cache.h +++ b/arch/arm/include/asm/glue-cache.h @@ -117,10 +117,37 @@ # endif #endif +#if defined(CONFIG_CPU_V7M) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE nop +# endif +#endif + #if !defined(_CACHE) && !defined(MULTI_CACHE) #error Unknown cache maintenance model #endif +#ifndef __ASSEMBLER__ +extern inline void nop_flush_icache_all(void) { } +extern inline void nop_flush_kern_cache_all(void) { } +extern inline void nop_flush_kern_cache_louis(void) { } +extern inline void nop_flush_user_cache_all(void) { } +extern inline void nop_flush_user_cache_range(unsigned long a, + unsigned long b, unsigned int c) { } + +extern inline void nop_coherent_kern_range(unsigned long a, unsigned long b) { } +extern inline int nop_coherent_user_range(unsigned long a, + unsigned 
long b) { return 0; } +extern inline void nop_flush_kern_dcache_area(void *a, size_t s) { } + +extern inline void nop_dma_flush_range(const void *a, const void *b) { } + +extern inline void nop_dma_map_area(const void *s, size_t l, int f) { } +extern inline void nop_dma_unmap_area(const void *s, size_t l, int f) { } +#endif + #ifndef MULTI_CACHE #define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all) #define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all) diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h index b6e9f2c108b5..6b70f1b46a6e 100644 --- a/arch/arm/include/asm/glue-df.h +++ b/arch/arm/include/asm/glue-df.h @@ -95,6 +95,14 @@ # endif #endif +#ifdef CONFIG_CPU_ABRT_NOMMU +# ifdef CPU_DABORT_HANDLER +# define MULTI_DABORT 1 +# else +# define CPU_DABORT_HANDLER nommu_early_abort +# endif +#endif + #ifndef CPU_DABORT_HANDLER #error Unknown data abort handler type #endif diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h index ac1dd54724b6..74a8b84f3cb1 100644 --- a/arch/arm/include/asm/glue-proc.h +++ b/arch/arm/include/asm/glue-proc.h @@ -230,6 +230,24 @@ # endif #endif +#ifdef CONFIG_CPU_V7M +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_v7m +# endif +#endif + +#ifdef CONFIG_CPU_PJ4B +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_pj4b +# endif +#endif + #ifndef MULTI_CPU #define cpu_proc_init __glue(CPU_NAME,_proc_init) #define cpu_proc_fin __glue(CPU_NAME,_proc_fin) diff --git a/arch/arm/include/asm/hardware/debug-8250.S b/arch/arm/include/asm/hardware/debug-8250.S deleted file mode 100644 index 22c689255e6e..000000000000 --- a/arch/arm/include/asm/hardware/debug-8250.S +++ /dev/null @@ -1,29 +0,0 @@ -/* - * arch/arm/include/asm/hardware/debug-8250.S - * - * Copyright (C) 1994-1999 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#include <linux/serial_reg.h> - - .macro senduart,rd,rx - strb \rd, [\rx, #UART_TX << UART_SHIFT] - .endm - - .macro busyuart,rd,rx -1002: ldrb \rd, [\rx, #UART_LSR << UART_SHIFT] - and \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE - teq \rd, #UART_LSR_TEMT | UART_LSR_THRE - bne 1002b - .endm - - .macro waituart,rd,rx -#ifdef FLOW_CONTROL -1001: ldrb \rd, [\rx, #UART_MSR << UART_SHIFT] - tst \rd, #UART_MSR_CTS - beq 1001b -#endif - .endm diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h index ed94b1a366ae..423744bf18eb 100644 --- a/arch/arm/include/asm/hardware/iop3xx.h +++ b/arch/arm/include/asm/hardware/iop3xx.h @@ -223,11 +223,12 @@ extern int iop3xx_get_init_atu(void); #ifndef __ASSEMBLY__ #include <linux/types.h> +#include <linux/reboot.h> void iop3xx_map_io(void); void iop_init_cp6_handler(void); void iop_init_time(unsigned long tickrate); -void iop3xx_restart(char, const char *); +void iop3xx_restart(enum reboot_mode, const char *); static inline u32 read_tmr0(void) { diff --git a/arch/arm/include/asm/hardware/pci_v3.h b/arch/arm/include/asm/hardware/pci_v3.h deleted file mode 100644 index 2811c7e2cfdf..000000000000 --- a/arch/arm/include/asm/hardware/pci_v3.h +++ /dev/null @@ -1,186 +0,0 @@ -/* - * arch/arm/include/asm/hardware/pci_v3.h - * - * Internal header file PCI V3 chip - * - * Copyright (C) ARM Limited - * Copyright (C) 2000-2001 Deep Blue Solutions Ltd. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ -#ifndef ASM_ARM_HARDWARE_PCI_V3_H -#define ASM_ARM_HARDWARE_PCI_V3_H - -/* ------------------------------------------------------------------------------- - * V3 Local Bus to PCI Bridge definitions - * ------------------------------------------------------------------------------- - * Registers (these are taken from page 129 of the EPC User's Manual Rev 1.04 - * All V3 register names are prefaced by V3_ to avoid clashing with any other - * PCI definitions. Their names match the user's manual. - * - * I'm assuming that I20 is disabled. - * - */ -#define V3_PCI_VENDOR 0x00000000 -#define V3_PCI_DEVICE 0x00000002 -#define V3_PCI_CMD 0x00000004 -#define V3_PCI_STAT 0x00000006 -#define V3_PCI_CC_REV 0x00000008 -#define V3_PCI_HDR_CFG 0x0000000C -#define V3_PCI_IO_BASE 0x00000010 -#define V3_PCI_BASE0 0x00000014 -#define V3_PCI_BASE1 0x00000018 -#define V3_PCI_SUB_VENDOR 0x0000002C -#define V3_PCI_SUB_ID 0x0000002E -#define V3_PCI_ROM 0x00000030 -#define V3_PCI_BPARAM 0x0000003C -#define V3_PCI_MAP0 0x00000040 -#define V3_PCI_MAP1 0x00000044 -#define V3_PCI_INT_STAT 0x00000048 -#define V3_PCI_INT_CFG 0x0000004C -#define V3_LB_BASE0 0x00000054 -#define V3_LB_BASE1 0x00000058 -#define V3_LB_MAP0 0x0000005E -#define V3_LB_MAP1 0x00000062 -#define V3_LB_BASE2 0x00000064 -#define V3_LB_MAP2 0x00000066 -#define V3_LB_SIZE 0x00000068 -#define V3_LB_IO_BASE 0x0000006E -#define V3_FIFO_CFG 0x00000070 -#define V3_FIFO_PRIORITY 0x00000072 -#define V3_FIFO_STAT 0x00000074 -#define V3_LB_ISTAT 0x00000076 -#define V3_LB_IMASK 0x00000077 -#define V3_SYSTEM 0x00000078 -#define V3_LB_CFG 0x0000007A -#define V3_PCI_CFG 0x0000007C -#define V3_DMA_PCI_ADR0 0x00000080 -#define V3_DMA_PCI_ADR1 0x00000090 -#define V3_DMA_LOCAL_ADR0 0x00000084 -#define V3_DMA_LOCAL_ADR1 0x00000094 -#define V3_DMA_LENGTH0 0x00000088 -#define V3_DMA_LENGTH1 0x00000098 -#define V3_DMA_CSR0 0x0000008B -#define V3_DMA_CSR1 0x0000009B -#define V3_DMA_CTLB_ADR0 0x0000008C -#define V3_DMA_CTLB_ADR1 0x0000009C -#define V3_DMA_DELAY 0x000000E0 -#define V3_MAIL_DATA 0x000000C0 -#define V3_PCI_MAIL_IEWR 0x000000D0 -#define V3_PCI_MAIL_IERD 0x000000D2 -#define V3_LB_MAIL_IEWR 0x000000D4 -#define V3_LB_MAIL_IERD 0x000000D6 -#define V3_MAIL_WR_STAT 0x000000D8 -#define V3_MAIL_RD_STAT 0x000000DA -#define V3_QBA_MAP 0x000000DC - -/* PCI COMMAND REGISTER bits - */ -#define V3_COMMAND_M_FBB_EN (1 << 9) -#define V3_COMMAND_M_SERR_EN (1 << 8) -#define V3_COMMAND_M_PAR_EN (1 << 6) -#define V3_COMMAND_M_MASTER_EN (1 << 2) -#define V3_COMMAND_M_MEM_EN (1 << 1) -#define V3_COMMAND_M_IO_EN (1 << 0) - -/* SYSTEM REGISTER bits - */ -#define V3_SYSTEM_M_RST_OUT (1 << 15) -#define V3_SYSTEM_M_LOCK (1 << 14) - -/* PCI_CFG bits - */ -#define V3_PCI_CFG_M_I2O_EN (1 << 15) -#define V3_PCI_CFG_M_IO_REG_DIS (1 << 14) -#define V3_PCI_CFG_M_IO_DIS (1 << 
13) -#define V3_PCI_CFG_M_EN3V (1 << 12) -#define V3_PCI_CFG_M_RETRY_EN (1 << 10) -#define V3_PCI_CFG_M_AD_LOW1 (1 << 9) -#define V3_PCI_CFG_M_AD_LOW0 (1 << 8) - -/* PCI_BASE register bits (PCI -> Local Bus) - */ -#define V3_PCI_BASE_M_ADR_BASE 0xFFF00000 -#define V3_PCI_BASE_M_ADR_BASEL 0x000FFF00 -#define V3_PCI_BASE_M_PREFETCH (1 << 3) -#define V3_PCI_BASE_M_TYPE (3 << 1) -#define V3_PCI_BASE_M_IO (1 << 0) - -/* PCI MAP register bits (PCI -> Local bus) - */ -#define V3_PCI_MAP_M_MAP_ADR 0xFFF00000 -#define V3_PCI_MAP_M_RD_POST_INH (1 << 15) -#define V3_PCI_MAP_M_ROM_SIZE (3 << 10) -#define V3_PCI_MAP_M_SWAP (3 << 8) -#define V3_PCI_MAP_M_ADR_SIZE 0x000000F0 -#define V3_PCI_MAP_M_REG_EN (1 << 1) -#define V3_PCI_MAP_M_ENABLE (1 << 0) - -/* - * LB_BASE0,1 register bits (Local bus -> PCI) - */ -#define V3_LB_BASE_ADR_BASE 0xfff00000 -#define V3_LB_BASE_SWAP (3 << 8) -#define V3_LB_BASE_ADR_SIZE (15 << 4) -#define V3_LB_BASE_PREFETCH (1 << 3) -#define V3_LB_BASE_ENABLE (1 << 0) - -#define V3_LB_BASE_ADR_SIZE_1MB (0 << 4) -#define V3_LB_BASE_ADR_SIZE_2MB (1 << 4) -#define V3_LB_BASE_ADR_SIZE_4MB (2 << 4) -#define V3_LB_BASE_ADR_SIZE_8MB (3 << 4) -#define V3_LB_BASE_ADR_SIZE_16MB (4 << 4) -#define V3_LB_BASE_ADR_SIZE_32MB (5 << 4) -#define V3_LB_BASE_ADR_SIZE_64MB (6 << 4) -#define V3_LB_BASE_ADR_SIZE_128MB (7 << 4) -#define V3_LB_BASE_ADR_SIZE_256MB (8 << 4) -#define V3_LB_BASE_ADR_SIZE_512MB (9 << 4) -#define V3_LB_BASE_ADR_SIZE_1GB (10 << 4) -#define V3_LB_BASE_ADR_SIZE_2GB (11 << 4) - -#define v3_addr_to_lb_base(a) ((a) & V3_LB_BASE_ADR_BASE) - -/* - * LB_MAP0,1 register bits (Local bus -> PCI) - */ -#define V3_LB_MAP_MAP_ADR 0xfff0 -#define V3_LB_MAP_TYPE (7 << 1) -#define V3_LB_MAP_AD_LOW_EN (1 << 0) - -#define V3_LB_MAP_TYPE_IACK (0 << 1) -#define V3_LB_MAP_TYPE_IO (1 << 1) -#define V3_LB_MAP_TYPE_MEM (3 << 1) -#define V3_LB_MAP_TYPE_CONFIG (5 << 1) -#define V3_LB_MAP_TYPE_MEM_MULTIPLE (6 << 1) - -#define v3_addr_to_lb_map(a) (((a) >> 16) & V3_LB_MAP_MAP_ADR) - -/* - * LB_BASE2 register bits (Local bus -> PCI IO) - */ -#define V3_LB_BASE2_ADR_BASE 0xff00 -#define V3_LB_BASE2_SWAP (3 << 6) -#define V3_LB_BASE2_ENABLE (1 << 0) - -#define v3_addr_to_lb_base2(a) (((a) >> 16) & V3_LB_BASE2_ADR_BASE) - -/* - * LB_MAP2 register bits (Local bus -> PCI IO) - */ -#define V3_LB_MAP2_MAP_ADR 0xff00 - -#define v3_addr_to_lb_map2(a) (((a) >> 16) & V3_LB_MAP2_MAP_ADR) - -#endif diff --git a/arch/arm/include/asm/hugetlb-3level.h b/arch/arm/include/asm/hugetlb-3level.h new file mode 100644 index 000000000000..d4014fbe5ea3 --- /dev/null +++ b/arch/arm/include/asm/hugetlb-3level.h @@ -0,0 +1,71 @@ +/* + * arch/arm/include/asm/hugetlb-3level.h + * + * Copyright (C) 2012 ARM Ltd. + * + * Based on arch/x86/include/asm/hugetlb.h. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _ASM_ARM_HUGETLB_3LEVEL_H +#define _ASM_ARM_HUGETLB_3LEVEL_H + + +/* + * If our huge pte is non-zero then mark the valid bit. + * This allows pte_present(huge_ptep_get(ptep)) to return true for non-zero + * ptes. + * (The valid bit is automatically cleared by set_pte_at for PROT_NONE ptes). + */ +static inline pte_t huge_ptep_get(pte_t *ptep) +{ + pte_t retval = *ptep; + if (pte_val(retval)) + pte_val(retval) |= L_PTE_VALID; + return retval; +} + +static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) +{ + set_pte_at(mm, addr, ptep, pte); +} + +static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ + ptep_clear_flush(vma, addr, ptep); +} + +static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + ptep_set_wrprotect(mm, addr, ptep); +} + +static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + return ptep_get_and_clear(mm, addr, ptep); +} + +static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t pte, int dirty) +{ + return ptep_set_access_flags(vma, addr, ptep, pte, dirty); +} + +#endif /* _ASM_ARM_HUGETLB_3LEVEL_H */ diff --git a/arch/arm/include/asm/hugetlb.h b/arch/arm/include/asm/hugetlb.h new file mode 100644 index 000000000000..1f1b1cd112f3 --- /dev/null +++ b/arch/arm/include/asm/hugetlb.h @@ -0,0 +1,84 @@ +/* + * arch/arm/include/asm/hugetlb.h + * + * Copyright (C) 2012 ARM Ltd. + * + * Based on arch/x86/include/asm/hugetlb.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _ASM_ARM_HUGETLB_H +#define _ASM_ARM_HUGETLB_H + +#include <asm/page.h> +#include <asm-generic/hugetlb.h> + +#include <asm/hugetlb-3level.h> + +static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, + unsigned long addr, unsigned long end, + unsigned long floor, + unsigned long ceiling) +{ + free_pgd_range(tlb, addr, end, floor, ceiling); +} + + +static inline int is_hugepage_only_range(struct mm_struct *mm, + unsigned long addr, unsigned long len) +{ + return 0; +} + +static inline int prepare_hugepage_range(struct file *file, + unsigned long addr, unsigned long len) +{ + struct hstate *h = hstate_file(file); + if (len & ~huge_page_mask(h)) + return -EINVAL; + if (addr & ~huge_page_mask(h)) + return -EINVAL; + return 0; +} + +static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) +{ +} + +static inline int huge_pte_none(pte_t pte) +{ + return pte_none(pte); +} + +static inline pte_t huge_pte_wrprotect(pte_t pte) +{ + return pte_wrprotect(pte); +} + +static inline int arch_prepare_hugepage(struct page *page) +{ + return 0; +} + +static inline void arch_release_hugepage(struct page *page) +{ +} + +static inline void arch_clear_hugepage_flags(struct page *page) +{ + clear_bit(PG_dcache_clean, &page->flags); +} + +#endif /* _ASM_ARM_HUGETLB_H */ diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index 652b56086de7..d070741b2b37 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -130,16 +130,16 @@ static inline u32 __raw_readl(const volatile void __iomem *addr) */ extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long, size_t, unsigned int, void *); -extern void __iomem *__arm_ioremap_caller(unsigned long, size_t, unsigned int, +extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int, void *); extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int); -extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int); -extern void __iomem *__arm_ioremap_exec(unsigned long, size_t, bool cached); +extern void __iomem *__arm_ioremap(phys_addr_t, size_t, unsigned int); +extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached); extern void __iounmap(volatile void __iomem *addr); extern void __arm_iounmap(volatile void __iomem *addr); -extern void __iomem * (*arch_ioremap_caller)(unsigned long, size_t, +extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *); extern void (*arch_iounmap)(volatile void __iomem *); diff --git a/arch/arm/include/asm/irqflags.h b/arch/arm/include/asm/irqflags.h index 1e6cca55c750..3b763d6652a0 100644 --- a/arch/arm/include/asm/irqflags.h +++ b/arch/arm/include/asm/irqflags.h @@ -8,6 +8,16 @@ /* * CPU interrupt mask handling. 
*/ +#ifdef CONFIG_CPU_V7M +#define IRQMASK_REG_NAME_R "primask" +#define IRQMASK_REG_NAME_W "primask" +#define IRQMASK_I_BIT 1 +#else +#define IRQMASK_REG_NAME_R "cpsr" +#define IRQMASK_REG_NAME_W "cpsr_c" +#define IRQMASK_I_BIT PSR_I_BIT +#endif + #if __LINUX_ARM_ARCH__ >= 6 static inline unsigned long arch_local_irq_save(void) @@ -15,7 +25,7 @@ static inline unsigned long arch_local_irq_save(void) unsigned long flags; asm volatile( - " mrs %0, cpsr @ arch_local_irq_save\n" + " mrs %0, " IRQMASK_REG_NAME_R " @ arch_local_irq_save\n" " cpsid i" : "=r" (flags) : : "memory", "cc"); return flags; @@ -129,7 +139,7 @@ static inline unsigned long arch_local_save_flags(void) { unsigned long flags; asm volatile( - " mrs %0, cpsr @ local_save_flags" + " mrs %0, " IRQMASK_REG_NAME_R " @ local_save_flags" : "=r" (flags) : : "memory", "cc"); return flags; } @@ -140,7 +150,7 @@ static inline unsigned long arch_local_save_flags(void) static inline void arch_local_irq_restore(unsigned long flags) { asm volatile( - " msr cpsr_c, %0 @ local_irq_restore" + " msr " IRQMASK_REG_NAME_W ", %0 @ local_irq_restore" : : "r" (flags) : "memory", "cc"); @@ -148,8 +158,8 @@ static inline void arch_local_irq_restore(unsigned long flags) static inline int arch_irqs_disabled_flags(unsigned long flags) { - return flags & PSR_I_BIT; + return flags & IRQMASK_I_BIT; } -#endif -#endif +#endif /* ifdef __KERNEL__ */ +#endif /* ifndef __ASM_ARM_IRQFLAGS_H */ diff --git a/arch/arm/include/asm/kvm_arch_timer.h b/arch/arm/include/asm/kvm_arch_timer.h deleted file mode 100644 index 68cb9e1dfb81..000000000000 --- a/arch/arm/include/asm/kvm_arch_timer.h +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright (C) 2012 ARM Ltd. - * Author: Marc Zyngier <marc.zyngier@arm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef __ASM_ARM_KVM_ARCH_TIMER_H -#define __ASM_ARM_KVM_ARCH_TIMER_H - -#include <linux/clocksource.h> -#include <linux/hrtimer.h> -#include <linux/workqueue.h> - -struct arch_timer_kvm { -#ifdef CONFIG_KVM_ARM_TIMER - /* Is the timer enabled */ - bool enabled; - - /* Virtual offset */ - cycle_t cntvoff; -#endif -}; - -struct arch_timer_cpu { -#ifdef CONFIG_KVM_ARM_TIMER - /* Registers: control register, timer value */ - u32 cntv_ctl; /* Saved/restored */ - cycle_t cntv_cval; /* Saved/restored */ - - /* - * Anything that is not used directly from assembly code goes - * here. 
- */ - - /* Background timer used when the guest is not running */ - struct hrtimer timer; - - /* Work queued with the above timer expires */ - struct work_struct expired; - - /* Background timer active */ - bool armed; - - /* Timer IRQ */ - const struct kvm_irq_level *irq; -#endif -}; - -#ifdef CONFIG_KVM_ARM_TIMER -int kvm_timer_hyp_init(void); -int kvm_timer_init(struct kvm *kvm); -void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu); -void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu); -void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu); -void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu); -#else -static inline int kvm_timer_hyp_init(void) -{ - return 0; -}; - -static inline int kvm_timer_init(struct kvm *kvm) -{ - return 0; -} - -static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {} -static inline void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) {} -static inline void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) {} -static inline void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) {} -#endif - -#endif diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h index 124623e5ef14..64e96960de29 100644 --- a/arch/arm/include/asm/kvm_arm.h +++ b/arch/arm/include/asm/kvm_arm.h @@ -135,7 +135,6 @@ #define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1ULL) #define PTRS_PER_S2_PGD (1ULL << (KVM_PHYS_SHIFT - 30)) #define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) -#define S2_PGD_SIZE (1 << S2_PGD_ORDER) /* Virtualization Translation Control Register (VTCR) bits */ #define VTCR_SH0 (3 << 12) diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index 18d50322a9e2..a2f43ddcc300 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -37,16 +37,18 @@ #define c5_AIFSR 15 /* Auxilary Instrunction Fault Status R */ #define c6_DFAR 16 /* Data Fault Address Register */ #define c6_IFAR 17 /* Instruction Fault Address Register */ -#define c9_L2CTLR 18 /* Cortex A15 L2 Control Register */ -#define c10_PRRR 19 /* Primary Region Remap Register */ -#define c10_NMRR 20 /* Normal Memory Remap Register */ -#define c12_VBAR 21 /* Vector Base Address Register */ -#define c13_CID 22 /* Context ID Register */ -#define c13_TID_URW 23 /* Thread ID, User R/W */ -#define c13_TID_URO 24 /* Thread ID, User R/O */ -#define c13_TID_PRIV 25 /* Thread ID, Privileged */ -#define c14_CNTKCTL 26 /* Timer Control Register (PL1) */ -#define NR_CP15_REGS 27 /* Number of regs (incl. invalid) */ +#define c7_PAR 18 /* Physical Address Register */ +#define c7_PAR_high 19 /* PAR top 32 bits */ +#define c9_L2CTLR 20 /* Cortex A15 L2 Control Register */ +#define c10_PRRR 21 /* Primary Region Remap Register */ +#define c10_NMRR 22 /* Normal Memory Remap Register */ +#define c12_VBAR 23 /* Vector Base Address Register */ +#define c13_CID 24 /* Context ID Register */ +#define c13_TID_URW 25 /* Thread ID, User R/W */ +#define c13_TID_URO 26 /* Thread ID, User R/O */ +#define c13_TID_PRIV 27 /* Thread ID, Privileged */ +#define c14_CNTKCTL 28 /* Timer Control Register (PL1) */ +#define NR_CP15_REGS 29 /* Number of regs (incl. 
invalid) */ #define ARM_EXCEPTION_RESET 0 #define ARM_EXCEPTION_UNDEFINED 1 @@ -72,8 +74,6 @@ extern char __kvm_hyp_vector[]; extern char __kvm_hyp_code_start[]; extern char __kvm_hyp_code_end[]; -extern void __kvm_tlb_flush_vmid(struct kvm *kvm); - extern void __kvm_flush_vm_context(void); extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 82b4babead2c..a464e8d7b6c5 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -65,11 +65,6 @@ static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu) return cpsr_mode > USR_MODE;; } -static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg) -{ - return reg == 15; -} - static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu) { return vcpu->arch.fault.hsr; diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 57cb786a6203..7d22517d8071 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -23,9 +23,14 @@ #include <asm/kvm_asm.h> #include <asm/kvm_mmio.h> #include <asm/fpstate.h> -#include <asm/kvm_arch_timer.h> +#include <kvm/arm_arch_timer.h> +#if defined(CONFIG_KVM_ARM_MAX_VCPUS) #define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS +#else +#define KVM_MAX_VCPUS 0 +#endif + #define KVM_USER_MEM_SLOTS 32 #define KVM_PRIVATE_MEM_SLOTS 4 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 @@ -38,7 +43,7 @@ #define KVM_NR_PAGE_SIZES 1 #define KVM_PAGES_PER_HPAGE(x) (1UL<<31) -#include <asm/kvm_vgic.h> +#include <kvm/arm_vgic.h> struct kvm_vcpu; u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode); @@ -190,8 +195,8 @@ int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, int exception_index); -static inline void __cpu_init_hyp_mode(unsigned long long boot_pgd_ptr, - unsigned long long pgd_ptr, +static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, + phys_addr_t pgd_ptr, unsigned long hyp_stack_ptr, unsigned long vector_ptr) { diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 472ac7091003..9b28c41f4ba9 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -64,7 +64,7 @@ void kvm_clear_hyp_idmap(void); static inline void kvm_set_pte(pte_t *pte, pte_t new_pte) { - pte_val(*pte) = new_pte; + *pte = new_pte; /* * flush_pmd_entry just takes a void pointer and cleans the necessary * cache entries, so we can reuse the function for ptes. diff --git a/arch/arm/include/asm/kvm_vgic.h b/arch/arm/include/asm/kvm_vgic.h deleted file mode 100644 index 343744e4809c..000000000000 --- a/arch/arm/include/asm/kvm_vgic.h +++ /dev/null @@ -1,220 +0,0 @@ -/* - * Copyright (C) 2012 ARM Ltd. - * Author: Marc Zyngier <marc.zyngier@arm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef __ASM_ARM_KVM_VGIC_H -#define __ASM_ARM_KVM_VGIC_H - -#include <linux/kernel.h> -#include <linux/kvm.h> -#include <linux/irqreturn.h> -#include <linux/spinlock.h> -#include <linux/types.h> -#include <linux/irqchip/arm-gic.h> - -#define VGIC_NR_IRQS 128 -#define VGIC_NR_SGIS 16 -#define VGIC_NR_PPIS 16 -#define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS) -#define VGIC_NR_SHARED_IRQS (VGIC_NR_IRQS - VGIC_NR_PRIVATE_IRQS) -#define VGIC_MAX_CPUS KVM_MAX_VCPUS -#define VGIC_MAX_LRS (1 << 6) - -/* Sanity checks... */ -#if (VGIC_MAX_CPUS > 8) -#error Invalid number of CPU interfaces -#endif - -#if (VGIC_NR_IRQS & 31) -#error "VGIC_NR_IRQS must be a multiple of 32" -#endif - -#if (VGIC_NR_IRQS > 1024) -#error "VGIC_NR_IRQS must be <= 1024" -#endif - -/* - * The GIC distributor registers describing interrupts have two parts: - * - 32 per-CPU interrupts (SGI + PPI) - * - a bunch of shared interrupts (SPI) - */ -struct vgic_bitmap { - union { - u32 reg[VGIC_NR_PRIVATE_IRQS / 32]; - DECLARE_BITMAP(reg_ul, VGIC_NR_PRIVATE_IRQS); - } percpu[VGIC_MAX_CPUS]; - union { - u32 reg[VGIC_NR_SHARED_IRQS / 32]; - DECLARE_BITMAP(reg_ul, VGIC_NR_SHARED_IRQS); - } shared; -}; - -struct vgic_bytemap { - u32 percpu[VGIC_MAX_CPUS][VGIC_NR_PRIVATE_IRQS / 4]; - u32 shared[VGIC_NR_SHARED_IRQS / 4]; -}; - -struct vgic_dist { -#ifdef CONFIG_KVM_ARM_VGIC - spinlock_t lock; - bool ready; - - /* Virtual control interface mapping */ - void __iomem *vctrl_base; - - /* Distributor and vcpu interface mapping in the guest */ - phys_addr_t vgic_dist_base; - phys_addr_t vgic_cpu_base; - - /* Distributor enabled */ - u32 enabled; - - /* Interrupt enabled (one bit per IRQ) */ - struct vgic_bitmap irq_enabled; - - /* Interrupt 'pin' level */ - struct vgic_bitmap irq_state; - - /* Level-triggered interrupt in progress */ - struct vgic_bitmap irq_active; - - /* Interrupt priority. Not used yet. 
*/ - struct vgic_bytemap irq_priority; - - /* Level/edge triggered */ - struct vgic_bitmap irq_cfg; - - /* Source CPU per SGI and target CPU */ - u8 irq_sgi_sources[VGIC_MAX_CPUS][VGIC_NR_SGIS]; - - /* Target CPU for each IRQ */ - u8 irq_spi_cpu[VGIC_NR_SHARED_IRQS]; - struct vgic_bitmap irq_spi_target[VGIC_MAX_CPUS]; - - /* Bitmap indicating which CPU has something pending */ - unsigned long irq_pending_on_cpu; -#endif -}; - -struct vgic_cpu { -#ifdef CONFIG_KVM_ARM_VGIC - /* per IRQ to LR mapping */ - u8 vgic_irq_lr_map[VGIC_NR_IRQS]; - - /* Pending interrupts on this VCPU */ - DECLARE_BITMAP( pending_percpu, VGIC_NR_PRIVATE_IRQS); - DECLARE_BITMAP( pending_shared, VGIC_NR_SHARED_IRQS); - - /* Bitmap of used/free list registers */ - DECLARE_BITMAP( lr_used, VGIC_MAX_LRS); - - /* Number of list registers on this CPU */ - int nr_lr; - - /* CPU vif control registers for world switch */ - u32 vgic_hcr; - u32 vgic_vmcr; - u32 vgic_misr; /* Saved only */ - u32 vgic_eisr[2]; /* Saved only */ - u32 vgic_elrsr[2]; /* Saved only */ - u32 vgic_apr; - u32 vgic_lr[VGIC_MAX_LRS]; -#endif -}; - -#define LR_EMPTY 0xff - -struct kvm; -struct kvm_vcpu; -struct kvm_run; -struct kvm_exit_mmio; - -#ifdef CONFIG_KVM_ARM_VGIC -int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr); -int kvm_vgic_hyp_init(void); -int kvm_vgic_init(struct kvm *kvm); -int kvm_vgic_create(struct kvm *kvm); -int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu); -void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu); -void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu); -int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num, - bool level); -int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu); -bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run, - struct kvm_exit_mmio *mmio); - -#define irqchip_in_kernel(k) (!!((k)->arch.vgic.vctrl_base)) -#define vgic_initialized(k) ((k)->arch.vgic.ready) - -#else -static inline int kvm_vgic_hyp_init(void) -{ - return 0; -} - -static inline int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr) -{ - return 0; -} - -static inline int kvm_vgic_init(struct kvm *kvm) -{ - return 0; -} - -static inline int kvm_vgic_create(struct kvm *kvm) -{ - return 0; -} - -static inline int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) -{ - return 0; -} - -static inline void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) {} -static inline void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) {} - -static inline int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, - unsigned int irq_num, bool level) -{ - return 0; -} - -static inline int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) -{ - return 0; -} - -static inline bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run, - struct kvm_exit_mmio *mmio) -{ - return false; -} - -static inline int irqchip_in_kernel(struct kvm *kvm) -{ - return 0; -} - -static inline bool vgic_initialized(struct kvm *kvm) -{ - return true; -} -#endif - -#endif diff --git a/arch/arm/include/asm/localtimer.h b/arch/arm/include/asm/localtimer.h deleted file mode 100644 index f77ffc1eb0c2..000000000000 --- a/arch/arm/include/asm/localtimer.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * arch/arm/include/asm/localtimer.h - * - * Copyright (C) 2004-2005 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#ifndef __ASM_ARM_LOCALTIMER_H -#define __ASM_ARM_LOCALTIMER_H - -#include <linux/errno.h> - -struct clock_event_device; - -struct local_timer_ops { - int (*setup)(struct clock_event_device *); - void (*stop)(struct clock_event_device *); -}; - -#ifdef CONFIG_LOCAL_TIMERS -/* - * Register a local timer driver - */ -int local_timer_register(struct local_timer_ops *); -#else -static inline int local_timer_register(struct local_timer_ops *ops) -{ - return -ENXIO; -} -#endif - -#endif diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h index 308ad7d6f98b..402a2bc6aa68 100644 --- a/arch/arm/include/asm/mach/arch.h +++ b/arch/arm/include/asm/mach/arch.h @@ -8,7 +8,10 @@ * published by the Free Software Foundation. */ +#include <linux/types.h> + #ifndef __ASSEMBLY__ +#include <linux/reboot.h> struct tag; struct meminfo; @@ -16,8 +19,10 @@ struct pt_regs; struct smp_operations; #ifdef CONFIG_SMP #define smp_ops(ops) (&(ops)) +#define smp_init_ops(ops) (&(ops)) #else #define smp_ops(ops) (struct smp_operations *)NULL +#define smp_init_ops(ops) (bool (*)(void))NULL #endif struct machine_desc { @@ -30,7 +35,7 @@ struct machine_desc { unsigned int nr_irqs; /* number of IRQs */ #ifdef CONFIG_ZONE_DMA - unsigned long dma_zone_size; /* size of DMA-able area */ + phys_addr_t dma_zone_size; /* size of DMA-able area */ #endif unsigned int video_start; /* start of video RAM */ @@ -39,8 +44,9 @@ struct machine_desc { unsigned char reserve_lp0 :1; /* never has lp0 */ unsigned char reserve_lp1 :1; /* never has lp1 */ unsigned char reserve_lp2 :1; /* never has lp2 */ - char restart_mode; /* default restart mode */ + enum reboot_mode reboot_mode; /* default restart mode */ struct smp_operations *smp; /* SMP operations */ + bool (*smp_init)(void); void (*fixup)(struct tag *, char **, struct meminfo *); void (*reserve)(void);/* reserve mem blocks */ @@ -53,18 +59,18 @@ struct machine_desc { #ifdef CONFIG_MULTI_IRQ_HANDLER void (*handle_irq)(struct pt_regs *); #endif - void (*restart)(char, const char *); + void (*restart)(enum reboot_mode, const char *); }; /* * Current machine - only accessible during boot. */ -extern struct machine_desc *machine_desc; +extern const struct machine_desc *machine_desc; /* * Machine type table - also only accessible during boot */ -extern struct machine_desc __arch_info_begin[], __arch_info_end[]; +extern const struct machine_desc __arch_info_begin[], __arch_info_end[]; #define for_each_machine_desc(p) \ for (p = __arch_info_begin; p < __arch_info_end; p++) diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h index 7d2c3c843801..454d642a4070 100644 --- a/arch/arm/include/asm/mach/pci.h +++ b/arch/arm/include/asm/mach/pci.h @@ -16,6 +16,7 @@ struct pci_sys_data; struct pci_ops; struct pci_bus; +struct device; struct hw_pci { #ifdef CONFIG_PCI_DOMAINS @@ -35,6 +36,8 @@ struct hw_pci { resource_size_t start, resource_size_t size, resource_size_t align); + void (*add_bus)(struct pci_bus *bus); + void (*remove_bus)(struct pci_bus *bus); }; /* @@ -62,13 +65,24 @@ struct pci_sys_data { resource_size_t start, resource_size_t size, resource_size_t align); + void (*add_bus)(struct pci_bus *bus); + void (*remove_bus)(struct pci_bus *bus); void *private_data; /* platform controller private data */ }; /* * Call this with your hw_pci struct to initialise the PCI system. 
*/ -void pci_common_init(struct hw_pci *); +void pci_common_init_dev(struct device *, struct hw_pci *); + +/* + * Compatibility wrapper for older platforms that do not care about + * passing the parent device. + */ +static inline void pci_common_init(struct hw_pci *hw) +{ + pci_common_init_dev(NULL, hw); +} /* * Setup early fixed I/O mapping. @@ -96,9 +110,4 @@ extern struct pci_ops via82c505_ops; extern int via82c505_setup(int nr, struct pci_sys_data *); extern void via82c505_init(void *sysdata); -extern struct pci_ops pci_v3_ops; -extern int pci_v3_setup(int nr, struct pci_sys_data *); -extern void pci_v3_preinit(void); -extern void pci_v3_postinit(void); - #endif /* __ASM_MACH_PCI_H */ diff --git a/arch/arm/include/asm/memblock.h b/arch/arm/include/asm/memblock.h index 00ca5f92648e..c2f5102ae659 100644 --- a/arch/arm/include/asm/memblock.h +++ b/arch/arm/include/asm/memblock.h @@ -4,8 +4,7 @@ struct meminfo; struct machine_desc; -extern void arm_memblock_init(struct meminfo *, struct machine_desc *); - +void arm_memblock_init(struct meminfo *, const struct machine_desc *); phys_addr_t arm_memblock_steal(phys_addr_t size, phys_addr_t align); #endif diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 57870ab313c5..e750a938fd3c 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -18,6 +18,8 @@ #include <linux/types.h> #include <linux/sizes.h> +#include <asm/cache.h> + #ifdef CONFIG_NEED_MACH_MEMORY_H #include <mach/memory.h> #endif @@ -141,6 +143,20 @@ #define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page))) #define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys))) +/* + * Minimum guaranted alignment in pgd_alloc(). The page table pointers passed + * around in head.S and proc-*.S are shifted by this amount, in order to + * leave spare high bits for systems with physical address extension. This + * does not fully accomodate the 40-bit addressing capability of ARM LPAE, but + * gives us about 38-bits or so. + */ +#ifdef CONFIG_ARM_LPAE +#define ARCH_PGD_SHIFT L1_CACHE_SHIFT +#else +#define ARCH_PGD_SHIFT 0 +#endif +#define ARCH_PGD_MASK ((1 << ARCH_PGD_SHIFT) - 1) + #ifndef __ASSEMBLY__ /* @@ -207,7 +223,7 @@ static inline unsigned long __phys_to_virt(unsigned long x) * direct-mapped view. We assume this is the first page * of RAM in the mem_map as well. */ -#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT) +#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT)) /* * These are *only* valid on the kernel direct mapped RAM memory. @@ -260,12 +276,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x) /* * Conversion between a struct page and a physical address. * - * Note: when converting an unknown physical address to a - * struct page, the resulting pointer must be validated - * using VALID_PAGE(). It must return an invalid struct page - * for any physical address not corresponding to a system - * RAM address. 
- * * page_to_pfn(page) convert a struct page * to a PFN number * pfn_to_page(pfn) convert a _valid_ PFN number to struct page * * diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h index e3d55547e755..6f18da09668b 100644 --- a/arch/arm/include/asm/mmu.h +++ b/arch/arm/include/asm/mmu.h @@ -6,8 +6,11 @@ typedef struct { #ifdef CONFIG_CPU_HAS_ASID atomic64_t id; +#else + int switch_pending; #endif unsigned int vmalloc_seq; + unsigned long sigpage; } mm_context_t; #ifdef CONFIG_CPU_HAS_ASID diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index a7b85e0d0cc1..9b32f76bb0dd 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h @@ -18,6 +18,7 @@ #include <asm/cacheflush.h> #include <asm/cachetype.h> #include <asm/proc-fns.h> +#include <asm/smp_plat.h> #include <asm-generic/mm_hooks.h> void __check_vmalloc_seq(struct mm_struct *mm); @@ -27,7 +28,15 @@ void __check_vmalloc_seq(struct mm_struct *mm); void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk); #define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; }) -DECLARE_PER_CPU(atomic64_t, active_asids); +#ifdef CONFIG_ARM_ERRATA_798181 +void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm, + cpumask_t *mask); +#else /* !CONFIG_ARM_ERRATA_798181 */ +static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm, + cpumask_t *mask) +{ +} +#endif /* CONFIG_ARM_ERRATA_798181 */ #else /* !CONFIG_CPU_HAS_ASID */ @@ -47,7 +56,7 @@ static inline void check_and_switch_context(struct mm_struct *mm, * on non-ASID CPUs, the old mm will remain valid until the * finish_arch_post_lock_switch() call. */ - set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM); + mm->context.switch_pending = 1; else cpu_switch_mm(mm->pgd, mm); } @@ -56,9 +65,21 @@ static inline void check_and_switch_context(struct mm_struct *mm, finish_arch_post_lock_switch static inline void finish_arch_post_lock_switch(void) { - if (test_and_clear_thread_flag(TIF_SWITCH_MM)) { - struct mm_struct *mm = current->mm; - cpu_switch_mm(mm->pgd, mm); + struct mm_struct *mm = current->mm; + + if (mm && mm->context.switch_pending) { + /* + * Preemption must be disabled during cpu_switch_mm() as we + * have some stateful cache flush implementations. Check + * switch_pending again in case we were preempted and the + * switch to this mm was already done. + */ + preempt_disable(); + if (mm->context.switch_pending) { + mm->context.switch_pending = 0; + cpu_switch_mm(mm->pgd, mm); + } + preempt_enable_no_resched(); } } @@ -98,12 +119,16 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, #ifdef CONFIG_MMU unsigned int cpu = smp_processor_id(); -#ifdef CONFIG_SMP - /* check for possible thread migration */ - if (!cpumask_empty(mm_cpumask(next)) && + /* + * __sync_icache_dcache doesn't broadcast the I-cache invalidation, + * so check for possible thread migration and invalidate the I-cache + * if we're new to this CPU. 
+ */ + if (cache_ops_need_broadcast() && + !cpumask_empty(mm_cpumask(next)) && !cpumask_test_cpu(cpu, mm_cpumask(next))) __flush_icache_all(); -#endif + if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) { check_and_switch_context(next, tsk); if (cache_is_vivt()) diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h index 0d3a28dbc8e5..ed690c49ef93 100644 --- a/arch/arm/include/asm/module.h +++ b/arch/arm/include/asm/module.h @@ -12,6 +12,8 @@ enum { ARM_SEC_CORE, ARM_SEC_EXIT, ARM_SEC_DEVEXIT, + ARM_SEC_HOT, + ARM_SEC_UNLIKELY, ARM_SEC_MAX, }; diff --git a/arch/arm/include/asm/mpu.h b/arch/arm/include/asm/mpu.h new file mode 100644 index 000000000000..c3247cc2fe08 --- /dev/null +++ b/arch/arm/include/asm/mpu.h @@ -0,0 +1,76 @@ +#ifndef __ARM_MPU_H +#define __ARM_MPU_H + +#ifdef CONFIG_ARM_MPU + +/* MPUIR layout */ +#define MPUIR_nU 1 +#define MPUIR_DREGION 8 +#define MPUIR_IREGION 16 +#define MPUIR_DREGION_SZMASK (0xFF << MPUIR_DREGION) +#define MPUIR_IREGION_SZMASK (0xFF << MPUIR_IREGION) + +/* ID_MMFR0 data relevant to MPU */ +#define MMFR0_PMSA (0xF << 4) +#define MMFR0_PMSAv7 (3 << 4) + +/* MPU D/I Size Register fields */ +#define MPU_RSR_SZ 1 +#define MPU_RSR_EN 0 + +/* The D/I RSR value for an enabled region spanning the whole of memory */ +#define MPU_RSR_ALL_MEM 63 + +/* Individual bits in the DR/IR ACR */ +#define MPU_ACR_XN (1 << 12) +#define MPU_ACR_SHARED (1 << 2) + +/* C, B and TEX[2:0] bits only have semantic meanings when grouped */ +#define MPU_RGN_CACHEABLE 0xB +#define MPU_RGN_SHARED_CACHEABLE (MPU_RGN_CACHEABLE | MPU_ACR_SHARED) +#define MPU_RGN_STRONGLY_ORDERED 0 + +/* Main region should only be shared for SMP */ +#ifdef CONFIG_SMP +#define MPU_RGN_NORMAL (MPU_RGN_CACHEABLE | MPU_ACR_SHARED) +#else +#define MPU_RGN_NORMAL MPU_RGN_CACHEABLE +#endif + +/* Access permission bits of ACR (only define those that we use)*/ +#define MPU_AP_PL1RW_PL0RW (0x3 << 8) +#define MPU_AP_PL1RW_PL0R0 (0x2 << 8) +#define MPU_AP_PL1RW_PL0NA (0x1 << 8) + +/* For minimal static MPU region configurations */ +#define MPU_PROBE_REGION 0 +#define MPU_BG_REGION 1 +#define MPU_RAM_REGION 2 +#define MPU_VECTORS_REGION 3 + +/* Maximum number of regions Linux is interested in */ +#define MPU_MAX_REGIONS 16 + +#define MPU_DATA_SIDE 0 +#define MPU_INSTR_SIDE 1 + +#ifndef __ASSEMBLY__ + +struct mpu_rgn { + /* Assume same attributes for d/i-side */ + u32 drbar; + u32 drsr; + u32 dracr; +}; + +struct mpu_rgn_info { + u32 mpuir; + struct mpu_rgn rgns[MPU_MAX_REGIONS]; +}; +extern struct mpu_rgn_info mpu_rgn_info; + +#endif /* __ASSEMBLY__ */ + +#endif /* CONFIG_ARM_MPU */ + +#endif diff --git a/arch/arm/include/asm/neon.h b/arch/arm/include/asm/neon.h new file mode 100644 index 000000000000..8f730fe70093 --- /dev/null +++ b/arch/arm/include/asm/neon.h @@ -0,0 +1,36 @@ +/* + * linux/arch/arm/include/asm/neon.h + * + * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <asm/hwcap.h> + +#define cpu_has_neon() (!!(elf_hwcap & HWCAP_NEON)) + +#ifdef __ARM_NEON__ + +/* + * If you are affected by the BUILD_BUG below, it probably means that you are + * using NEON code /and/ calling the kernel_neon_begin() function from the same + * compilation unit. 
To prevent issues that may arise from GCC reordering or + generating(1) NEON instructions outside of these begin/end functions, the + only supported way of using NEON code in the kernel is by isolating it in a + separate compilation unit, and calling it from another unit from inside a + kernel_neon_begin/kernel_neon_end pair. + * + * (1) Current GCC (4.7) might generate NEON instructions at O3 level if + * -mfpu=neon is set. + */ + +#define kernel_neon_begin() \ + BUILD_BUG_ON_MSG(1, "kernel_neon_begin() called from NEON code") + +#else +void kernel_neon_begin(void); +#endif +void kernel_neon_end(void); diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h index 12f71a190422..f94784f0e3a6 100644 --- a/arch/arm/include/asm/outercache.h +++ b/arch/arm/include/asm/outercache.h @@ -37,10 +37,10 @@ struct outer_cache_fns { void (*resume)(void); }; -#ifdef CONFIG_OUTER_CACHE - extern struct outer_cache_fns outer_cache; +#ifdef CONFIG_OUTER_CACHE + static inline void outer_inv_range(phys_addr_t start, phys_addr_t end) { if (outer_cache.inv_range) diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index 812a4944e783..4355f0ec44d6 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h @@ -13,7 +13,7 @@ /* PAGE_SHIFT determines the page size */ #define PAGE_SHIFT 12 #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) -#define PAGE_MASK (~(PAGE_SIZE-1)) +#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1)) #ifndef __ASSEMBLY__ @@ -142,7 +142,9 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from, #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) extern void copy_page(void *to, const void *from); +#ifdef CONFIG_KUSER_HELPERS #define __HAVE_ARCH_GATE_AREA 1 +#endif #ifdef CONFIG_ARM_LPAE #include <asm/pgtable-3level-types.h> diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h index 968c0a14e0a3..209e6504922e 100644 --- a/arch/arm/include/asm/percpu.h +++ b/arch/arm/include/asm/percpu.h @@ -30,8 +30,15 @@ static inline void set_my_cpu_offset(unsigned long off) static inline unsigned long __my_cpu_offset(void) { unsigned long off; - /* Read TPIDRPRW */ - asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : : "memory"); + register unsigned long *sp asm ("sp"); + + /* + * Read TPIDRPRW. + * We want to allow caching the value, so avoid using volatile and + * instead use a fake stack read to hazard against barrier(). 
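A hedged usage sketch of the rule spelled out in neon.h above: the NEON body lives in its own, NEON-enabled compilation unit, and callers bracket the call with kernel_neon_begin()/kernel_neon_end(). my_neon_fill32() is a made-up function, and process context is assumed (the xor.h hunk further down shows the in_interrupt() fallback):

/* caller.c - built without NEON, may safely include <asm/neon.h> */
#include <linux/types.h>
#include <asm/neon.h>

void my_neon_fill32(u32 *dst, u32 val, int count);  /* defined in a NEON-built unit */

void fill_buffer(u32 *dst, int count)
{
        kernel_neon_begin();
        my_neon_fill32(dst, 0, count);
        kernel_neon_end();
}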
+ */ + asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : "Q" (*sp)); + return off; } #define __my_cpu_offset __my_cpu_offset() diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h index 18f5cef82ad5..626989fec4d3 100644 --- a/arch/arm/include/asm/pgtable-3level-hwdef.h +++ b/arch/arm/include/asm/pgtable-3level-hwdef.h @@ -30,6 +30,7 @@ #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0) #define PMD_TYPE_TABLE (_AT(pmdval_t, 3) << 0) #define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0) +#define PMD_TABLE_BIT (_AT(pmdval_t, 1) << 1) #define PMD_BIT4 (_AT(pmdval_t, 0)) #define PMD_DOMAIN(x) (_AT(pmdval_t, 0)) #define PMD_APTABLE_SHIFT (61) @@ -41,6 +42,8 @@ */ #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2) #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3) +#define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */ +#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */ #define PMD_SECT_S (_AT(pmdval_t, 3) << 8) #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10) #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11) @@ -66,6 +69,7 @@ #define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0) #define PTE_TYPE_FAULT (_AT(pteval_t, 0) << 0) #define PTE_TYPE_PAGE (_AT(pteval_t, 3) << 0) +#define PTE_TABLE_BIT (_AT(pteval_t, 1) << 1) #define PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) /* AttrIndx[0] */ #define PTE_CACHEABLE (_AT(pteval_t, 1) << 3) /* AttrIndx[1] */ #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ @@ -79,4 +83,24 @@ #define PHYS_MASK_SHIFT (40) #define PHYS_MASK ((1ULL << PHYS_MASK_SHIFT) - 1) +/* + * TTBR0/TTBR1 split (PAGE_OFFSET): + * 0x40000000: T0SZ = 2, T1SZ = 0 (not used) + * 0x80000000: T0SZ = 0, T1SZ = 1 + * 0xc0000000: T0SZ = 0, T1SZ = 2 + * + * Only use this feature if PHYS_OFFSET <= PAGE_OFFSET, otherwise + * booting secondary CPUs would end up using TTBR1 for the identity + * mapping set up in TTBR0. + */ +#if defined CONFIG_VMSPLIT_2G +#define TTBR1_OFFSET 16 /* skip two L1 entries */ +#elif defined CONFIG_VMSPLIT_3G +#define TTBR1_OFFSET (4096 * (1 + 3)) /* only L2, skip pgd + 3*pmd */ +#else +#define TTBR1_OFFSET 0 +#endif + +#define TTBR1_SIZE (((PAGE_OFFSET >> 30) - 1) << 16) + #endif diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index 86b8fe398b95..5689c18c85f5 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -33,7 +33,7 @@ #define PTRS_PER_PMD 512 #define PTRS_PER_PGD 4 -#define PTE_HWTABLE_PTRS (PTRS_PER_PTE) +#define PTE_HWTABLE_PTRS (0) #define PTE_HWTABLE_OFF (0) #define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u64)) @@ -48,20 +48,28 @@ #define PMD_SHIFT 21 #define PMD_SIZE (1UL << PMD_SHIFT) -#define PMD_MASK (~(PMD_SIZE-1)) +#define PMD_MASK (~((1 << PMD_SHIFT) - 1)) #define PGDIR_SIZE (1UL << PGDIR_SHIFT) -#define PGDIR_MASK (~(PGDIR_SIZE-1)) +#define PGDIR_MASK (~((1 << PGDIR_SHIFT) - 1)) /* * section address mask and size definitions. */ #define SECTION_SHIFT 21 #define SECTION_SIZE (1UL << SECTION_SHIFT) -#define SECTION_MASK (~(SECTION_SIZE-1)) +#define SECTION_MASK (~((1 << SECTION_SHIFT) - 1)) #define USER_PTRS_PER_PGD (PAGE_OFFSET / PGDIR_SIZE) /* + * Hugetlb definitions. + */ +#define HPAGE_SHIFT PMD_SHIFT +#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT) +#define HPAGE_MASK (~(HPAGE_SIZE - 1)) +#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) + +/* * "Linux" PTE definitions for LPAE. 
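The TTBR1_OFFSET/TTBR1_SIZE arithmetic above can be sanity-checked on the host; the values below assume the standard 2G/2G and 3G/1G splits, and the shift by 16 corresponds to the T1SZ field position in TTBCR:

#include <assert.h>

#define PAGE_OFFSET_3G  0xC0000000UL
#define PAGE_OFFSET_2G  0x80000000UL
#define TTBR1_SIZE_FOR(off)  ((((off) >> 30) - 1) << 16)

int main(void)
{
        /* 0xc0000000: T1SZ = 2, matching the table in the comment above */
        assert(TTBR1_SIZE_FOR(PAGE_OFFSET_3G) == (2UL << 16));
        /* 0x80000000: T1SZ = 1 */
        assert(TTBR1_SIZE_FOR(PAGE_OFFSET_2G) == (1UL << 16));
        return 0;
}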
* * These bits overlap with the hardware bits but the naming is preserved for @@ -79,6 +87,11 @@ #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */ #define L_PTE_NONE (_AT(pteval_t, 1) << 57) /* PROT_NONE */ +#define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0) +#define PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55) +#define PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56) +#define PMD_SECT_NONE (_AT(pmdval_t, 1) << 57) + /* * To be used in assembly code with the upper page attributes. */ @@ -166,8 +179,83 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) clean_pmd_entry(pmdp); \ } while (0) +/* + * For 3 levels of paging the PTE_EXT_NG bit will be set for user address ptes + * that are written to a page table but not for ptes created with mk_pte. + * + * In hugetlb_no_page, a new huge pte (new_pte) is generated and passed to + * hugetlb_cow, where it is compared with an entry in a page table. + * This comparison test fails erroneously leading ultimately to a memory leak. + * + * To correct this behaviour, we mask off PTE_EXT_NG for any pte that is + * present before running the comparison. + */ +#define __HAVE_ARCH_PTE_SAME +#define pte_same(pte_a,pte_b) ((pte_present(pte_a) ? pte_val(pte_a) & ~PTE_EXT_NG \ + : pte_val(pte_a)) \ + == (pte_present(pte_b) ? pte_val(pte_b) & ~PTE_EXT_NG \ + : pte_val(pte_b))) + #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext))) +#define pte_huge(pte) (pte_val(pte) && !(pte_val(pte) & PTE_TABLE_BIT)) +#define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT)) + +#define pmd_young(pmd) (pmd_val(pmd) & PMD_SECT_AF) + +#define __HAVE_ARCH_PMD_WRITE +#define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY)) + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT)) +#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING) +#endif + +#define PMD_BIT_FUNC(fn,op) \ +static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; } + +PMD_BIT_FUNC(wrprotect, |= PMD_SECT_RDONLY); +PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF); +PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING); +PMD_BIT_FUNC(mkwrite, &= ~PMD_SECT_RDONLY); +PMD_BIT_FUNC(mkdirty, |= PMD_SECT_DIRTY); +PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF); + +#define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT)) + +#define pmd_pfn(pmd) (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT) +#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))) +#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot) + +/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */ +#define pmd_mknotpresent(pmd) (__pmd(0)) + +static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) +{ + const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | PMD_SECT_RDONLY | + PMD_SECT_VALID | PMD_SECT_NONE; + pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask); + return pmd; +} + +static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmd) +{ + BUG_ON(addr >= TASK_SIZE); + + /* create a faulting entry if PROT_NONE protected */ + if (pmd_val(pmd) & PMD_SECT_NONE) + pmd_val(pmd) &= ~PMD_SECT_VALID; + + *pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG); + flush_pmd_entry(pmdp); +} + +static inline int has_transparent_hugepage(void) +{ + return 1; +} + #endif /* __ASSEMBLY__ */ #endif /* _ASM_PGTABLE_3LEVEL_H */ diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h index 7ec60d6075bf..0642228ff785 100644 --- 
a/arch/arm/include/asm/pgtable-nommu.h +++ b/arch/arm/include/asm/pgtable-nommu.h @@ -79,8 +79,6 @@ extern unsigned int kobjsize(const void *objp); * No page table caches to initialise. */ #define pgtable_cache_init() do { } while (0) -#define io_remap_pfn_range remap_pfn_range - /* * All 32bit addresses are effectively valid for vmalloc... diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 9bcd262a9008..be956dbf6bae 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -24,6 +24,9 @@ #include <asm/memory.h> #include <asm/pgtable-hwdef.h> + +#include <asm/tlbflush.h> + #ifdef CONFIG_ARM_LPAE #include <asm/pgtable-3level.h> #else @@ -97,7 +100,7 @@ extern pgprot_t pgprot_s2_device; #define PAGE_HYP _MOD_PROT(pgprot_kernel, L_PTE_HYP) #define PAGE_HYP_DEVICE _MOD_PROT(pgprot_hyp_device, L_PTE_HYP) #define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY) -#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_USER | L_PTE_S2_RDONLY) +#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_S2_RDWR) #define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE) #define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN) @@ -318,13 +321,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) #define HAVE_ARCH_UNMAPPED_AREA #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN -/* - * remap a physical page `pfn' of size `size' with page protection `prot' - * into virtual address `from' - */ -#define io_remap_pfn_range(vma,from,pfn,size,prot) \ - remap_pfn_range(vma, from, pfn, size, prot) - #define pgtable_cache_init() do { } while (0) #endif /* !__ASSEMBLY__ */ diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h index f3628fb3d2b3..5324c1112f3a 100644 --- a/arch/arm/include/asm/proc-fns.h +++ b/arch/arm/include/asm/proc-fns.h @@ -60,7 +60,7 @@ extern struct processor { /* * Set the page table */ - void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm); + void (*switch_mm)(phys_addr_t pgd_phys, struct mm_struct *mm); /* * Set a possibly extended PTE. Non-extended PTEs should * ignore 'ext'. 
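The switch_mm()/cpu_do_switch_mm() prototypes above now take phys_addr_t instead of unsigned long; a minimal host-side illustration of the truncation this avoids (the example address is made up, but with LPAE a pgd can legitimately live above 4GB):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t pgd_phys = 0x880000000ULL;        /* pgd above the 4GB mark */
        uint32_t as_u32   = (uint32_t)pgd_phys;    /* what a 32-bit argument keeps */

        assert(as_u32 == 0x80000000UL);            /* bits [39:32] silently lost */
        assert((uint64_t)as_u32 != pgd_phys);
        return 0;
}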
@@ -82,7 +82,7 @@ extern void cpu_proc_init(void); extern void cpu_proc_fin(void); extern int cpu_do_idle(void); extern void cpu_dcache_clean_area(void *, int); -extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); +extern void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); #ifdef CONFIG_ARM_LPAE extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte); #else @@ -116,13 +116,25 @@ extern void cpu_resume(void); #define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm) #ifdef CONFIG_ARM_LPAE + +#define cpu_get_ttbr(nr) \ + ({ \ + u64 ttbr; \ + __asm__("mrrc p15, " #nr ", %Q0, %R0, c2" \ + : "=r" (ttbr)); \ + ttbr; \ + }) + +#define cpu_set_ttbr(nr, val) \ + do { \ + u64 ttbr = val; \ + __asm__("mcrr p15, " #nr ", %Q0, %R0, c2" \ + : : "r" (ttbr)); \ + } while (0) + #define cpu_get_pgd() \ ({ \ - unsigned long pg, pg2; \ - __asm__("mrrc p15, 0, %0, %1, c2" \ - : "=r" (pg), "=r" (pg2) \ - : \ - : "cc"); \ + u64 pg = cpu_get_ttbr(0); \ pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1); \ (pgd_t *)phys_to_virt(pg); \ }) @@ -137,6 +149,10 @@ extern void cpu_resume(void); }) #endif +#else /*!CONFIG_MMU */ + +#define cpu_switch_mm(pgd,mm) { } + #endif #endif /* __ASSEMBLY__ */ diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h index 06e7d509eaac..413f3876341c 100644 --- a/arch/arm/include/asm/processor.h +++ b/arch/arm/include/asm/processor.h @@ -54,7 +54,6 @@ struct thread_struct { #define start_thread(regs,pc,sp) \ ({ \ - unsigned long *stack = (unsigned long *)sp; \ memset(regs->uregs, 0, sizeof(regs->uregs)); \ if (current->personality & ADDR_LIMIT_32BIT) \ regs->ARM_cpsr = USR_MODE; \ @@ -65,9 +64,6 @@ struct thread_struct { regs->ARM_cpsr |= PSR_ENDSTATE; \ regs->ARM_pc = pc & ~1; /* pc */ \ regs->ARM_sp = sp; /* sp */ \ - regs->ARM_r2 = stack[2]; /* r2 (envp) */ \ - regs->ARM_r1 = stack[1]; /* r1 (argv) */ \ - regs->ARM_r0 = stack[0]; /* r0 (argc) */ \ nommu_start_thread(regs); \ }) diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h index a219227c3e43..4a2985e21969 100644 --- a/arch/arm/include/asm/prom.h +++ b/arch/arm/include/asm/prom.h @@ -15,13 +15,13 @@ #ifdef CONFIG_OF -extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys); +extern const struct machine_desc *setup_machine_fdt(unsigned int dt_phys); extern void arm_dt_memblock_reserve(void); extern void __init arm_dt_init_cpu_maps(void); #else /* CONFIG_OF */ -static inline struct machine_desc *setup_machine_fdt(unsigned int dt_phys) +static inline const struct machine_desc *setup_machine_fdt(unsigned int dt_phys) { return NULL; } diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h index ce0dbe7c1625..c4ae171850f8 100644 --- a/arch/arm/include/asm/psci.h +++ b/arch/arm/include/asm/psci.h @@ -32,5 +32,14 @@ struct psci_operations { }; extern struct psci_operations psci_ops; +extern struct smp_operations psci_smp_ops; + +#ifdef CONFIG_ARM_PSCI +void psci_init(void); +bool psci_smp_available(void); +#else +static inline void psci_init(void) { } +static inline bool psci_smp_available(void) { return false; } +#endif #endif /* __ASM_ARM_PSCI_H */ diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index 3d52ee1bfb31..04c99f36ff7f 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -45,6 +45,7 @@ struct pt_regs { */ static inline int valid_user_regs(struct pt_regs *regs) { +#ifndef CONFIG_CPU_V7M unsigned long mode = regs->ARM_cpsr & MODE_MASK; /* @@ -67,6 +68,9 @@ static inline 
int valid_user_regs(struct pt_regs *regs) regs->ARM_cpsr |= USR_MODE; return 0; +#else /* ifndef CONFIG_CPU_V7M */ + return 1; +#endif } static inline long regs_return_value(struct pt_regs *regs) diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h index 3d520ddca61b..2389b71a8e7c 100644 --- a/arch/arm/include/asm/sched_clock.h +++ b/arch/arm/include/asm/sched_clock.h @@ -1,16 +1,4 @@ -/* - * sched_clock.h: support for extending counters to full 64-bit ns counter - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. +/* You shouldn't include this file. Use linux/sched_clock.h instead. + * Temporary file until all asm/sched_clock.h users are gone */ -#ifndef ASM_SCHED_CLOCK -#define ASM_SCHED_CLOCK - -extern void sched_clock_postinit(void); -extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate); - -extern unsigned long long (*sched_clock_func)(void); - -#endif +#include <linux/sched_clock.h> diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h index d3a22bebe6ce..a8cae71caceb 100644 --- a/arch/arm/include/asm/smp.h +++ b/arch/arm/include/asm/smp.h @@ -65,7 +65,10 @@ asmlinkage void secondary_start_kernel(void); * Initial data for bringing up a secondary CPU. */ struct secondary_data { - unsigned long pgdir; + union { + unsigned long mpu_rgn_szr; + unsigned long pgdir; + }; unsigned long swapper_pg_dir; void *stack; }; diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h index aaa61b6f50ff..a252c0bfacf5 100644 --- a/arch/arm/include/asm/smp_plat.h +++ b/arch/arm/include/asm/smp_plat.h @@ -26,6 +26,9 @@ static inline bool is_smp(void) } /* all SMP configurations have the extended CPUID registers */ +#ifndef CONFIG_MMU +#define tlb_ops_need_broadcast() 0 +#else static inline int tlb_ops_need_broadcast(void) { if (!is_smp()) @@ -33,6 +36,7 @@ static inline int tlb_ops_need_broadcast(void) return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2; } +#endif #if !defined(CONFIG_SMP) || __LINUX_ARM_ARCH__ >= 7 #define cache_ops_need_broadcast() 0 @@ -49,7 +53,7 @@ static inline int cache_ops_need_broadcast(void) /* * Logical CPU mapping. */ -extern int __cpu_logical_map[]; +extern u32 __cpu_logical_map[]; #define cpu_logical_map(cpu) __cpu_logical_map[cpu] /* * Retrieve logical cpu index corresponding to a given MPIDR[23:0] @@ -66,4 +70,25 @@ static inline int get_logical_index(u32 mpidr) return -EINVAL; } +/* + * NOTE ! Assembly code relies on the following + * structure memory layout in order to carry out load + * multiple from its base address. 
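Because sleep.S loads mpidr_hash with a load-multiple from its base address, the field order and packing are effectively ABI; a small host-side check of the assumed layout (illustrative only, not kernel code):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct mpidr_hash_demo {        /* mirrors struct mpidr_hash above */
        uint32_t mask;
        uint32_t shift_aff[3];
        uint32_t bits;
};

int main(void)
{
        /* an ldmia from the base sees mask, then shift_aff[0..2], at 4-byte steps */
        assert(offsetof(struct mpidr_hash_demo, mask) == 0);
        assert(offsetof(struct mpidr_hash_demo, shift_aff) == 4);
        assert(offsetof(struct mpidr_hash_demo, bits) == 16);
        assert(sizeof(struct mpidr_hash_demo) == 20);
        return 0;
}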
For more + * information check arch/arm/kernel/sleep.S + */ +struct mpidr_hash { + u32 mask; /* used by sleep.S */ + u32 shift_aff[3]; /* used by sleep.S */ + u32 bits; +}; + +extern struct mpidr_hash mpidr_hash; + +static inline u32 mpidr_hash_size(void) +{ + return 1 << mpidr_hash.bits; +} + +extern int platform_can_cpu_hotplug(void); + #endif diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h index 18d169373612..0393fbab8dd5 100644 --- a/arch/arm/include/asm/smp_scu.h +++ b/arch/arm/include/asm/smp_scu.h @@ -23,10 +23,21 @@ static inline unsigned long scu_a9_get_base(void) return pa; } +#ifdef CONFIG_HAVE_ARM_SCU unsigned int scu_get_core_count(void __iomem *); int scu_power_mode(void __iomem *, unsigned int); +#else +static inline unsigned int scu_get_core_count(void __iomem *scu_base) +{ + return 0; +} +static inline int scu_power_mode(void __iomem *scu_base, unsigned int mode) +{ + return -EINVAL; +} +#endif -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) && defined(CONFIG_HAVE_ARM_SCU) void scu_enable(void __iomem *scu_base); #else static inline void scu_enable(void __iomem *scu_base) {} diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index 6220e9fdf4c7..4f2c28060c9a 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -46,7 +46,7 @@ static inline void dsb_sev(void) { #if __LINUX_ARM_ARCH__ >= 7 __asm__ __volatile__ ( - "dsb\n" + "dsb ishst\n" SEV ); #else @@ -97,19 +97,22 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) static inline int arch_spin_trylock(arch_spinlock_t *lock) { - unsigned long tmp; + unsigned long contended, res; u32 slock; - __asm__ __volatile__( -" ldrex %0, [%2]\n" -" subs %1, %0, %0, ror #16\n" -" addeq %0, %0, %3\n" -" strexeq %1, %0, [%2]" - : "=&r" (slock), "=&r" (tmp) - : "r" (&lock->slock), "I" (1 << TICKET_SHIFT) - : "cc"); - - if (tmp == 0) { + do { + __asm__ __volatile__( + " ldrex %0, [%3]\n" + " mov %2, #0\n" + " subs %1, %0, %0, ror #16\n" + " addeq %0, %0, %4\n" + " strexeq %2, %0, [%3]" + : "=&r" (slock), "=&r" (contended), "=&r" (res) + : "r" (&lock->slock), "I" (1 << TICKET_SHIFT) + : "cc"); + } while (res); + + if (!contended) { smp_mb(); return 1; } else { @@ -165,17 +168,20 @@ static inline void arch_write_lock(arch_rwlock_t *rw) static inline int arch_write_trylock(arch_rwlock_t *rw) { - unsigned long tmp; - - __asm__ __volatile__( -" ldrex %0, [%1]\n" -" teq %0, #0\n" -" strexeq %0, %2, [%1]" - : "=&r" (tmp) - : "r" (&rw->lock), "r" (0x80000000) - : "cc"); - - if (tmp == 0) { + unsigned long contended, res; + + do { + __asm__ __volatile__( + " ldrex %0, [%2]\n" + " mov %1, #0\n" + " teq %0, #0\n" + " strexeq %1, %3, [%2]" + : "=&r" (contended), "=&r" (res) + : "r" (&rw->lock), "r" (0x80000000) + : "cc"); + } while (res); + + if (!contended) { smp_mb(); return 1; } else { @@ -251,18 +257,26 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) static inline int arch_read_trylock(arch_rwlock_t *rw) { - unsigned long tmp, tmp2 = 1; - - __asm__ __volatile__( -" ldrex %0, [%2]\n" -" adds %0, %0, #1\n" -" strexpl %1, %0, [%2]\n" - : "=&r" (tmp), "+r" (tmp2) - : "r" (&rw->lock) - : "cc"); - - smp_mb(); - return tmp2 == 0; + unsigned long contended, res; + + do { + __asm__ __volatile__( + " ldrex %0, [%2]\n" + " mov %1, #0\n" + " adds %0, %0, #1\n" + " strexpl %1, %0, [%2]" + : "=&r" (contended), "=&r" (res) + : "r" (&rw->lock) + : "cc"); + } while (res); + + /* If the lock is negative, then it is already held for write. 
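The rewritten trylock routines above retry only on a failed exclusive store, never when the lock is genuinely held; the same rule sketched with C11 atomics standing in for ldrex/strex (not kernel code):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

static bool trylock_sketch(atomic_uint *lock)
{
        unsigned int expected = 0;

        /* a weak CAS may fail spuriously, like a failed strex: loop on that */
        while (!atomic_compare_exchange_weak(lock, &expected, 1)) {
                if (expected != 0)
                        return false;   /* genuinely contended: give up, don't spin */
                /* spurious failure: expected is still 0, try again */
        }
        return true;
}

int main(void)
{
        atomic_uint lock = 0;
        assert(trylock_sketch(&lock));      /* uncontended: acquired */
        assert(!trylock_sketch(&lock));     /* second attempt sees it held */
        return 0;
}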
*/ + if (contended < 0x80000000) { + smp_mb(); + return 1; + } else { + return 0; + } } /* read_can_lock - would read_trylock() succeed? */ diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h index 1c0a551ae375..cd20029bcd94 100644 --- a/arch/arm/include/asm/suspend.h +++ b/arch/arm/include/asm/suspend.h @@ -1,6 +1,11 @@ #ifndef __ASM_ARM_SUSPEND_H #define __ASM_ARM_SUSPEND_H +struct sleep_save_sp { + u32 *save_ptr_stash; + u32 save_ptr_stash_phys; +}; + extern void cpu_resume(void); extern int cpu_suspend(unsigned long, int (*)(unsigned long)); diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h index fa09e6b49bf1..c99e259469f7 100644 --- a/arch/arm/include/asm/switch_to.h +++ b/arch/arm/include/asm/switch_to.h @@ -4,6 +4,16 @@ #include <linux/thread_info.h> /* + * For v7 SMP cores running a preemptible kernel we may be pre-empted + * during a TLB maintenance operation, so execute an inner-shareable dsb + * to ensure that the maintenance completes in case we migrate to another + * CPU. + */ +#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7) +#define finish_arch_switch(prev) dsb(ish) +#endif + +/* * switch_to(prev, next) should switch from task `prev' to `next' * `prev' will never be the same as `next'. schedule() itself * contains the memory barrier to tell GCC not to cache `current'. diff --git a/arch/arm/include/asm/system_info.h b/arch/arm/include/asm/system_info.h index dfd386d0c022..720ea0320a6d 100644 --- a/arch/arm/include/asm/system_info.h +++ b/arch/arm/include/asm/system_info.h @@ -11,6 +11,7 @@ #define CPU_ARCH_ARMv5TEJ 7 #define CPU_ARCH_ARMv6 8 #define CPU_ARCH_ARMv7 9 +#define CPU_ARCH_ARMv7M 10 #ifndef __ASSEMBLY__ diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h index 21a23e378bbe..a3d61ad984af 100644 --- a/arch/arm/include/asm/system_misc.h +++ b/arch/arm/include/asm/system_misc.h @@ -6,11 +6,12 @@ #include <linux/compiler.h> #include <linux/linkage.h> #include <linux/irqflags.h> +#include <linux/reboot.h> extern void cpu_init(void); void soft_restart(unsigned long); -extern void (*arm_pm_restart)(char str, const char *cmd); +extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); extern void (*arm_pm_idle)(void); #define UDBG_UNDEFINED (1 << 0) diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 1995d1a84060..df5e13d64f2c 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -43,6 +43,16 @@ struct cpu_context_save { __u32 extra[2]; /* Xscale 'acc' register, etc */ }; +struct arm_restart_block { + union { + /* For user cache flushing */ + struct { + unsigned long start; + unsigned long end; + } cache; + }; +}; + /* * low level task data that entry.S needs immediate access to. * __switch_to() assumes cpu_context follows immediately after cpu_domain. 
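With arm_pm_restart now taking an enum reboot_mode, a machine-level restart hook would look roughly like this; my_soc_warm_reset_write() and my_soc_cold_reset_write() are hypothetical helpers, and the REBOOT_* values come from linux/reboot.h:

#include <linux/reboot.h>
#include <asm/system_misc.h>

static void my_soc_warm_reset_write(void) { /* hypothetical register poke */ }
static void my_soc_cold_reset_write(void) { /* hypothetical register poke */ }

static void my_soc_restart(enum reboot_mode mode, const char *cmd)
{
        /* 'cmd' may carry a bootloader argument; ignored in this sketch */
        if (mode == REBOOT_WARM)
                my_soc_warm_reset_write();
        else
                my_soc_cold_reset_write();
}

/* in the machine's init code: arm_pm_restart = my_soc_restart; */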
@@ -58,7 +68,7 @@ struct thread_info { struct cpu_context_save cpu_context; /* cpu context */ __u32 syscall; /* syscall number */ __u8 used_cp[16]; /* thread used copro */ - unsigned long tp_value; + unsigned long tp_value[2]; /* TLS registers */ #ifdef CONFIG_CRUNCH struct crunch_state crunchstate; #endif @@ -68,6 +78,7 @@ struct thread_info { unsigned long thumbee_state; /* ThumbEE Handler Base register */ #endif struct restart_block restart_block; + struct arm_restart_block arm_restart_block; }; #define INIT_THREAD_INFO(tsk) \ @@ -156,7 +167,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, #define TIF_USING_IWMMXT 17 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ #define TIF_RESTORE_SIGMASK 20 -#define TIF_SWITCH_MM 22 /* deferred switch_mm */ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index bdf2b8458ec1..0baf7f0d9394 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h @@ -43,6 +43,7 @@ struct mmu_gather { struct mm_struct *mm; unsigned int fullmm; struct vm_area_struct *vma; + unsigned long start, end; unsigned long range_start; unsigned long range_end; unsigned int nr; @@ -107,10 +108,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb) } static inline void -tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm) +tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) { tlb->mm = mm; - tlb->fullmm = fullmm; + tlb->fullmm = !(start | (end+1)); + tlb->start = start; + tlb->end = end; tlb->vma = NULL; tlb->max = ARRAY_SIZE(tlb->local); tlb->pages = tlb->local; @@ -204,6 +207,12 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, #endif } +static inline void +tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr) +{ + tlb_add_flush(tlb, addr); +} + #define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr) #define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr) #define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp) diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index a3625d141c1d..38960264040c 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -319,67 +319,110 @@ extern struct cpu_tlb_fns cpu_tlb; #define tlb_op(f, regs, arg) __tlb_op(f, "p15, 0, %0, " regs, arg) #define tlb_l2_op(f, regs, arg) __tlb_op(f, "p15, 1, %0, " regs, arg) -static inline void local_flush_tlb_all(void) +static inline void __local_flush_tlb_all(void) { const int zero = 0; const unsigned int __tlb_flag = __cpu_tlb_flags; - if (tlb_flag(TLB_WB)) - dsb(); - tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero); tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero); tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero); - tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero); +} + +static inline void local_flush_tlb_all(void) +{ + const int zero = 0; + const unsigned int __tlb_flag = __cpu_tlb_flags; + + if (tlb_flag(TLB_WB)) + dsb(nshst); + + __local_flush_tlb_all(); + tlb_op(TLB_V7_UIS_FULL, "c8, c7, 0", zero); if (tlb_flag(TLB_BARRIER)) { - dsb(); + dsb(nsh); isb(); } } -static inline void local_flush_tlb_mm(struct mm_struct *mm) +static inline void __flush_tlb_all(void) { const int zero = 0; - const int asid = ASID(mm); const unsigned int __tlb_flag = __cpu_tlb_flags; if (tlb_flag(TLB_WB)) - dsb(); + dsb(ishst); + + 
__local_flush_tlb_all(); + tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero); + + if (tlb_flag(TLB_BARRIER)) { + dsb(ish); + isb(); + } +} + +static inline void __local_flush_tlb_mm(struct mm_struct *mm) +{ + const int zero = 0; + const int asid = ASID(mm); + const unsigned int __tlb_flag = __cpu_tlb_flags; if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) { - if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) { + if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) { tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero); tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero); tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero); } - put_cpu(); } tlb_op(TLB_V6_U_ASID, "c8, c7, 2", asid); tlb_op(TLB_V6_D_ASID, "c8, c6, 2", asid); tlb_op(TLB_V6_I_ASID, "c8, c5, 2", asid); +} + +static inline void local_flush_tlb_mm(struct mm_struct *mm) +{ + const int asid = ASID(mm); + const unsigned int __tlb_flag = __cpu_tlb_flags; + + if (tlb_flag(TLB_WB)) + dsb(nshst); + + __local_flush_tlb_mm(mm); + tlb_op(TLB_V7_UIS_ASID, "c8, c7, 2", asid); + + if (tlb_flag(TLB_BARRIER)) + dsb(nsh); +} + +static inline void __flush_tlb_mm(struct mm_struct *mm) +{ + const unsigned int __tlb_flag = __cpu_tlb_flags; + + if (tlb_flag(TLB_WB)) + dsb(ishst); + + __local_flush_tlb_mm(mm); #ifdef CONFIG_ARM_ERRATA_720789 - tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", zero); + tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", 0); #else - tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", asid); + tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", ASID(mm)); #endif if (tlb_flag(TLB_BARRIER)) - dsb(); + dsb(ish); } static inline void -local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) +__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) { const int zero = 0; const unsigned int __tlb_flag = __cpu_tlb_flags; uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); - if (tlb_flag(TLB_WB)) - dsb(); - if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) && cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr); @@ -392,6 +435,36 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", uaddr); tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", uaddr); tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", uaddr); +} + +static inline void +local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) +{ + const unsigned int __tlb_flag = __cpu_tlb_flags; + + uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); + + if (tlb_flag(TLB_WB)) + dsb(nshst); + + __local_flush_tlb_page(vma, uaddr); + tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", uaddr); + + if (tlb_flag(TLB_BARRIER)) + dsb(nsh); +} + +static inline void +__flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) +{ + const unsigned int __tlb_flag = __cpu_tlb_flags; + + uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); + + if (tlb_flag(TLB_WB)) + dsb(ishst); + + __local_flush_tlb_page(vma, uaddr); #ifdef CONFIG_ARM_ERRATA_720789 tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", uaddr & PAGE_MASK); #else @@ -399,19 +472,14 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) #endif if (tlb_flag(TLB_BARRIER)) - dsb(); + dsb(ish); } -static inline void local_flush_tlb_kernel_page(unsigned long kaddr) +static inline void __local_flush_tlb_kernel_page(unsigned long kaddr) { const int zero = 0; const unsigned int __tlb_flag = __cpu_tlb_flags; - kaddr &= PAGE_MASK; - - if (tlb_flag(TLB_WB)) - dsb(); - tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr); tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr); tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr); 
@@ -421,38 +489,103 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr) tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", kaddr); tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr); tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr); +} + +static inline void local_flush_tlb_kernel_page(unsigned long kaddr) +{ + const unsigned int __tlb_flag = __cpu_tlb_flags; + + kaddr &= PAGE_MASK; + + if (tlb_flag(TLB_WB)) + dsb(nshst); + + __local_flush_tlb_kernel_page(kaddr); + tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", kaddr); + + if (tlb_flag(TLB_BARRIER)) { + dsb(nsh); + isb(); + } +} + +static inline void __flush_tlb_kernel_page(unsigned long kaddr) +{ + const unsigned int __tlb_flag = __cpu_tlb_flags; + + kaddr &= PAGE_MASK; + + if (tlb_flag(TLB_WB)) + dsb(ishst); + + __local_flush_tlb_kernel_page(kaddr); tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr); if (tlb_flag(TLB_BARRIER)) { - dsb(); + dsb(ish); isb(); } } +/* + * Branch predictor maintenance is paired with full TLB invalidation, so + * there is no need for any barriers here. + */ +static inline void __local_flush_bp_all(void) +{ + const int zero = 0; + const unsigned int __tlb_flag = __cpu_tlb_flags; + + if (tlb_flag(TLB_V6_BP)) + asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero)); +} + static inline void local_flush_bp_all(void) { const int zero = 0; const unsigned int __tlb_flag = __cpu_tlb_flags; + __local_flush_bp_all(); if (tlb_flag(TLB_V7_UIS_BP)) - asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero)); - else if (tlb_flag(TLB_V6_BP)) asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero)); +} - if (tlb_flag(TLB_BARRIER)) - isb(); +static inline void __flush_bp_all(void) +{ + const int zero = 0; + const unsigned int __tlb_flag = __cpu_tlb_flags; + + __local_flush_bp_all(); + if (tlb_flag(TLB_V7_UIS_BP)) + asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero)); } +#include <asm/cputype.h> #ifdef CONFIG_ARM_ERRATA_798181 +static inline int erratum_a15_798181(void) +{ + unsigned int midr = read_cpuid_id(); + + /* Cortex-A15 r0p0..r3p2 affected */ + if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2) + return 0; + return 1; +} + static inline void dummy_flush_tlb_a15_erratum(void) { /* * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0. 
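A worked reading of the erratum_a15_798181() test above, as a host-side check; the MIDR values are written out for illustration (Cortex-A15 r0p0..r3p2 is the affected range):

#include <assert.h>
#include <stdint.h>

static int affected(uint32_t midr)
{
        /* same test as the kernel helper above */
        if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
                return 0;
        return 1;
}

int main(void)
{
        assert(affected(0x410fc0f0));    /* Cortex-A15 r0p0 */
        assert(affected(0x413fc0f2));    /* Cortex-A15 r3p2, last affected */
        assert(!affected(0x413fc0f3));   /* r3p3: newer than r3p2 */
        assert(!affected(0x410fc073));   /* e.g. Cortex-A7: different part number */
        return 0;
}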
*/ asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0)); - dsb(); + dsb(ish); } #else +static inline int erratum_a15_798181(void) +{ + return 0; +} + static inline void dummy_flush_tlb_a15_erratum(void) { } @@ -479,7 +612,7 @@ static inline void flush_pmd_entry(void *pmd) tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd); if (tlb_flag(TLB_WB)) - dsb(); + dsb(ishst); } static inline void clean_pmd_entry(void *pmd) @@ -535,8 +668,33 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, } #endif +#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) + #endif -#endif /* CONFIG_MMU */ +#elif defined(CONFIG_SMP) /* !CONFIG_MMU */ + +#ifndef __ASSEMBLY__ + +#include <linux/mm_types.h> + +static inline void local_flush_tlb_all(void) { } +static inline void local_flush_tlb_mm(struct mm_struct *mm) { } +static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) { } +static inline void local_flush_tlb_kernel_page(unsigned long kaddr) { } +static inline void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { } +static inline void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) { } +static inline void local_flush_bp_all(void) { } + +extern void flush_tlb_all(void); +extern void flush_tlb_mm(struct mm_struct *mm); +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr); +extern void flush_tlb_kernel_page(unsigned long kaddr); +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); +extern void flush_bp_all(void); +#endif /* __ASSEMBLY__ */ + +#endif #endif diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h index 73409e6c0251..83259b873333 100644 --- a/arch/arm/include/asm/tls.h +++ b/arch/arm/include/asm/tls.h @@ -2,27 +2,30 @@ #define __ASMARM_TLS_H #ifdef __ASSEMBLY__ - .macro set_tls_none, tp, tmp1, tmp2 +#include <asm/asm-offsets.h> + .macro switch_tls_none, base, tp, tpuser, tmp1, tmp2 .endm - .macro set_tls_v6k, tp, tmp1, tmp2 + .macro switch_tls_v6k, base, tp, tpuser, tmp1, tmp2 + mrc p15, 0, \tmp2, c13, c0, 2 @ get the user r/w register mcr p15, 0, \tp, c13, c0, 3 @ set TLS register - mov \tmp1, #0 - mcr p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register + mcr p15, 0, \tpuser, c13, c0, 2 @ and the user r/w register + str \tmp2, [\base, #TI_TP_VALUE + 4] @ save it .endm - .macro set_tls_v6, tp, tmp1, tmp2 + .macro switch_tls_v6, base, tp, tpuser, tmp1, tmp2 ldr \tmp1, =elf_hwcap ldr \tmp1, [\tmp1, #0] mov \tmp2, #0xffff0fff tst \tmp1, #HWCAP_TLS @ hardware TLS available? 
- mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register - movne \tmp1, #0 - mcrne p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register streq \tp, [\tmp2, #-15] @ set TLS value at 0xffff0ff0 + mrcne p15, 0, \tmp2, c13, c0, 2 @ get the user r/w register + mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register + mcrne p15, 0, \tpuser, c13, c0, 2 @ set user r/w register + strne \tmp2, [\base, #TI_TP_VALUE + 4] @ save it .endm - .macro set_tls_software, tp, tmp1, tmp2 + .macro switch_tls_software, base, tp, tpuser, tmp1, tmp2 mov \tmp1, #0xffff0fff str \tp, [\tmp1, #-15] @ set TLS value at 0xffff0ff0 .endm @@ -31,19 +34,30 @@ #ifdef CONFIG_TLS_REG_EMUL #define tls_emu 1 #define has_tls_reg 1 -#define set_tls set_tls_none +#define switch_tls switch_tls_none #elif defined(CONFIG_CPU_V6) #define tls_emu 0 #define has_tls_reg (elf_hwcap & HWCAP_TLS) -#define set_tls set_tls_v6 +#define switch_tls switch_tls_v6 #elif defined(CONFIG_CPU_32v6K) #define tls_emu 0 #define has_tls_reg 1 -#define set_tls set_tls_v6k +#define switch_tls switch_tls_v6k #else #define tls_emu 0 #define has_tls_reg 0 -#define set_tls set_tls_software +#define switch_tls switch_tls_software #endif +#ifndef __ASSEMBLY__ +static inline unsigned long get_tpuser(void) +{ + unsigned long reg = 0; + + if (has_tls_reg && !tls_emu) + __asm__("mrc p15, 0, %0, c13, c0, 2" : "=r" (reg)); + + return reg; +} +#endif #endif /* __ASMARM_TLS_H */ diff --git a/arch/arm/include/asm/types.h b/arch/arm/include/asm/types.h new file mode 100644 index 000000000000..a53cdb8f068c --- /dev/null +++ b/arch/arm/include/asm/types.h @@ -0,0 +1,40 @@ +#ifndef _ASM_TYPES_H +#define _ASM_TYPES_H + +#include <asm-generic/int-ll64.h> + +/* + * The C99 types uintXX_t that are usually defined in 'stdint.h' are not as + * unambiguous on ARM as you would expect. For the types below, there is a + * difference on ARM between GCC built for bare metal ARM, GCC built for glibc + * and the kernel itself, which results in build errors if you try to build with + * -ffreestanding and include 'stdint.h' (such as when you include 'arm_neon.h' + * in order to use NEON intrinsics) + * + * As the typedefs for these types in 'stdint.h' are based on builtin defines + * supplied by GCC, we can tweak these to align with the kernel's idea of those + * types, so 'linux/types.h' and 'stdint.h' can be safely included from the same + * source file (provided that -ffreestanding is used). 
+ * + * int32_t uint32_t uintptr_t + * bare metal GCC long unsigned long unsigned int + * glibc GCC int unsigned int unsigned int + * kernel int unsigned int unsigned long + */ + +#ifdef __INT32_TYPE__ +#undef __INT32_TYPE__ +#define __INT32_TYPE__ int +#endif + +#ifdef __UINT32_TYPE__ +#undef __UINT32_TYPE__ +#define __UINT32_TYPE__ unsigned int +#endif + +#ifdef __UINTPTR_TYPE__ +#undef __UINTPTR_TYPE__ +#define __UINTPTR_TYPE__ unsigned long +#endif + +#endif /* _ASM_TYPES_H */ diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 7e1f76027f66..72abdc541f38 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -19,6 +19,13 @@ #include <asm/unified.h> #include <asm/compiler.h> +#if __LINUX_ARM_ARCH__ < 6 +#include <asm-generic/uaccess-unaligned.h> +#else +#define __get_user_unaligned __get_user +#define __put_user_unaligned __put_user +#endif + #define VERIFY_READ 0 #define VERIFY_WRITE 1 diff --git a/arch/arm/include/asm/v7m.h b/arch/arm/include/asm/v7m.h new file mode 100644 index 000000000000..615781c61627 --- /dev/null +++ b/arch/arm/include/asm/v7m.h @@ -0,0 +1,56 @@ +/* + * Common defines for v7m cpus + */ +#define V7M_SCS_ICTR IOMEM(0xe000e004) +#define V7M_SCS_ICTR_INTLINESNUM_MASK 0x0000000f + +#define BASEADDR_V7M_SCB IOMEM(0xe000ed00) + +#define V7M_SCB_CPUID 0x00 + +#define V7M_SCB_ICSR 0x04 +#define V7M_SCB_ICSR_PENDSVSET (1 << 28) +#define V7M_SCB_ICSR_PENDSVCLR (1 << 27) +#define V7M_SCB_ICSR_RETTOBASE (1 << 11) + +#define V7M_SCB_VTOR 0x08 + +#define V7M_SCB_AIRCR 0x0c +#define V7M_SCB_AIRCR_VECTKEY (0x05fa << 16) +#define V7M_SCB_AIRCR_SYSRESETREQ (1 << 2) + +#define V7M_SCB_SCR 0x10 +#define V7M_SCB_SCR_SLEEPDEEP (1 << 2) + +#define V7M_SCB_CCR 0x14 +#define V7M_SCB_CCR_STKALIGN (1 << 9) + +#define V7M_SCB_SHPR2 0x1c +#define V7M_SCB_SHPR3 0x20 + +#define V7M_SCB_SHCSR 0x24 +#define V7M_SCB_SHCSR_USGFAULTENA (1 << 18) +#define V7M_SCB_SHCSR_BUSFAULTENA (1 << 17) +#define V7M_SCB_SHCSR_MEMFAULTENA (1 << 16) + +#define V7M_xPSR_FRAMEPTRALIGN 0x00000200 +#define V7M_xPSR_EXCEPTIONNO 0x000001ff + +/* + * When branching to an address that has bits [31:28] == 0xf an exception return + * occurs. Bits [27:5] are reserved (SBOP). If the processor implements the FP + * extension Bit [4] defines if the exception frame has space allocated for FP + * state information, SBOP otherwise. Bit [3] defines the mode that is returned + * to (0 -> handler mode; 1 -> thread mode). Bit [2] defines which sp is used + * (0 -> msp; 1 -> psp). Bits [1:0] are fixed to 0b01. + */ +#define EXC_RET_STACK_MASK 0x00000004 +#define EXC_RET_THREADMODE_PROCESSSTACK 0xfffffffd + +#ifndef __ASSEMBLY__ + +enum reboot_mode; + +void armv7m_restart(enum reboot_mode mode, const char *cmd); + +#endif /* __ASSEMBLY__ */ diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h index 50af92bac737..4371f45c5784 100644 --- a/arch/arm/include/asm/virt.h +++ b/arch/arm/include/asm/virt.h @@ -29,6 +29,7 @@ #define BOOT_CPU_MODE_MISMATCH PSR_N_BIT #ifndef __ASSEMBLY__ +#include <asm/cacheflush.h> #ifdef CONFIG_ARM_VIRT_EXT /* @@ -41,10 +42,21 @@ */ extern int __boot_cpu_mode; +static inline void sync_boot_mode(void) +{ + /* + * As secondaries write to __boot_cpu_mode with caches disabled, we + * must flush the corresponding cache entries to ensure the visibility + * of their writes. 
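A hedged sketch of what the asm/types.h overrides above are for: a single kernel source file, built with -ffreestanding and NEON enabled, can mix kernel and C99/NEON types without conflicting typedefs (sum_lanes() is a made-up example):

#include <linux/types.h>    /* kernel types; pulls in the asm/types.h overrides */
#include <arm_neon.h>       /* includes stdint.h; uint32_t etc. now agree */

static u32 sum_lanes(uint32x4_t v)
{
        return vgetq_lane_u32(v, 0) + vgetq_lane_u32(v, 1) +
               vgetq_lane_u32(v, 2) + vgetq_lane_u32(v, 3);
}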
+ */ + sync_cache_r(&__boot_cpu_mode); +} + void __hyp_set_vectors(unsigned long phys_vector_base); unsigned long __hyp_get_vectors(void); #else #define __boot_cpu_mode (SVC_MODE) +#define sync_boot_mode() #endif #ifndef ZIMAGE diff --git a/arch/arm/include/asm/xen/hypercall.h b/arch/arm/include/asm/xen/hypercall.h index 799f42ecca63..7704e28c3483 100644 --- a/arch/arm/include/asm/xen/hypercall.h +++ b/arch/arm/include/asm/xen/hypercall.h @@ -47,6 +47,7 @@ unsigned long HYPERVISOR_hvm_op(int op, void *arg); int HYPERVISOR_memory_op(unsigned int cmd, void *arg); int HYPERVISOR_physdev_op(int cmd, void *arg); int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args); +int HYPERVISOR_tmem_op(void *arg); static inline void MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va, diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h index 30cdacb675af..359a7b50b158 100644 --- a/arch/arm/include/asm/xen/page.h +++ b/arch/arm/include/asm/xen/page.h @@ -1,7 +1,6 @@ #ifndef _ASM_ARM_XEN_PAGE_H #define _ASM_ARM_XEN_PAGE_H -#include <asm/mach/map.h> #include <asm/page.h> #include <asm/pgtable.h> @@ -88,6 +87,6 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) return __set_phys_to_machine(pfn, mfn); } -#define xen_remap(cookie, size) __arm_ioremap((cookie), (size), MT_MEMORY); +#define xen_remap(cookie, size) ioremap_cached((cookie), (size)); #endif /* _ASM_ARM_XEN_PAGE_H */ diff --git a/arch/arm/include/asm/xor.h b/arch/arm/include/asm/xor.h index 7604673dc427..4ffb26d4cad8 100644 --- a/arch/arm/include/asm/xor.h +++ b/arch/arm/include/asm/xor.h @@ -7,7 +7,10 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ +#include <linux/hardirq.h> #include <asm-generic/xor.h> +#include <asm/hwcap.h> +#include <asm/neon.h> #define __XOR(a1, a2) a1 ^= a2 @@ -138,4 +141,74 @@ static struct xor_block_template xor_block_arm4regs = { xor_speed(&xor_block_arm4regs); \ xor_speed(&xor_block_8regs); \ xor_speed(&xor_block_32regs); \ + NEON_TEMPLATES; \ } while (0) + +#ifdef CONFIG_KERNEL_MODE_NEON + +extern struct xor_block_template const xor_block_neon_inner; + +static void +xor_neon_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +{ + if (in_interrupt()) { + xor_arm4regs_2(bytes, p1, p2); + } else { + kernel_neon_begin(); + xor_block_neon_inner.do_2(bytes, p1, p2); + kernel_neon_end(); + } +} + +static void +xor_neon_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3) +{ + if (in_interrupt()) { + xor_arm4regs_3(bytes, p1, p2, p3); + } else { + kernel_neon_begin(); + xor_block_neon_inner.do_3(bytes, p1, p2, p3); + kernel_neon_end(); + } +} + +static void +xor_neon_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4) +{ + if (in_interrupt()) { + xor_arm4regs_4(bytes, p1, p2, p3, p4); + } else { + kernel_neon_begin(); + xor_block_neon_inner.do_4(bytes, p1, p2, p3, p4); + kernel_neon_end(); + } +} + +static void +xor_neon_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4, unsigned long *p5) +{ + if (in_interrupt()) { + xor_arm4regs_5(bytes, p1, p2, p3, p4, p5); + } else { + kernel_neon_begin(); + xor_block_neon_inner.do_5(bytes, p1, p2, p3, p4, p5); + kernel_neon_end(); + } +} + +static struct xor_block_template xor_block_neon = { + .name = "neon", + .do_2 = xor_neon_2, + .do_3 = xor_neon_3, + .do_4 = xor_neon_4, + .do_5 = xor_neon_5 +}; + 
+#define NEON_TEMPLATES \ + do { if (cpu_has_neon()) xor_speed(&xor_block_neon); } while (0) +#else +#define NEON_TEMPLATES +#endif diff --git a/arch/arm/include/debug/8250.S b/arch/arm/include/debug/8250.S new file mode 100644 index 000000000000..7a2baf913aa0 --- /dev/null +++ b/arch/arm/include/debug/8250.S @@ -0,0 +1,54 @@ +/* + * arch/arm/include/debug/8250.S + * + * Copyright (C) 1994-2013 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include <linux/serial_reg.h> + + .macro addruart, rp, rv, tmp + ldr \rp, =CONFIG_DEBUG_UART_PHYS + ldr \rv, =CONFIG_DEBUG_UART_VIRT + .endm + +#ifdef CONFIG_DEBUG_UART_8250_WORD + .macro store, rd, rx:vararg + str \rd, \rx + .endm + + .macro load, rd, rx:vararg + ldr \rd, \rx + .endm +#else + .macro store, rd, rx:vararg + strb \rd, \rx + .endm + + .macro load, rd, rx:vararg + ldrb \rd, \rx + .endm +#endif + +#define UART_SHIFT CONFIG_DEBUG_UART_8250_SHIFT + + .macro senduart,rd,rx + store \rd, [\rx, #UART_TX << UART_SHIFT] + .endm + + .macro busyuart,rd,rx +1002: load \rd, [\rx, #UART_LSR << UART_SHIFT] + and \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE + teq \rd, #UART_LSR_TEMT | UART_LSR_THRE + bne 1002b + .endm + + .macro waituart,rd,rx +#ifdef CONFIG_DEBUG_UART_8250_FLOW_CONTROL +1001: load \rd, [\rx, #UART_MSR << UART_SHIFT] + tst \rd, #UART_MSR_CTS + beq 1001b +#endif + .endm diff --git a/arch/arm/include/debug/8250_32.S b/arch/arm/include/debug/8250_32.S deleted file mode 100644 index 8db01eeabbb4..000000000000 --- a/arch/arm/include/debug/8250_32.S +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright (c) 2011 Picochip Ltd., Jamie Iles - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Derived from arch/arm/mach-davinci/include/mach/debug-macro.S to use 32-bit - * accesses to the 8250. - */ - -#include <linux/serial_reg.h> - - .macro senduart,rd,rx - str \rd, [\rx, #UART_TX << UART_SHIFT] - .endm - - .macro busyuart,rd,rx -1002: ldr \rd, [\rx, #UART_LSR << UART_SHIFT] - and \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE - teq \rd, #UART_LSR_TEMT | UART_LSR_THRE - bne 1002b - .endm - - /* The UART's don't have any flow control IO's wired up. */ - .macro waituart,rd,rx - .endm diff --git a/arch/arm/include/debug/bcm2835.S b/arch/arm/include/debug/bcm2835.S deleted file mode 100644 index aed9199bd847..000000000000 --- a/arch/arm/include/debug/bcm2835.S +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Debugging macro include header - * - * Copyright (C) 2010 Broadcom - * Copyright (C) 1994-1999 Russell King - * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - */ - -#define BCM2835_DEBUG_PHYS 0x20201000 -#define BCM2835_DEBUG_VIRT 0xf0201000 - - .macro addruart, rp, rv, tmp - ldr \rp, =BCM2835_DEBUG_PHYS - ldr \rv, =BCM2835_DEBUG_VIRT - .endm - -#include <asm/hardware/debug-pl01x.S> diff --git a/arch/arm/include/debug/cns3xxx.S b/arch/arm/include/debug/cns3xxx.S deleted file mode 100644 index d04c150baa1c..000000000000 --- a/arch/arm/include/debug/cns3xxx.S +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Debugging macro include header - * - * Copyright 1994-1999 Russell King - * Copyright 2008 Cavium Networks - * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks - * - * This file is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, Version 2, as - * published by the Free Software Foundation. - */ - - .macro addruart,rp,rv,tmp - mov \rp, #0x00009000 - orr \rv, \rp, #0xf0000000 @ virtual base - orr \rp, \rp, #0x10000000 - .endm - -#include <asm/hardware/debug-pl01x.S> diff --git a/arch/arm/include/debug/highbank.S b/arch/arm/include/debug/highbank.S deleted file mode 100644 index 8cad4322a5a2..000000000000 --- a/arch/arm/include/debug/highbank.S +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Debugging macro include header - * - * Copyright (C) 1994-1999 Russell King - * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - - .macro addruart,rp,rv,tmp - ldr \rv, =0xfee36000 - ldr \rp, =0xfff36000 - .endm - -#include <asm/hardware/debug-pl01x.S> diff --git a/arch/arm/include/debug/imx-uart.h b/arch/arm/include/debug/imx-uart.h index 91d38e38a0b4..29da84e183f4 100644 --- a/arch/arm/include/debug/imx-uart.h +++ b/arch/arm/include/debug/imx-uart.h @@ -65,6 +65,14 @@ #define IMX6Q_UART_BASE_ADDR(n) IMX6Q_UART##n##_BASE_ADDR #define IMX6Q_UART_BASE(n) IMX6Q_UART_BASE_ADDR(n) +#define IMX6SL_UART1_BASE_ADDR 0x02020000 +#define IMX6SL_UART2_BASE_ADDR 0x02024000 +#define IMX6SL_UART3_BASE_ADDR 0x02034000 +#define IMX6SL_UART4_BASE_ADDR 0x02038000 +#define IMX6SL_UART5_BASE_ADDR 0x02018000 +#define IMX6SL_UART_BASE_ADDR(n) IMX6SL_UART##n##_BASE_ADDR +#define IMX6SL_UART_BASE(n) IMX6SL_UART_BASE_ADDR(n) + #define IMX_DEBUG_UART_BASE(soc) soc##_UART_BASE(CONFIG_DEBUG_IMX_UART_PORT) #ifdef CONFIG_DEBUG_IMX1_UART @@ -83,6 +91,8 @@ #define UART_PADDR IMX_DEBUG_UART_BASE(IMX53) #elif defined(CONFIG_DEBUG_IMX6Q_UART) #define UART_PADDR IMX_DEBUG_UART_BASE(IMX6Q) +#elif defined(CONFIG_DEBUG_IMX6SL_UART) +#define UART_PADDR IMX_DEBUG_UART_BASE(IMX6SL) #endif #endif /* __DEBUG_IMX_UART_H */ diff --git a/arch/arm/include/debug/msm.S b/arch/arm/include/debug/msm.S new file mode 100644 index 000000000000..9166e1bc470e --- /dev/null +++ b/arch/arm/include/debug/msm.S @@ -0,0 +1,93 @@ +/* + * + * Copyright (C) 2007 Google, Inc. + * Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * Author: Brian Swetland <swetland@google.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#if defined(CONFIG_ARCH_MSM7X00A) || defined(CONFIG_ARCH_QSD8X50) +#define MSM_UART1_PHYS 0xA9A00000 +#define MSM_UART2_PHYS 0xA9B00000 +#define MSM_UART3_PHYS 0xA9C00000 +#elif defined(CONFIG_ARCH_MSM7X30) +#define MSM_UART1_PHYS 0xACA00000 +#define MSM_UART2_PHYS 0xACB00000 +#define MSM_UART3_PHYS 0xACC00000 +#endif + +#if defined(CONFIG_DEBUG_MSM_UART1) +#define MSM_DEBUG_UART_BASE 0xE1000000 +#define MSM_DEBUG_UART_PHYS MSM_UART1_PHYS +#elif defined(CONFIG_DEBUG_MSM_UART2) +#define MSM_DEBUG_UART_BASE 0xE1000000 +#define MSM_DEBUG_UART_PHYS MSM_UART2_PHYS +#elif defined(CONFIG_DEBUG_MSM_UART3) +#define MSM_DEBUG_UART_BASE 0xE1000000 +#define MSM_DEBUG_UART_PHYS MSM_UART3_PHYS +#endif + +#ifdef CONFIG_DEBUG_MSM8660_UART +#define MSM_DEBUG_UART_BASE 0xF0040000 +#define MSM_DEBUG_UART_PHYS 0x19C40000 +#endif + +#ifdef CONFIG_DEBUG_MSM8960_UART +#define MSM_DEBUG_UART_BASE 0xF0040000 +#define MSM_DEBUG_UART_PHYS 0x16440000 +#endif + + .macro addruart, rp, rv, tmp +#ifdef MSM_DEBUG_UART_PHYS + ldr \rp, =MSM_DEBUG_UART_PHYS + ldr \rv, =MSM_DEBUG_UART_BASE +#endif + .endm + + .macro senduart, rd, rx +#ifdef CONFIG_MSM_HAS_DEBUG_UART_HS + @ Write the 1 character to UARTDM_TF + str \rd, [\rx, #0x70] +#else + str \rd, [\rx, #0x0C] +#endif + .endm + + .macro waituart, rd, rx +#ifdef CONFIG_MSM_HAS_DEBUG_UART_HS + @ check for TX_EMT in UARTDM_SR + ldr \rd, [\rx, #0x08] + tst \rd, #0x08 + bne 1002f + @ wait for TXREADY in UARTDM_ISR +1001: ldr \rd, [\rx, #0x14] + tst \rd, #0x80 + beq 1001b +1002: + @ Clear TX_READY by writing to the UARTDM_CR register + mov \rd, #0x300 + str \rd, [\rx, #0x10] + @ Write 0x1 to NCF register + mov \rd, #0x1 + str \rd, [\rx, #0x40] + @ UARTDM reg. Read to induce delay + ldr \rd, [\rx, #0x08] +#else + @ wait for TX_READY +1001: ldr \rd, [\rx, #0x08] + tst \rd, #0x04 + beq 1001b +#endif + .endm + + .macro busyuart, rd, rx + .endm diff --git a/arch/arm/include/debug/mvebu.S b/arch/arm/include/debug/mvebu.S deleted file mode 100644 index df191afa3be1..000000000000 --- a/arch/arm/include/debug/mvebu.S +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Early serial output macro for Marvell SoC - * - * Copyright (C) 2012 Marvell - * - * Lior Amsalem <alior@marvell.com> - * Gregory Clement <gregory.clement@free-electrons.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. -*/ - -#define ARMADA_370_XP_REGS_PHYS_BASE 0xd0000000 -#define ARMADA_370_XP_REGS_VIRT_BASE 0xfec00000 - - .macro addruart, rp, rv, tmp - ldr \rp, =ARMADA_370_XP_REGS_PHYS_BASE - ldr \rv, =ARMADA_370_XP_REGS_VIRT_BASE - orr \rp, \rp, #0x00012000 - orr \rv, \rv, #0x00012000 - .endm - -#define UART_SHIFT 2 -#include <asm/hardware/debug-8250.S> diff --git a/arch/arm/include/debug/mxs.S b/arch/arm/include/debug/mxs.S deleted file mode 100644 index d86951551ca1..000000000000 --- a/arch/arm/include/debug/mxs.S +++ /dev/null @@ -1,27 +0,0 @@ -/* arch/arm/mach-mxs/include/mach/debug-macro.S - * - * Debugging macro include header - * - * Copyright (C) 1994-1999 Russell King - * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - */ - -#ifdef CONFIG_DEBUG_IMX23_UART -#define UART_PADDR 0x80070000 -#elif defined (CONFIG_DEBUG_IMX28_UART) -#define UART_PADDR 0x80074000 -#endif - -#define UART_VADDR 0xfe100000 - - .macro addruart, rp, rv, tmp - ldr \rp, =UART_PADDR @ physical - ldr \rv, =UART_VADDR @ virtual - .endm - -#include <asm/hardware/debug-pl01x.S> diff --git a/arch/arm/include/debug/nomadik.S b/arch/arm/include/debug/nomadik.S deleted file mode 100644 index 735417922ce2..000000000000 --- a/arch/arm/include/debug/nomadik.S +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Debugging macro include header - * - * Copyright (C) 1994-1999 Russell King - * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * -*/ - - .macro addruart, rp, rv, tmp - mov \rp, #0x00100000 - add \rp, \rp, #0x000fb000 - add \rv, \rp, #0xf0000000 @ virtual base - add \rp, \rp, #0x10000000 @ physical base address - .endm - -#include <asm/hardware/debug-pl01x.S> diff --git a/arch/arm/include/debug/picoxcell.S b/arch/arm/include/debug/picoxcell.S deleted file mode 100644 index bc1f07c49cd4..000000000000 --- a/arch/arm/include/debug/picoxcell.S +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Copyright (c) 2011 Picochip Ltd., Jamie Iles - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - */ - -#define UART_SHIFT 2 -#define PICOXCELL_UART1_BASE 0x80230000 -#define PHYS_TO_IO(x) (((x) & 0x00ffffff) | 0xfe000000) - - .macro addruart, rp, rv, tmp - ldr \rv, =PHYS_TO_IO(PICOXCELL_UART1_BASE) - ldr \rp, =PICOXCELL_UART1_BASE - .endm - -#include "8250_32.S" diff --git a/arch/arm/include/asm/hardware/debug-pl01x.S b/arch/arm/include/debug/pl01x.S index f9fd083eff63..37c6895b87e6 100644 --- a/arch/arm/include/asm/hardware/debug-pl01x.S +++ b/arch/arm/include/debug/pl01x.S @@ -1,4 +1,4 @@ -/* arch/arm/include/asm/hardware/debug-pl01x.S +/* arch/arm/include/debug/pl01x.S * * Debugging macro include header * @@ -12,6 +12,13 @@ */ #include <linux/amba/serial.h> +#ifdef CONFIG_DEBUG_UART_PHYS + .macro addruart, rp, rv, tmp + ldr \rp, =CONFIG_DEBUG_UART_PHYS + ldr \rv, =CONFIG_DEBUG_UART_VIRT + .endm +#endif + .macro senduart,rd,rx strb \rd, [\rx, #UART01x_DR] .endm diff --git a/arch/arm/include/debug/pxa.S b/arch/arm/include/debug/pxa.S deleted file mode 100644 index e1e795aa3d7f..000000000000 --- a/arch/arm/include/debug/pxa.S +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Early serial output macro for Marvell PXA/MMP SoC - * - * Copyright (C) 1994-1999 Russell King - * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks - * - * Copyright (C) 2013 Haojian Zhuang - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
-*/
-
-#if defined(CONFIG_DEBUG_PXA_UART1)
-#define PXA_UART_REG_PHYS_BASE 0x40100000
-#define PXA_UART_REG_VIRT_BASE 0xf2100000
-#elif defined(CONFIG_DEBUG_MMP_UART2)
-#define PXA_UART_REG_PHYS_BASE 0xd4017000
-#define PXA_UART_REG_VIRT_BASE 0xfe017000
-#elif defined(CONFIG_DEBUG_MMP_UART3)
-#define PXA_UART_REG_PHYS_BASE 0xd4018000
-#define PXA_UART_REG_VIRT_BASE 0xfe018000
-#else
-#error "Select uart for DEBUG_LL"
-#endif
-
-	.macro addruart, rp, rv, tmp
-	ldr \rp, =PXA_UART_REG_PHYS_BASE
-	ldr \rv, =PXA_UART_REG_VIRT_BASE
-	.endm
-
-#define UART_SHIFT 2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/include/debug/socfpga.S b/arch/arm/include/debug/socfpga.S
deleted file mode 100644
index 966b2f994946..000000000000
--- a/arch/arm/include/debug/socfpga.S
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#define UART_SHIFT 2
-#define DEBUG_LL_UART_OFFSET 0x00002000
-
-	.macro addruart, rp, rv, tmp
-	mov \rp, #DEBUG_LL_UART_OFFSET
-	orr \rp, \rp, #0x00c00000
-	orr \rv, \rp, #0xfe000000 @ virtual base
-	orr \rp, \rp, #0xff000000 @ physical base
-	.endm
-
-#include "8250_32.S"
-
diff --git a/arch/arm/include/debug/sti.S b/arch/arm/include/debug/sti.S
new file mode 100644
index 000000000000..e3aa58ff1776
--- /dev/null
+++ b/arch/arm/include/debug/sti.S
@@ -0,0 +1,61 @@
+/*
+ * arch/arm/include/debug/sti.S
+ *
+ * Debugging macro include header
+ * Copyright (C) 2013 STMicroelectronics (R&D) Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define STIH41X_COMMS_BASE 0xfed00000
+#define STIH41X_ASC2_BASE (STIH41X_COMMS_BASE+0x32000)
+
+#define STIH41X_SBC_LPM_BASE 0xfe400000
+#define STIH41X_SBC_COMMS_BASE (STIH41X_SBC_LPM_BASE + 0x100000)
+#define STIH41X_SBC_ASC1_BASE (STIH41X_SBC_COMMS_BASE + 0x31000)
+
+
+#define VIRT_ADDRESS(x) (x - 0x1000000)
+
+#if IS_ENABLED(CONFIG_STIH41X_DEBUG_ASC2)
+#define DEBUG_LL_UART_BASE STIH41X_ASC2_BASE
+#endif
+
+#if IS_ENABLED(CONFIG_STIH41X_DEBUG_SBC_ASC1)
+#define DEBUG_LL_UART_BASE STIH41X_SBC_ASC1_BASE
+#endif
+
+#ifndef DEBUG_LL_UART_BASE
+#error "DEBUG UART is not Configured"
+#endif
+
+#define ASC_TX_BUF_OFF 0x04
+#define ASC_CTRL_OFF 0x0c
+#define ASC_STA_OFF 0x14
+
+#define ASC_STA_TX_FULL (1<<9)
+#define ASC_STA_TX_EMPTY (1<<1)
+
+
+	.macro addruart, rp, rv, tmp
+	ldr \rp, =DEBUG_LL_UART_BASE @ physical base
+	ldr \rv, =VIRT_ADDRESS(DEBUG_LL_UART_BASE) @ virt base
+	.endm
+
+	.macro senduart,rd,rx
+	strb \rd, [\rx, #ASC_TX_BUF_OFF]
+	.endm
+
+	.macro waituart,rd,rx
+1001:	ldr \rd, [\rx, #ASC_STA_OFF]
+	tst \rd, #ASC_STA_TX_FULL
+	bne 1001b
+	.endm
+
+	.macro busyuart,rd,rx
+1001:	ldr \rd, [\rx, #ASC_STA_OFF]
+	tst \rd, #ASC_STA_TX_EMPTY
+	beq 1001b
+	.endm
diff --git a/arch/arm/include/debug/sunxi.S b/arch/arm/include/debug/sunxi.S
deleted file mode 100644
index 04eb56d5db2c..000000000000
--- a/arch/arm/include/debug/sunxi.S
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Early serial output macro for Allwinner A1X SoCs
- *
- * Copyright (C) 2012 Maxime Ripard
- *
- * Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#if defined(CONFIG_DEBUG_SUNXI_UART0)
-#define SUNXI_UART_DEBUG_PHYS_BASE 0x01c28000
-#define SUNXI_UART_DEBUG_VIRT_BASE 0xf1c28000
-#elif defined(CONFIG_DEBUG_SUNXI_UART1)
-#define SUNXI_UART_DEBUG_PHYS_BASE 0x01c28400
-#define SUNXI_UART_DEBUG_VIRT_BASE 0xf1c28400
-#endif
-
-	.macro addruart, rp, rv, tmp
-	ldr \rp, =SUNXI_UART_DEBUG_PHYS_BASE
-	ldr \rv, =SUNXI_UART_DEBUG_VIRT_BASE
-	.endm
-
-#define UART_SHIFT 2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/include/debug/tegra.S b/arch/arm/include/debug/tegra.S
index 883d7c22fd9d..be6a720dd183 100644
--- a/arch/arm/include/debug/tegra.S
+++ b/arch/arm/include/debug/tegra.S
@@ -221,3 +221,32 @@
 1002:
 #endif
 	.endm
+
+/*
+ * Storage for the state maintained by the macros above.
+ *
+ * In the kernel proper, this data is located in arch/arm/mach-tegra/common.c.
+ * That's because this header is included from multiple files, and we only
+ * want a single copy of the data. In particular, the UART probing code above
+ * assumes it's running using physical addresses. This is true when this file
+ * is included from head.o, but not when included from debug.o. So we need
+ * to share the probe results between the two copies, rather than having
+ * to re-run the probing again later.
+ *
+ * In the decompressor, we put the symbol/storage right here, since common.c
+ * isn't included in the decompressor build. This symbol gets put in .text
+ * even though it's really data, since .data is discarded from the
+ * decompressor. Luckily, .text is writeable in the decompressor, unless
+ * CONFIG_ZBOOT_ROM. That dependency is handled in arch/arm/Kconfig.debug.
+ */
+#if defined(ZIMAGE)
+tegra_uart_config:
+	/* Debug UART initialization required */
+	.word 1
+	/* Debug UART physical address */
+	.word 0
+	/* Debug UART virtual address */
+	.word 0
+	/* Scratch space for debug macro */
+	.word 0
+#endif
diff --git a/arch/arm/include/debug/ux500.S b/arch/arm/include/debug/ux500.S
index fbd24beeb1fa..aa7f63a8b5e0 100644
--- a/arch/arm/include/debug/ux500.S
+++ b/arch/arm/include/debug/ux500.S
@@ -45,4 +45,4 @@
 	ldr \rv, =UART_VIRT_BASE @ yes, virtual address
 	.endm
 
-#include <asm/hardware/debug-pl01x.S>
+#include <debug/pl01x.S>
diff --git a/arch/arm/include/debug/vexpress.S b/arch/arm/include/debug/vexpress.S
index dc8e882a6257..524acd5a223e 100644
--- a/arch/arm/include/debug/vexpress.S
+++ b/arch/arm/include/debug/vexpress.S
@@ -16,6 +16,8 @@
 #define DEBUG_LL_PHYS_BASE_RS1 0x1c000000
 #define DEBUG_LL_UART_OFFSET_RS1 0x00090000
 
+#define DEBUG_LL_UART_PHYS_CRX 0xb0090000
+
 #define DEBUG_LL_VIRT_BASE 0xf8000000
 
 #if defined(CONFIG_DEBUG_VEXPRESS_UART0_DETECT)
@@ -45,43 +47,5 @@
 
 	.endm
 
-#include <asm/hardware/debug-pl01x.S>
-
-#elif defined(CONFIG_DEBUG_VEXPRESS_UART0_CA9)
-
-	.macro addruart,rp,rv,tmp
-	mov \rp, #DEBUG_LL_UART_OFFSET
-	orr \rv, \rp, #DEBUG_LL_VIRT_BASE
-	orr \rp, \rp, #DEBUG_LL_PHYS_BASE
-	.endm
-
-#include <asm/hardware/debug-pl01x.S>
-
-#elif defined(CONFIG_DEBUG_VEXPRESS_UART0_RS1)
-
-	.macro addruart,rp,rv,tmp
-	mov \rp, #DEBUG_LL_UART_OFFSET_RS1
-	orr \rv, \rp, #DEBUG_LL_VIRT_BASE
-	orr \rp, \rp, #DEBUG_LL_PHYS_BASE_RS1
-	.endm
-
-#include <asm/hardware/debug-pl01x.S>
-
-#else /* CONFIG_DEBUG_LL_UART_NONE */
-
-	.macro addruart, rp, rv, tmp
-	/* Safe dummy values */
-	mov \rp, #0
-	mov \rv, #DEBUG_LL_VIRT_BASE
-	.endm
-
-	.macro senduart,rd,rx
-	.endm
-
-	.macro waituart,rd,rx
-	.endm
-
-	.macro busyuart,rd,rx
-	.endm
-
+#include <debug/pl01x.S>
 #endif
diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild
index 47bcb2d254af..18d76fd5a2af 100644
--- a/arch/arm/include/uapi/asm/Kbuild
+++ b/arch/arm/include/uapi/asm/Kbuild
@@ -1,7 +1,6 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
-header-y += a.out.h
 header-y += byteorder.h
 header-y += fcntl.h
 header-y += hwcap.h
diff --git a/arch/arm/include/uapi/asm/a.out.h b/arch/arm/include/uapi/asm/a.out.h
deleted file mode 100644
index 083894b2e3bc..000000000000
--- a/arch/arm/include/uapi/asm/a.out.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef __ARM_A_OUT_H__
-#define __ARM_A_OUT_H__
-
-#include <linux/personality.h>
-#include <linux/types.h>
-
-struct exec
-{
-  __u32 a_info;		/* Use macros N_MAGIC, etc for access */
-  __u32 a_text;		/* length of text, in bytes */
-  __u32 a_data;		/* length of data, in bytes */
-  __u32 a_bss;		/* length of uninitialized data area for file, in bytes */
-  __u32 a_syms;		/* length of symbol table data in file, in bytes */
-  __u32 a_entry;	/* start address */
-  __u32 a_trsize;	/* length of relocation info for text, in bytes */
-  __u32 a_drsize;	/* length of relocation info for data, in bytes */
-};
-
-/*
- * This is always the same
- */
-#define N_TXTADDR(a)	(0x00008000)
-
-#define N_TRSIZE(a)	((a).a_trsize)
-#define N_DRSIZE(a)	((a).a_drsize)
-#define N_SYMSIZE(a)	((a).a_syms)
-
-#define M_ARM 103
-
-#ifndef LIBRARY_START_TEXT
-#define LIBRARY_START_TEXT	(0x00c00000)
-#endif
-
-#endif /* __A_OUT_GNU_H__ */
diff --git a/arch/arm/include/uapi/asm/hwcap.h b/arch/arm/include/uapi/asm/hwcap.h
index 3688fd15a32d..6d34d080372a 100644
--- a/arch/arm/include/uapi/asm/hwcap.h
+++ b/arch/arm/include/uapi/asm/hwcap.h
@@ -25,6 +25,6 @@
 #define HWCAP_IDIVT (1 << 18)
 #define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */
 #define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT)
-
+#define HWCAP_LPAE (1 << 20)
 
 #endif /* _UAPI__ASMARM_HWCAP_H */
diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
index 96ee0929790f..5af0ed1b825a 100644
--- a/arch/arm/include/uapi/asm/ptrace.h
+++ b/arch/arm/include/uapi/asm/ptrace.h
@@ -34,28 +34,47 @@
 
 /*
  * PSR bits
+ * Note on V7M there is no mode contained in the PSR
  */
 #define USR26_MODE 0x00000000
 #define FIQ26_MODE 0x00000001
 #define IRQ26_MODE 0x00000002
 #define SVC26_MODE 0x00000003
+#if defined(__KERNEL__) && defined(CONFIG_CPU_V7M)
+/*
+ * Use 0 here to get code right that creates a userspace
+ * or kernel space thread.
+ */
+#define USR_MODE 0x00000000
+#define SVC_MODE 0x00000000
+#else
 #define USR_MODE 0x00000010
+#define SVC_MODE 0x00000013
+#endif
 #define FIQ_MODE 0x00000011
 #define IRQ_MODE 0x00000012
-#define SVC_MODE 0x00000013
 #define ABT_MODE 0x00000017
 #define HYP_MODE 0x0000001a
 #define UND_MODE 0x0000001b
 #define SYSTEM_MODE 0x0000001f
 #define MODE32_BIT 0x00000010
 #define MODE_MASK 0x0000001f
-#define PSR_T_BIT 0x00000020
-#define PSR_F_BIT 0x00000040
-#define PSR_I_BIT 0x00000080
-#define PSR_A_BIT 0x00000100
-#define PSR_E_BIT 0x00000200
-#define PSR_J_BIT 0x01000000
-#define PSR_Q_BIT 0x08000000
+
+#define V4_PSR_T_BIT 0x00000020 /* >= V4T, but not V7M */
+#define V7M_PSR_T_BIT 0x01000000
+#if defined(__KERNEL__) && defined(CONFIG_CPU_V7M)
+#define PSR_T_BIT V7M_PSR_T_BIT
+#else
+/* for compatibility */
+#define PSR_T_BIT V4_PSR_T_BIT
+#endif
+
+#define PSR_F_BIT 0x00000040 /* >= V4, but not V7M */
+#define PSR_I_BIT 0x00000080 /* >= V4, but not V7M */
+#define PSR_A_BIT 0x00000100 /* >= V6, but not V7M */
+#define PSR_E_BIT 0x00000200 /* >= V6, but not V7M */
+#define PSR_J_BIT 0x01000000 /* >= V5J, but not V7M */
+#define PSR_Q_BIT 0x08000000 /* >= V5E, including V7M */
 #define PSR_V_BIT 0x10000000
 #define PSR_C_BIT 0x20000000
 #define PSR_Z_BIT 0x40000000