Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/assembler.h    | 29
-rw-r--r--  arch/arm/include/asm/cacheflush.h   | 15
-rw-r--r--  arch/arm/include/asm/glue-cache.h   |  1
-rw-r--r--  arch/arm/include/asm/opcodes-virt.h | 10
-rw-r--r--  arch/arm/include/asm/ptrace.h       |  1
-rw-r--r--  arch/arm/include/asm/virt.h         | 69
6 files changed, 125 insertions, 0 deletions
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 5c8b3bf4d825..2ef95813fce0 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -22,6 +22,7 @@
 
 #include <asm/ptrace.h>
 #include <asm/domain.h>
+#include <asm/opcodes-virt.h>
 
 #define IOMEM(x)	(x)
 
@@ -240,6 +241,34 @@
 #endif
 
 /*
+ * Helper macro to enter SVC mode cleanly and mask interrupts. reg is
+ * a scratch register for the macro to overwrite.
+ *
+ * This macro is intended for forcing the CPU into SVC mode at boot time.
+ * You cannot return to the original mode.
+ *
+ * Beware, it also clobbers LR.
+ */
+.macro safe_svcmode_maskall reg:req
+	mrs	\reg , cpsr
+	mov	lr , \reg
+	and	lr , lr , #MODE_MASK
+	cmp	lr , #HYP_MODE
+	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT
+	bic	\reg , \reg , #MODE_MASK
+	orr	\reg , \reg , #SVC_MODE
+THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
+	bne	1f
+	orr	\reg, \reg, #PSR_A_BIT
+	adr	lr, BSYM(2f)
+	msr	spsr_cxsf, \reg
+	__MSR_ELR_HYP(14)
+	__ERET
+1:	msr	cpsr_c, \reg
+2:
+.endm
+
+/*
  * STRT/LDRT access macros with ARM and Thumb-2 variants
  */
 #ifdef CONFIG_THUMB2_KERNEL
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index e4448e16046d..e1489c54cd12 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -49,6 +49,13 @@
  *
  *	Unconditionally clean and invalidate the entire cache.
  *
+ *	flush_kern_louis()
+ *
+ *		Flush data cache levels up to the level of unification
+ *		inner shareable and invalidate the I-cache.
+ *		Only needed from v7 onwards; falls back to flush_cache_all()
+ *		for all other processor versions.
+ *
  *	flush_user_all()
  *
  *		Clean and invalidate all user space cache entries
@@ -97,6 +104,7 @@ struct cpu_cache_fns {
 	void (*flush_icache_all)(void);
 	void (*flush_kern_all)(void);
+	void (*flush_kern_louis)(void);
 	void (*flush_user_all)(void);
 	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
 
@@ -119,6 +127,7 @@ extern struct cpu_cache_fns cpu_cache;
 
 #define __cpuc_flush_icache_all		cpu_cache.flush_icache_all
 #define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
+#define __cpuc_flush_kern_louis		cpu_cache.flush_kern_louis
 #define __cpuc_flush_user_all		cpu_cache.flush_user_all
 #define __cpuc_flush_user_range		cpu_cache.flush_user_range
 #define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
@@ -139,6 +148,7 @@ extern struct cpu_cache_fns cpu_cache;
 
 extern void __cpuc_flush_icache_all(void);
 extern void __cpuc_flush_kern_all(void);
+extern void __cpuc_flush_kern_louis(void);
 extern void __cpuc_flush_user_all(void);
 extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
 extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
@@ -204,6 +214,11 @@ static inline void __flush_icache_all(void)
 	__flush_icache_preferred();
 }
 
+/*
+ * Flush caches up to Level of Unification Inner Shareable
+ */
+#define flush_cache_louis()		__cpuc_flush_kern_louis()
+
 #define flush_cache_all()		__cpuc_flush_kern_all()
 
 static inline void vivt_flush_cache_mm(struct mm_struct *mm)
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index 4f8d2c0dc441..cca9f15704ed 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -132,6 +132,7 @@
 #ifndef MULTI_CACHE
 #define __cpuc_flush_icache_all		__glue(_CACHE,_flush_icache_all)
 #define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
+#define __cpuc_flush_kern_louis		__glue(_CACHE,_flush_kern_cache_louis)
 #define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
 #define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
 #define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
diff --git a/arch/arm/include/asm/opcodes-virt.h b/arch/arm/include/asm/opcodes-virt.h
index b85665a96f8e..efcfdf92d9d5 100644
--- a/arch/arm/include/asm/opcodes-virt.h
+++ b/arch/arm/include/asm/opcodes-virt.h
@@ -26,4 +26,14 @@
 	0xF7E08000 | (((imm16) & 0xF000) << 4) | ((imm16) & 0x0FFF)	\
 )
 
+#define __ERET	__inst_arm_thumb32(					\
+	0xE160006E,							\
+	0xF3DE8F00							\
+)
+
+#define __MSR_ELR_HYP(regnum)	__inst_arm_thumb32(			\
+	0xE12EF300 | regnum,						\
+	0xF3808E30 | (regnum << 16)					\
+)
+
 #endif /* ! __ASM_ARM_OPCODES_VIRT_H */
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 355ece523f41..91ef6c231c47 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -44,6 +44,7 @@
 #define IRQ_MODE	0x00000012
 #define SVC_MODE	0x00000013
 #define ABT_MODE	0x00000017
+#define HYP_MODE	0x0000001a
 #define UND_MODE	0x0000001b
 #define SYSTEM_MODE	0x0000001f
 #define MODE32_BIT	0x00000010
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
new file mode 100644
index 000000000000..86164df86cb4
--- /dev/null
+++ b/arch/arm/include/asm/virt.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2012 Linaro Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef VIRT_H
+#define VIRT_H
+
+#include <asm/ptrace.h>
+
+/*
+ * Flag indicating that the kernel was not entered in the same mode on every
+ * CPU.  The zImage loader stashes this value in an SPSR, so we need an
+ * architecturally defined flag bit here (the N flag, as it happens).
+ */
+#define BOOT_CPU_MODE_MISMATCH	(1<<31)
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_ARM_VIRT_EXT
+/*
+ * __boot_cpu_mode records what mode the primary CPU was booted in.
+ * A correctly-implemented bootloader must start all CPUs in the same mode:
+ * if it fails to do this, the flag BOOT_CPU_MODE_MISMATCH is set to indicate
+ * that some CPU(s) were booted in a different mode.
+ *
+ * This allows the kernel to flag an error when the secondaries have come up.
+ */
+extern int __boot_cpu_mode;
+
+void __hyp_set_vectors(unsigned long phys_vector_base);
+unsigned long __hyp_get_vectors(void);
+#else
+#define __boot_cpu_mode	(SVC_MODE)
+#endif
+
+#ifndef ZIMAGE
+void hyp_mode_check(void);
+
+/* Reports the availability of HYP mode */
+static inline bool is_hyp_mode_available(void)
+{
+	return ((__boot_cpu_mode & MODE_MASK) == HYP_MODE &&
+		!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH));
+}
+
+/* Check if the bootloader has booted CPUs in different modes */
+static inline bool is_hyp_mode_mismatched(void)
+{
+	return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH);
+}
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* ! VIRT_H */
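
A note on the dispatch plumbing above: with MULTI_CACHE, cacheflush.h routes __cpuc_flush_kern_louis through the cpu_cache function-pointer table; without it, glue-cache.h token-pastes the _CACHE prefix onto _flush_kern_cache_louis. The following standalone C sketch reproduces that token-pasting mechanism outside the kernel. The v7_flush_kern_cache_louis name and its printf body are illustrative stand-ins: the real implementation is assembly that is not part of this header-only diff.

    #include <stdio.h>

    /* Same token-pasting trick as __glue()/____glue() in glue-cache.h. */
    #define ____glue(name, fn)	name##fn
    #define __glue(name, fn)	____glue(name, fn)

    /* Assumed per-CPU implementation name (stand-in for cache assembly). */
    static void v7_flush_kern_cache_louis(void)
    {
    	printf("flush D-cache to LoUIS, invalidate I-cache\n");
    }

    /* Normally selected by the build for the configured cache type. */
    #define _CACHE v7

    /* Mirrors the new glue-cache.h line for the non-MULTI_CACHE case. */
    #define __cpuc_flush_kern_louis __glue(_CACHE, _flush_kern_cache_louis)

    int main(void)
    {
    	/* Expands to v7_flush_kern_cache_louis(), resolved at compile time. */
    	__cpuc_flush_kern_louis();
    	return 0;
    }

The point of the two schemes is the same trade-off as the rest of the __cpuc_* API: a single-cache kernel gets direct calls with no indirection, while a multi-cache kernel pays one pointer dereference for runtime selection.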
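
The __ERET and __MSR_ELR_HYP macros in opcodes-virt.h hand-assemble the virtualization-extension instructions in both ARM and Thumb-2 encodings, so the OR arithmetic can be checked by hand. Below is a small standalone C program (illustrative only, not part of the patch) that prints the words safe_svcmode_maskall ends up emitting for __MSR_ELR_HYP(14):

    #include <stdio.h>

    /* The same constants the header ORs together (ARM / Thumb-2 variants). */
    #define ARM_MSR_ELR_HYP(regnum)  (0xE12EF300u | (regnum))
    #define T32_MSR_ELR_HYP(regnum)  (0xF3808E30u | ((unsigned)(regnum) << 16))

    int main(void)
    {
    	unsigned regnum = 14; /* LR, the register safe_svcmode_maskall passes */

    	/* 0xE12EF300 | 14 == 0xE12EF30E: ARM "msr ELR_hyp, lr" */
    	printf("ARM msr ELR_hyp, r%u: 0x%08X\n", regnum, ARM_MSR_ELR_HYP(regnum));

    	/* 0xF3808E30 | (14 << 16) == 0xF38E8E30: Thumb-2 "msr ELR_hyp, lr" */
    	printf("T32 msr ELR_hyp, r%u: 0x%08X\n", regnum, T32_MSR_ELR_HYP(regnum));

    	/* ERET has fixed encodings, with no operand to merge in. */
    	printf("ARM eret: 0x%08X\n", 0xE160006Eu);
    	printf("T32 eret: 0x%08X\n", 0xF3DE8F00u);
    	return 0;
    }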
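
Finally, the virt.h helpers are simple bit tests against the mode recorded at boot. The user-space sketch below reproduces that logic; it borrows HYP_MODE and SVC_MODE from the ptrace.h hunk above and assumes MODE_MASK is the low five PSR mode bits (0x1f, defined in ptrace.h but outside this diff). The kernel declares __boot_cpu_mode as int; the sketch uses unsigned so the bit-31 mismatch flag stays well defined.

    #include <stdbool.h>
    #include <stdio.h>

    #define MODE_MASK              0x0000001fu  /* assumed: low 5 PSR mode bits */
    #define SVC_MODE               0x00000013u
    #define HYP_MODE               0x0000001au
    #define BOOT_CPU_MODE_MISMATCH (1u << 31)

    /* Stand-in for the kernel's __boot_cpu_mode, recorded by the boot path. */
    static unsigned int boot_cpu_mode = HYP_MODE;

    /* HYP is only usable if every CPU entered the kernel in HYP mode. */
    static bool is_hyp_mode_available(void)
    {
    	return (boot_cpu_mode & MODE_MASK) == HYP_MODE &&
    	       !(boot_cpu_mode & BOOT_CPU_MODE_MISMATCH);
    }

    static bool is_hyp_mode_mismatched(void)
    {
    	return !!(boot_cpu_mode & BOOT_CPU_MODE_MISMATCH);
    }

    int main(void)
    {
    	printf("hyp available: %d\n", is_hyp_mode_available());  /* 1 */

    	/* A secondary coming up in SVC mode would set the mismatch flag. */
    	boot_cpu_mode = SVC_MODE | BOOT_CPU_MODE_MISMATCH;
    	printf("hyp available: %d\n", is_hyp_mode_available());  /* 0 */
    	printf("mode mismatch: %d\n", is_hyp_mode_mismatched()); /* 1 */
    	return 0;
    }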