Diffstat (limited to 'arch/arm64/include')
30 files changed, 211 insertions, 136 deletions
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 55103e50c51b..b112a39834d0 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -35,7 +35,6 @@ generic-y += poll.h
 generic-y += preempt.h
 generic-y += resource.h
 generic-y += rwsem.h
-generic-y += scatterlist.h
 generic-y += sections.h
 generic-y += segment.h
 generic-y += sembuf.h
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 59c05d8ea4a0..39248d3adf5d 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -16,6 +16,7 @@
 #include <linux/irqchip/arm-gic-acpi.h>
 
 #include <asm/cputype.h>
+#include <asm/psci.h>
 #include <asm/smp_plat.h>
 
 /* Basic configuration for ACPI */
@@ -39,18 +40,6 @@ extern int acpi_disabled;
 extern int acpi_noirq;
 extern int acpi_pci_disabled;
 
-/* 1 to indicate PSCI 0.2+ is implemented */
-static inline bool acpi_psci_present(void)
-{
-	return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_COMPLIANT;
-}
-
-/* 1 to indicate HVC must be used instead of SMC as the PSCI conduit */
-static inline bool acpi_psci_use_hvc(void)
-{
-	return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_USE_HVC;
-}
-
 static inline void disable_acpi(void)
 {
 	acpi_disabled = 1;
@@ -88,9 +77,11 @@ static inline void arch_fix_phys_package_id(int num, u32 slot) { }
 void __init acpi_init_cpus(void);
 #else
-static inline bool acpi_psci_present(void) { return false; }
-static inline bool acpi_psci_use_hvc(void) { return false; }
 static inline void acpi_init_cpus(void) { }
 #endif /* CONFIG_ACPI */
 
+static inline const char *acpi_get_enable_method(int cpu)
+{
+	return acpi_psci_present() ? "psci" : NULL;
+}
 #endif /*_ASM_ACPI_H*/
diff --git a/arch/arm64/include/asm/alternative-asm.h b/arch/arm64/include/asm/alternative-asm.h
deleted file mode 100644
index 919a67855b63..000000000000
--- a/arch/arm64/include/asm/alternative-asm.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __ASM_ALTERNATIVE_ASM_H
-#define __ASM_ALTERNATIVE_ASM_H
-
-#ifdef __ASSEMBLY__
-
-.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
-	.word \orig_offset - .
-	.word \alt_offset - .
-	.hword \feature
-	.byte \orig_len
-	.byte \alt_len
-.endm
-
-.macro alternative_insn insn1 insn2 cap
-661:	\insn1
-662:	.pushsection .altinstructions, "a"
-	altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
-	.popsection
-	.pushsection .altinstr_replacement, "ax"
-663:	\insn2
-664:	.popsection
-	.if ((664b-663b) != (662b-661b))
-		.error "Alternatives instruction length mismatch"
-	.endif
-.endm
-
-#endif  /* __ASSEMBLY__ */
-
-#endif /* __ASM_ALTERNATIVE_ASM_H */
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index d261f01e2bae..c385a0c4057f 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -1,6 +1,8 @@
 #ifndef __ASM_ALTERNATIVE_H
 #define __ASM_ALTERNATIVE_H
 
+#ifndef __ASSEMBLY__
+
 #include <linux/types.h>
 #include <linux/stddef.h>
 #include <linux/stringify.h>
@@ -24,7 +26,20 @@ void free_alternatives_memory(void);
 	" .byte 662b-661b\n"				/* source len      */ \
 	" .byte 664f-663f\n"				/* replacement len */
 
-/* alternative assembly primitive: */
+/*
+ * alternative assembly primitive:
+ *
+ * If any of these .org directive fail, it means that insn1 and insn2
+ * don't have the same length. This used to be written as
+ *
+ * .if ((664b-663b) != (662b-661b))
+ *	.error "Alternatives instruction length mismatch"
+ * .endif
+ *
+ * but most assemblers die if insn1 or insn2 have a .inst. This should
+ * be fixed in a binutils release posterior to 2.25.51.0.2 (anything
+ * containing commit 4e4d08cf7399b606 or c1baaddf8861).
+ */
 #define ALTERNATIVE(oldinstr, newinstr, feature)			\
 	"661:\n\t"							\
 	oldinstr "\n"							\
@@ -37,8 +52,31 @@ void free_alternatives_memory(void);
 	newinstr "\n"							\
 	"664:\n\t"							\
 	".popsection\n\t"						\
-	".if ((664b-663b) != (662b-661b))\n\t"				\
-	"	.error \"Alternatives instruction length mismatch\"\n\t"\
-	".endif\n"
+	".org	. - (664b-663b) + (662b-661b)\n\t"			\
+	".org	. - (662b-661b) + (664b-663b)\n"
+
+#else
+
+.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
+	.word \orig_offset - .
+	.word \alt_offset - .
+	.hword \feature
+	.byte \orig_len
+	.byte \alt_len
+.endm
+
+.macro alternative_insn insn1 insn2 cap
+661:	\insn1
+662:	.pushsection .altinstructions, "a"
+	altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
+	.popsection
+	.pushsection .altinstr_replacement, "ax"
+663:	\insn2
+664:	.popsection
+	.org	. - (664b-663b) + (662b-661b)
+	.org	. - (662b-661b) + (664b-663b)
+.endm
+
+#endif  /* __ASSEMBLY__ */
 
 #endif /* __ASM_ALTERNATIVE_H */
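The .org pair that replaces the old .if/.error check is worth a closer look. Below is a minimal, self-contained sketch (not kernel code; the section name .demo.alt and the function name are invented) of the same trick: each .org rewinds the location counter by the length of one instruction sequence and advances it by the length of the other, and because the assembler refuses to move the location counter backwards, assembly can only succeed when the two sequences are the same size. Unlike the .if/.error form, this sidesteps the binutils problem with .inst described in the comment above.

    /*
     * Sketch of the .org length check used by ALTERNATIVE() above.
     * If the replacement (663..664) is longer than the original
     * (661..662), the first .org moves '.' backwards and assembly
     * fails; if it is shorter, the second .org fails instead.
     */
    static inline void alt_length_check_demo(void)
    {
            asm volatile(
            "661:   nop\n"                          /* original    */
            "662:\n"
            "       .pushsection .demo.alt, \"ax\"\n"
            "663:   nop\n"                          /* replacement */
            "664:\n"
            "       .popsection\n"
            "       .org    . - (664b-663b) + (662b-661b)\n"
            "       .org    . - (662b-661b) + (664b-663b)\n");
    }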
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 71f19c4dc0de..0fa47c4275cb 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -114,7 +114,7 @@ do {									\
 #define read_barrier_depends()		do { } while(0)
 #define smp_read_barrier_depends()	do { } while(0)
 
-#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
+#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 #define nop()		asm volatile("nop");
 
 #define smp_mb__before_atomic()	smp_mb()
diff --git a/arch/arm64/include/asm/boot.h b/arch/arm64/include/asm/boot.h
new file mode 100644
index 000000000000..81151b67b26b
--- /dev/null
+++ b/arch/arm64/include/asm/boot.h
@@ -0,0 +1,14 @@
+
+#ifndef __ASM_BOOT_H
+#define __ASM_BOOT_H
+
+#include <asm/sizes.h>
+
+/*
+ * arm64 requires the DTB to be 8 byte aligned and
+ * not exceed 2MB in size.
+ */
+#define MIN_FDT_ALIGN		8
+#define MAX_FDT_SIZE		SZ_2M
+
+#endif
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 67d309cc3b6b..c75b8d027eb1 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -40,10 +40,6 @@
  *	the implementation assumes non-aliasing VIPT D-cache and (aliasing)
  *	VIPT or ASID-tagged VIVT I-cache.
  *
- *	flush_cache_all()
- *
- *		Unconditionally clean and invalidate the entire cache.
- *
  *	flush_cache_mm(mm)
  *
  *		Clean and invalidate all user space cache entries
@@ -69,7 +65,6 @@
  *	- kaddr  - page address
  *	- size   - region size
  */
-extern void flush_cache_all(void);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void __flush_dcache_area(void *addr, size_t len);
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
index 5a31d6716914..8f03446cf89f 100644
--- a/arch/arm64/include/asm/cpu_ops.h
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -19,15 +19,15 @@
 #include <linux/init.h>
 #include <linux/threads.h>
 
-struct device_node;
-
 /**
  * struct cpu_operations - Callback operations for hotplugging CPUs.
  *
  * @name:	Name of the property as appears in a devicetree cpu node's
- *		enable-method property.
- * @cpu_init:	Reads any data necessary for a specific enable-method from the
- *		devicetree, for a given cpu node and proposed logical id.
+ *		enable-method property. On systems booting with ACPI, @name
+ *		identifies the struct cpu_operations entry corresponding to
+ *		the boot protocol specified in the ACPI MADT table.
+ * @cpu_init:	Reads any data necessary for a specific enable-method for a
+ *		proposed logical id.
 * @cpu_prepare: Early one-time preparation step for a cpu. If there is a
 *		mechanism for doing so, tests whether it is possible to boot
 *		the given CPU.
@@ -40,15 +40,15 @@
 * @cpu_die:	Makes a cpu leave the kernel. Must not fail. Called from the
 *		cpu being killed.
 * @cpu_kill:	Ensures a cpu has left the kernel. Called from another cpu.
- * @cpu_init_idle: Reads any data necessary to initialize CPU idle states from
- *		devicetree, for a given cpu node and proposed logical id.
+ * @cpu_init_idle: Reads any data necessary to initialize CPU idle states for
+ *		a proposed logical id.
 * @cpu_suspend: Suspends a cpu and saves the required context. May fail owing
 *		to wrong parameters or error conditions. Called from the
 *		CPU being suspended. Must be called with IRQs disabled.
 */
 struct cpu_operations {
 	const char	*name;
-	int		(*cpu_init)(struct device_node *, unsigned int);
+	int		(*cpu_init)(unsigned int);
 	int		(*cpu_prepare)(unsigned int);
 	int		(*cpu_boot)(unsigned int);
 	void		(*cpu_postboot)(void);
@@ -58,14 +58,17 @@
 	int		(*cpu_kill)(unsigned int cpu);
 #endif
 #ifdef CONFIG_CPU_IDLE
-	int		(*cpu_init_idle)(struct device_node *, unsigned int);
+	int		(*cpu_init_idle)(unsigned int);
 	int		(*cpu_suspend)(unsigned long);
 #endif
 };
 
 extern const struct cpu_operations *cpu_ops[NR_CPUS];
-int __init cpu_read_ops(struct device_node *dn, int cpu);
-void __init cpu_read_bootcpu_ops(void);
-const struct cpu_operations *cpu_get_ops(const char *name);
+int __init cpu_read_ops(int cpu);
+
+static inline void __init cpu_read_bootcpu_ops(void)
+{
+	cpu_read_ops(0);
+}
 
 #endif /* ifndef __ASM_CPU_OPS_H */
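For orientation, a hedged sketch of what an enable method looks like against the reworked cpu_ops interface (the example_* names are invented; the in-tree "psci" and "spin-table" methods follow this shape). The point of the change is visible in the signatures: cpu_init() now takes only the logical cpu id and looks up any devicetree or ACPI specifics internally, instead of being handed a struct device_node.

    #include <asm/cpu_ops.h>

    /* Sketch only: a minimal enable method against the new interface. */
    static int example_cpu_init(unsigned int cpu)
    {
            /* fetch whatever firmware/DT/ACPI data this method needs for @cpu */
            return 0;
    }

    static int example_cpu_boot(unsigned int cpu)
    {
            /* release @cpu into the kernel's secondary entry point */
            return 0;
    }

    static const struct cpu_operations example_cpu_ops = {
            .name           = "example",    /* DT enable-method string, or the
                                             * boot protocol named by ACPI */
            .cpu_init       = example_cpu_init,
            .cpu_boot       = example_cpu_boot,
    };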
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 82cb9f98ba1a..c1044218a63a 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -24,8 +24,9 @@
 #define ARM64_WORKAROUND_CLEAN_CACHE		0
 #define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE	1
 #define ARM64_WORKAROUND_845719			2
+#define ARM64_HAS_SYSREG_GIC_CPUIF		3
 
-#define ARM64_NCAPS				3
+#define ARM64_NCAPS				4
 
 #ifndef __ASSEMBLY__
 
@@ -38,6 +39,11 @@ struct arm64_cpu_capabilities {
 			u32 midr_model;
 			u32 midr_range_min, midr_range_max;
 		};
+
+		struct {	/* Feature register checking */
+			u64 register_mask;
+			u64 register_value;
+		};
 	};
 };
diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h
index 141b2fcabaa6..0f74f05d662a 100644
--- a/arch/arm64/include/asm/cpuidle.h
+++ b/arch/arm64/include/asm/cpuidle.h
@@ -5,20 +5,16 @@
 #ifdef CONFIG_CPU_IDLE
 extern int arm_cpuidle_init(unsigned int cpu);
-extern int cpu_suspend(unsigned long arg);
+extern int arm_cpuidle_suspend(int index);
 #else
 static inline int arm_cpuidle_init(unsigned int cpu)
 {
 	return -EOPNOTSUPP;
 }
-static inline int cpu_suspend(unsigned long arg)
+static inline int arm_cpuidle_suspend(int index)
 {
 	return -EOPNOTSUPP;
 }
 #endif
-
-static inline int arm_cpuidle_suspend(int index)
-{
-	return cpu_suspend(index);
-}
 #endif
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 9437e3dc5833..f0d6d0bfe55c 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -18,6 +18,7 @@
 #ifdef __KERNEL__
 
+#include <linux/acpi.h>
 #include <linux/types.h>
 #include <linux/vmalloc.h>
 
@@ -28,13 +29,23 @@
 #define DMA_ERROR_CODE	(~(dma_addr_t)0)
 extern struct dma_map_ops *dma_ops;
+extern struct dma_map_ops dummy_dma_ops;
 
 static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
-	if (unlikely(!dev) || !dev->archdata.dma_ops)
+	if (unlikely(!dev))
 		return dma_ops;
-	else
+	else if (dev->archdata.dma_ops)
 		return dev->archdata.dma_ops;
+	else if (acpi_disabled)
+		return dma_ops;
+
+	/*
+	 * When ACPI is enabled, if arch_set_dma_ops is not called,
+	 * we will disable device DMA capability by setting it
+	 * to dummy_dma_ops.
+	 */
+	return &dummy_dma_ops;
 }
 
 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
@@ -48,6 +59,9 @@
 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 				      struct iommu_ops *iommu, bool coherent)
 {
+	if (!acpi_disabled && !dev->archdata.dma_ops)
+		dev->archdata.dma_ops = dma_ops;
+
 	dev->archdata.dma_coherent = coherent;
 }
 #define arch_setup_dma_ops	arch_setup_dma_ops
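The register_mask/register_value pair added to struct arm64_cpu_capabilities above is meant for table-driven matching against an ID register. A sketch of how the new ARM64_HAS_SYSREG_GIC_CPUIF capability can be detected this way, modeled on the cpufeature code in this series (treat the matcher name and exact field position as illustrative; bits [27:24] of ID_AA64PFR0_EL1 describe the GIC system register interface):

    #include <asm/cpufeature.h>
    #include <asm/cputype.h>

    static bool has_id_aa64pfr0_feature(const struct arm64_cpu_capabilities *entry)
    {
            u64 val = read_cpuid(id_aa64pfr0_el1);

            return (val & entry->register_mask) == entry->register_value;
    }

    static const struct arm64_cpu_capabilities arm64_features[] = {
            {
                    .desc = "GIC system register CPU interface",
                    .capability = ARM64_HAS_SYSREG_GIC_CPUIF,
                    .matches = has_id_aa64pfr0_feature,
                    .register_mask = (0xf << 24),   /* ID_AA64PFR0_EL1.GIC */
                    .register_value = (1 << 24),    /* sysreg interface present */
            },
            {},
    };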
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index 95e6b6dcbe37..c0739187a920 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -17,6 +17,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/kernel.h>
+#include <asm/boot.h>
 #include <asm/page.h>
 
 /*
@@ -32,6 +33,20 @@
  */
 enum fixed_addresses {
 	FIX_HOLE,
+
+	/*
+	 * Reserve a virtual window for the FDT that is 2 MB larger than the
+	 * maximum supported size, and put it at the top of the fixmap region.
+	 * The additional space ensures that any FDT that does not exceed
+	 * MAX_FDT_SIZE can be mapped regardless of whether it crosses any
+	 * 2 MB alignment boundaries.
+	 *
+	 * Keep this at the top so it remains 2 MB aligned.
+	 */
+#define FIX_FDT_SIZE		(MAX_FDT_SIZE + SZ_2M)
+	FIX_FDT_END,
+	FIX_FDT = FIX_FDT_END + FIX_FDT_SIZE / PAGE_SIZE - 1,
+
 	FIX_EARLYCON_MEM_BASE,
 	FIX_TEXT_POKE0,
 	__end_of_permanent_fixed_addresses,
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 5f750dc96e0f..74069b3bd919 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -58,7 +58,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
-	pagefault_disable();	/* implies preempt_disable() */
+	pagefault_disable();
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -85,7 +85,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 		ret = -ENOSYS;
 	}
 
-	pagefault_enable();	/* subsumes preempt_enable() */
+	pagefault_enable();
 
 	if (!ret) {
 		switch (cmp) {
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 5b7ca8ace95f..2fd9b14ca295 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -86,10 +86,6 @@ static inline int prepare_hugepage_range(struct file *file,
 	return 0;
 }
 
-static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
-{
-}
-
 static inline int huge_pte_none(pte_t pte)
 {
 	return pte_none(pte);
@@ -100,15 +96,6 @@ static inline pte_t huge_pte_wrprotect(pte_t pte)
 	return pte_wrprotect(pte);
 }
 
-static inline int arch_prepare_hugepage(struct page *page)
-{
-	return 0;
-}
-
-static inline void arch_release_hugepage(struct page *page)
-{
-}
-
 static inline void arch_clear_hugepage_flags(struct page *page)
 {
 	clear_bit(PG_dcache_clean, &page->flags);
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index f81b328d9cf4..30e50eb54a67 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -281,6 +281,7 @@ __AARCH64_INSN_FUNCS(ret,	0xFFFFFC1F, 0xD65F0000)
 #undef	__AARCH64_INSN_FUNCS
 
 bool aarch64_insn_is_nop(u32 insn);
+bool aarch64_insn_is_branch_imm(u32 insn);
 
 int aarch64_insn_read(void *addr, u32 *insnp);
 int aarch64_insn_write(void *addr, u32 insn);
@@ -351,6 +352,8 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
 					 int shift,
 					 enum aarch64_insn_variant variant,
 					 enum aarch64_insn_logic_type type);
+s32 aarch64_get_branch_offset(u32 insn);
+u32 aarch64_set_branch_offset(u32 insn, s32 offset);
 
 bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
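The two new insn.h helpers decode and re-encode the signed immediate of a B/BL-class instruction. A small usage sketch (the fixup function is invented for illustration; the offset is always relative to the address of the branch instruction itself):

    #include <asm/insn.h>

    /* Sketch: point an immediate branch located at @pc at @new_target. */
    static u32 retarget_branch(u32 insn, unsigned long pc, unsigned long new_target)
    {
            /* old displacement; e.g. usable to sanity-check the old target */
            s32 old_offset = aarch64_get_branch_offset(insn);

            (void)old_offset;
            return aarch64_set_branch_offset(insn, (s32)(new_target - pc));
    }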
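The size of the FIX_FDT window in fixmap.h above can be checked with a little arithmetic. The FDT is only guaranteed to be 8-byte aligned (MIN_FDT_ALIGN), so a maximally sized 2 MB blob may straddle one 2 MB boundary; covering it from a 2 MB-aligned base then needs up to MAX_FDT_SIZE + SZ_2M of virtual space. A worked example with a made-up physical address:

    /* Sketch only: worst-case span of the FDT mapping window. */
    static size_t fdt_window_worst_case(void)
    {
            phys_addr_t dt_phys = 0x41370000;       /* hypothetical: 8-byte aligned only */
            phys_addr_t base = dt_phys & ~(phys_addr_t)(SZ_2M - 1); /* 0x41200000 */

            /*
             * Offset into the 2 MB block (at most SZ_2M - MIN_FDT_ALIGN) plus
             * the maximum FDT size: always less than MAX_FDT_SIZE + SZ_2M,
             * which is exactly the FIX_FDT_SIZE window reserved above.
             */
            return (dt_phys - base) + MAX_FDT_SIZE; /* 0x170000 + SZ_2M here */
    }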
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 540f7c0aea82..44be1e03ed65 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -117,10 +117,10 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 * ordering rules but do not guarantee any ordering relative to Normal memory
 * accesses.
 */
-#define readb_relaxed(c)	({ u8  __v = __raw_readb(c); __v; })
-#define readw_relaxed(c)	({ u16 __v = le16_to_cpu((__force __le16)__raw_readw(c)); __v; })
-#define readl_relaxed(c)	({ u32 __v = le32_to_cpu((__force __le32)__raw_readl(c)); __v; })
-#define readq_relaxed(c)	({ u64 __v = le64_to_cpu((__force __le64)__raw_readq(c)); __v; })
+#define readb_relaxed(c)	({ u8  __r = __raw_readb(c); __r; })
+#define readw_relaxed(c)	({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
+#define readl_relaxed(c)	({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
+#define readq_relaxed(c)	({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
 
 #define writeb_relaxed(v,c)	((void)__raw_writeb((v),(c)))
 #define writew_relaxed(v,c)	((void)__raw_writew((__force u16)cpu_to_le16(v),(c)))
@@ -170,6 +170,7 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
 #define ioremap(addr, size)		__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_nocache(addr, size)	__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_wc(addr, size)		__ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
+#define ioremap_wt(addr, size)		__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define iounmap				__iounmap
 
 /*
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 4f7310fa77f0..3c5fe685a2d6 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -27,7 +27,7 @@
 #define	MPIDR_EL1	1	/* MultiProcessor Affinity Register */
 #define	CSSELR_EL1	2	/* Cache Size Selection Register */
 #define	SCTLR_EL1	3	/* System Control Register */
-#define	ACTLR_EL1	4	/* Auxilliary Control Register */
+#define	ACTLR_EL1	4	/* Auxiliary Control Register */
 #define	CPACR_EL1	5	/* Coprocessor Access Control */
 #define	TTBR0_EL1	6	/* Translation Table Base Register 0 */
 #define	TTBR1_EL1	7	/* Translation Table Base Register 1 */
@@ -132,11 +132,6 @@
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
 extern u64 __vgic_v3_get_ich_vtr_el2(void);
 
-extern char __save_vgic_v2_state[];
-extern char __restore_vgic_v2_state[];
-extern char __save_vgic_v3_state[];
-extern char __restore_vgic_v3_state[];
-
 #endif
 
 #endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index f0f58c9beec0..2709db2a7eac 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -221,29 +221,6 @@ struct vgic_sr_vectors {
 	void	*restore_vgic;
 };
 
-static inline void vgic_arch_setup(const struct vgic_params *vgic)
-{
-	extern struct vgic_sr_vectors __vgic_sr_vectors;
-
-	switch(vgic->type)
-	{
-	case VGIC_V2:
-		__vgic_sr_vectors.save_vgic	= __save_vgic_v2_state;
-		__vgic_sr_vectors.restore_vgic	= __restore_vgic_v2_state;
-		break;
-
-#ifdef CONFIG_ARM_GIC_V3
-	case VGIC_V3:
-		__vgic_sr_vectors.save_vgic	= __save_vgic_v3_state;
-		__vgic_sr_vectors.restore_vgic	= __restore_vgic_v3_state;
-		break;
-#endif
-
-	default:
-		BUG();
-	}
-}
-
 static inline void kvm_arch_hardware_disable(void) {}
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
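As the io.h comment above says, the _relaxed accessors follow Device memory ordering only. A usage sketch (device, register layout, and the "ready" bit are invented): poll with readl_relaxed() in the hot loop, then use the ordered readl() once ordering against Normal memory starts to matter, e.g. before reading a buffer the device has just DMA'd:

    #include <asm/io.h>
    #include <asm/processor.h>

    /* Sketch: relaxed polling, with one ordered read before the buffer. */
    static u32 wait_and_consume(void __iomem *status, const u32 *dma_buf)
    {
            while (!(readl_relaxed(status) & 0x1))  /* hypothetical READY bit */
                    cpu_relax();

            /* the ordered variant keeps later loads from passing this read */
            (void)readl(status);

            return dma_buf[0];      /* now safe to read what the device wrote */
    }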
diff --git a/arch/arm64/include/asm/mm-arch-hooks.h b/arch/arm64/include/asm/mm-arch-hooks.h
new file mode 100644
index 000000000000..562b655f5ba9
--- /dev/null
+++ b/arch/arm64/include/asm/mm-arch-hooks.h
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARM64_MM_ARCH_HOOKS_H
+#define _ASM_ARM64_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_ARM64_MM_ARCH_HOOKS_H */
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 3d311761e3c2..79fcfb048884 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -34,5 +34,6 @@ extern void init_mem_pgprot(void);
 extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
 			       unsigned long virt, phys_addr_t size,
 			       pgprot_t prot);
+extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
 
 #endif
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index d26d1d53c0d7..6471773db6fd 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -24,4 +24,11 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
 #define perf_misc_flags(regs)	perf_misc_flags(regs)
 #endif
 
+#define perf_arch_fetch_caller_regs(regs, __ip) { \
+	(regs)->pc = (__ip); \
+	(regs)->regs[29] = (unsigned long) __builtin_frame_address(0); \
+	(regs)->sp = current_stack_pointer; \
+	(regs)->pstate = PSR_MODE_EL1h;	\
+}
+
 #endif
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 220633b791b8..14ad6e4e87d1 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -28,12 +28,8 @@
 struct mm_struct;
 struct cpu_suspend_ctx;
 
-extern void cpu_cache_off(void);
 extern void cpu_do_idle(void);
 extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
-extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
-void cpu_soft_restart(phys_addr_t cpu_reset,
-		unsigned long addr) __attribute__((noreturn));
 extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
 extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index d2c37a1df0eb..e4c893e54f01 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -78,13 +78,30 @@ struct cpu_context {
 
 struct thread_struct {
 	struct cpu_context	cpu_context;	/* cpu context */
-	unsigned long		tp_value;
+	unsigned long		tp_value;	/* TLS register */
+#ifdef CONFIG_COMPAT
+	unsigned long		tp2_value;
+#endif
 	struct fpsimd_state	fpsimd_state;
 	unsigned long		fault_address;	/* fault info */
 	unsigned long		fault_code;	/* ESR_EL1 value */
 	struct debug_info	debug;		/* debugging */
 };
 
+#ifdef CONFIG_COMPAT
+#define task_user_tls(t)						\
+({									\
+	unsigned long *__tls;						\
+	if (is_compat_thread(task_thread_info(t)))			\
+		__tls = &(t)->thread.tp2_value;				\
+	else								\
+		__tls = &(t)->thread.tp_value;				\
+	__tls;								\
+ })
+#else
+#define task_user_tls(t)	(&(t)->thread.tp_value)
+#endif
+
 #define INIT_THREAD  {	}
 
 static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
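The task_user_tls() macro above picks the thread_struct slot that mirrors TPIDR_EL0 for a given task: native tasks keep their TLS pointer there (tp_value), while compat tasks use TPIDR_EL0 as a scratch register, saved in the new tp2_value, with tp_value mirroring TPIDRRO_EL0 instead. A sketch of the save side of a context switch, modeled on the process-switch code this series touches (abridged; not the exact in-tree function):

    /* Sketch: stash the outgoing task's TPIDR_EL0 in the right slot. */
    static void tls_save_current(void)
    {
            unsigned long tpidr;

            asm("mrs %0, tpidr_el0" : "=r" (tpidr));
            *task_user_tls(current) = tpidr;
    }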
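show_unhandled_signals_ratelimited() hides a subtle design point: the DEFINE_RATELIMIT_STATE lives inside the macro body, so every call site gets its own static ratelimit state, and one noisy fault path cannot starve diagnostics from another. A sketch of a typical call site (helper name and message text invented):

    #include <linux/printk.h>
    #include <linux/sched.h>
    #include <asm/system_misc.h>

    static void report_unhandled(struct task_struct *tsk, unsigned long addr)
    {
            if (show_unhandled_signals_ratelimited())
                    pr_info("%s[%d]: unhandled access to %#lx\n",
                            tsk->comm, task_pid_nr(tsk), addr);
    }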