Diffstat (limited to 'arch/arm64/include/asm')
-rw-r--r--  arch/arm64/include/asm/acpi.h                        15
-rw-r--r--  arch/arm64/include/asm/cpufeature.h                   1
-rw-r--r--  arch/arm64/include/asm/hugetlb.h                      2
-rw-r--r--  arch/arm64/include/asm/hwcap.h                        1
-rw-r--r--  arch/arm64/include/asm/memory.h                       1
-rw-r--r--  arch/arm64/include/asm/mmu_context.h                  6
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h               21
-rw-r--r--  arch/arm64/include/asm/ptrace.h                       2
-rw-r--r--  arch/arm64/include/asm/sysreg.h                      46
-rw-r--r--  arch/arm64/include/asm/vdso.h                         2
-rw-r--r--  arch/arm64/include/asm/vdso/compat_gettimeofday.h    12
-rw-r--r--  arch/arm64/include/asm/vdso/gettimeofday.h            8
12 files changed, 86 insertions(+), 31 deletions(-)
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index a45366c3909b..bd68e1b7f29f 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -47,20 +47,7 @@
pgprot_t __acpi_get_mem_attribute(phys_addr_t addr);
/* ACPI table mapping after acpi_permanent_mmap is set */
-static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
- acpi_size size)
-{
- /* For normal memory we already have a cacheable mapping. */
- if (memblock_is_map_memory(phys))
- return (void __iomem *)__phys_to_virt(phys);
-
- /*
- * We should still honor the memory's attribute here because
- * crash dump kernel possibly excludes some ACPI (reclaim)
- * regions from memblock list.
- */
- return __ioremap(phys, size, __acpi_get_mem_attribute(phys));
-}
+void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
#define acpi_os_ioremap acpi_os_ioremap
typedef u64 phys_cpuid_t;
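
The inline body is not lost: the declaration above implies the definition moves out of line. A sketch of the relocated definition, carrying the removed body over unchanged (the destination file is outside this diffstat; arch/arm64/kernel/acpi.c is an assumption, since this diff does not show it):

/*
 * Sketch only: the removed inline body, moved out of line. The
 * actual destination file is not part of this diff.
 */
#include <linux/io.h>
#include <linux/memblock.h>
#include <asm/acpi.h>

void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
	/* For normal memory we already have a cacheable mapping. */
	if (memblock_is_map_memory(phys))
		return (void __iomem *)__phys_to_virt(phys);

	/*
	 * We should still honor the memory's attribute here because
	 * crash dump kernel possibly excludes some ACPI (reclaim)
	 * regions from memblock list.
	 */
	return __ioremap(phys, size, __acpi_get_mem_attribute(phys));
}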
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 5d1f4ae42799..e375529ca9fc 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -774,6 +774,7 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
}
u32 get_kvm_ipa_limit(void);
+void dump_cpu_features(void);
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 94ba0c5bced2..5abf91e3494c 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -49,6 +49,8 @@ extern void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, unsigned long sz);
#define set_huge_swap_pte_at set_huge_swap_pte_at
+void __init arm64_hugetlb_cma_reserve(void);
+
#include <asm-generic/hugetlb.h>
#endif /* __ASM_HUGETLB_H */
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index d683bcbf1e7c..22f73fe09030 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -95,6 +95,7 @@
#define KERNEL_HWCAP_DGH __khwcap2_feature(DGH)
#define KERNEL_HWCAP_RNG __khwcap2_feature(RNG)
#define KERNEL_HWCAP_BTI __khwcap2_feature(BTI)
+/* reserved for KERNEL_HWCAP_MTE __khwcap2_feature(MTE) */
/*
* This yields a mask that user programs can use to figure out what
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index a1871bb32bb1..2a88cb734d06 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -322,6 +322,7 @@ static inline void *phys_to_virt(phys_addr_t x)
__is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr)); \
})
+void dump_mem_limit(void);
#endif /* !ASSEMBLY */
/*
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index b0bd9b55594c..f2d7537d6f83 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -175,7 +175,7 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp)
* take CPU migration into account.
*/
#define destroy_context(mm) do { } while(0)
-void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
+void check_and_switch_context(struct mm_struct *mm);
#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.id, 0); 0; })
@@ -214,8 +214,6 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
static inline void __switch_mm(struct mm_struct *next)
{
- unsigned int cpu = smp_processor_id();
-
/*
* init_mm.pgd does not contain any user mappings and it is always
* active for kernel addresses in TTBR1. Just set the reserved TTBR0.
@@ -225,7 +223,7 @@ static inline void __switch_mm(struct mm_struct *next)
return;
}
- check_and_switch_context(next, cpu);
+ check_and_switch_context(next);
}
static inline void
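
The dropped cpu argument was always smp_processor_id() at the call site, so the callee can derive it itself. A hedged sketch of the updated callee (its real body lives in arch/arm64/mm/context.c, which this diff does not include):

/*
 * Sketch (assumption): the CPU number is now read inside the callee
 * rather than passed in by __switch_mm().
 */
void check_and_switch_context(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * ASID rollover check and TTBR programming proceed as before,
	 * now using the locally derived cpu.
	 */
}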
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 9c91a8f93a0e..f5d7f23f14d7 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -29,7 +29,7 @@
* Size mapped by an entry at level n ( 0 <= n <= 3)
* We map (PAGE_SHIFT - 3) at all translation levels and PAGE_SHIFT bits
* in the final page. The maximum number of translation levels supported by
- * the architecture is 4. Hence, starting at at level n, we have further
+ * the architecture is 4. Hence, starting at level n, we have further
* ((4 - n) - 1) levels of translation excluding the offset within the page.
* So, the total number of bits mapped by an entry at level n is :
*
@@ -82,23 +82,23 @@
* Contiguous page definitions.
*/
#ifdef CONFIG_ARM64_64K_PAGES
-#define CONT_PTE_SHIFT 5
-#define CONT_PMD_SHIFT 5
+#define CONT_PTE_SHIFT (5 + PAGE_SHIFT)
+#define CONT_PMD_SHIFT (5 + PMD_SHIFT)
#elif defined(CONFIG_ARM64_16K_PAGES)
-#define CONT_PTE_SHIFT 7
-#define CONT_PMD_SHIFT 5
+#define CONT_PTE_SHIFT (7 + PAGE_SHIFT)
+#define CONT_PMD_SHIFT (5 + PMD_SHIFT)
#else
-#define CONT_PTE_SHIFT 4
-#define CONT_PMD_SHIFT 4
+#define CONT_PTE_SHIFT (4 + PAGE_SHIFT)
+#define CONT_PMD_SHIFT (4 + PMD_SHIFT)
#endif
-#define CONT_PTES (1 << CONT_PTE_SHIFT)
+#define CONT_PTES (1 << (CONT_PTE_SHIFT - PAGE_SHIFT))
#define CONT_PTE_SIZE (CONT_PTES * PAGE_SIZE)
#define CONT_PTE_MASK (~(CONT_PTE_SIZE - 1))
-#define CONT_PMDS (1 << CONT_PMD_SHIFT)
+#define CONT_PMDS (1 << (CONT_PMD_SHIFT - PMD_SHIFT))
#define CONT_PMD_SIZE (CONT_PMDS * PMD_SIZE)
#define CONT_PMD_MASK (~(CONT_PMD_SIZE - 1))
-/* the the numerical offset of the PTE within a range of CONT_PTES */
+/* the numerical offset of the PTE within a range of CONT_PTES */
#define CONT_RANGE_OFFSET(addr) (((addr)>>PAGE_SHIFT)&(CONT_PTES-1))
/*
@@ -216,6 +216,7 @@
#define TCR_TxSZ(x) (TCR_T0SZ(x) | TCR_T1SZ(x))
#define TCR_TxSZ_WIDTH 6
#define TCR_T0SZ_MASK (((UL(1) << TCR_TxSZ_WIDTH) - 1) << TCR_T0SZ_OFFSET)
+#define TCR_T1SZ_MASK (((UL(1) << TCR_TxSZ_WIDTH) - 1) << TCR_T1SZ_OFFSET)
#define TCR_EPD0_SHIFT 7
#define TCR_EPD0_MASK (UL(1) << TCR_EPD0_SHIFT)
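
The CONT_* rewrite changes the shifts from "extra bits on top of the base level" to absolute shifts, while CONT_PTES/CONT_PMDS subtract the base shift back out, so the entry counts are unchanged. Concrete numbers, as a small standalone program (not kernel code; the PAGE_SHIFT/PMD_SHIFT values are the standard arm64 ones for each granule size):

#include <stdio.h>

/*
 * Standalone worked example, not kernel code. PAGE_SHIFT and
 * PMD_SHIFT take the standard arm64 values per granule size.
 */
int main(void)
{
	const struct {
		const char *cfg;
		int page_shift, pmd_shift;
		int cont_pte_shift, cont_pmd_shift; /* new definitions */
	} t[] = {
		{ "4K",  12, 21, 4 + 12, 4 + 21 },
		{ "16K", 14, 25, 7 + 14, 5 + 25 },
		{ "64K", 16, 29, 5 + 16, 5 + 29 },
	};

	for (int i = 0; i < 3; i++) {
		long long ptes = 1LL << (t[i].cont_pte_shift - t[i].page_shift);
		long long pmds = 1LL << (t[i].cont_pmd_shift - t[i].pmd_shift);

		printf("%3s: %3lld contiguous PTEs = %4lld KiB, "
		       "%2lld contiguous PMDs = %5lld MiB\n",
		       t[i].cfg,
		       ptes, (ptes << t[i].page_shift) >> 10,
		       pmds, (pmds << t[i].pmd_shift) >> 20);
	}
	return 0;
}

This prints, for example, "4K:  16 contiguous PTEs =   64 KiB, 16 contiguous PMDs =    32 MiB", matching the pre-rewrite counts.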
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 953b6a1ce549..966ed30ed5f7 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -27,7 +27,7 @@
*
* Some code sections either automatically switch back to PSR.I or explicitly
* require to not use priority masking. If bit GIC_PRIO_PSR_I_SET is included
- * in the the priority mask, it indicates that PSR.I should be set and
+ * in the priority mask, it indicates that PSR.I should be set and
* interrupt disabling temporarily does not rely on IRQ priorities.
*/
#define GIC_PRIO_IRQON 0xe0
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 463175f80341..0372262489ee 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -421,9 +421,9 @@
*/
#define SYS_AMEVCNTR0_EL0(n) SYS_AM_EL0(4 + ((n) >> 3), (n) & 7)
-#define SYS_AMEVTYPE0_EL0(n) SYS_AM_EL0(6 + ((n) >> 3), (n) & 7)
+#define SYS_AMEVTYPER0_EL0(n) SYS_AM_EL0(6 + ((n) >> 3), (n) & 7)
#define SYS_AMEVCNTR1_EL0(n) SYS_AM_EL0(12 + ((n) >> 3), (n) & 7)
-#define SYS_AMEVTYPE1_EL0(n) SYS_AM_EL0(14 + ((n) >> 3), (n) & 7)
+#define SYS_AMEVTYPER1_EL0(n) SYS_AM_EL0(14 + ((n) >> 3), (n) & 7)
/* AMU v1: Fixed (architecturally defined) activity monitors */
#define SYS_AMEVCNTR0_CORE_EL0 SYS_AMEVCNTR0_EL0(0)
@@ -706,6 +706,9 @@
#define ID_AA64ZFR0_SVEVER_SVE2 0x1
/* id_aa64mmfr0 */
+#define ID_AA64MMFR0_ECV_SHIFT 60
+#define ID_AA64MMFR0_FGT_SHIFT 56
+#define ID_AA64MMFR0_EXS_SHIFT 44
#define ID_AA64MMFR0_TGRAN4_2_SHIFT 40
#define ID_AA64MMFR0_TGRAN64_2_SHIFT 36
#define ID_AA64MMFR0_TGRAN16_2_SHIFT 32
@@ -734,6 +737,10 @@
#endif
/* id_aa64mmfr1 */
+#define ID_AA64MMFR1_ETS_SHIFT 36
+#define ID_AA64MMFR1_TWED_SHIFT 32
+#define ID_AA64MMFR1_XNX_SHIFT 28
+#define ID_AA64MMFR1_SPECSEI_SHIFT 24
#define ID_AA64MMFR1_PAN_SHIFT 20
#define ID_AA64MMFR1_LOR_SHIFT 16
#define ID_AA64MMFR1_HPD_SHIFT 12
@@ -746,8 +753,15 @@
/* id_aa64mmfr2 */
#define ID_AA64MMFR2_E0PD_SHIFT 60
+#define ID_AA64MMFR2_EVT_SHIFT 56
+#define ID_AA64MMFR2_BBM_SHIFT 52
+#define ID_AA64MMFR2_TTL_SHIFT 48
#define ID_AA64MMFR2_FWB_SHIFT 40
+#define ID_AA64MMFR2_IDS_SHIFT 36
#define ID_AA64MMFR2_AT_SHIFT 32
+#define ID_AA64MMFR2_ST_SHIFT 28
+#define ID_AA64MMFR2_NV_SHIFT 24
+#define ID_AA64MMFR2_CCIDX_SHIFT 20
#define ID_AA64MMFR2_LVA_SHIFT 16
#define ID_AA64MMFR2_IESB_SHIFT 12
#define ID_AA64MMFR2_LSM_SHIFT 8
@@ -755,6 +769,7 @@
#define ID_AA64MMFR2_CNP_SHIFT 0
/* id_aa64dfr0 */
+#define ID_AA64DFR0_DOUBLELOCK_SHIFT 36
#define ID_AA64DFR0_PMSVER_SHIFT 32
#define ID_AA64DFR0_CTX_CMPS_SHIFT 28
#define ID_AA64DFR0_WRPS_SHIFT 20
@@ -807,18 +822,40 @@
#define ID_ISAR6_DP_SHIFT 4
#define ID_ISAR6_JSCVT_SHIFT 0
+#define ID_MMFR0_INNERSHR_SHIFT 28
+#define ID_MMFR0_FCSE_SHIFT 24
+#define ID_MMFR0_AUXREG_SHIFT 20
+#define ID_MMFR0_TCM_SHIFT 16
+#define ID_MMFR0_SHARELVL_SHIFT 12
+#define ID_MMFR0_OUTERSHR_SHIFT 8
+#define ID_MMFR0_PMSA_SHIFT 4
+#define ID_MMFR0_VMSA_SHIFT 0
+
#define ID_MMFR4_EVT_SHIFT 28
#define ID_MMFR4_CCIDX_SHIFT 24
#define ID_MMFR4_LSM_SHIFT 20
#define ID_MMFR4_HPDS_SHIFT 16
#define ID_MMFR4_CNP_SHIFT 12
#define ID_MMFR4_XNX_SHIFT 8
+#define ID_MMFR4_AC2_SHIFT 4
#define ID_MMFR4_SPECSEI_SHIFT 0
#define ID_MMFR5_ETS_SHIFT 0
#define ID_PFR0_DIT_SHIFT 24
#define ID_PFR0_CSV2_SHIFT 16
+#define ID_PFR0_STATE3_SHIFT 12
+#define ID_PFR0_STATE2_SHIFT 8
+#define ID_PFR0_STATE1_SHIFT 4
+#define ID_PFR0_STATE0_SHIFT 0
+
+#define ID_DFR0_PERFMON_SHIFT 24
+#define ID_DFR0_MPROFDBG_SHIFT 20
+#define ID_DFR0_MMAPTRC_SHIFT 16
+#define ID_DFR0_COPTRC_SHIFT 12
+#define ID_DFR0_MMAPDBG_SHIFT 8
+#define ID_DFR0_COPSDBG_SHIFT 4
+#define ID_DFR0_COPDBG_SHIFT 0
#define ID_PFR2_SSBS_SHIFT 4
#define ID_PFR2_CSV3_SHIFT 0
@@ -861,6 +898,11 @@
#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN64_SUPPORTED
#endif
+#define MVFR2_FPMISC_SHIFT 4
+#define MVFR2_SIMDMISC_SHIFT 0
+
+#define DCZID_DZP_SHIFT 4
+#define DCZID_BS_SHIFT 0
/*
* The ZCR_ELx_LEN_* definitions intentionally include bits [8:4] which
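
Most of the *_SHIFT macros added above name the bit offset of a 4-bit ID-register field. A usage sketch with the existing cpufeature helpers (illustrative only; this particular check is not part of the patch):

#include <asm/cpufeature.h>
#include <asm/sysreg.h>

/* Illustrative sketch: probe one of the newly named fields. */
static bool cpu_supports_ccidx(void)
{
	u64 mmfr2 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);

	return cpuid_feature_extract_unsigned_field(mmfr2,
					ID_AA64MMFR2_CCIDX_SHIFT) > 0;
}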
diff --git a/arch/arm64/include/asm/vdso.h b/arch/arm64/include/asm/vdso.h
index 07468428fd29..f99dcb94b438 100644
--- a/arch/arm64/include/asm/vdso.h
+++ b/arch/arm64/include/asm/vdso.h
@@ -12,6 +12,8 @@
*/
#define VDSO_LBASE 0x0
+#define __VVAR_PAGES 2
+
#ifndef __ASSEMBLY__
#include <generated/vdso-offsets.h>
diff --git a/arch/arm64/include/asm/vdso/compat_gettimeofday.h b/arch/arm64/include/asm/vdso/compat_gettimeofday.h
index b6907ae78e53..b7c549d46d18 100644
--- a/arch/arm64/include/asm/vdso/compat_gettimeofday.h
+++ b/arch/arm64/include/asm/vdso/compat_gettimeofday.h
@@ -152,6 +152,18 @@ static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
return ret;
}
+#ifdef CONFIG_TIME_NS
+static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void)
+{
+ const struct vdso_data *ret;
+
+ /* See __arch_get_vdso_data(). */
+ asm volatile("mov %0, %1" : "=r"(ret) : "r"(_timens_data));
+
+ return ret;
+}
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
diff --git a/arch/arm64/include/asm/vdso/gettimeofday.h b/arch/arm64/include/asm/vdso/gettimeofday.h
index afba6ba332f8..cf39eae5eaaf 100644
--- a/arch/arm64/include/asm/vdso/gettimeofday.h
+++ b/arch/arm64/include/asm/vdso/gettimeofday.h
@@ -96,6 +96,14 @@ const struct vdso_data *__arch_get_vdso_data(void)
return _vdso_data;
}
+#ifdef CONFIG_TIME_NS
+static __always_inline
+const struct vdso_data *__arch_get_timens_vdso_data(void)
+{
+ return _timens_data;
+}
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
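
Both variants of __arch_get_timens_vdso_data() feed the generic vDSO library. A paraphrased sketch of how lib/vdso consumes the hook (the helper name here is invented for illustration, and the logic is a summary of the generic code, not part of this patch):

/*
 * Paraphrased sketch of the generic lib/vdso logic: in a time
 * namespace the task's vvar mapping makes clock_mode read as
 * VDSO_CLOCKMODE_TIMENS, steering lookups to the second vvar page
 * (hence __VVAR_PAGES == 2 above). timens_aware() is a made-up name.
 */
static __always_inline
const struct vdso_data *timens_aware(const struct vdso_data *vd)
{
	if (IS_ENABLED(CONFIG_TIME_NS) &&
	    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
		return __arch_get_timens_vdso_data();

	return vd;
}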