author    Linus Torvalds <torvalds@linux-foundation.org>  2021-03-25 11:07:40 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2021-03-25 11:07:40 -0700
commit    43f0b562590e7ac16b74b298ab80b5fb290d02af (patch)
tree      9d5882cd20a7a32e92d02f05e1f53f8b7506d578
parent    7aae5432ac61ffae3e5afadbd2713bb73177f11d (diff)
parent    20109a859a9b514eb10c22b8a14b5704ffe93897 (diff)
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Will Deacon:
 "Minor fixes all over, ranging from typos to tests to errata
  workarounds:

   - Fix possible memory hotplug failure with KASLR

   - Fix FFR value in SVE kselftest

   - Fix backtraces reported in /proc/$pid/stack

   - Disable broken CnP implementation on NVIDIA Carmel

   - Typo fixes and ACPI documentation clarification

   - Fix some W=1 warnings"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: kernel: disable CNP on Carmel
  arm64/process.c: fix Wmissing-prototypes build warnings
  kselftest/arm64: sve: Do not use non-canonical FFR register value
  arm64: mm: correct the inside linear map range during hotplug check
  arm64: kdump: update ppos when reading elfcorehdr
  arm64: cpuinfo: Fix a typo
  Documentation: arm64/acpi : clarify arm64 support of IBFT
  arm64: stacktrace: don't trace arch_stack_walk()
  arm64: csum: cast to the proper type
 Documentation/arm64/acpi_object_usage.rst   | 10
 Documentation/arm64/silicon-errata.rst      |  3
 arch/arm64/Kconfig                          | 10
 arch/arm64/include/asm/checksum.h           |  2
 arch/arm64/include/asm/cpucaps.h            |  3
 arch/arm64/include/asm/processor.h          |  2
 arch/arm64/include/asm/thread_info.h        |  2
 arch/arm64/kernel/cpu_errata.c              |  8
 arch/arm64/kernel/cpufeature.c              |  5
 arch/arm64/kernel/cpuinfo.c                 |  2
 arch/arm64/kernel/crash_dump.c              |  2
 arch/arm64/kernel/process.c                 |  2
 arch/arm64/kernel/stacktrace.c              |  9
 arch/arm64/mm/mmu.c                         | 21
 tools/testing/selftests/arm64/fp/sve-test.S | 22
 15 files changed, 83 insertions(+), 20 deletions(-)
diff --git a/Documentation/arm64/acpi_object_usage.rst b/Documentation/arm64/acpi_object_usage.rst
index 377e9d224db0..0609da73970b 100644
--- a/Documentation/arm64/acpi_object_usage.rst
+++ b/Documentation/arm64/acpi_object_usage.rst
@@ -17,12 +17,12 @@ For ACPI on arm64, tables also fall into the following categories:
- Recommended: BERT, EINJ, ERST, HEST, PCCT, SSDT
- - Optional: BGRT, CPEP, CSRT, DBG2, DRTM, ECDT, FACS, FPDT, IORT,
- MCHI, MPST, MSCT, NFIT, PMTT, RASF, SBST, SLIT, SPMI, SRAT, STAO,
- TCPA, TPM2, UEFI, XENV
+ - Optional: BGRT, CPEP, CSRT, DBG2, DRTM, ECDT, FACS, FPDT, IBFT,
+ IORT, MCHI, MPST, MSCT, NFIT, PMTT, RASF, SBST, SLIT, SPMI, SRAT,
+ STAO, TCPA, TPM2, UEFI, XENV
- - Not supported: BOOT, DBGP, DMAR, ETDT, HPET, IBFT, IVRS, LPIT,
- MSDM, OEMx, PSDT, RSDT, SLIC, WAET, WDAT, WDRT, WPBT
+ - Not supported: BOOT, DBGP, DMAR, ETDT, HPET, IVRS, LPIT, MSDM, OEMx,
+ PSDT, RSDT, SLIC, WAET, WDAT, WDRT, WPBT
====== ========================================================================
Table Usage for ARMv8 Linux
diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
index 719510247292..d410a47ffa57 100644
--- a/Documentation/arm64/silicon-errata.rst
+++ b/Documentation/arm64/silicon-errata.rst
@@ -130,6 +130,9 @@ stable kernels.
 | Marvell        | ARM-MMU-500     | #582743         | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
+| NVIDIA         | Carmel Core     | N/A             | NVIDIA_CARMEL_CNP_ERRATUM   |
++----------------+-----------------+-----------------+-----------------------------+
++----------------+-----------------+-----------------+-----------------------------+
 | Freescale/NXP  | LS2080A/LS1043A | A-008585        | FSL_ERRATUM_A008585         |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 5656e7aacd69..e4e1b6550115 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -810,6 +810,16 @@ config QCOM_FALKOR_ERRATUM_E1041
If unsure, say Y.
+config NVIDIA_CARMEL_CNP_ERRATUM
+ bool "NVIDIA Carmel CNP: CNP on Carmel semantically different than ARM cores"
+ default y
+ help
+ If CNP is enabled on Carmel cores, non-shareable TLBIs on a core will not
+ invalidate shared TLB entries installed by a different core, as they would
+ on standard ARM cores.
+
+ If unsure, say Y.
+
config SOCIONEXT_SYNQUACER_PREITS
bool "Socionext Synquacer: Workaround for GICv3 pre-ITS"
default y
diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h
index 93a161b3bf3f..dc52b733675d 100644
--- a/arch/arm64/include/asm/checksum.h
+++ b/arch/arm64/include/asm/checksum.h
@@ -37,7 +37,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
} while (--n > 0);
sum += ((sum >> 32) | (sum << 32));
- return csum_fold((__force u32)(sum >> 32));
+ return csum_fold((__force __wsum)(sum >> 32));
}
#define ip_fast_csum ip_fast_csum
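For context on the one-line csum fix: csum_fold() takes a __wsum, one of the kernel's sparse "__bitwise" types, so casting the folded sum to plain u32 triggers a type-mismatch warning under `make C=1`. A minimal standalone sketch of how a bitwise type behaves (the my_* names are illustrative stand-ins, not kernel API):

/*
 * Under sparse (__CHECKER__ defined), converting between my_wsum and
 * plain integers without __force produces a warning -- which is
 * exactly what the (__force __wsum) cast above addresses.
 */
#ifdef __CHECKER__
#define __bitwise	__attribute__((bitwise))
#define __force	__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise my_wsum;	/* stand-in for __wsum */

static inline unsigned short my_csum_fold(my_wsum csum)
{
	unsigned int sum = (__force unsigned int)csum;

	sum = (sum & 0xffff) + (sum >> 16);	/* fold 32 -> 17 bits */
	sum = (sum & 0xffff) + (sum >> 16);	/* fold the carry back in */
	return (unsigned short)~sum;
}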
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index b77d997b173b..c40f2490cd7b 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -66,7 +66,8 @@
#define ARM64_WORKAROUND_1508412 58
#define ARM64_HAS_LDAPR 59
#define ARM64_KVM_PROTECTED_MODE 60
+#define ARM64_WORKAROUND_NVIDIA_CARMEL_CNP 61
-#define ARM64_NCAPS 61
+#define ARM64_NCAPS 62
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index ca2cd75d3286..efc10e9041a0 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -251,6 +251,8 @@ unsigned long get_wchan(struct task_struct *p);
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
struct task_struct *next);
+asmlinkage void arm64_preempt_schedule_irq(void);
+
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 9f4e3b266f21..6623c99f0984 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -55,6 +55,8 @@ void arch_setup_new_exec(void);
#define arch_setup_new_exec arch_setup_new_exec
void arch_release_task_struct(struct task_struct *tsk);
+int arch_dup_task_struct(struct task_struct *dst,
+ struct task_struct *src);
#endif
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 506a1cd37973..e2c20c036442 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -526,6 +526,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
1, 0),
},
#endif
+#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
+ {
+ /* NVIDIA Carmel */
+ .desc = "NVIDIA Carmel CNP erratum",
+ .capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
+ ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
+ },
+#endif
{
}
};
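The entry above is keyed on the CPU identification register: ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL) matches every variant and revision of the Carmel part. As a simplified sketch (not the kernel's actual helpers), an "all versions" match reduces to comparing the implementer and part-number fields of MIDR_EL1 while ignoring the rest:

/*
 * Field offsets follow the architected MIDR_EL1 layout; the helper
 * itself is illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>

#define MIDR_IMPLEMENTER(midr)	(((midr) >> 24) & 0xff)	/* bits [31:24] */
#define MIDR_PARTNUM(midr)	(((midr) >> 4) & 0xfff)	/* bits [15:4] */

static bool midr_matches_all_versions(uint32_t midr, uint8_t implementer,
				      uint16_t partnum)
{
	/* Variant [23:20] and revision [3:0] are deliberately ignored. */
	return MIDR_IMPLEMENTER(midr) == implementer &&
	       MIDR_PARTNUM(midr) == partnum;
}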
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 066030717a4c..2a5d9854d664 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1321,7 +1321,10 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
* may share TLB entries with a CPU stuck in the crashed
* kernel.
*/
- if (is_kdump_kernel())
+ if (is_kdump_kernel())
+ return false;
+
+ if (cpus_have_const_cap(ARM64_WORKAROUND_NVIDIA_CARMEL_CNP))
return false;
return has_cpuid_feature(entry, scope);
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 77605aec25fe..51fcf99d5351 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -353,7 +353,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
* with the CLIDR_EL1 fields to avoid triggering false warnings
* when there is a mismatch across the CPUs. Keep track of the
* effective value of the CTR_EL0 in our internal records for
- * acurate sanity check and feature enablement.
+ * accurate sanity check and feature enablement.
*/
info->reg_ctr = read_cpuid_effective_cachetype();
info->reg_dczid = read_cpuid(DCZID_EL0);
diff --git a/arch/arm64/kernel/crash_dump.c b/arch/arm64/kernel/crash_dump.c
index e6e284265f19..58303a9ec32c 100644
--- a/arch/arm64/kernel/crash_dump.c
+++ b/arch/arm64/kernel/crash_dump.c
@@ -64,5 +64,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
memcpy(buf, phys_to_virt((phys_addr_t)*ppos), count);
+ *ppos += count;
+
return count;
}
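The fix works because elfcorehdr_read() is invoked repeatedly with a file-position cursor that the callee is expected to advance; before this change, *ppos never moved and every call returned the same bytes. A sketch of such a chunked read loop (hypothetical caller, loosely modelled on how the vmcore code consumes the header):

	char chunk[64];
	u64 pos = elfcorehdr_addr;
	size_t remaining = elfcorehdr_size;

	while (remaining > 0) {
		size_t n = remaining < sizeof(chunk) ? remaining : sizeof(chunk);

		/*
		 * The callee advances pos, so each iteration picks up
		 * where the previous one left off. Without the
		 * "*ppos += count" above, this loop would re-read the
		 * first n bytes forever.
		 */
		elfcorehdr_read(chunk, n, &pos);
		remaining -= n;
	}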
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 325c83b1a24d..6e60aa3b5ea9 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -57,6 +57,8 @@
#include <asm/processor.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
+#include <asm/switch_to.h>
+#include <asm/system_misc.h>
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index ad20981dfda4..d55bdfb7789c 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -194,8 +194,9 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
#ifdef CONFIG_STACKTRACE
-void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
- struct task_struct *task, struct pt_regs *regs)
+noinline void arch_stack_walk(stack_trace_consume_fn consume_entry,
+ void *cookie, struct task_struct *task,
+ struct pt_regs *regs)
{
struct stackframe frame;
@@ -203,8 +204,8 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
start_backtrace(&frame, regs->regs[29], regs->pc);
else if (task == current)
start_backtrace(&frame,
- (unsigned long)__builtin_frame_address(0),
- (unsigned long)arch_stack_walk);
+ (unsigned long)__builtin_frame_address(1),
+ (unsigned long)__builtin_return_address(0));
else
start_backtrace(&frame, thread_saved_fp(task),
thread_saved_pc(task));
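The new start point makes the unwinder begin at arch_stack_walk()'s caller rather than at arch_stack_walk() itself, so the walker no longer shows up in its own backtraces (e.g. in /proc/$pid/stack); the added noinline keeps that frame real. A small userspace sketch of the two builtins (illustrative only; a nonzero __builtin_frame_address() argument is dependable only when frame pointers are preserved, e.g. with -fno-omit-frame-pointer):

#include <stdio.h>

__attribute__((noinline)) static void walker(void)
{
	void *own_fp = __builtin_frame_address(0);	/* walker()'s frame */
	void *caller_fp = __builtin_frame_address(1);	/* caller's frame */
	void *caller_pc = __builtin_return_address(0);	/* PC inside caller */

	/*
	 * Starting an unwind at (caller_fp, caller_pc), as the patched
	 * arch_stack_walk() does, skips walker() itself.
	 */
	printf("fp=%p caller_fp=%p caller_pc=%p\n",
	       own_fp, caller_fp, caller_pc);
}

int main(void)
{
	walker();
	return 0;
}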
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 7484ea4f6ba0..5d9550fdb9cf 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1448,6 +1448,22 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
struct range arch_get_mappable_range(void)
{
struct range mhp_range;
+ u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual));
+ u64 end_linear_pa = __pa(PAGE_END - 1);
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+ /*
+ * Check for a wrap: because of the randomized linear mapping,
+ * the start physical address can actually be bigger than the
+ * end physical address. In that case, set start to zero, because
+ * the [0, end_linear_pa] range must still be able to cover all
+ * addressable physical addresses.
+ */
+ if (start_linear_pa > end_linear_pa)
+ start_linear_pa = 0;
+ }
+
+ WARN_ON(start_linear_pa > end_linear_pa);
/*
* Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)]
@@ -1455,8 +1471,9 @@ struct range arch_get_mappable_range(void)
* range which can be mapped inside this linear mapping range, must
* also be derived from its end points.
*/
- mhp_range.start = __pa(_PAGE_OFFSET(vabits_actual));
- mhp_range.end = __pa(PAGE_END - 1);
+ mhp_range.start = start_linear_pa;
+ mhp_range.end = end_linear_pa;
+
return mhp_range;
}
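A worked example of the wrap the comment describes, with made-up numbers (not taken from any real platform):

	/*
	 * Illustrative values only. With CONFIG_RANDOMIZE_BASE, the
	 * random linear-map offset can make the physical translation
	 * of the region's start land above that of its end:
	 */
	u64 start_linear_pa = 0x0000009f40000000;	/* __pa(_PAGE_OFFSET(vabits_actual)) */
	u64 end_linear_pa   = 0x0000000100000000;	/* __pa(PAGE_END - 1) */

	if (start_linear_pa > end_linear_pa)	/* wrapped */
		start_linear_pa = 0;	/* [0, end_linear_pa] still covers all RAM */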
diff --git a/tools/testing/selftests/arm64/fp/sve-test.S b/tools/testing/selftests/arm64/fp/sve-test.S
index 9210691aa998..e3e08d9c7020 100644
--- a/tools/testing/selftests/arm64/fp/sve-test.S
+++ b/tools/testing/selftests/arm64/fp/sve-test.S
@@ -284,16 +284,28 @@ endfunction
// Set up test pattern in the FFR
// x0: pid
// x2: generation
+//
+// We need to generate a canonical FFR value, which consists of a number of
+// low "1" bits, followed by a number of zeros. This gives us 17 unique values
+// per 16 bits of FFR, so we create a 4-bit signature out of the PID and
+// generation, and use that as the initial number of ones in the pattern.
+// We fill the upper lanes of FFR with zeros.
// Beware: corrupts P0.
function setup_ffr
mov x4, x30
- bl pattern
+ and w0, w0, #0x3
+ bfi w0, w2, #2, #2
+ mov w1, #1
+ lsl w1, w1, w0
+ sub w1, w1, #1
+
ldr x0, =ffrref
- ldr x1, =scratch
- rdvl x2, #1
- lsr x2, x2, #3
- bl memcpy
+ strh w1, [x0], 2
+ rdvl x1, #1
+ lsr x1, x1, #3
+ sub x1, x1, #2
+ bl memclr
mov x0, #0
ldr x1, =ffrref
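For reference, the pattern built by the assembly above, mirrored in C (ffr_pattern is an illustrative name, not part of the test):

#include <stdint.h>

/*
 * A canonical FFR value is a run of low 1 bits followed by zeros, so
 * a 4-bit signature built from the PID and generation selects how
 * many ones the first 16-bit lane gets; memclr zeroes the rest.
 */
static uint16_t ffr_pattern(uint32_t pid, uint32_t generation)
{
	/* and w0, w0, #0x3 ; bfi w0, w2, #2, #2 */
	uint32_t sig = (pid & 0x3) | ((generation & 0x3) << 2);

	/* mov w1, #1 ; lsl w1, w1, w0 ; sub w1, w1, #1 */
	return (uint16_t)((1u << sig) - 1);	/* sig low ones: 0x0000..0x7fff */
}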