author     Mark Brown <broonie@kernel.org>    2021-10-19 18:22:09 +0100
committer  Will Deacon <will@kernel.org>      2021-10-21 10:18:17 +0100
commit     9f5848665788a0f07bc175cb2cdd06d367b7556e (patch)
tree       1dac56919c1e02594e369ce832f8c4ce1447a136 /arch/arm64/kernel/entry-fpsimd.S
parent     12cc2352bfb34dbdf97e51b006c32a8bd0d13bcb (diff)
download   linux-9f5848665788a0f07bc175cb2cdd06d367b7556e.tar.bz2
arm64/sve: Make access to FFR optional
SME introduces streaming SVE mode in which FFR is not present and the
instructions for accessing it UNDEF. In preparation for handling this,
update the low level SVE state access functions to take a flag specifying
whether FFR should be handled. When saving the register state we store a
zero for FFR to guard against uninitialized data being read. No behaviour
change should be introduced by this patch.

Signed-off-by: Mark Brown <broonie@kernel.org>
Link: https://lore.kernel.org/r/20211019172247.3045838-5-broonie@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
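
For context, here is a rough sketch of what the matching C-side declarations
could look like after this change, inferred purely from the register comments
in the diff below (x0..x3 map to the first integer arguments under the AAPCS64
calling convention). The actual prototypes in arch/arm64/include/asm/fpsimd.h
are not part of this file's diff, so the types and parameter names here are
assumptions, not the kernel's real declarations:

  #include <linux/types.h>

  /*
   * Hypothetical declarations inferred from the register comments in
   * entry-fpsimd.S: the new FFR flag is an ordinary argument and the
   * assembly treats any non-zero value as "handle FFR".
   */
  extern void sve_save_state(void *state, u32 *pfpsr, int save_ffr);
  extern void sve_load_state(void const *state, u32 const *pfpsr,
                             int restore_ffr, unsigned long vq_minus_1);
  extern void sve_flush_live(bool flush_ffr, unsigned long vq_minus_1);

Passing zero (or false) for the new flag gives the FFR-less behaviour that
streaming SVE mode will require.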
Diffstat (limited to 'arch/arm64/kernel/entry-fpsimd.S')
-rw-r--r--  arch/arm64/kernel/entry-fpsimd.S  19
1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index afbf7dc47e1d..f588c214d44b 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -38,9 +38,10 @@ SYM_FUNC_END(fpsimd_load_state)
*
* x0 - pointer to buffer for state
* x1 - pointer to storage for FPSR
+ * x2 - Save FFR if non-zero
*/
SYM_FUNC_START(sve_save_state)
- sve_save 0, x1, 2
+ sve_save 0, x1, x2, 3
ret
SYM_FUNC_END(sve_save_state)
@@ -49,10 +50,11 @@ SYM_FUNC_END(sve_save_state)
*
* x0 - pointer to buffer for state
* x1 - pointer to storage for FPSR
- * x2 - VQ-1
+ * x2 - Restore FFR if non-zero
+ * x3 - VQ-1
*/
SYM_FUNC_START(sve_load_state)
- sve_load 0, x1, x2, 3, x4
+ sve_load 0, x1, x2, x3, 4, x5
ret
SYM_FUNC_END(sve_load_state)
@@ -72,13 +74,16 @@ SYM_FUNC_END(sve_set_vq)
* VQ must already be configured by caller, any further updates of VQ
* will need to ensure that the register state remains valid.
*
- * x0 = VQ - 1
+ * x0 = include FFR?
+ * x1 = VQ - 1
*/
SYM_FUNC_START(sve_flush_live)
- cbz x0, 1f // A VQ-1 of 0 is 128 bits so no extra Z state
+ cbz x1, 1f // A VQ-1 of 0 is 128 bits so no extra Z state
sve_flush_z
-1: sve_flush_p_ffr
- ret
+1: sve_flush_p
+ tbz x0, #0, 2f
+ sve_flush_ffr
+2: ret
SYM_FUNC_END(sve_flush_live)
#endif /* CONFIG_ARM64_SVE */
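
As a purely illustrative usage note, building on the hypothetical declarations
sketched above (names and types are this note's assumptions, not kernel code):
a caller simply threads its knowledge of whether FFR exists in the current
mode through to the low-level helpers.

  /*
   * Illustrative only: round-trip SVE state through the low-level helpers,
   * forwarding the caller's knowledge of whether FFR is present.  With
   * have_ffr == 0 the assembly skips the real FFR accesses and, per the
   * commit message, stores a zero in the buffer's FFR slot on save.
   */
  static void demo_sve_round_trip(void *sve_buf, u32 *fpsr,
                                  unsigned long vq_minus_1, int have_ffr)
  {
          sve_save_state(sve_buf, fpsr, have_ffr);
          sve_load_state(sve_buf, fpsr, have_ffr, vq_minus_1);
  }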