author	Cyril Bur <cyrilbur@gmail.com>	2016-09-23 16:18:15 +1000
committer	Michael Ellerman <mpe@ellerman.id.au>	2016-10-04 20:10:12 +1100
commit	2b4093790abdf0219f8ca192ecc4ecc63cdb5124 (patch)
tree	a3e438d0ae5b58047447032d0231af26044b466a /tools
parent	be4a9f56666af94eccf7993661c1a62db033bff9 (diff)
selftests/powerpc: Move VMX stack frame macros to header file
Signed-off-by: Cyril Bur <cyrilbur@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Diffstat (limited to 'tools')
-rw-r--r--	tools/testing/selftests/powerpc/math/vmx_asm.S	85
-rw-r--r--	tools/testing/selftests/powerpc/vmx_asm.h	96
2 files changed, 97 insertions(+), 84 deletions(-)
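
This change moves the PUSH_VMX/POP_VMX stack-frame macros and the load_vmx helper out of vmx_asm.S into a new shared header, tools/testing/selftests/powerpc/vmx_asm.h, so other VMX selftests can reuse them. Note that in the move the macros also switch their stack-pointer operand from the sp alias to the explicit %r1 register. A usage sketch follows the patch below.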
diff --git a/tools/testing/selftests/powerpc/math/vmx_asm.S b/tools/testing/selftests/powerpc/math/vmx_asm.S
index 1b8c248b3ac1..fd74da488625 100644
--- a/tools/testing/selftests/powerpc/math/vmx_asm.S
+++ b/tools/testing/selftests/powerpc/math/vmx_asm.S
@@ -8,90 +8,7 @@
*/
#include "../basic_asm.h"
-
-# POS MUST BE 16 ALIGNED!
-#define PUSH_VMX(pos,reg) \
- li reg,pos; \
- stvx v20,reg,sp; \
- addi reg,reg,16; \
- stvx v21,reg,sp; \
- addi reg,reg,16; \
- stvx v22,reg,sp; \
- addi reg,reg,16; \
- stvx v23,reg,sp; \
- addi reg,reg,16; \
- stvx v24,reg,sp; \
- addi reg,reg,16; \
- stvx v25,reg,sp; \
- addi reg,reg,16; \
- stvx v26,reg,sp; \
- addi reg,reg,16; \
- stvx v27,reg,sp; \
- addi reg,reg,16; \
- stvx v28,reg,sp; \
- addi reg,reg,16; \
- stvx v29,reg,sp; \
- addi reg,reg,16; \
- stvx v30,reg,sp; \
- addi reg,reg,16; \
- stvx v31,reg,sp;
-
-# POS MUST BE 16 ALIGNED!
-#define POP_VMX(pos,reg) \
- li reg,pos; \
- lvx v20,reg,sp; \
- addi reg,reg,16; \
- lvx v21,reg,sp; \
- addi reg,reg,16; \
- lvx v22,reg,sp; \
- addi reg,reg,16; \
- lvx v23,reg,sp; \
- addi reg,reg,16; \
- lvx v24,reg,sp; \
- addi reg,reg,16; \
- lvx v25,reg,sp; \
- addi reg,reg,16; \
- lvx v26,reg,sp; \
- addi reg,reg,16; \
- lvx v27,reg,sp; \
- addi reg,reg,16; \
- lvx v28,reg,sp; \
- addi reg,reg,16; \
- lvx v29,reg,sp; \
- addi reg,reg,16; \
- lvx v30,reg,sp; \
- addi reg,reg,16; \
- lvx v31,reg,sp;
-
-# Carefull this will 'clobber' vmx (by design)
-# Don't call this from C
-FUNC_START(load_vmx)
- li r5,0
- lvx v20,r5,r3
- addi r5,r5,16
- lvx v21,r5,r3
- addi r5,r5,16
- lvx v22,r5,r3
- addi r5,r5,16
- lvx v23,r5,r3
- addi r5,r5,16
- lvx v24,r5,r3
- addi r5,r5,16
- lvx v25,r5,r3
- addi r5,r5,16
- lvx v26,r5,r3
- addi r5,r5,16
- lvx v27,r5,r3
- addi r5,r5,16
- lvx v28,r5,r3
- addi r5,r5,16
- lvx v29,r5,r3
- addi r5,r5,16
- lvx v30,r5,r3
- addi r5,r5,16
- lvx v31,r5,r3
- blr
-FUNC_END(load_vmx)
+#include "../vmx_asm.h"
# Should be safe from C, only touches r4, r5 and v0,v1,v2
FUNC_START(check_vmx)
diff --git a/tools/testing/selftests/powerpc/vmx_asm.h b/tools/testing/selftests/powerpc/vmx_asm.h
new file mode 100644
index 000000000000..2eaaeca9cf1d
--- /dev/null
+++ b/tools/testing/selftests/powerpc/vmx_asm.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2015, Cyril Bur, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include "basic_asm.h"
+
+/* POS MUST BE 16 ALIGNED! */
+#define PUSH_VMX(pos,reg) \
+ li reg,pos; \
+ stvx v20,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v21,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v22,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v23,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v24,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v25,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v26,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v27,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v28,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v29,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v30,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v31,reg,%r1;
+
+/* POS MUST BE 16 ALIGNED! */
+#define POP_VMX(pos,reg) \
+ li reg,pos; \
+ lvx v20,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v21,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v22,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v23,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v24,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v25,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v26,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v27,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v28,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v29,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v30,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v31,reg,%r1;
+
+/*
+ * Careful this will 'clobber' vmx (by design)
+ * Don't call this from C
+ */
+FUNC_START(load_vmx)
+ li r5,0
+ lvx v20,r5,r3
+ addi r5,r5,16
+ lvx v21,r5,r3
+ addi r5,r5,16
+ lvx v22,r5,r3
+ addi r5,r5,16
+ lvx v23,r5,r3
+ addi r5,r5,16
+ lvx v24,r5,r3
+ addi r5,r5,16
+ lvx v25,r5,r3
+ addi r5,r5,16
+ lvx v26,r5,r3
+ addi r5,r5,16
+ lvx v27,r5,r3
+ addi r5,r5,16
+ lvx v28,r5,r3
+ addi r5,r5,16
+ lvx v29,r5,r3
+ addi r5,r5,16
+ lvx v30,r5,r3
+ addi r5,r5,16
+ lvx v31,r5,r3
+ blr
+FUNC_END(load_vmx)
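
For reference, a minimal sketch of how a test might use the shared header once this patch is applied. PUSH_BASIC_STACK, POP_BASIC_STACK and STACK_FRAME_LOCAL are assumed to be provided by basic_asm.h, and the function name vmx_scratch is hypothetical; the pos argument to PUSH_VMX/POP_VMX must be 16-byte aligned, as the macros' comments warn.

#include "basic_asm.h"
#include "vmx_asm.h"

# Hypothetical example: save the non-volatile VMX registers, clobber
# one, then restore them before returning. Assumes PUSH_BASIC_STACK,
# POP_BASIC_STACK and STACK_FRAME_LOCAL come from basic_asm.h.
FUNC_START(vmx_scratch)
	PUSH_BASIC_STACK(512)
	PUSH_VMX(STACK_FRAME_LOCAL(2,0),r4)	# spill v20-v31, 16 bytes each
	vxor	v20,v20,v20			# clobber a non-volatile VR
	POP_VMX(STACK_FRAME_LOCAL(2,0),r4)	# restore v20-v31
	POP_BASIC_STACK(512)
	blr
FUNC_END(vmx_scratch)

PUSH_VMX stores the twelve non-volatile vector registers (v20-v31) at pos, pos+16, ..., pos+176, so the caller must reserve at least 192 bytes of 16-byte-aligned stack space for the save area.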