/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2015, Cyril Bur, IBM Corp.
*/
#include "basic_asm.h"
/*
 * Save the non-volatile VMX registers v20-v31 to the stack at offset
 * pos from the stack pointer, one 16-byte slot per register. pos must
 * be 16-byte aligned: lvx/stvx ignore the low four bits of the
 * effective address, so a misaligned offset would silently use the
 * wrong slots.
 */
#define PUSH_VMX(pos,reg) \
	li	reg,pos; \
	stvx	v20,reg,%r1; \
	addi	reg,reg,16; \
	stvx	v21,reg,%r1; \
	addi	reg,reg,16; \
	stvx	v22,reg,%r1; \
	addi	reg,reg,16; \
	stvx	v23,reg,%r1; \
	addi	reg,reg,16; \
	stvx	v24,reg,%r1; \
	addi	reg,reg,16; \
	stvx	v25,reg,%r1; \
	addi	reg,reg,16; \
	stvx	v26,reg,%r1; \
	addi	reg,reg,16; \
	stvx	v27,reg,%r1; \
	addi	reg,reg,16; \
	stvx	v28,reg,%r1; \
	addi	reg,reg,16; \
	stvx	v29,reg,%r1; \
	addi	reg,reg,16; \
	stvx	v30,reg,%r1; \
	addi	reg,reg,16; \
	stvx	v31,reg,%r1;
/*
 * Restore v20-v31 from the stack area written by PUSH_VMX. pos must
 * be the same 16-byte-aligned offset that was passed to PUSH_VMX.
 */
#define POP_VMX(pos,reg) \
	li	reg,pos; \
	lvx	v20,reg,%r1; \
	addi	reg,reg,16; \
	lvx	v21,reg,%r1; \
	addi	reg,reg,16; \
	lvx	v22,reg,%r1; \
	addi	reg,reg,16; \
	lvx	v23,reg,%r1; \
	addi	reg,reg,16; \
	lvx	v24,reg,%r1; \
	addi	reg,reg,16; \
	lvx	v25,reg,%r1; \
	addi	reg,reg,16; \
	lvx	v26,reg,%r1; \
	addi	reg,reg,16; \
	lvx	v27,reg,%r1; \
	addi	reg,reg,16; \
	lvx	v28,reg,%r1; \
	addi	reg,reg,16; \
	lvx	v29,reg,%r1; \
	addi	reg,reg,16; \
	lvx	v30,reg,%r1; \
	addi	reg,reg,16; \
	lvx	v31,reg,%r1;
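/*
 * Hypothetical usage sketch (not part of the original file): a test
 * routine that trashes v20-v31 would typically wrap its body in
 * PUSH_VMX/POP_VMX inside its own stack frame. The 512-byte frame,
 * the fixed offset 32, the r4 scratch register, and the routine name
 * below are illustrative assumptions; PUSH_BASIC_STACK/POP_BASIC_STACK
 * are taken from basic_asm.h.
 *
 * FUNC_START(vmx_scratch_example)
 *	PUSH_BASIC_STACK(512)
 *	PUSH_VMX(32,r4)		// 16-byte-aligned offset into the frame
 *	// ... code that is now free to clobber v20-v31 ...
 *	POP_VMX(32,r4)
 *	POP_BASIC_STACK(512)
 *	blr
 * FUNC_END(vmx_scratch_example)
 */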
/*
 * Load v20-v31 from the 16-byte-aligned buffer addressed by r3.
 * Careful: this clobbers the non-volatile VMX registers by design.
 * Don't call this from C.
 */
FUNC_START(load_vmx)
	li	r5,0
	lvx	v20,r5,r3
	addi	r5,r5,16
	lvx	v21,r5,r3
	addi	r5,r5,16
	lvx	v22,r5,r3
	addi	r5,r5,16
	lvx	v23,r5,r3
	addi	r5,r5,16
	lvx	v24,r5,r3
	addi	r5,r5,16
	lvx	v25,r5,r3
	addi	r5,r5,16
	lvx	v26,r5,r3
	addi	r5,r5,16
	lvx	v27,r5,r3
	addi	r5,r5,16
	lvx	v28,r5,r3
	addi	r5,r5,16
	lvx	v29,r5,r3
	addi	r5,r5,16
	lvx	v30,r5,r3
	addi	r5,r5,16
	lvx	v31,r5,r3
	blr
FUNC_END(load_vmx)
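/*
 * A minimal sketch, assumed rather than shown in the original excerpt:
 * the mirror-image store routine. It writes v20-v31 out to the
 * 16-byte-aligned buffer addressed by r3 using stvx, and, like
 * load_vmx, it touches VMX state directly, so it must not be called
 * from C either. The name store_vmx is illustrative.
 */
FUNC_START(store_vmx)
	li	r5,0
	stvx	v20,r5,r3
	addi	r5,r5,16
	stvx	v21,r5,r3
	addi	r5,r5,16
	stvx	v22,r5,r3
	addi	r5,r5,16
	stvx	v23,r5,r3
	addi	r5,r5,16
	stvx	v24,r5,r3
	addi	r5,r5,16
	stvx	v25,r5,r3
	addi	r5,r5,16
	stvx	v26,r5,r3
	addi	r5,r5,16
	stvx	v27,r5,r3
	addi	r5,r5,16
	stvx	v28,r5,r3
	addi	r5,r5,16
	stvx	v29,r5,r3
	addi	r5,r5,16
	stvx	v30,r5,r3
	addi	r5,r5,16
	stvx	v31,r5,r3
	blr
FUNC_END(store_vmx)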