arch/arm64/kernel/vdso/gettimeofday.S
/*
 * Userspace implementations of gettimeofday() and friends.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>

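/* mov/movk halves of NSEC_PER_SEC (1,000,000,000 == 0x3b9aca00). */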
#define NSEC_PER_SEC_LO16	0xca00
#define NSEC_PER_SEC_HI16	0x3b9a

vdso_data	.req	x6
seqcnt		.req	w7
w_tmp		.req	w8
x_tmp		.req	x8

/*
 * Conventions for macro arguments:
 * - An argument is write-only if its name starts with "res".
 * - All other arguments are read-only, unless otherwise specified.
 */

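	/*
	 * Wait until no update of the vdso data page is in progress: the
	 * sequence count is odd while the kernel is writing it. The load
	 * barrier orders the count read before the subsequent data reads.
	 */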
	.macro	seqcnt_acquire
9999:	ldr	seqcnt, [vdso_data, #VDSO_TB_SEQ_COUNT]
	tbnz	seqcnt, #0, 9999b
	dmb	ishld
	.endm

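	/*
	 * Re-read the sequence count after the data has been loaded; if it
	 * changed, the data is inconsistent and the caller retries via \fail.
	 */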
	.macro	seqcnt_check fail
	dmb	ishld
	ldr	w_tmp, [vdso_data, #VDSO_TB_SEQ_COUNT]
	cmp	w_tmp, seqcnt
	b.ne	\fail
	.endm

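	/* Bail out to the syscall fallback if the clocksource cannot be read from userspace. */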
	.macro	syscall_check fail
	ldr	w_tmp, [vdso_data, #VDSO_USE_SYSCALL]
	cbnz	w_tmp, \fail
	.endm

	.macro get_nsec_per_sec res
	mov	\res, #NSEC_PER_SEC_LO16
	movk	\res, #NSEC_PER_SEC_HI16, lsl #16
	.endm

	/*
	 * Returns the clock delta, in nanoseconds left-shifted by the clock
	 * shift.
	 */
	.macro	get_clock_shifted_nsec res, cycle_last, mult
	/* Read the virtual counter; the ISB prevents the read being speculated early. */
	isb
	mrs	x_tmp, cntvct_el0
	/* Calculate cycle delta and convert to ns. */
	sub	\res, x_tmp, \cycle_last
	/* We can only guarantee 56 bits of precision. */
	movn	x_tmp, #0xff00, lsl #48
	and	\res, x_tmp, \res
	mul	\res, \res, \mult
	.endm

	/*
	 * Returns in res_{sec,nsec} the REALTIME timespec, based on the
	 * "wall time" (xtime) and the clock_mono delta.
	 */
	.macro	get_ts_realtime res_sec, res_nsec, \
			clock_nsec, xtime_sec, xtime_nsec, nsec_to_sec
	add	\res_nsec, \clock_nsec, \xtime_nsec
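	/* Split the (shifted) nanosecond total into whole seconds and remainder. */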
	udiv	x_tmp, \res_nsec, \nsec_to_sec
	add	\res_sec, \xtime_sec, x_tmp
	msub	\res_nsec, x_tmp, \nsec_to_sec, \res_nsec
	.endm

	/*
	 * Returns in res_{sec,nsec} the timespec based on the clock_raw delta,
	 * used for CLOCK_MONOTONIC_RAW.
	 */
	.macro	get_ts_clock_raw res_sec, res_nsec, clock_nsec, nsec_to_sec
	udiv	\res_sec, \clock_nsec, \nsec_to_sec
	msub	\res_nsec, \res_sec, \nsec_to_sec, \clock_nsec
	.endm

	/* sec and nsec are modified in place. */
	.macro add_ts sec, nsec, ts_sec, ts_nsec, nsec_to_sec
	/* Add timespec. */
	add	\sec, \sec, \ts_sec
	add	\nsec, \nsec, \ts_nsec

	/* Normalise the new timespec. */
	cmp	\nsec, \nsec_to_sec
	b.lt	9999f
	sub	\nsec, \nsec, \nsec_to_sec
	add	\sec, \sec, #1
9999:
	cmp	\nsec, #0
	b.ge	9998f
	add	\nsec, \nsec, \nsec_to_sec
	sub	\sec, \sec, #1
9998:
	.endm

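	/*
	 * Store the {sec, nsec} result from x10/x11 into the timespec at x1
	 * and return 0. With shift=1, x11 still holds left-shifted nsecs and
	 * x12 the clocksource shift, so it is shifted back down first.
	 */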
	.macro clock_gettime_return, shift=0
	.if \shift == 1
	lsr	x11, x11, x12
	.endif
	stp	x10, x11, [x1, #TSPEC_TV_SEC]
	mov	x0, xzr
	ret
	.endm

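	/*
	 * Each jump slot is a single 4-byte branch; the build-time check
	 * ensures the slot for clock id \index sits at \jumptable + 4 * \index,
	 * matching the indirect branch in __kernel_clock_gettime.
	 */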
	.macro jump_slot jumptable, index, label
	.if (. - \jumptable) != 4 * (\index)
	.error "Jump slot index mismatch"
	.endif
	b	\label
	.endm

	.text

/* int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz); */
ENTRY(__kernel_gettimeofday)
	.cfi_startproc
	adr	vdso_data, _vdso_data
	/* If tv is NULL, skip to the timezone code. */
	cbz	x0, 2f

	/* Compute the time of day. */
1:	seqcnt_acquire
	syscall_check fail=4f
	ldr	x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
	/* w11 = cs_mono_mult, w12 = cs_shift */
	ldp	w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
	ldp	x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
	seqcnt_check fail=1b

	get_nsec_per_sec res=x9
	lsl	x9, x9, x12

	get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
	get_ts_realtime res_sec=x10, res_nsec=x11, \
		clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9

	/*
	 * Convert ns to us: x11 is still left-shifted by x12, so divide
	 * by (1000 << shift).
	 */
	mov	x13, #1000
	lsl	x13, x13, x12
	udiv	x11, x11, x13
	stp	x10, x11, [x0, #TVAL_TV_SEC]
2:
	/* If tz is NULL, return 0. */
	cbz	x1, 3f
	ldp	w4, w5, [vdso_data, #VDSO_TZ_MINWEST]
	stp	w4, w5, [x1, #TZ_MINWEST]
3:
	mov	x0, xzr
	ret
4:
	/* Syscall fallback. */
	mov	x8, #__NR_gettimeofday
	svc	#0
	ret
	.cfi_endproc
ENDPROC(__kernel_gettimeofday)

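/* Highest clock id served by the vdso; anything above falls back to the syscall. */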
#define JUMPSLOT_MAX CLOCK_MONOTONIC_COARSE

/* int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp); */
ENTRY(__kernel_clock_gettime)
	.cfi_startproc
	cmp	w0, #JUMPSLOT_MAX
	b.hi	syscall
	adr	vdso_data, _vdso_data
	adr	x_tmp, jumptable
	add	x_tmp, x_tmp, w0, uxtw #2
	br	x_tmp

	ALIGN
jumptable:
	jump_slot jumptable, CLOCK_REALTIME, realtime
	jump_slot jumptable, CLOCK_MONOTONIC, monotonic
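	/* Clock ids 2 and 3 (CLOCK_PROCESS/THREAD_CPUTIME_ID) always use the syscall. */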
	b	syscall
	b	syscall
	jump_slot jumptable, CLOCK_MONOTONIC_RAW, monotonic_raw
	jump_slot jumptable, CLOCK_REALTIME_COARSE, realtime_coarse
	jump_slot jumptable, CLOCK_MONOTONIC_COARSE, monotonic_coarse

	.if (. - jumptable) != 4 * (JUMPSLOT_MAX + 1)
	.error	"Wrong jumptable size"
	.endif

	ALIGN
realtime:
	seqcnt_acquire
	syscall_check fail=syscall
	ldr	x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
	/* w11 = cs_mono_mult, w12 = cs_shift */
	ldp	w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
	ldp	x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
	seqcnt_check fail=realtime

	/* All computations are done with left-shifted nsecs. */
	get_nsec_per_sec res=x9
	lsl	x9, x9, x12

	get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
	get_ts_realtime res_sec=x10, res_nsec=x11, \
		clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
	clock_gettime_return, shift=1

	ALIGN
monotonic:
	seqcnt_acquire
	syscall_check fail=syscall
	ldr	x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
	/* w11 = cs_mono_mult, w12 = cs_shift */
	ldp	w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
	ldp	x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
	ldp	x3, x4, [vdso_data, #VDSO_WTM_CLK_SEC]
	seqcnt_check fail=monotonic

	/* All computations are done with left-shifted nsecs. */
	lsl	x4, x4, x12
	get_nsec_per_sec res=x9
	lsl	x9, x9, x12

	get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
	get_ts_realtime res_sec=x10, res_nsec=x11, \
		clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9

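	/* Apply the wall-to-monotonic offset (x3/x4, nsec already left-shifted). */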
	add_ts sec=x10, nsec=x11, ts_sec=x3, ts_nsec=x4, nsec_to_sec=x9
	clock_gettime_return, shift=1

	ALIGN
monotonic_raw:
	seqcnt_acquire
	syscall_check fail=syscall
	ldr	x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
	/* w11 = cs_raw_mult, w12 = cs_shift */
	ldp	w12, w11, [vdso_data, #VDSO_CS_SHIFT]
	ldp	x13, x14, [vdso_data, #VDSO_RAW_TIME_SEC]
	seqcnt_check fail=monotonic_raw

	/* All computations are done with left-shifted nsecs. */
	get_nsec_per_sec res=x9
	lsl	x9, x9, x12

	get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
	get_ts_clock_raw res_sec=x10, res_nsec=x11, \
		clock_nsec=x15, nsec_to_sec=x9

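	/* Add the accumulated raw time base (x13/x14) to the raw clock delta. */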
	add_ts sec=x10, nsec=x11, ts_sec=x13, ts_nsec=x14, nsec_to_sec=x9
	clock_gettime_return, shift=1

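	/*
	 * The coarse clocks use the sec/nsec values cached in the vdso data
	 * page and never read the counter.
	 */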
	ALIGN
realtime_coarse:
	seqcnt_acquire
	ldp	x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC]
	seqcnt_check fail=realtime_coarse
	clock_gettime_return

	ALIGN
monotonic_coarse:
	seqcnt_acquire
	ldp	x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC]
	ldp	x13, x14, [vdso_data, #VDSO_WTM_CLK_SEC]
	seqcnt_check fail=monotonic_coarse

	/* Computations are done in (non-shifted) nsecs. */
	get_nsec_per_sec res=x9
	add_ts sec=x10, nsec=x11, ts_sec=x13, ts_nsec=x14, nsec_to_sec=x9
	clock_gettime_return

	ALIGN
syscall: /* Syscall fallback. */
	mov	x8, #__NR_clock_gettime
	svc	#0
	ret
	.cfi_endproc
ENDPROC(__kernel_clock_gettime)

/* int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); */
ENTRY(__kernel_clock_getres)
	.cfi_startproc
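	/*
	 * The cmp/ccmp chain leaves Z set iff w0 is CLOCK_REALTIME,
	 * CLOCK_MONOTONIC or CLOCK_MONOTONIC_RAW: once a compare matches,
	 * the "ne" condition fails and ccmp forces nzcv to #4 (Z set).
	 */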
	cmp	w0, #CLOCK_REALTIME
	ccmp	w0, #CLOCK_MONOTONIC, #0x4, ne
	ccmp	w0, #CLOCK_MONOTONIC_RAW, #0x4, ne
	b.ne	1f

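	/* High-resolution clocks: load their resolution from the literal at 5f. */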
	ldr	x2, 5f
	b	2f
1:
	cmp	w0, #CLOCK_REALTIME_COARSE
	ccmp	w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
	b.ne	4f
	ldr	x2, 6f
2:
	cbz	w1, 3f
	stp	xzr, x2, [x1]

3:	/* res == NULL. */
	mov	w0, wzr
	ret

4:	/* Syscall fallback. */
	mov	x8, #__NR_clock_getres
	svc	#0
	ret
5:
	.quad	CLOCK_REALTIME_RES
6:
	.quad	CLOCK_COARSE_RES
	.cfi_endproc
ENDPROC(__kernel_clock_getres)