path: root/arch/csky/kernel/entry.S
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/linkage.h>
#include <abi/entry.h>
#include <abi/pgtable-bits.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <linux/threads.h>
#include <asm/page.h>
#include <asm/thread_info.h>

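/*
 * Constants for the software page-table walk in the TLB handlers below
 * (two-level tables, 4 KiB pages): each PGD entry covers 4 MiB
 * (VA bits [31:22]); shifting the VA right by PTE_INDX_SHIFT and masking
 * with PTE_INDX_MSK yields the byte offset of the 4-byte PTE
 * (VA bits [21:12] scaled by 4) within the page table.
 */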
#define PTE_INDX_MSK    0xffc
#define PTE_INDX_SHIFT  10
#define _PGDIR_SHIFT    22

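/*
 * Clear the frame pointer (r8) on kernel entry so stack traces started
 * from an exception frame terminate cleanly.
 */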
.macro	zero_fp
#ifdef CONFIG_STACKTRACE
	movi	r8, 0
#endif
.endm

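/*
 * If the exception came from user mode (supervisor bit 31 of EPSR clear),
 * inform the context-tracking code that we have left user space.  The
 * call may clobber the argument registers, so reload the saved syscall
 * arguments afterwards.
 */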
.macro	context_tracking
#ifdef CONFIG_CONTEXT_TRACKING
	mfcr	a0, epsr
	btsti	a0, 31
	bt	1f
	jbsr	context_tracking_user_exit
	ldw	a0, (sp, LSAVE_A0)
	ldw	a1, (sp, LSAVE_A1)
	ldw	a2, (sp, LSAVE_A2)
	ldw	a3, (sp, LSAVE_A3)
#if defined(__CSKYABIV1__)
	ldw	r6, (sp, LSAVE_A4)
	ldw	r7, (sp, LSAVE_A5)
#endif
1:
#endif
.endm

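/*
 * tlbop_begin generates the fast-path handler for a TLB exception:
 * stash the scratch registers in ss2-ss4, walk the two-level page table
 * in software using the faulting address from MEH, and if the PTE is
 * present with the required permission (\val0), set the \val1/\val2 bits,
 * restore the scratch registers and return with rte.  Otherwise fall
 * through to \name, which restores the scratch registers, saves the full
 * frame and continues in tlbop_end.
 */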
.macro tlbop_begin name, val0, val1, val2
ENTRY(csky_\name)
	mtcr    a3, ss2
	mtcr    r6, ss3
	mtcr    a2, ss4

	RD_PGDR	r6
	RD_MEH	a3
#ifdef CONFIG_CPU_HAS_TLBI
	tlbi.vaas a3
	sync.is

	btsti	a3, 31
	bf	1f
	RD_PGDR_K r6
1:
#else
	bgeni	a2, 31
	WR_MCIR	a2
	bgeni	a2, 25
	WR_MCIR	a2
#endif
	bclri   r6, 0
	lrw	a2, va_pa_offset
	ld.w	a2, (a2, 0)
	subu	r6, a2
	bseti	r6, 31

	mov     a2, a3
	lsri    a2, _PGDIR_SHIFT
	lsli    a2, 2
	addu    r6, a2
	ldw     r6, (r6)

	lrw	a2, va_pa_offset
	ld.w	a2, (a2, 0)
	subu	r6, a2
	bseti	r6, 31

	lsri    a3, PTE_INDX_SHIFT
	lrw     a2, PTE_INDX_MSK
	and     a3, a2
	addu    r6, a3
	ldw     a3, (r6)

	movi	a2, (_PAGE_PRESENT | \val0)
	and     a3, a2
	cmpne   a3, a2
	bt	\name

	/* First access (read/write) to the page: just update the PTE flags */
	ldw     a3, (r6)
	bgeni   a2, PAGE_VALID_BIT
	bseti   a2, PAGE_ACCESSED_BIT
	bseti   a2, \val1
	bseti   a2, \val2
	or      a3, a2
	stw     a3, (r6)

	/* Some CPUs' TLB hardware refill bypasses the cache, so write the updated PTE back */
#ifdef CONFIG_CPU_NEED_TLBSYNC
	movi	a2, 0x22
	bseti	a2, 6
	mtcr	r6, cr22
	mtcr	a2, cr17
	sync
#endif

	mfcr    a3, ss2
	mfcr    r6, ss3
	mfcr    a2, ss4
	rte
\name:
	mfcr    a3, ss2
	mfcr    r6, ss3
	mfcr    a2, ss4
	SAVE_ALL 0
.endm
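
/*
 * tlbop_end is the slow path shared by the handlers above: with the full
 * frame already saved, enable exceptions/interrupts and call
 * do_page_fault() with pt_regs (a0), the write flag (a1) and the faulting
 * address from MEH (a2).
 */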
.macro tlbop_end is_write
	zero_fp
	context_tracking
	RD_MEH	a2
	psrset  ee, ie
	mov     a0, sp
	movi    a1, \is_write
	jbsr    do_page_fault
	jmpi    ret_from_exception
.endm

.text

tlbop_begin tlbinvalidl, _PAGE_READ, PAGE_VALID_BIT, PAGE_ACCESSED_BIT
tlbop_end 0

tlbop_begin tlbinvalids, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
tlbop_end 1

tlbop_begin tlbmodified, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
#ifndef CONFIG_CPU_HAS_LDSTEX
jbsr csky_cmpxchg_fixup
#endif
tlbop_end 1

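/*
 * System call entry (trap 0): validate the syscall number, look up the
 * handler in sys_call_table and call it; threads with _TIF_SYSCALL_WORK
 * set go through csky_syscall_trace instead so entry and exit are
 * reported.  On ABIv2 the 5th and 6th arguments are passed to the
 * handler on the stack.
 */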
ENTRY(csky_systemcall)
	SAVE_ALL TRAP0_SIZE
	zero_fp
	context_tracking
	psrset  ee, ie

	lrw     r9, __NR_syscalls
	cmphs   syscallid, r9		/* Check nr of syscall */
	bt      1f

	lrw     r9, sys_call_table
	ixw     r9, syscallid
	ldw     syscallid, (r9)
	cmpnei  syscallid, 0
	bf      ret_from_exception

	mov     r9, sp
	bmaski  r10, THREAD_SHIFT
	andn    r9, r10
	ldw     r10, (r9, TINFO_FLAGS)
	lrw	r9, _TIF_SYSCALL_WORK
	and	r10, r9
	cmpnei	r10, 0
	bt      csky_syscall_trace
#if defined(__CSKYABIV2__)
	subi    sp, 8
	stw  	r5, (sp, 0x4)
	stw  	r4, (sp, 0x0)
	jsr     syscallid                      /* Do system call */
	addi 	sp, 8
#else
	jsr     syscallid
#endif
	stw     a0, (sp, LSAVE_A0)      /* Save return value */
1:
#ifdef CONFIG_DEBUG_RSEQ
	mov	a0, sp
	jbsr	rseq_syscall
#endif
	jmpi    ret_from_exception

csky_syscall_trace:
	mov	a0, sp                  /* sp = pt_regs pointer */
	jbsr	syscall_trace_enter
	cmpnei	a0, 0
	bt	1f
	/* Prepare the arguments before doing the system call */
	ldw	a0, (sp, LSAVE_A0)
	ldw	a1, (sp, LSAVE_A1)
	ldw	a2, (sp, LSAVE_A2)
	ldw	a3, (sp, LSAVE_A3)
#if defined(__CSKYABIV2__)
	/* Reload args 5/6 from the saved frame before moving sp */
	ldw	r9, (sp, LSAVE_A4)
	ldw	r10, (sp, LSAVE_A5)
	subi	sp, 8
	stw	r9, (sp, 0x0)
	stw	r10, (sp, 0x4)
	jsr	syscallid                     /* Do system call */
	addi	sp, 8
#else
	ldw	r6, (sp, LSAVE_A4)
	ldw	r7, (sp, LSAVE_A5)
	jsr	syscallid                     /* Do system call */
#endif
	stw	a0, (sp, LSAVE_A0)	/* Save return value */

1:
#ifdef CONFIG_DEBUG_RSEQ
	mov	a0, sp
	jbsr	rseq_syscall
#endif
	mov     a0, sp                  /* sp = pt_regs pointer */
	jbsr    syscall_trace_exit
	br	ret_from_exception

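/*
 * First return of a kernel thread: finish the context switch with
 * schedule_tail(), then call the thread function (r9) with its argument
 * (r10) as set up at thread creation.
 */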
ENTRY(ret_from_kernel_thread)
	jbsr	schedule_tail
	mov	a0, r10
	jsr	r9
	jbsr	ret_from_exception

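/*
 * First return of a forked task: finish the context switch and, if the
 * thread is being syscall-traced, report syscall exit before taking the
 * common exception-return path.
 */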
ENTRY(ret_from_fork)
	jbsr	schedule_tail
	mov	r9, sp
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10
	ldw	r10, (r9, TINFO_FLAGS)
	lrw	r9, _TIF_SYSCALL_WORK
	and	r10, r9
	cmpnei	r10, 0
	bf	ret_from_exception
	mov	a0, sp			/* sp = pt_regs pointer */
	jbsr	syscall_trace_exit

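/*
 * Common return path: when returning to user space, check the thread
 * flags and handle pending work in exit_work; otherwise handle kernel
 * preemption and IRQ-flags tracing before restoring the full frame.
 */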
ret_from_exception:
	psrclr	ie
	ld	r9, (sp, LSAVE_PSR)
	btsti	r9, 31

	bt	1f
	/*
	 * Get current thread_info from the kernel stack pointer, then
	 * check its flags for pending work (e.g. need_resched).
	 */
	mov	r9, sp
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10

	ldw	r10, (r9, TINFO_FLAGS)
	lrw	r9, _TIF_WORK_MASK
	and	r10, r9
	cmpnei	r10, 0
	bt	exit_work
#ifdef CONFIG_CONTEXT_TRACKING
	jbsr	context_tracking_user_enter
#endif
1:
#ifdef CONFIG_PREEMPTION
	mov	r9, sp
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10

	ldw	r10, (r9, TINFO_PREEMPT)
	cmpnei	r10, 0
	bt	2f
	jbsr	preempt_schedule_irq	/* irq en/disable is done inside */
2:
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	ld	r10, (sp, LSAVE_PSR)
	btsti	r10, 6
	bf	2f
	jbsr	trace_hardirqs_on
2:
#endif
	RESTORE_ALL

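/*
 * Pending work on return to user space: reschedule if TIF_NEED_RESCHED
 * is set, otherwise re-enable interrupts and let do_notify_resume()
 * handle signals and other notifications.  Both paths come back through
 * ret_from_exception via lr.
 */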
exit_work:
	lrw	r9, ret_from_exception
	mov	lr, r9

	btsti	r10, TIF_NEED_RESCHED
	bt	work_resched

	psrset	ie
	mov	a0, sp
	mov	a1, r10
	jmpi	do_notify_resume

work_resched:
	jmpi	schedule

ENTRY(csky_trap)
	SAVE_ALL 0
	zero_fp
	context_tracking
	psrset	ee
	mov	a0, sp                 /* Pass stack pointer (pt_regs) as arg */
	jbsr	trap_c                 /* Call C-level trap handler */
	jmpi	ret_from_exception

/*
 * Prototype from libc for abiv1:
 * register unsigned int __result asm("a0");
 * asm( "trap 3" :"=r"(__result)::);
 */
ENTRY(csky_get_tls)
	USPTOKSP

	/* advance epc past the trap instruction so execution continues after it */
	mfcr	a0, epc
	addi	a0, TRAP0_SIZE
	mtcr	a0, epc

	/* get current thread_info from the THREAD_SIZE-aligned kernel stack */
	bmaski	a0, THREAD_SHIFT
	not	a0
	subi	sp, 1
	and	a0, sp
	addi	sp, 1

	/* get tls */
	ldw	a0, (a0, TINFO_TP_VALUE)

	KSPTOUSP
	rte

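/*
 * Interrupt entry: save the full frame and dispatch to csky_do_IRQ()
 * with pt_regs as the argument.
 */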
ENTRY(csky_irq)
	SAVE_ALL 0
	zero_fp
	context_tracking
	psrset	ee

#ifdef CONFIG_TRACE_IRQFLAGS
	jbsr	trace_hardirqs_off
#endif

	mov	a0, sp
	jbsr	csky_do_IRQ

	jmpi	ret_from_exception

/*
 * __switch_to:
 *   a0 = prev task_struct *
 *   a1 = next task_struct *
 *   returns next in a0
 */
ENTRY(__switch_to)
	lrw	a3, TASK_THREAD
	addu	a3, a0

	SAVE_SWITCH_STACK

	stw	sp, (a3, THREAD_KSP)

	/* Set up next process to run */
	lrw	a3, TASK_THREAD
	addu	a3, a1

	ldw	sp, (a3, THREAD_KSP)	/* Set next kernel sp */

#if  defined(__CSKYABIV2__)
	addi	a3, a1, TASK_THREAD_INFO
	ldw	tls, (a3, TINFO_TP_VALUE)
#endif

	RESTORE_SWITCH_STACK

	rts
ENDPROC(__switch_to)