/*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
* Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
* Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
* Adapted for Power Macintosh by Paul Mackerras.
* Low-level exception handlers and MMU support
* rewritten by Paul Mackerras.
* Copyright (C) 1996 Paul Mackerras.
* MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
*
* This file contains the system call entry code, context switch
* code, and exception/interrupt return code for PowerPC.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/ptrace.h>
/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
*/
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x) lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x) li r,(x)
#endif
#ifdef CONFIG_BOOKE
.globl mcheck_transfer_to_handler
mcheck_transfer_to_handler:
mfspr r0,SPRN_DSRR0
stw r0,_DSRR0(r11)
mfspr r0,SPRN_DSRR1
stw r0,_DSRR1(r11)
/* fall through */
.globl debug_transfer_to_handler
debug_transfer_to_handler:
mfspr r0,SPRN_CSRR0
stw r0,_CSRR0(r11)
mfspr r0,SPRN_CSRR1
stw r0,_CSRR1(r11)
/* fall through */
.globl crit_transfer_to_handler
crit_transfer_to_handler:
#ifdef CONFIG_PPC_BOOK3E_MMU
mfspr r0,SPRN_MAS0
stw r0,MAS0(r11)
mfspr r0,SPRN_MAS1
stw r0,MAS1(r11)
mfspr r0,SPRN_MAS2
stw r0,MAS2(r11)
mfspr r0,SPRN_MAS3
stw r0,MAS3(r11)
mfspr r0,SPRN_MAS6
stw r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
mfspr r0,SPRN_MAS7
stw r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_PPC_BOOK3E_MMU */
#ifdef CONFIG_44x
mfspr r0,SPRN_MMUCR
stw r0,MMUCR(r11)
#endif
mfspr r0,SPRN_SRR0
stw r0,_SRR0(r11)
mfspr r0,SPRN_SRR1
stw r0,_SRR1(r11)
	/* save the existing stack limit, then set the limit for the
	 * stack we are now on so that the thread_info struct at its
	 * base stays protected
	 */
mfspr r8,SPRN_SPRG_THREAD
lwz r0,KSP_LIMIT(r8)
stw r0,SAVED_KSP_LIMIT(r11)
rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
stw r0,KSP_LIMIT(r8)
/* fall through */
#endif
#ifdef CONFIG_40x
.globl crit_transfer_to_handler
crit_transfer_to_handler:
lwz r0,crit_r10@l(0)
stw r0,GPR10(r11)
lwz r0,crit_r11@l(0)
stw r0,GPR11(r11)
mfspr r0,SPRN_SRR0
stw r0,crit_srr0@l(0)
mfspr r0,SPRN_SRR1
stw r0,crit_srr1@l(0)
	/* save the existing stack limit, then set the limit for the
	 * stack we are now on so that the thread_info struct at its
	 * base stays protected
	 */
mfspr r8,SPRN_SPRG_THREAD
lwz r0,KSP_LIMIT(r8)
stw r0,saved_ksp_limit@l(0)
rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
stw r0,KSP_LIMIT(r8)
/* fall through */
#endif
/*
* This code finishes saving the registers to the exception frame
* and jumps to the appropriate handler for the exception, turning
* on address translation.
* Note that we rely on the caller having set cr0.eq iff the exception
* occurred in kernel mode (i.e. MSR:PR = 0).
*/
.globl transfer_to_handler_full
transfer_to_handler_full:
SAVE_NVGPRS(r11)
/* fall through */
.globl transfer_to_handler
transfer_to_handler:
stw r2,GPR2(r11)
stw r12,_NIP(r11)
stw r9,_MSR(r11)
andi. r2,r9,MSR_PR
mfctr r12
mfspr r2,SPRN_XER
stw r12,_CTR(r11)
stw r2,_XER(r11)
mfspr r12,SPRN_SPRG_THREAD
addi r2,r12,-THREAD
tovirt(r2,r2) /* set r2 to current */
beq 2f /* if from user, fix up THREAD.regs */
addi r11,r1,STACK_FRAME_OVERHEAD
stw r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
/* Check to see if the dbcr0 register is set up to debug. Use the
internal debug mode bit to do this. */
lwz r12,THREAD_DBCR0(r12)
andis. r12,r12,DBCR0_IDM@h
beq+ 3f
/* From user and task is ptraced - load up global dbcr0 */
li r12,-1 /* clear all pending debug events */
mtspr SPRN_DBSR,r12
lis r11,global_dbcr0@ha
tophys(r11,r11)
addi r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
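	/* global_dbcr0 has one 8-byte slot per CPU */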
CURRENT_THREAD_INFO(r9, r1)
lwz r9,TI_CPU(r9)
slwi r9,r9,3
add r11,r11,r9
#endif
lwz r12,0(r11)
mtspr SPRN_DBCR0,r12
lwz r12,4(r11)
addi r12,r12,-1
stw r12,4(r11)
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
CURRENT_THREAD_INFO(r9, r1)
tophys(r9, r9)
ACCOUNT_CPU_USER_ENTRY(r9, r11, r12)
#endif
b 3f
2: /* if from kernel, check interrupted DOZE/NAP mode and
* check for stack overflow
*/
lwz r9,KSP_LIMIT(r12)
cmplw r1,r9 /* if r1 <= ksp_limit */
ble- stack_ovf /* then the kernel stack overflowed */
5:
#if defined(CONFIG_6xx) || defined(CONFIG_E500)
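	/* if this exception interrupted a NAP or DOZE idle state, branch
	 * to the power-save restore paths at labels 4: and 7: below */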
CURRENT_THREAD_INFO(r9, r1)
tophys(r9,r9) /* check local flags */
lwz r12,TI_LOCAL_FLAGS(r9)
mtcrf 0x01,r12
bt- 31-TLF_NAPPING,4f
bt- 31-TLF_SLEEPING,7f
#endif /* CONFIG_6xx || CONFIG_E500 */
.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
mflr r9
lwz r11,0(r9) /* virtual address of handler */
lwz r9,4(r9) /* where to go when done */
#ifdef CONFIG_TRACE_IRQFLAGS
lis r12,reenable_mmu@h
ori r12,r12,reenable_mmu@l
mtspr SPRN_SRR0,r12
mtspr SPRN_SRR1,r10
SYNC
RFI
reenable_mmu: /* re-enable mmu so we can */
mfmsr r10
lwz r12,_MSR(r1)
xor r10,r10,r12
andi. r10,r10,MSR_EE /* Did EE change? */
beq 1f
/*
* The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
 * If we came from user mode there is only one stack frame on the stack, and
 * accessing CALLER_ADDR1 would cause an oops. So we need to create a dummy
 * stack frame to keep trace_hardirqs_off happy.
*
* This is handy because we also need to save a bunch of GPRs,
 * r3 can be different from GPR3(r1) at this point, r9 and r11
 * contain the old MSR and the handler address respectively,
* r4 & r5 can contain page fault arguments that need to be passed
* along as well. r12, CCR, CTR, XER etc... are left clobbered as
* they aren't useful past this point (aren't syscall arguments),
* the rest is restored from the exception frame.
*/
stwu r1,-32(r1)
stw r9,8(r1)
stw r11,12(r1)
stw r3,16(r1)
stw r4,20(r1)
stw r5,24(r1)
bl trace_hardirqs_off
lwz r5,24(r1)
lwz r4,20(r1)
lwz r3,16(r1)
lwz r11,12(r1)
lwz r9,8(r1)
addi r1,r1,32
lwz r0,GPR0(r1)
lwz r6,GPR6(r1)
lwz r7,GPR7(r1)
lwz r8,GPR8(r1)
1: mtctr r11
mtlr r9
bctr /* jump to handler */
#else /* CONFIG_TRACE_IRQFLAGS */
mtspr SPRN_SRR0,r11
mtspr SPRN_SRR1,r10
mtlr r9
SYNC
RFI /* jump to handler, enable MMU */
#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined (CONFIG_6xx) || defined(CONFIG_E500)
4: rlwinm r12,r12,0,~_TLF_NAPPING
stw r12,TI_LOCAL_FLAGS(r9)
b power_save_ppc32_restore
7: rlwinm r12,r12,0,~_TLF_SLEEPING
stw r12,TI_LOCAL_FLAGS(r9)
lwz r9,_MSR(r11) /* if sleeping, clear MSR.EE */
rlwinm r9,r9,0,~MSR_EE
lwz r12,_LINK(r11) /* and return to address in LR */
b fast_exception_return
#endif
/*
* On kernel stack overflow, load up an initial stack pointer
* and call StackOverflow(regs), which should not return.
*/
stack_ovf:
/* sometimes we use a statically-allocated stack, which is OK. */
lis r12,_end@h
ori r12,r12,_end@l
cmplw r1,r12
ble 5b /* r1 <= &_end is OK */
SAVE_NVGPRS(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
lis r1,init_thread_union@ha
addi r1,r1,init_thread_union@l
addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
lis r9,StackOverflow@ha
addi r9,r9,StackOverflow@l
LOAD_MSR_KERNEL(r10,MSR_KERNEL)
FIX_SRR1(r10,r12)
mtspr SPRN_SRR0,r9
mtspr SPRN_SRR1,r10
SYNC
RFI
/*
* Handle a system call.
*/
.stabs "arch/powerpc/kernel/",N_SO,0,0,0f
.stabs "entry_32.S",N_SO,0,0,0f
0:
_GLOBAL(DoSyscall)
stw r3,ORIG_GPR3(r1)
li r12,0
stw r12,RESULT(r1)
lwz r11,_CCR(r1) /* Clear SO bit in CR */
rlwinm r11,r11,0,4,2
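	/* rlwinm with MB=4, ME=2 wraps the mask, keeping every bit
	 * except bit 3, which is CR0[SO] */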
stw r11,_CCR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Returning from a syscall can (and generally will) hard-enable
* interrupts. You aren't supposed to call a syscall with
* interrupts disabled in the first place. However, to ensure
* that we get it right vs. lockdep if it happens, we force
* that hard enable here with appropriate tracing if we see
* that we have been called with interrupts off
*/
mfmsr r11
andi. r12,r11,MSR_EE
bne+ 1f
/* We came in with interrupts disabled, we enable them now */
bl trace_hardirqs_on
mfmsr r11
lwz r0,GPR0(r1)
lwz r3,GPR3(r1)
lwz r4,GPR4(r1)
ori r11,r11,MSR_EE
lwz r5,GPR5(r1)
lwz r6,GPR6(r1)
lwz r7,GPR7(r1)
lwz r8,GPR8(r1)
mtmsr r11
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
CURRENT_THREAD_INFO(r10, r1)
lwz r11,TI_FLAGS(r10)
andi. r11,r11,_TIF_SYSCALL_DOTRACE
bne- syscall_dotrace
syscall_dotrace_cont:
cmplwi 0,r0,NR_syscalls
lis r10,sys_call_table@h
ori r10,r10,sys_call_table@l
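	/* the table holds 4-byte handler pointers, so shift the syscall
	 * number left by 2 to form the byte offset */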
slwi r0,r0,2
bge- 66f
lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
mtlr r10
addi r9,r1,STACK_FRAME_OVERHEAD
PPC440EP_ERR42
blrl /* Call handler */
.globl ret_from_syscall
ret_from_syscall:
mr r6,r3
CURRENT_THREAD_INFO(r12, r1)
/* disable interrupts so current_thread_info()->flags can't change */
LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
/* Note: We don't bother telling lockdep about it */
SYNC
MTMSRD(r10)
lwz r9,TI_FLAGS(r12)
li r8,-MAX_ERRNO
andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
bne- syscall_exit_work
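	/* a return value in [-MAX_ERRNO, -1] means failure: set CR0[SO]
	 * and negate r3 into a positive errno for user space */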
cmplw 0,r3,r8
blt+ syscall_exit_cont
lwz r11,_CCR(r1) /* Load CR */
neg r3,r3
oris r11,r11,0x1000 /* Set SO bit in CR */
stw r11,_CCR(r1)
syscall_exit_cont:
lwz r8,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
/* If we are going to return from the syscall with interrupts
	 * off, we trace that here. It shouldn't happen, but we
	 * want to catch the bugger if it does, right?
*/
andi. r10,r8,MSR_EE
bne+ 1f
stw r3,GPR3(r1)
bl trace_hardirqs_off
lwz r3,GPR3(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
/* If the process has its own DBCR0 value, load it up. The internal
debug mode bit tells us that dbcr0 should be loaded. */
lwz r0,THREAD+THREAD_DBCR0(r2)
andis. r10,r0,DBCR0_IDM@h
bnel- load_dbcr0
#endif
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
lis r4,icache_44x_need_flush@ha
lwz r5,icache_44x_need_flush@l(r4)
cmplwi cr0,r5,0
bne- 2f
1:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
lwarx r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
stwcx. r0,0,r1 /* to clear the reservation */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
andi. r4,r8,MSR_PR
beq 3f
CURRENT_THREAD_INFO(r4, r1)
ACCOUNT_CPU_USER_EXIT(r4, r5, r7)
3:
#endif
lwz r4,_LINK(r1)
lwz r5,_CCR(r1)
mtlr r4
mtcr r5
lwz r7,_NIP(r1)
FIX_SRR1(r8, r0)
lwz r2,GPR2(r1)
lwz r1,GPR1(r1)
mtspr SPRN_SRR0,r7
mtspr SPRN_SRR1,r8
SYNC
RFI
#ifdef CONFIG_44x
2: li r7,0
iccci r0,r0
stw r7,icache_44x_need_flush@l(r4)
b 1b
#endif /* CONFIG_44x */
66: li r3,-ENOSYS
b ret_from_syscall
.globl ret_from_fork
ret_from_fork:
REST_NVGPRS(r1)
bl schedule_tail
li r3,0
b ret_from_syscall
.globl ret_from_kernel_thread
ret_from_kernel_thread:
REST_NVGPRS(r1)
bl schedule_tail
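	/* copy_thread put the thread function in r14 and its argument in
	 * r15; call the function, then exit via ret_from_syscall when it
	 * returns */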
mtlr r14
mr r3,r15
PPC440EP_ERR42
blrl
li r3,0
b ret_from_syscall
/* Traced system call support */
syscall_dotrace:
SAVE_NVGPRS(r1)
li r0,0xc00
stw r0,_TRAP(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_syscall_trace_enter
/*
* Restore argument registers possibly just changed.
* We use the return value of do_syscall_trace_enter
* for call number to look up in the table (r0).
*/
mr r0,r3
lwz r3,GPR3(r1)
lwz r4,GPR4(r1)
lwz r5,GPR5(r1)
lwz r6,GPR6(r1)
lwz r7,GPR7(r1)
lwz r8,GPR8(r1)
REST_NVGPRS(r1)
cmplwi r0,NR_syscalls
/* Return code is already in r3 thanks to do_syscall_trace_enter() */
bge- ret_from_syscall
b syscall_dotrace_cont
syscall_exit_work:
andi. r0,r9,_TIF_RESTOREALL
beq+ 0f
REST_NVGPRS(r1)
b 2f
0: cmplw 0,r3,r8
blt+ 1f
andi. r0,r9,_TIF_NOERROR
bne- 1f
lwz r11,_CCR(r1) /* Load CR */
neg r3,r3
oris r11,r11,0x1000 /* Set SO bit in CR */
stw r11,_CCR(r1)
1: stw r6,RESULT(r1) /* Save result */
stw r3,GPR3(r1) /* Update return value */
2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
beq 4f
/* Clear per-syscall TIF flags if any are set. */
li r11,_TIF_PERSYSCALL_MASK
addi r12,r12,TI_FLAGS
3: lwarx r8,0,r12
andc r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
dcbt 0,r12
#endif
stwcx. r8,0,r12
bne- 3b
subi r12,r12,TI_FLAGS
4: /* Anything which requires enabling interrupts? */
andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
beq ret_from_except
/* Re-enable interrupts. There is no need to trace that with
* lockdep as we are supposed to have IRQs on at this point
*/
ori r10,r10,MSR_EE
SYNC
MTMSRD(r10)
/* Save NVGPRS if they're not saved already */
lwz r4,_TRAP(r1)
andi. r4,r4,1
beq 5f
SAVE_NVGPRS(r1)
li r4,0xc00
stw r4,_TRAP(r1)
5:
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_syscall_trace_leave
b ret_from_except_full
/*
* The fork/clone functions need to copy the full register set into
* the child process. Therefore we need to save all the nonvolatile
* registers (r13 - r31) before calling the C code.
*/
.globl ppc_fork
ppc_fork:
SAVE_NVGPRS(r1)
lwz r0,_TRAP(r1)
rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
stw r0,_TRAP(r1) /* register set saved */
b sys_fork
.globl ppc_vfork
ppc_vfork:
SAVE_NVGPRS(r1)
lwz r0,_TRAP(r1)
rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
stw r0,_TRAP(r1) /* register set saved */
b sys_vfork
.globl ppc_clone
ppc_clone:
SAVE_NVGPRS(r1)
lwz r0,_TRAP(r1)
rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
stw r0,_TRAP(r1) /* register set saved */
b sys_clone
.globl ppc_swapcontext
ppc_swapcontext:
SAVE_NVGPRS(r1)
lwz r0,_TRAP(r1)
rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
stw r0,_TRAP(r1) /* register set saved */
b sys_swapcontext
/*
* Top-level page fault handling.
* This is in assembler because if do_page_fault tells us that
* it is a bad kernel page fault, we want to save the non-volatile
* registers before calling bad_page_fault.
*/
.globl handle_page_fault
handle_page_fault:
stw r4,_DAR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_page_fault
cmpwi r3,0
beq+ ret_from_except
SAVE_NVGPRS(r1)
lwz r0,_TRAP(r1)
clrrwi r0,r0,1
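	/* clear the low bit of _TRAP to indicate the full register set
	 * has been saved */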
stw r0,_TRAP(r1)
mr r5,r3
addi r3,r1,STACK_FRAME_OVERHEAD
lwz r4,_DAR(r1)
bl bad_page_fault
b ret_from_except_full
/*
* This routine switches between two different tasks. The process
* state of one is saved on its kernel stack. Then the state
* of the other is restored from its kernel stack. The memory
* management hardware is updated to the second process's state.
* Finally, we can return to the second process.
* On entry, r3 points to the THREAD for the current task, r4
* points to the THREAD for the new task.
*
* This routine is always called with interrupts disabled.
*
* Note: there are two ways to get to the "going out" portion
* of this code; either by coming in via the entry (_switch)
* or via "fork" which must set up an environment equivalent
 * to the "_switch" path. If you change this, you'll have to
* change the fork code also.
*
* The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
*/
_GLOBAL(_switch)
stwu r1,-INT_FRAME_SIZE(r1)
mflr r0
stw r0,INT_FRAME_SIZE+4(r1)
/* r3-r12 are caller saved -- Cort */
SAVE_NVGPRS(r1)
stw r0,_NIP(r1) /* Return to switch caller */
mfmsr r11
li r0,MSR_FP /* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
oris r0,r0,MSR_VEC@h /* Disable altivec */
mfspr r12,SPRN_VRSAVE /* save vrsave register value */
stw r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
oris r0,r0,MSR_SPE@h /* Disable SPE */
mfspr r12,SPRN_SPEFSCR /* save spefscr register value */
stw r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
and. r0,r0,r11 /* FP or altivec or SPE enabled? */
beq+ 1f
andc r11,r11,r0
MTMSRD(r11)
isync
1: stw r11,_MSR(r1)
mfcr r10
stw r10,_CCR(r1)
stw r1,KSP(r3) /* Set old stack pointer */
#ifdef CONFIG_SMP
/* We need a sync somewhere here to make sure that if the
* previous task gets rescheduled on another CPU, it sees all
* stores it has performed on this one.
*/
sync
#endif /* CONFIG_SMP */
tophys(r0,r4)
CLR_TOP32(r0)
mtspr SPRN_SPRG_THREAD,r0 /* Update current THREAD phys addr */
lwz r1,KSP(r4) /* Load new stack pointer */
/* save the old current 'last' for return value */
mr r3,r2
addi r2,r4,-THREAD /* Update current */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
lwz r0,THREAD+THREAD_VRSAVE(r2)
mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
lwz r0,THREAD+THREAD_SPEFSCR(r2)
mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
lwz r0,_CCR(r1)
mtcrf 0xFF,r0
/* r3-r12 are destroyed -- Cort */
REST_NVGPRS(r1)
lwz r4,_NIP(r1) /* Return to _switch caller in new task */
mtlr r4
addi r1,r1,INT_FRAME_SIZE
blr
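/*
 * fast_exception_return: entered with r9 = MSR to restore, r12 = return
 * address and r11 pointing at the exception frame; restores the remaining
 * registers from the frame and returns with an RFI, bypassing the full
 * ret_from_except path.
 */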
.globl fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
andi. r10,r9,MSR_RI /* check for recoverable interrupt */
beq 1f /* if not, we've got problems */
#endif
2: REST_4GPRS(3, r11)
lwz r10,_CCR(r11)
REST_GPR(1, r11)
mtcr r10
lwz r10,_LINK(r11)
mtlr r10
REST_GPR(10, r11)
mtspr SPRN_SRR1,r9
mtspr SPRN_SRR0,r12
REST_GPR(9, r11)
REST_GPR(12, r11)
lwz r11,GPR11(r11)
SYNC
RFI
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1: lis r3,exc_exit_restart_end@ha
addi r3,r3,exc_exit_restart_end@l
cmplw r12,r3
bge 3f
lis r4,exc_exit_restart@ha
addi r4,r4,exc_exit_restart@l
cmplw r12,r4
blt 3f
lis r3,fee_restarts@ha
tophys(r3,r3)
lwz r5,fee_restarts@l(r3)
addi r5,r5,1
stw r5,fee_restarts@l(r3)
mr r12,r4 /* restart at exc_exit_restart */
b 2b
.section .bss
.align 2
fee_restarts:
.space 4
.previous
/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
b 2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
li r10,-1
stw r10,_TRAP(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
lis r10,MSR_KERNEL@h
ori r10,r10,MSR_KERNEL@l
bl transfer_to_handler_full
.long nonrecoverable_exception
.long ret_from_except
#endif
.globl ret_from_except_full
ret_from_except_full:
REST_NVGPRS(r1)
/* fall through */
.globl ret_from_except
ret_from_except:
/* Hard-disable interrupts so that current_thread_info()->flags
* can't change between when we test it and when we return
* from the interrupt. */
/* Note: We don't bother telling lockdep about it */
LOAD_MSR_KERNEL(r10,MSR_KERNEL)
SYNC /* Some chip revs have problems here... */
MTMSRD(r10) /* disable interrupts */
lwz r3,_MSR(r1) /* Returning to user mode? */
andi. r0,r3,MSR_PR
beq resume_kernel
user_exc_return: /* r10 contains MSR_KERNEL here */
/* Check current_thread_info()->flags */
CURRENT_THREAD_INFO(r9, r1)
lwz r9,TI_FLAGS(r9)
andi. r0,r9,_TIF_USER_WORK_MASK
bne do_work
restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
/* Check whether this process has its own DBCR0 value. The internal
debug mode bit tells us that dbcr0 should be loaded. */
lwz r0,THREAD+THREAD_DBCR0(r2)
andis. r10,r0,DBCR0_IDM@h
bnel- load_dbcr0
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
CURRENT_THREAD_INFO(r9, r1)
ACCOUNT_CPU_USER_EXIT(r9, r10, r11)
#endif
b restore
/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
CURRENT_THREAD_INFO(r9, r1)
lwz r8,TI_FLAGS(r9)
andis. r0,r8,_TIF_EMULATE_STACK_STORE@h
beq+ 1f
addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
lwz r3,GPR1(r1)
subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
mr r4,r1 /* src: current exception frame */
mr r1,r3 /* Reroute the trampoline frame to r1 */
/* Copy from the original to the trampoline. */
li r5,INT_FRAME_SIZE/4 /* size: INT_FRAME_SIZE */
li r6,0 /* start offset: 0 */
mtctr r5
2: lwzx r0,r6,r4
stwx r0,r6,r3
addi r6,r6,4
bdnz 2b
/* Do real store operation to complete stwu */
lwz r5,GPR1(r1)
stw r8,0(r5)
/* Clear _TIF_EMULATE_STACK_STORE flag */
lis r11,_TIF_EMULATE_STACK_STORE@h
addi r5,r9,TI_FLAGS
0: lwarx r8,0,r5
andc r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
dcbt 0,r5
#endif
stwcx. r8,0,r5
bne- 0b
1:
#ifdef CONFIG_PREEMPT
/* check current_thread_info->preempt_count */
lwz r0,TI_PREEMPT(r9)
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
bne restore
andi. r8,r8,_TIF_NEED_RESCHED
beq+ restore
lwz r3,_MSR(r1)
andi. r0,r3,MSR_EE /* interrupts off? */
beq restore /* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
/* Lockdep thinks irqs are enabled, we need to call
* preempt_schedule_irq with IRQs off, so we inform lockdep
* now that we -did- turn them off already
*/
bl trace_hardirqs_off
#endif
1: bl preempt_schedule_irq
CURRENT_THREAD_INFO(r9, r1)
lwz r3,TI_FLAGS(r9)
andi. r0,r3,_TIF_NEED_RESCHED
bne- 1b
#ifdef CONFIG_TRACE_IRQFLAGS
/* And now, to properly rebalance the above, we tell lockdep they
* are being turned back on, which will happen when we return
*/
bl trace_hardirqs_on
#endif
#endif /* CONFIG_PREEMPT */
/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
b 1f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
lis r4,icache_44x_need_flush@ha
lwz r5,icache_44x_need_flush@l(r4)
cmplwi cr0,r5,0
beq+ 1f
li r6,0
iccci r0,r0
stw r6,icache_44x_need_flush@l(r4)
1:
#endif /* CONFIG_44x */
lwz r9,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
/* Lockdep doesn't know about the fact that IRQs are temporarily turned
* off in this assembly code while peeking at TI_FLAGS() and such. However
* we need to inform it if the exception turned interrupts off, and we
	 * are about to turn them back on.
	 *
	 * The problem here sadly is that we don't know whether the exception was
	 * one that turned interrupts off or not. So we always tell lockdep about
	 * turning them on here when we go back to wherever we came from with EE
	 * on, even if that may mean some redundant calls being tracked. Maybe later
* we could encode what the exception did somewhere or test the exception
* type in the pt_regs but that sounds overkill
*/
andi. r10,r9,MSR_EE
beq 1f
/*
* Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
* which is the stack frame here, we need to force a stack frame
* in case we came from user space.
*/
stwu r1,-32(r1)
mflr r0
stw r0,4(r1)
stwu r1,-32(r1)
bl trace_hardirqs_on
lwz r1,0(r1)
lwz r1,0(r1)
lwz r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
lwz r0,GPR0(r1)
lwz r2,GPR2(r1)
REST_4GPRS(3, r1)
REST_2GPRS(7, r1)
lwz r10,_XER(r1)
lwz r11,_CTR(r1)
mtspr SPRN_XER,r10
mtctr r11
PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
lwarx r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
stwcx. r0,0,r1 /* to clear the reservation */
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
andi. r10,r9,MSR_RI /* check if this exception occurred */
beql nonrecoverable /* at a bad place (MSR:RI = 0) */
lwz r10,_CCR(r1)
lwz r11,_LINK(r1)
mtcrf 0xFF,r10
mtlr r11
/*
* Once we put values in SRR0 and SRR1, we are in a state
* where exceptions are not recoverable, since taking an
* exception will trash SRR0 and SRR1. Therefore we clear the
* MSR:RI bit to indicate this. If we do take an exception,
* we can't return to the point of the exception but we
* can restart the exception exit path at the label
* exc_exit_restart below. -- paulus
*/
LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
SYNC
MTMSRD(r10) /* clear the RI bit */
.globl exc_exit_restart
exc_exit_restart:
lwz r12,_NIP(r1)
FIX_SRR1(r9,r10)
mtspr SPRN_SRR0,r12
mtspr SPRN_SRR1,r9
REST_4GPRS(9, r1)
lwz r1,GPR1(r1)
.globl exc_exit_restart_end
exc_exit_restart_end:
SYNC
RFI
#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
/*
* This is a bit different on 4xx/Book-E because it doesn't have
* the RI bit in the MSR.
* The TLB miss handler checks if we have interrupted
* the exception exit path and restarts it if so
* (well maybe one day it will... :).
*/
lwz r11,_LINK(r1)
mtlr r11
lwz r10,_CCR(r1)
mtcrf 0xff,r10
REST_2GPRS(9, r1)
.globl exc_exit_restart
exc_exit_restart:
lwz r11,_NIP(r1)
lwz r12,_MSR(r1)
exc_exit_start:
mtspr SPRN_SRR0,r11
mtspr SPRN_SRR1,r12
REST_2GPRS(11, r1)
lwz r1,GPR1(r1)
.globl exc_exit_restart_end
exc_exit_restart_end:
PPC405_ERR77_SYNC
rfi
b . /* prevent prefetch past rfi */
/*
* Returning from a critical interrupt in user mode doesn't need
* to be any different from a normal exception. For a critical
* interrupt in the kernel, we just return (without checking for
* preemption) since the interrupt may have happened at some crucial
* place (e.g. inside the TLB miss handler), and because we will be
* running with r1 pointing into critical_stack, not the current
* process's kernel stack (and therefore current_thread_info() will
* give the wrong answer).
* We have to restore various SPRs that may have been in use at the
* time of the critical interrupt.
*
*/
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR \
/* avoid any possible TLB misses here by turning off MSR.DR, we \
* assume the instructions here are mapped by a pinned TLB entry */ \
li r10,MSR_IR; \
mtmsr r10; \
isync; \
tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi) \
REST_NVGPRS(r1); \
lwz r3,_MSR(r1); \
andi. r3,r3,MSR_PR; \
LOAD_MSR_KERNEL(r10,MSR_KERNEL); \
bne user_exc_return; \
lwz r0,GPR0(r1); \
lwz r2,GPR2(r1); \
REST_4GPRS(3, r1); \
REST_2GPRS(7, r1); \
lwz r10,_XER(r1); \
lwz r11,_CTR(r1); \
mtspr SPRN_XER,r10; \
mtctr r11; \
PPC405_ERR77(0,r1); \
stwcx. r0,0,r1; /* to clear the reservation */ \
lwz r11,_LINK(r1); \
mtlr r11; \
lwz r10,_CCR(r1); \
mtcrf 0xff,r10; \
PPC_40x_TURN_OFF_MSR_DR; \
lwz r9,_DEAR(r1); \
lwz r10,_ESR(r1); \
mtspr SPRN_DEAR,r9; \
mtspr SPRN_ESR,r10; \
lwz r11,_NIP(r1); \
lwz r12,_MSR(r1); \
mtspr exc_lvl_srr0,r11; \
mtspr exc_lvl_srr1,r12; \
lwz r9,GPR9(r1); \
lwz r12,GPR12(r1); \
lwz r10,GPR10(r1); \
lwz r11,GPR11(r1); \
lwz r1,GPR1(r1); \
PPC405_ERR77_SYNC; \
exc_lvl_rfi; \
b .; /* prevent prefetch past exc_lvl_rfi */
#define RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1) \
lwz r9,_##exc_lvl_srr0(r1); \
lwz r10,_##exc_lvl_srr1(r1); \
mtspr SPRN_##exc_lvl_srr0,r9; \
mtspr SPRN_##exc_lvl_srr1,r10;
#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define RESTORE_MAS7 \
lwz r11,MAS7(r1); \
mtspr SPRN_MAS7,r11;
#else
#define RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS \
lwz r9,MAS0(r1); \
lwz r10,MAS1(r1); \
lwz r11,MAS2(r1); \
mtspr SPRN_MAS0,r9; \
lwz r9,MAS3(r1); \
mtspr SPRN_MAS1,r10; \
lwz r10,MAS6(r1); \
mtspr SPRN_MAS2,r11; \
mtspr SPRN_MAS3,r9; \
mtspr SPRN_MAS6,r10; \
RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS \
lwz r9,MMUCR(r1); \
mtspr SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif
#ifdef CONFIG_40x
.globl ret_from_crit_exc
ret_from_crit_exc:
mfspr r9,SPRN_SPRG_THREAD
lis r10,saved_ksp_limit@ha;
lwz r10,saved_ksp_limit@l(r10);
tovirt(r9,r9);
stw r10,KSP_LIMIT(r9)
lis r9,crit_srr0@ha;
lwz r9,crit_srr0@l(r9);
lis r10,crit_srr1@ha;
lwz r10,crit_srr1@l(r10);
mtspr SPRN_SRR0,r9;
mtspr SPRN_SRR1,r10;
RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
#endif /* CONFIG_40x */
#ifdef CONFIG_BOOKE
.globl ret_from_crit_exc
ret_from_crit_exc:
mfspr r9,SPRN_SPRG_THREAD
lwz r10,SAVED_KSP_LIMIT(r1)
stw r10,KSP_LIMIT(r9)
RESTORE_xSRR(SRR0,SRR1);
RESTORE_MMU_REGS;
RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
.globl ret_from_debug_exc
ret_from_debug_exc:
mfspr r9,SPRN_SPRG_THREAD
lwz r10,SAVED_KSP_LIMIT(r1)
stw r10,KSP_LIMIT(r9)
lwz r9,THREAD_INFO-THREAD(r9)
CURRENT_THREAD_INFO(r10, r1)
lwz r10,TI_PREEMPT(r10)
stw r10,TI_PREEMPT(r9)
RESTORE_xSRR(SRR0,SRR1);
RESTORE_xSRR(CSRR0,CSRR1);
RESTORE_MMU_REGS;
RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
.globl ret_from_mcheck_exc
ret_from_mcheck_exc:
mfspr r9,SPRN_SPRG_THREAD
lwz r10,SAVED_KSP_LIMIT(r1)
stw r10,KSP_LIMIT(r9)
RESTORE_xSRR(SRR0,SRR1);
RESTORE_xSRR(CSRR0,CSRR1);
RESTORE_xSRR(DSRR0,DSRR1);
RESTORE_MMU_REGS;
RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
#endif /* CONFIG_BOOKE */
/*
* Load the DBCR0 value for a task that is being ptraced,
* having first saved away the global DBCR0. Note that r0
* has the dbcr0 value to set upon entry to this.
*/
load_dbcr0:
mfmsr r10 /* first disable debug exceptions */
rlwinm r10,r10,0,~MSR_DE
mtmsr r10
isync
mfspr r10,SPRN_DBCR0
lis r11,global_dbcr0@ha
addi r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
CURRENT_THREAD_INFO(r9, r1)
lwz r9,TI_CPU(r9)
slwi r9,r9,3
add r11,r11,r9
#endif
stw r10,0(r11)
mtspr SPRN_DBCR0,r0
lwz r10,4(r11)
addi r10,r10,1
stw r10,4(r11)
li r11,-1
mtspr SPRN_DBSR,r11 /* clear all pending debug events */
blr
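/* global_dbcr0: 8 bytes per CPU holding the saved global DBCR0 value and a
 * use count (referenced from transfer_to_handler and load_dbcr0 above) */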
.section .bss
.align 4
global_dbcr0:
.space 8*NR_CPUS
.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
do_work: /* r10 contains MSR_KERNEL here */
andi. r0,r9,_TIF_NEED_RESCHED
beq do_user_signal
do_resched: /* r10 contains MSR_KERNEL here */
/* Note: We don't need to inform lockdep that we are enabling
* interrupts here. As far as it knows, they are already enabled
*/
ori r10,r10,MSR_EE
SYNC
MTMSRD(r10) /* hard-enable interrupts */
bl schedule
recheck:
	/* Note: And we don't tell it that we are disabling them again
	 * either. Those disable/enable cycles used to peek at
* TI_FLAGS aren't advertised.
*/
LOAD_MSR_KERNEL(r10,MSR_KERNEL)
SYNC
MTMSRD(r10) /* disable interrupts */
CURRENT_THREAD_INFO(r9, r1)
lwz r9,TI_FLAGS(r9)
andi. r0,r9,_TIF_NEED_RESCHED
bne- do_resched
andi. r0,r9,_TIF_USER_WORK_MASK
beq restore_user
do_user_signal: /* r10 contains MSR_KERNEL here */
ori r10,r10,MSR_EE
SYNC
MTMSRD(r10) /* hard-enable interrupts */
/* save r13-r31 in the exception frame, if not already done */
lwz r3,_TRAP(r1)
andi. r0,r3,1
beq 2f
SAVE_NVGPRS(r1)
rlwinm r3,r3,0,0,30
stw r3,_TRAP(r1)
2: addi r3,r1,STACK_FRAME_OVERHEAD
mr r4,r9
bl do_notify_resume
REST_NVGPRS(r1)
b recheck
/*
* We come here when we are at the end of handling an exception
* that occurred at a place where taking an exception will lose
* state information, such as the contents of SRR0 and SRR1.
*/
nonrecoverable:
lis r10,exc_exit_restart_end@ha
addi r10,r10,exc_exit_restart_end@l
cmplw r12,r10
bge 3f
lis r11,exc_exit_restart@ha
addi r11,r11,exc_exit_restart@l
cmplw r12,r11
blt 3f
lis r10,ee_restarts@ha
lwz r12,ee_restarts@l(r10)
addi r12,r12,1
stw r12,ee_restarts@l(r10)
mr r12,r11 /* restart at exc_exit_restart */
blr
3: /* OK, we can't recover, kill this process */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
lwz r3,_TRAP(r1)
andi. r0,r3,1
beq 4f
SAVE_NVGPRS(r1)
rlwinm r3,r3,0,0,30
stw r3,_TRAP(r1)
4: addi r3,r1,STACK_FRAME_OVERHEAD
bl nonrecoverable_exception
/* shouldn't return */
b 4b
.section .bss
.align 2
ee_restarts:
.space 4
.previous
/*
* PROM code for specific machines follows. Put it
* here so it's easy to add arch-specific sections later.
* -- Cort
*/
#ifdef CONFIG_PPC_RTAS
/*
* On CHRP, the Run-Time Abstraction Services (RTAS) have to be
* called with the MMU off.
*/
_GLOBAL(enter_rtas)
stwu r1,-INT_FRAME_SIZE(r1)
mflr r0
stw r0,INT_FRAME_SIZE+4(r1)
LOAD_REG_ADDR(r4, rtas)
lis r6,1f@ha /* physical return address for rtas */
addi r6,r6,1f@l
tophys(r6,r6)
tophys(r7,r1)
lwz r8,RTASENTRY(r4)
lwz r4,RTASBASE(r4)
mfmsr r9
stw r9,8(r1)
LOAD_MSR_KERNEL(r0,MSR_KERNEL)
SYNC /* disable interrupts so SRR0/1 */
MTMSRD(r0) /* don't get trashed */
li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
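	/* RTAS runs with the MMU off: enter it with MSR_IR and MSR_DR cleared */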
mtlr r6
mtspr SPRN_SPRG_RTAS,r7
mtspr SPRN_SRR0,r8
mtspr SPRN_SRR1,r9
RFI
1: tophys(r9,r1)
lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */
lwz r9,8(r9) /* original msr value */
FIX_SRR1(r9,r0)
addi r1,r1,INT_FRAME_SIZE
li r0,0
mtspr SPRN_SPRG_RTAS,r0
mtspr SPRN_SRR0,r8
mtspr SPRN_SRR1,r9
RFI /* return to caller */
.globl machine_check_in_rtas
machine_check_in_rtas:
twi 31,0,0
/* XXX load up BATs and panic */
#endif /* CONFIG_PPC_RTAS */
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
/*
 * On PPC32, _mcount is required to preserve the link register,
 * but we have r0 to play with. We use r0 to move _mcount's own
 * return address into the ctr register, restore the link register
 * from the caller's stack frame and then jump back via ctr.
*/
mflr r0
mtctr r0
lwz r0, 4(r1)
mtlr r0
bctr
_GLOBAL(ftrace_caller)
MCOUNT_SAVE_FRAME
/* r3 ends up with link register */
subi r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
bl ftrace_stub
nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
b ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
MCOUNT_RESTORE_FRAME
/* old link register ends up in ctr reg */
bctr
#else
_GLOBAL(mcount)
_GLOBAL(_mcount)
MCOUNT_SAVE_FRAME
subi r3, r3, MCOUNT_INSN_SIZE
LOAD_REG_ADDR(r5, ftrace_trace_function)
lwz r5,0(r5)
mtctr r5
bctrl
nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
b ftrace_graph_caller
#endif
MCOUNT_RESTORE_FRAME
bctr
#endif
_GLOBAL(ftrace_stub)
blr
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
/* load r4 with local address */
lwz r4, 44(r1)
subi r4, r4, MCOUNT_INSN_SIZE
	/* Grab the LR out of the caller's stack frame */
lwz r3,52(r1)
bl prepare_ftrace_return
nop
/*
* prepare_ftrace_return gives us the address we divert to.
 * Change the LR in the caller's stack frame to this.
*/
stw r3,52(r1)
MCOUNT_RESTORE_FRAME
/* old link register ends up in ctr reg */
bctr
_GLOBAL(return_to_handler)
/* need to save return values */
stwu r1, -32(r1)
stw r3, 20(r1)
stw r4, 16(r1)
stw r31, 12(r1)
mr r31, r1
bl ftrace_return_to_handler
nop
/* return value has real return address */
mtlr r3
lwz r3, 20(r1)
lwz r4, 16(r1)
lwz r31,12(r1)
lwz r1, 0(r1)
/* Jump back to real return address */
blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */