/*
 * linux/arch/unicore32/kernel/entry.S
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * Low-level vector interface routines
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/unistd.h>
#include <generated/asm-offsets.h>
#include "debug-macro.S"

@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
@
#define S_OFF		8
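
/*
 * Illustrative sketch (not built): the frame the entry code constructs,
 * expressed as C.  "struct syscall_frame" is a hypothetical name; the
 * pt_regs layout itself comes from asm/ptrace.h.  The two extra words
 * pushed below pt_regs are what S_OFF accounts for:
 *
 *	struct syscall_frame {			// hypothetical
 *		unsigned long args56[2];	// syscall args 5 and 6 (S_OFF == 8)
 *		struct pt_regs regs;		// r0..r28, sp, lr, pc, asr, orig_r0
 *	};
 */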

/*
 * The SWI code relies on the fact that R0 is at the bottom of the stack
 * (due to slow/fast restore user regs).
 */
#if S_R0 != 0
#error "Please fix"
#endif

	.macro	zero_fp
#ifdef CONFIG_FRAME_POINTER
	mov	fp, #0
#endif
	.endm

	.macro	alignment_trap, rtemp
#ifdef CONFIG_ALIGNMENT_TRAP
	ldw	\rtemp, .LCcralign
	ldw	\rtemp, [\rtemp]
	movc	p0.c1, \rtemp, #0
#endif
	.endm
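
/*
 * In C terms (sketch; write_cp0_c1() is a hypothetical accessor standing
 * in for the "movc p0.c1, rX, #0" control-register write):
 *
 *	write_cp0_c1(cr_alignment);	// re-arm alignment checking in kernel mode
 */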

	.macro	load_user_sp_lr, rd, rtemp, offset = 0
	mov	\rtemp, asr
	xor	\rtemp, \rtemp, #(PRIV_MODE ^ SUSR_MODE)
	mov.a	asr, \rtemp			@ switch to the SUSR mode

	ldw	sp, [\rd+], #\offset		@ load sp_user
	ldw	lr, [\rd+], #\offset + 4	@ load lr_user

	xor	\rtemp, \rtemp, #(PRIV_MODE ^ SUSR_MODE)
	mov.a	asr, \rtemp			@ switch back to the PRIV mode
	.endm

	.macro	priv_exit, rpsr
	mov.a	bsr, \rpsr
	ldm.w	(r0 - r15), [sp]+
	ldm.b	(r16 - pc), [sp]+		@ load r16 - pc, asr
	.endm

	.macro	restore_user_regs, fast = 0, offset = 0
	ldw	r1, [sp+], #\offset + S_PSR	@ get calling asr
	ldw	lr, [sp+], #\offset + S_PC	@ get pc
	mov.a	bsr, r1				@ save in bsr_priv
	.if	\fast
	add	sp, sp, #\offset + S_R1		@ r0 is syscall return value
	ldm.w	(r1 - r15), [sp]+		@ get calling r1 - r15
	ldur	(r16 - lr), [sp]+		@ get calling r16 - lr
	.else
	ldm.w	(r0 - r15), [sp]+		@ get calling r0 - r15
	ldur	(r16 - lr), [sp]+		@ get calling r16 - lr
	.endif
	nop
	add	sp, sp, #S_FRAME_SIZE - S_R16
	mov.a	pc, lr				@ return
						@ and move bsr_priv into asr
	.endm

	.macro	get_thread_info, rd
	mov	\rd, sp >> #13
	mov	\rd, \rd << #13
	.endm
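
/*
 * Equivalent C (sketch): thread_info sits at the bottom of the 8KiB
 * (2^13 byte) kernel stack, so clearing the low 13 bits of sp finds it:
 *
 *	ti = (struct thread_info *)((unsigned long)sp & ~0x1fffUL);
 */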

	.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
	ldw	\base, =(PKUNITY_INTC_BASE)
	ldw	\irqstat, [\base+], #0xC	@ INTC_ICIP
	ldw	\tmp, [\base+], #0x4		@ INTC_ICMR
	and.a	\irqstat, \irqstat, \tmp
	beq	1001f
	cntlz	\irqnr, \irqstat
	rsub	\irqnr, \irqnr, #31
1001:
	.endm
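
/*
 * In C terms (sketch): pending sources are ICIP masked by ICMR, and the
 * highest-numbered pending source wins, since cntlz counts leading zeros:
 *
 *	status = readl(ICIP) & readl(ICMR);
 *	if (status)
 *		irqnr = 31 - clz(status);	// index of highest set bit
 */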

#ifdef CONFIG_DEBUG_LL
	.macro	printreg, reg, temp
	adr	\temp, 901f
	stm	(r0-r3), [\temp]+
	stw	lr, [\temp+], #0x10
	mov	r0, \reg
	b.l	printhex8
	mov	r0, #':'
	b.l	printch
	mov	r0, pc
	b.l	printhex8
	adr	r0, 902f
	b.l	printascii
	adr	\temp, 901f
	ldm	(r0-r3), [\temp]+
	ldw	lr, [\temp+], #0x10
	b	903f
901:	.word	0, 0, 0, 0, 0		@ r0-r3, lr
902:	.asciz	": epip4d\n"
	.align
903:
	.endm
#endif

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - r0 to r6.
 *
 * Note that tbl == why is intentional: the two are never live at the
 * same time.
 */
scno	.req	r21		@ syscall number
tbl	.req	r22		@ syscall table pointer
why	.req	r22		@ Linux syscall (!= 0)
tsk	.req	r23		@ current thread_info

/*
 * Interrupt handling.  Preserves r17, r18, r19
 */
	.macro	intr_handler
1:	get_irqnr_and_base r0, r6, r5, lr
	beq	2f
	mov	r1, sp
	@
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	@
	adr	lr, 1b
	b	asm_do_IRQ
2:
	.endm
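
/*
 * The C handler this loops into (signature per the register comment
 * above; the asmlinkage spelling is an assumption):
 *
 *	asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);
 *
 * Pointing lr back at 1b makes asm_do_IRQ "return" into another poll of
 * the interrupt controller, so all pending sources drain before exit.
 */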

/*
 * PRIV mode handlers
 */
	.macro	priv_entry
	sub	sp, sp, #(S_FRAME_SIZE - 4)
	stm	(r1 - r15), [sp]+
	add	r5, sp, #S_R15
	stm	(r16 - r28), [r5]+

	ldm	(r1 - r3), [r0]+
	add	r5, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r4, #-1			@  ""  ""      ""       ""
	add	r0, sp, #(S_FRAME_SIZE - 4)
	stw.w	r1, [sp+], #-4		@ save the "real" r0 copied
					@ from the exception stack

	mov	r1, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r0 - sp_priv
	@  r1 - lr_priv
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - bsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stm	(r0 - r4), [r5]+
	.endm

/*
 * User mode handlers
 */
	.macro	user_entry
	sub	sp, sp, #S_FRAME_SIZE
	stm	(r1 - r15), [sp+]
	add	r4, sp, #S_R16
	stm	(r16 - r28), [r4]+

	ldm	(r1 - r3), [r0]+
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r4, #-1			@  ""  ""      ""       ""

	stw	r1, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - bsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_user and lr_user
	@
	stm	(r2 - r4), [r0]+
	stur	(sp, lr), [r0-]

	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r0

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp
	.endm

	.text

@
@ __invalid - generic code for failed exception
@		(re-entrant version of handlers)
@
__invalid:
	sub	sp, sp, #S_FRAME_SIZE
	stm	(r1 - r15), [sp+]
	add	r1, sp, #S_R16
	stm	(r16 - r28, sp, lr), [r1]+

	zero_fp

	ldm	(r4 - r6), [r0]+
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""  ""      ""       ""
	stw	r4, [sp]		@ save preserved r0
	stm	(r5 - r7), [r0]+	@ lr_<exception>,
					@ asr_<exception>, "old_r0"

	mov	r0, sp
	mov	r1, asr
	b	bad_mode
ENDPROC(__invalid)

	.align	5
__dabt_priv:
	priv_entry

	@
	@ get ready to re-enable interrupts if appropriate
	@
	mov	r17, asr
	cand.a	r3, #PSR_I_BIT
	bne	1f
	andn	r17, r17, #PSR_I_BIT
1:

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context asr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.
	@
	movc	r1, p0.c3, #0		@ get FSR
	movc	r0, p0.c4, #0		@ get FAR

	@
	@ set desired INTR state, then call main handler
	@
	mov.a	asr, r17
	mov	r2, sp
	b.l	do_DataAbort

	@
	@ INTRs off again before pulling preserved data off the stack
	@
	disable_irq r0

	@
	@ restore BSR and restart the instruction
	@
	ldw	r2, [sp+], #S_PSR
	priv_exit r2			@ return from exception
ENDPROC(__dabt_priv)

	.align	5
__intr_priv:
	priv_entry

	intr_handler

	mov	r0, #0			@ epip4d
	movc	p0.c5, r0, #14
	nop; nop; nop; nop; nop; nop; nop; nop

	ldw	r4, [sp+], #S_PSR	@ irqs are already disabled

	priv_exit r4			@ return from exception
ENDPROC(__intr_priv)

	.ltorg

	.align	5
__extn_priv:
	priv_entry

	mov	r0, sp			@ struct pt_regs *regs
	mov	r1, asr
	b	bad_mode		@ not supported
ENDPROC(__extn_priv)

	.align	5
__pabt_priv:
	priv_entry

	@
	@ re-enable interrupts if appropriate
	@
	mov	r17, asr
	cand.a	r3, #PSR_I_BIT
	bne	1f
	andn	r17, r17, #PSR_I_BIT
1:

	@
	@ set args, then call main handler
	@
	@  r0 - address of faulting instruction
	@  r1 - pointer to registers on stack
	@
	mov	r0, r2			@ pass address of aborted instruction
	mov	r1, #5
	mov.a	asr, r17
	mov	r2, sp			@ regs
	b.l	do_PrefetchAbort	@ call abort handler

	@
	@ INTRs off again before pulling preserved data off the stack
	@
	disable_irq r0

	@
	@ restore BSR and restart the instruction
	@
	ldw	r2, [sp+], #S_PSR
	priv_exit r2			@ return from exception
ENDPROC(__pabt_priv)

	.align	5
.LCcralign:
	.word	cr_alignment

	.align	5
__dabt_user:
	user_entry

#ifdef CONFIG_UNICORE_FPU_F64
	cff	ip, s31
	cand.a	ip, #0x08000000		@ FPU exception traps?
	beq	209f

	ldw	ip, [sp+], #S_PC
	add	ip, ip, #4
	stw	ip, [sp+], #S_PC
	@
	@ fall through to the emulation code, which returns using r19 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real extended instruction
	@
	@  r0 - instruction
	@
1:	ldw.u	r0, [r2]
	adr	r19, ret_from_exception
	adr	lr, 209f
	@
	@ fallthrough to call do_uc_f64
	@
/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode
 *  r2  = PC
 *  r19 = normal "successful" return address
 *  r20 = this thread's thread_info structure
 *  lr  = unrecognised instruction return address
 */
	get_thread_info r20		@ get current thread
	and	r8, r0, #0x00003c00	@ mask out CP number
	mov	r7, #1
	stb	r7, [r20+], #TI_USED_CP + 2	@ set appropriate used_cp[]

	@ F64 hardware support entry point.
	@  r0  = faulted instruction
	@  r19 = return address
	@  r20 = fp_state
	enable_irq r4
	add	r20, r20, #TI_FPSTATE	@ r20 = workspace
	cff	r1, s31			@ get fpu FPSCR
	andn	r2, r1, #0x08000000
	ctf	r2, s31			@ clear bit 27
	mov	r2, sp			@ nothing stacked - regdump is at TOS
	mov	lr, r19			@ setup for a return to the user code

	@ Now call the C code to package up the bounce to the support code
	@  r0 holds the trigger instruction
	@  r1 holds the FPSCR value
	@  r2 pointer to register dump
	b	ucf64_exchandler
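
/*
 * Sketch of the C side, per the register protocol above (the exact
 * prototype is an assumption):
 *
 *	void ucf64_exchandler(u32 inst, u32 fpexc, struct pt_regs *regs);
 */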
209:
#endif
	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context asr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.
	@
	movc	r1, p0.c3, #0		@ get FSR
	movc	r0, p0.c4, #0		@ get FAR

	@
	@ INTRs on, then call the main handler
	@
	enable_irq r2
	mov	r2, sp
	adr	lr, ret_from_exception
	b	do_DataAbort
ENDPROC(__dabt_user)
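
/*
 * Sketch of the abort handler interface described above (the asmlinkage
 * spelling is an assumption):
 *
 *	asmlinkage void do_DataAbort(unsigned long addr, unsigned int fsr,
 *				     struct pt_regs *regs);
 */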

	.align	5
__intr_user:
	user_entry

	get_thread_info tsk

	intr_handler

	mov	why, #0
	b	ret_to_user
ENDPROC(__intr_user)

	.ltorg

	.align	5
__extn_user:
	user_entry

	mov	r0, sp
	mov	r1, asr
	b	bad_mode
ENDPROC(__extn_user)

	.align	5
__pabt_user:
	user_entry

	mov	r0, r2			@ pass address of aborted instruction
	enable_irq r1			@ Enable interrupts (clobbers r1)
	mov	r1, #5			@ fault status, set after enable_irq
	mov	r2, sp			@ regs
	b.l	do_PrefetchAbort	@ call abort handler

/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
ENDPROC(__pabt_user)
ENDPROC(ret_from_exception)

/*
 * Register switch for UniCore V2 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
	add	ip, r1, #TI_CPU_SAVE
	stm.w	(r4 - r15), [ip]+
	stm.w	(r16 - r27, sp, lr), [ip]+

#ifdef CONFIG_UNICORE_FPU_F64
	add	ip, r1, #TI_FPSTATE
	sfm.w	(f0  - f7 ), [ip]+
	sfm.w	(f8  - f15), [ip]+
	sfm.w	(f16 - f23), [ip]+
	sfm.w	(f24 - f31), [ip]+
	cff	r4, s31
	stw	r4, [ip]

	add	ip, r2, #TI_FPSTATE
	lfm.w	(f0  - f7 ), [ip]+
	lfm.w	(f8  - f15), [ip]+
	lfm.w	(f16 - f23), [ip]+
	lfm.w	(f24 - f31), [ip]+
	ldw	r4, [ip]
	ctf	r4, s31
#endif
	add	ip, r2, #TI_CPU_SAVE
	ldm.w	(r4 - r15), [ip]+
	ldm	(r16 - r27, sp, pc), [ip]+	@ Load all regs saved previously
ENDPROC(__switch_to)
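
/*
 * C-level view (a sketch inferred from the r0/r1/r2 usage noted above;
 * the authoritative declaration lives in the arch headers):
 *
 *	struct task_struct *__switch_to(struct task_struct *prev,
 *					struct thread_info *prev_ti,
 *					struct thread_info *next_ti);
 */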

	.align	5

/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the PRIV
 * stack.
 */
ret_fast_syscall:
	disable_irq r1				@ disable interrupts
	ldw	r1, [tsk+], #TI_FLAGS
	cand.a	r1, #_TIF_WORK_MASK
	bne	fast_work_pending

	@ fast_restore_user_regs
	restore_user_regs fast = 1, offset = S_OFF

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	stw.w	r0, [sp+], #S_R0+S_OFF		@ returned r0
work_pending:
	cand.a	r1, #_TIF_NEED_RESCHED
	bne	work_resched
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	cand.a	r1, #_TIF_SIGPENDING		@ delivering a signal?
	cmovne	why, #0				@ prevent further restarts
	b.l	do_notify_resume
	b	ret_slow_syscall		@ Check work again
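
/*
 * C side of the work loop (sketch; the argument order follows the
 * r0/r1/r2 setup above):
 *
 *	asmlinkage void do_notify_resume(struct pt_regs *regs,
 *			unsigned int thread_flags, int syscall);
 */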

work_resched:
	b.l	schedule

/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq r1				@ disable interrupts
	get_thread_info tsk			@ epip4d, one path error?!
	ldw	r1, [tsk+], #TI_FLAGS
	cand.a	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
	@ slow_restore_user_regs
	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	b.l	schedule_tail
	b	ret_slow_syscall
ENDPROC(ret_from_fork)

ENTRY(ret_from_kernel_thread)
	b.l	schedule_tail
	mov	r0, r5
	adr	lr, ret_slow_syscall
	mov	pc, r4
ENDPROC(ret_from_kernel_thread)

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */
	.align	5
ENTRY(vector_swi)
	sub	sp, sp, #S_FRAME_SIZE
	stm	(r0 - r15), [sp]+		@ Calling r0 - r15
	add	r8, sp, #S_R16
	stm	(r16 - r28), [r8]+		@ Calling r16 - r28
	add	r8, sp, #S_PC
	stur	(sp, lr), [r8-]			@ Calling sp, lr
	mov	r8, bsr				@ called from non-REAL mode
	stw	lr, [sp+], #S_PC		@ Save calling PC
	stw	r8, [sp+], #S_PSR		@ Save ASR
	stw	r0, [sp+], #S_OLD_R0		@ Save OLD_R0
	zero_fp

	/*
	 * Get the system call number.
	 */
	sub	ip, lr, #4
	ldw.u	scno, [ip]			@ get SWI instruction

#ifdef CONFIG_ALIGNMENT_TRAP
	ldw	ip, __cr_alignment
	ldw	ip, [ip]
	movc	p0.c1, ip, #0			@ update control register
#endif
	enable_irq ip

	get_thread_info tsk
	ldw	tbl, =sys_call_table		@ load syscall table pointer

	andn	scno, scno, #0xff000000		@ mask off SWI op-code
	andn	scno, scno, #0x00ff0000		@ mask off SWI op-code
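
	/*
	 * In C (sketch): the two andn instructions keep only the low
	 * 16 bits of the trapping instruction, i.e. the SWI immediate:
	 *
	 *	scno = swi_insn & 0x0000ffff;
	 */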

	stm.w	(r4, r5), [sp-]			@ push fifth and sixth args
	ldw	ip, [tsk+], #TI_FLAGS		@ check for syscall tracing
	cand.a	ip, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	bne	__sys_trace

	csub.a	scno, #__NR_syscalls		@ check upper syscall limit
	adr	lr, ret_fast_syscall		@ return address
	bea	1f
	ldw	pc, [tbl+], scno << #2		@ call sys_* routine
1:
	add	r1, sp, #S_OFF
2:	mov	why, #0				@ no longer a real syscall
	b	sys_ni_syscall			@ not private func
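
/*
 * The dispatch above, in C terms (a sketch, not the generated code):
 *
 *	if (scno < __NR_syscalls)
 *		ret = sys_call_table[scno](...);	// returns to ret_fast_syscall
 *	else
 *		ret = sys_ni_syscall();			// out-of-range number
 */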

/*
 * This is the really slow path.  We're going to be doing
 * context switches, and waiting for our parent to respond.
 */
__sys_trace:
	mov	r2, scno
	add	r1, sp, #S_OFF
	mov	r0, #0				@ trace entry [IP = 0]
	b.l	syscall_trace

	adr	lr, __sys_trace_return		@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	csub.a	scno, #__NR_syscalls		@ check upper syscall limit
	bea	2b
	ldm	(r0 - r3), [r1]+		@ have to reload r0 - r3
	ldw	pc, [tbl+], scno << #2		@ call sys_* routine

__sys_trace_return:
	stw.w	r0, [sp+], #S_R0 + S_OFF	@ save returned r0
	mov	r2, scno
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	b.l	syscall_trace
	b	ret_slow_syscall
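
/*
 * Tracing hook, called once on entry (r0 = 0) and once on exit (r0 = 1);
 * a sketch matching the register setup above (prototype is an
 * assumption).  On entry the return value becomes the possibly
 * rewritten syscall number:
 *
 *	asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno);
 */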

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg

ENTRY(sys_rt_sigreturn)
	add	r0, sp, #S_OFF
	mov	why, #0			@ prevent syscall restart handling
	b	__sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn)

	__INIT

/*
 * Vector stubs.
 *
 * This code is copied to 0x200 above the vector page so we can use
 * branches in the vectors, rather than ldw's.  Note that this code
 * must not exceed 0x300 bytes.
 *
 * Common stub entry: bsr = parent ASR, lr = parent PC.
 *
 * SP points to a minimal amount of processor-private memory, the
 * address of which is copied into r0 for the mode specific handler.
 */
	.macro	vector_stub, name, mode
	.align	5

vector_\name:
	@
	@ Save r0, lr_<exception> (parent PC) and bsr_<exception>
	@ (parent ASR)
	@
	stw	r0, [sp]
	stw	lr, [sp+], #4		@ save r0, lr
	mov	lr, bsr
	stw	lr, [sp+], #8		@ save bsr

	@
	@ Prepare for PRIV mode.  INTRs remain disabled.
	@
	mov	r0, asr
	xor	r0, r0, #(\mode ^ PRIV_MODE)
	mov.a	bsr, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x03
	add	lr, lr, #1
	mov	r0, sp
	ldw	lr, [pc+], lr << #2
	mov.a	pc, lr			@ branch to handler in PRIV mode
ENDPROC(vector_\name)
	.align	2
	@ handler addresses follow this label
	.endm
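
/*
 * What the tail of the stub computes, in C terms (sketch): the low two
 * bits of the saved parent ASR select the previous mode, and that index
 * picks a handler from the word table emitted right after the stub
 * (the +1 compensates for the pc-relative load):
 *
 *	handler = branch_table[parent_asr & 0x03];	// 0 = USER .. 3 = PRIV
 */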

	.globl	__stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
	vector_stub	intr, INTR_MODE

	.long	__intr_user			@  0 (USER)
	.long	__invalid			@  1
	.long	__invalid			@  2
	.long	__intr_priv			@  3 (PRIV)

/*
 * Data abort dispatcher
 * Enter in ABT mode, bsr = USER ASR, lr = USER PC
 */
	vector_stub	dabt, ABRT_MODE

	.long	__dabt_user			@  0 (USER)
	.long	__invalid			@  1
	.long	__invalid			@  2 (INTR)
	.long	__dabt_priv			@  3 (PRIV)

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, bsr = USER ASR, lr = USER PC
 */
	vector_stub	pabt, ABRT_MODE

	.long	__pabt_user			@  0 (USER)
	.long	__invalid			@  1
	.long	__invalid			@  2 (INTR)
	.long	__pabt_priv			@  3 (PRIV)

/*
 * Extended instruction dispatcher
 * (unsupported; the valid entries route to bad_mode)
 */
	vector_stub	extn, EXTN_MODE

	.long	__extn_user			@  0 (USER)
	.long	__invalid			@  1
	.long	__invalid			@  2 (INTR)
	.long	__extn_priv			@  3 (PRIV)

	.align	5

.LCvswi:
	.word	vector_swi

	.globl	__stubs_end
__stubs_end:

	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start
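
@
@ The branches in the vector page are PC-relative, so they are written
@ as "target + stubs_offset": once the vectors are copied to their
@ runtime address and the stubs to 0x200 above it, the link-time
@ difference computed here makes each branch land on the right stub.
@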

	.globl	__vectors_start
__vectors_start:
	jepriv	SYS_ERROR0
	b	vector_extn + stubs_offset
	ldw	pc, .LCvswi + stubs_offset
	b	vector_pabt + stubs_offset
	b	vector_dabt + stubs_offset
	jepriv	SYS_ERROR0
	b	vector_intr + stubs_offset
	jepriv	SYS_ERROR0

	.globl	__vectors_end
__vectors_end:

	.data

	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4