/*
 * arch/ia64/kernel/entry.S
 *
 * Kernel entry points.
 */

#include <asm/asmmacro.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/kregs.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/export.h>

#include "minstate.h"
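
/*
 * ia64_execve: invoke sys_execve() and, on success, scrub the preserved
 * register state so that nothing from the calling program leaks into the
 * new image.  On failure the (sign-extended) error is returned as usual.
 */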
ENTRY(ia64_execve)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc loc1=ar.pfs,8,2,3,0
	mov loc0=rp
	.body
	mov out0=in0
	;;
	mov out1=in1
	mov out2=in2
	br.call.sptk.many rp=sys_execve
.ret0:
	cmp4.ge p6,p7=r8,r0
	mov ar.pfs=loc1
	sxt4 r8=r8
	;;
	stf.spill [sp]=f0
	mov rp=loc0
(p6)	mov ar.pfs=r0
(p7)	br.ret.sptk.many rp
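
	/*
	 * Success: we do not return to the caller, so clear the preserved
	 * state (r4-r7, f2-f5, f12-f31, b1-b5, ar.unat, ar.lc).  The
	 * ldf.fill instructions reload the f0 spill written to [sp] above,
	 * which zeroes the remaining FP registers and their NaT bits.
	 */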
	mov ar.unat=0; mov ar.lc=0
	mov r4=0; mov f2=f0; mov b1=r0
	mov r5=0; mov f3=f0; mov b2=r0
	mov r6=0; mov f4=f0; mov b3=r0
	mov r7=0; mov f5=f0; mov b4=r0
	ldf.fill f12=[sp]; mov f13=f0; mov b5=r0
	ldf.fill f14=[sp]; ldf.fill f15=[sp]; mov f16=f0
	ldf.fill f17=[sp]; ldf.fill f18=[sp]; mov f19=f0
	ldf.fill f20=[sp]; ldf.fill f21=[sp]; mov f22=f0
	ldf.fill f23=[sp]; ldf.fill f24=[sp]; mov f25=f0
	ldf.fill f26=[sp]; ldf.fill f27=[sp]; mov f28=f0
	ldf.fill f29=[sp]; ldf.fill f30=[sp]; mov f31=f0
	br.ret.sptk.many rp
END(ia64_execve)
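
/*
 * sys_clone2 stub: save a switch_stack frame so the preserved registers
 * can be copied for the child, pass the user arguments through to
 * do_fork(), and, when CLONE_SETTLS is set, store the TLS value (in5)
 * into the saved pt_regs.r16 slot for the child.
 */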
GLOBAL_ENTRY(sys_clone2)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc r16=ar.pfs,8,2,6,0
	DO_SAVE_SWITCH_STACK
	adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
	mov loc0=rp
	mov loc1=r16
	.body
	mov out1=in1
	mov out2=in2
	tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
	mov out3=in3
	;;
(p6)	st8 [r2]=in5
	mov out4=in4
	mov out0=in0
	br.call.sptk.many rp=do_fork
.ret1:	.restore sp
	adds sp=IA64_SWITCH_STACK_SIZE,sp
	mov ar.pfs=loc1
	mov rp=loc0
	br.ret.sptk.many rp
END(sys_clone2)
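
/*
 * sys_clone stub: same as sys_clone2 except that no explicit stack size
 * is passed; out2=16 compensates for the 16-byte scratch area, and the
 * remaining arguments are shifted down by one register.
 */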
GLOBAL_ENTRY(sys_clone)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc r16=ar.pfs,8,2,6,0
	DO_SAVE_SWITCH_STACK
	adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
	mov loc0=rp
	mov loc1=r16
	.body
	mov out1=in1
	mov out2=16
	tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
	mov out3=in2
	;;
(p6)	st8 [r2]=in4
	mov out4=in3
	mov out0=in0
	br.call.sptk.many rp=do_fork
.ret2:	.restore sp
	adds sp=IA64_SWITCH_STACK_SIZE,sp
	mov ar.pfs=loc1
	mov rp=loc0
	br.ret.sptk.many rp
END(sys_clone)
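
/*
 * ia64_switch_to(struct task_struct *next)
 *	Save the state of the current task (r13) via the switch_stack,
 *	record its kernel stack pointer, and switch to the kernel stack of
 *	'next' (in0).  Unless next is init_task or its stack granule is
 *	already the one mapped by the IA64_TR_CURRENT_STACK translation
 *	register, the .map path pins the new stack with a data TR first.
 *	The previous task is returned in r8.
 */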
GLOBAL_ENTRY(ia64_switch_to)
	.prologue
	alloc r16=ar.pfs,1,0,0,0
	DO_SAVE_SWITCH_STACK
	.body

	adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
	movl r25=init_task
	mov r27=IA64_KR(CURRENT_STACK)
	adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
	dep r20=0,in0,61,3
	;;
	st8 [r22]=sp
	shr.u r26=r20,IA64_GRANULE_SHIFT
	cmp.eq p7,p6=r25,in0
	;;
(p6)	cmp.eq p7,p6=r26,r27
(p6)	br.cond.dpnt .map
	;;
.done:
	ld8 sp=[r21]
	MOV_TO_KR(CURRENT, in0, r8, r9)
	mov r8=r13
	mov r13=in0
	;;
	DO_LOAD_SWITCH_STACK

#ifdef CONFIG_SMP
	sync.i
#endif
	br.ret.sptk.many rp

.map:
	RSM_PSR_IC(r25)
	movl r25=PAGE_KERNEL
	;;
	srlz.d
	or r23=r25,r20
	mov r25=IA64_GRANULE_SHIFT<<2
	;;
	MOV_TO_ITIR(p0, r25, r8)
	MOV_TO_IFA(in0, r8)
	;;
	mov r25=IA64_TR_CURRENT_STACK
	MOV_TO_KR(CURRENT_STACK, r26, r8, r9)
	;;
	itr.d dtr[r25]=r23
	SSM_PSR_IC_AND_SRLZ_D(r8, r9)
	br.cond.sptk .done
END(ia64_switch_to)
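
/*
 * save_switch_stack:
 *	Called with the return address in b7 and, by convention at the
 *	call sites, the caller's ar.pfs in r16.  Flushes the register
 *	stack and spills the preserved state (r4-r7, f2-f5, f12-f31,
 *	b0-b5, predicates, and the relevant application registers) into
 *	the struct switch_stack the caller allocated on the memory stack.
 */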
GLOBAL_ENTRY(save_switch_stack)
	.prologue
	.altrp b7
	flushrs
	.save @priunat,r17
	mov r17=ar.unat
	.body
#ifdef CONFIG_ITANIUM
	adds r2=16+128,sp
	adds r3=16+64,sp
	adds r14=SW(R4)+16,sp
	;;
	st8.spill [r14]=r4,16
	lfetch.fault.excl.nt1 [r3],128
	;;
	lfetch.fault.excl.nt1 [r2],128
	lfetch.fault.excl.nt1 [r3],128
	;;
	lfetch.fault.excl [r2]
	lfetch.fault.excl [r3]
	adds r15=SW(R5)+16,sp
#else
	add r2=16+3*128,sp
	add r3=16,sp
	add r14=SW(R4)+16,sp
	;;
	st8.spill [r14]=r4,SW(R6)-SW(R4)
	lfetch.fault.excl.nt1 [r3],128
	;;
	lfetch.fault.excl.nt1 [r3],128
	lfetch.fault.excl.nt1 [r2],128
	;;
	lfetch.fault.excl.nt1 [r3]
	lfetch.fault.excl.nt1 [r2]
	adds r15=SW(R5)+16,sp
#endif
	;;
	st8.spill [r15]=r5,SW(R7)-SW(R5)
	mov.m ar.rsc=0
	add r2=SW(F2)+16,sp
	;;
	st8.spill [r14]=r6,SW(B0)-SW(R6)
	mov.m r18=ar.fpsr
	add r3=SW(F3)+16,sp
	;;
	stf.spill [r2]=f2,32
	mov.m r19=ar.rnat
	mov r21=b0

	stf.spill [r3]=f3,32
	st8.spill [r15]=r7,SW(B2)-SW(R7)
	mov r22=b1
	;;

	mov.m r29=ar.unat
	mov.m r20=ar.bspstore
	mov r23=b2
	stf.spill [r2]=f4,32
	stf.spill [r3]=f5,32
	mov r24=b3
	;;
	st8 [r14]=r21,SW(B1)-SW(B0)
	st8 [r15]=r23,SW(B3)-SW(B2)
	mov r25=b4
	mov r26=b5
	;;
	st8 [r14]=r22,SW(B4)-SW(B1)
	st8 [r15]=r24,SW(AR_PFS)-SW(B3)
	mov r21=ar.lc
	stf.spill [r2]=f12,32
	stf.spill [r3]=f13,32
	;;
	st8 [r14]=r25,SW(B5)-SW(B4)
	st8 [r15]=r16,SW(AR_LC)-SW(AR_PFS)
	stf.spill [r2]=f14,32
	stf.spill [r3]=f15,32
	;;
	st8 [r14]=r26
	st8 [r15]=r21
	stf.spill [r2]=f16,32
	stf.spill [r3]=f17,32
	;;
	stf.spill [r2]=f18,32
	stf.spill [r3]=f19,32
	;;
	stf.spill [r2]=f20,32
	stf.spill [r3]=f21,32
	;;
	stf.spill [r2]=f22,32
	stf.spill [r3]=f23,32
	;;
	stf.spill [r2]=f24,32
	stf.spill [r3]=f25,32
	;;
	stf.spill [r2]=f26,32
	stf.spill [r3]=f27,32
	;;
	stf.spill [r2]=f28,32
	stf.spill [r3]=f29,32
	;;
	stf.spill [r2]=f30,SW(AR_UNAT)-SW(F30)
	stf.spill [r3]=f31,SW(PR)-SW(F31)
	add r14=SW(CALLER_UNAT)+16,sp
	;;
	st8 [r2]=r29,SW(AR_RNAT)-SW(AR_UNAT)
	st8 [r14]=r17,SW(AR_FPSR)-SW(CALLER_UNAT)
	mov r21=pr
	;;
	st8 [r2]=r19,SW(AR_BSPSTORE)-SW(AR_RNAT)
	st8 [r3]=r21
	;;
	st8 [r2]=r20
	st8 [r14]=r18
	mov ar.rsc=3
	br.cond.sptk.many b7
END(save_switch_stack)
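
/*
 * load_switch_stack:
 *	Counterpart of save_switch_stack.  Reloads the preserved registers
 *	from the struct switch_stack at sp+16 and returns through b7.
 */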
GLOBAL_ENTRY(load_switch_stack)
	.prologue
	.altrp b7

	.body
	lfetch.fault.nt1 [sp]
	adds r2=SW(AR_BSPSTORE)+16,sp
	adds r3=SW(AR_UNAT)+16,sp
	mov ar.rsc=0
	adds r14=SW(CALLER_UNAT)+16,sp
	adds r15=SW(AR_FPSR)+16,sp
	;;
	ld8 r27=[r2],(SW(B0)-SW(AR_BSPSTORE))
	ld8 r29=[r3],(SW(B1)-SW(AR_UNAT))
	;;
	ld8 r21=[r2],16
	ld8 r22=[r3],16
	;;
	ld8 r23=[r2],16
	ld8 r24=[r3],16
	;;
	ld8 r25=[r2],16
	ld8 r26=[r3],16
	;;
	ld8 r16=[r2],(SW(PR)-SW(AR_PFS))
	ld8 r17=[r3],(SW(AR_RNAT)-SW(AR_LC))
	;;
	ld8 r28=[r2]
	ld8 r30=[r3]
	;;
	ld8 r18=[r14],16
	ld8 r19=[r15],24
	;;
	ldf.fill f2=[r14],32
	ldf.fill f3=[r15],32
	;;
	ldf.fill f4=[r14],32
	ldf.fill f5=[r15],32
	;;
	ldf.fill f12=[r14],32
	ldf.fill f13=[r15],32
	;;
	ldf.fill f14=[r14],32
	ldf.fill f15=[r15],32
	;;
	ldf.fill f16=[r14],32
	ldf.fill f17=[r15],32
	;;
	ldf.fill f18=[r14],32
	ldf.fill f19=[r15],32
	mov b0=r21
	;;
	ldf.fill f20=[r14],32
	ldf.fill f21=[r15],32
	mov b1=r22
	;;
	ldf.fill f22=[r14],32
	ldf.fill f23=[r15],32
	mov b2=r23
	;;
	mov ar.bspstore=r27
	mov ar.unat=r29
	mov b3=r24
	;;
	ldf.fill f24=[r14],32
	ldf.fill f25=[r15],32
	mov b4=r25
	;;
	ldf.fill f26=[r14],32
	ldf.fill f27=[r15],32
	mov b5=r26
	;;
	ldf.fill f28=[r14],32
	ldf.fill f29=[r15],32
	mov ar.pfs=r16
	;;
	ldf.fill f30=[r14],32
	ldf.fill f31=[r15],24
	mov ar.lc=r17
	;;
	ld8.fill r4=[r14],16
	ld8.fill r5=[r15],16
	mov pr=r28,-1
	;;
	ld8.fill r6=[r14],16
	ld8.fill r7=[r15],16

	mov ar.unat=r18
	mov ar.rnat=r30
	mov ar.fpsr=r19
	mov ar.rsc=3
	br.cond.sptk.many b7
END(load_switch_stack)
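
/*
 * ia64_trace_syscall:
 *	Taken instead of the normal syscall path when syscall tracing is
 *	active.  The scratch FP registers f6-f11 are spilled into pt_regs
 *	first, syscall_trace_enter() lets the tracer see (and possibly
 *	modify or reject) the call, then the syscall number is re-read
 *	from pt_regs.r15, the call is performed, and the result is
 *	reported through syscall_trace_leave().
 */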
GLOBAL_ENTRY(ia64_trace_syscall)
	PT_REGS_UNWIND_INFO(0)

	adds r16=PT(F6)+16,sp
	adds r17=PT(F7)+16,sp
	;;
	stf.spill [r16]=f6,32
	stf.spill [r17]=f7,32
	;;
	stf.spill [r16]=f8,32
	stf.spill [r17]=f9,32
	;;
	stf.spill [r16]=f10
	stf.spill [r17]=f11
	br.call.sptk.many rp=syscall_trace_enter
	cmp.lt p6,p0=r8,r0
	adds r2=PT(R8)+16,sp
	adds r3=PT(R10)+16,sp
	mov r10=0
(p6)	br.cond.sptk strace_error
	adds r16=PT(F6)+16,sp
	adds r17=PT(F7)+16,sp
	;;
	ldf.fill f6=[r16],32
	ldf.fill f7=[r17],32
	;;
	ldf.fill f8=[r16],32
	ldf.fill f9=[r17],32
	;;
	ldf.fill f10=[r16]
	ldf.fill f11=[r17]

	adds r15=PT(R15)+16,sp
	;;
	ld8 r15=[r15]
	mov r3=NR_syscalls - 1
	;;
	adds r15=-1024,r15
	movl r16=sys_call_table
	;;
	shladd r20=r15,3,r16
	cmp.leu p6,p7=r15,r3
	;;
(p6)	ld8 r20=[r20]
(p7)	movl r20=sys_ni_syscall
	;;
	mov b6=r20
	br.call.sptk.many rp=b6
.strace_check_retval:
	cmp.lt p6,p0=r8,r0
	adds r2=PT(R8)+16,sp
	adds r3=PT(R10)+16,sp
	mov r10=0
(p6)	br.cond.sptk strace_error
	;;
.strace_save_retval:
	.mem.offset 0,0; st8.spill [r2]=r8
	.mem.offset 8,0; st8.spill [r3]=r10
	br.call.sptk.many rp=syscall_trace_leave
.ret3:
(pUStk)	cmp.eq.unc p6,p0=r0,r0
(pUStk)	rsm psr.i
	br.cond.sptk ia64_work_pending_syscall_end

strace_error:
	ld8 r3=[r2]
	sub r9=0,r8
	;;
	cmp.ne p6,p0=r3,r0
	adds r3=16,r2
	;;
(p6)	mov r10=-1
(p6)	mov r8=r9
	br.cond.sptk .strace_save_retval
END(ia64_trace_syscall)
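
/*
 * When traced and returning from a signal frame, leave the kernel through
 * this path so the tracer is notified via syscall_trace_leave() before
 * the normal ia64_leave_kernel exit.
 */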
GLOBAL_ENTRY(ia64_strace_leave_kernel)
	PT_REGS_UNWIND_INFO(0)
{
	nop.m 0
	nop.i 0
	br.call.sptk.many rp=syscall_trace_leave
}
.ret4:	br.cond.sptk ia64_leave_kernel
END(ia64_strace_leave_kernel)
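
/*
 * call_payload: invoke a kernel thread's payload function.  The function
 * descriptor is expected in r4 and its argument in r5; on return, pKStk
 * is cleared and pUStk is set so that the exit path continues on to user
 * mode.
 */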
ENTRY(call_payload)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(0)
	alloc loc1=ar.pfs,0,3,1,0
	mov loc0=rp
	mov loc2=gp
	mov out0=r5
	ld8 r14 = [r4], 8
	;;
	mov b6 = r14
	ld8 gp = [r4]
	;;
	br.call.sptk.many rp=b6
.ret12:	mov gp=loc2
	mov rp=loc0
	mov ar.pfs=loc1

	cmp.ne pKStk,pUStk=r0,r0
	br.ret.sptk.many rp
END(call_payload)
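
/*
 * ia64_ret_from_clone: first code executed by a newly created task.
 * Calls schedule_tail(), runs the kernel-thread payload when coming from
 * a kernel stack (pKStk), and falls through into the syscall return path,
 * diverting to the strace checks when the parent traces this task.
 */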
GLOBAL_ENTRY(ia64_ret_from_clone)
	PT_REGS_UNWIND_INFO(0)
{
	nop.m 0
	nop.i 0

	br.call.sptk.many rp=ia64_invoke_schedule_tail
}
.ret8:
(pKStk)	br.call.sptk.many rp=call_payload
	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
	ld4 r2=[r2]
	;;
	mov r8=0
	and r2=_TIF_SYSCALL_TRACEAUDIT,r2
	;;
	cmp.ne p6,p0=r2,r0
(p6)	br.cond.spnt .strace_check_retval
	;;
END(ia64_ret_from_clone)
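
/*
 * ia64_ret_from_syscall: check the return value in r8; a negative value
 * indicates an error and is converted in handle_syscall_error, otherwise
 * fall through into ia64_leave_syscall.
 */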
GLOBAL_ENTRY(ia64_ret_from_syscall)
	PT_REGS_UNWIND_INFO(0)
	cmp.ge p6,p7=r8,r0
	adds r2=PT(R8)+16,sp
	mov r10=r0
(p7)	br.cond.spnt handle_syscall_error
END(ia64_ret_from_syscall)
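
/*
 * ia64_leave_syscall: return to user level after a system call.
 *
 * Interrupts are disabled up front so that the thread flags cannot change
 * underneath us; p6 selects whether extra work (signals, rescheduling,
 * ...) must be handled before leaving, and pLvSys marks this as the
 * syscall-exit path for the shared work-pending code.  Only the scratch
 * state a syscall may clobber is restored from pt_regs, the register
 * backing store is switched back to the user backing store, and the
 * dirty partition left over from the kernel is cleared in
 * rse_clear_invalid before the RFI.
 */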
GLOBAL_ENTRY(ia64_leave_syscall)
	PT_REGS_UNWIND_INFO(0)

#ifdef CONFIG_PREEMPT
	RSM_PSR_I(p0, r2, r18)
	cmp.eq pLvSys,p0=r0,r0
(pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
	;;
	.pred.rel.mutex pUStk,pKStk
(pKStk)	ld4 r21=[r20]
(pUStk)	mov r21=0
	;;
	cmp.eq p6,p0=r21,r0
#else
	RSM_PSR_I(pUStk, r2, r18)
	cmp.eq pLvSys,p0=r0,r0
(pUStk)	cmp.eq.unc p6,p0=r0,r0
#endif
.global ia64_work_processed_syscall;
ia64_work_processed_syscall:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	adds r2=PT(LOADRS)+16,r12
	MOV_FROM_ITC(pUStk, p9, r22, r19)
	adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
(p6)	ld4 r31=[r18]
	ld8 r19=[r2],PT(B6)-PT(LOADRS)
	adds r3=PT(AR_BSPSTORE)+16,r12
	;;
#else
	adds r2=PT(LOADRS)+16,r12
	adds r3=PT(AR_BSPSTORE)+16,r12
	adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
(p6)	ld4 r31=[r18]
	ld8 r19=[r2],PT(B6)-PT(LOADRS)
	nop.i 0
	;;
#endif
	mov r16=ar.bsp
	ld8 r18=[r2],PT(R9)-PT(B6)
(p6)	and r15=TIF_WORK_MASK,r31
	;;
	ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE)
(p6)	cmp4.ne.unc p6,p0=r15, r0
(p6)	br.cond.spnt .work_pending_syscall
	;;

	ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
	ld8 r11=[r3],PT(CR_IIP)-PT(R11)
(pNonSys) break 0
	;;
	invala
	RSM_PSR_I_IC(r28, r29, r30)
	cmp.eq p9,p0=r0,r0

	ld8 r29=[r2],16
	ld8 r28=[r3],16
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
(pUStk)	add r14=TI_AC_LEAVE+IA64_TASK_SIZE,r13
	;;
	ld8 r30=[r2],16
	ld8 r25=[r3],16
(pUStk)	add r15=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
	;;
#else
	mov r22=r0
	;;
	ld8 r30=[r2],16
	ld8 r25=[r3],16
(pUStk)	add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
	;;
#endif
	ld8 r26=[r2],PT(B0)-PT(AR_PFS)
	MOV_FROM_PSR(pKStk, r22, r21)
	nop 0
	;;
	ld8 r21=[r2],PT(AR_RNAT)-PT(B0)
	ld8 r27=[r3],PT(PR)-PT(AR_RSC)
	mov f6=f0
	;;
	ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT)
	ld8 r31=[r3],PT(R1)-PT(PR)
	mov f7=f0
	;;
	ld8 r20=[r2],PT(R12)-PT(AR_FPSR)
	ld8.fill r1=[r3],16
(pUStk)	mov r17=1
	;;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
(pUStk)	st1 [r15]=r17
#else
(pUStk)	st1 [r14]=r17
#endif
	ld8.fill r13=[r3],16
	mov f8=f0
	;;
	ld8.fill r12=[r2]
	ld8.fill r15=[r3]
	mov b6=r18

	LOAD_PHYS_STACK_REG_SIZE(r17)
	mov f9=f0
(pKStk)	br.cond.dpnt.many skip_rbs_switch

	srlz.d
	shr.u r18=r19,16
	COVER
	;;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	mov r19=ar.bsp
	st8 [r14]=r22
	mov f10=f0

	mov r22=r0
	movl r14=__kernel_syscall_via_epc
	;;
#else
	mov r19=ar.bsp
	mov f10=f0

	nop.m 0
	movl r14=__kernel_syscall_via_epc
	;;
#endif
	mov.m ar.csd=r0
	mov.m ar.ccv=r0
	mov b7=r14

	mov.m ar.ssd=r0
	mov f11=f0
	br.cond.sptk.many rbs_switch
END(ia64_leave_syscall)
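
/*
 * ia64_leave_kernel: return path for interruptions and non-streamlined
 * system calls.  Same structure as ia64_leave_syscall, but the full
 * pt_regs scratch state (r16-r31, f6-f11, b6/b7, ar.csd/ar.ssd/ar.ccv)
 * must be restored before switching the register backing store and
 * issuing the RFI.
 */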
GLOBAL_ENTRY(ia64_leave_kernel)
	PT_REGS_UNWIND_INFO(0)

#ifdef CONFIG_PREEMPT
	RSM_PSR_I(p0, r17, r31)
	cmp.eq p0,pLvSys=r0,r0
(pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
	;;
	.pred.rel.mutex pUStk,pKStk
(pKStk)	ld4 r21=[r20]
(pUStk)	mov r21=0
	;;
	cmp.eq p6,p0=r21,r0
#else
	RSM_PSR_I(pUStk, r17, r31)
	cmp.eq p0,pLvSys=r0,r0
(pUStk)	cmp.eq.unc p6,p0=r0,r0
#endif
.work_processed_kernel:
	adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
(p6)	ld4 r31=[r17]
	adds r21=PT(PR)+16,r12
	;;

	lfetch [r21],PT(CR_IPSR)-PT(PR)
	adds r2=PT(B6)+16,r12
	adds r3=PT(R16)+16,r12
	;;
	lfetch [r21]
	ld8 r28=[r2],8
	adds r29=PT(R24)+16,r12

	ld8.fill r16=[r3],PT(AR_CSD)-PT(R16)
	adds r30=PT(AR_CCV)+16,r12
(p6)	and r19=TIF_WORK_MASK,r31
	;;
	ld8.fill r24=[r29]
	ld8 r15=[r30]
(p6)	cmp4.ne.unc p6,p0=r19, r0
	;;
	ld8 r29=[r2],16
	ld8 r30=[r3],16
(p6)	br.cond.spnt .work_pending
	;;
	ld8 r31=[r2],16
	ld8.fill r8=[r3],16
	;;
	ld8.fill r9=[r2],16
	ld8.fill r10=[r3],PT(R17)-PT(R10)
	;;
	ld8.fill r11=[r2],PT(R18)-PT(R11)
	ld8.fill r17=[r3],16
	;;
	ld8.fill r18=[r2],16
	ld8.fill r19=[r3],16
	;;
	ld8.fill r20=[r2],16
	ld8.fill r21=[r3],16
	mov ar.csd=r30
	mov ar.ssd=r31
	;;
	RSM_PSR_I_IC(r23, r22, r25)
	invala
	;;
	ld8.fill r22=[r2],24
	ld8.fill r23=[r3],24
	mov b6=r28
	;;
	ld8.fill r25=[r2],16
	ld8.fill r26=[r3],16
	mov b7=r29
	;;
	ld8.fill r27=[r2],16
	ld8.fill r28=[r3],16
	;;
	ld8.fill r29=[r2],16
	ld8.fill r30=[r3],24
	;;
	ld8.fill r31=[r2],PT(F9)-PT(R31)
	adds r3=PT(F10)-PT(F6),r3
	;;
	ldf.fill f9=[r2],PT(F6)-PT(F9)
	ldf.fill f10=[r3],PT(F8)-PT(F10)
	;;
	ldf.fill f6=[r2],PT(F7)-PT(F6)
	;;
	ldf.fill f7=[r2],PT(F11)-PT(F7)
	ldf.fill f8=[r3],32
	;;
	srlz.d
	mov ar.ccv=r15
	;;
	ldf.fill f11=[r2]
	BSW_0(r2, r3, r15)
	;;
(pUStk)	mov r18=IA64_KR(CURRENT)
	adds r16=PT(CR_IPSR)+16,r12
	adds r17=PT(CR_IIP)+16,r12

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	.pred.rel.mutex pUStk,pKStk
	MOV_FROM_PSR(pKStk, r22, r29)
	MOV_FROM_ITC(pUStk, p9, r22, r29)
	nop.i 0
	;;
#else
	MOV_FROM_PSR(pKStk, r22, r29)
	nop.i 0
	nop.i 0
	;;
#endif
	ld8 r29=[r16],16
	ld8 r28=[r17],16
	;;
	ld8 r30=[r16],16
	ld8 r25=[r17],16
	;;
	ld8 r26=[r16],16
	ld8 r27=[r17],16
	cmp.eq p9,p0=r0,r0
	;;
	ld8 r24=[r16],16
	ld8 r23=[r17],16
	;;
	ld8 r31=[r16],16
	ld8 r21=[r17],16
	;;
	ld8 r19=[r16],16
	ld8.fill r1=[r17],16
	;;
	ld8.fill r12=[r16],16
	ld8.fill r13=[r17],16
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
(pUStk)	adds r3=TI_AC_LEAVE+IA64_TASK_SIZE,r18
#else
(pUStk)	adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
#endif
	;;
	ld8 r20=[r16],16
	ld8.fill r15=[r17],16
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
(pUStk)	adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
#endif
	;;
	ld8.fill r14=[r16],16
	ld8.fill r2=[r17]
(pUStk)	mov r17=1
	;;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
(pUStk)	st8 [r3]=r22
(pUStk)	st1 [r18]=r17
	shr.u r18=r19,16
	;;
	ld8.fill r3=[r16]
	LOAD_PHYS_STACK_REG_SIZE(r17)
(pKStk)	br.cond.dpnt skip_rbs_switch
	mov r16=ar.bsp
#else
	ld8.fill r3=[r16]
(pUStk)	st1 [r18]=r17
	shr.u r18=r19,16
	;;
	mov r16=ar.bsp
	LOAD_PHYS_STACK_REG_SIZE(r17)
(pKStk)	br.cond.dpnt skip_rbs_switch
#endif
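
	/*
	 * Restore the user register backing store.  alloc, loadrs, and
	 * cover cannot be predicated, hence the branch around COVER for
	 * the non-syscall case instead of a predicated instruction.
	 */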
(pNonSys) br.cond.dpnt dont_preserve_current_frame
	COVER
	;;
	mov r19=ar.bsp
rbs_switch:
	sub r16=r16,r18
	cmp.ne p9,p0=r0,r0
	;;
	sub r19=r19,r16
	add r18=64,r18
	;;
	shl r19=r19,16
	;;
dont_preserve_current_frame:
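
	/*
	 * To avoid leaking kernel values through the stacked registers,
	 * the "invalid" partition of the register stack is cleared here.
	 * Each pass zeroes Nregs registers, recursing via pRecurse until
	 * the requested number of registers (in0) is covered and then
	 * unwinding via pReturn.
	 */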
#	define pRecurse	p6
#	define pReturn	p7
#ifdef CONFIG_ITANIUM
#	define Nregs	10
#else
#	define Nregs	14
#endif
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	shr.u loc1=r18,9
	sub r17=r17,r18
	;;
	mov ar.rsc=r19
	shladd in0=loc1,3,r17
	mov in1=0
	;;
	TEXT_ALIGN(32)
rse_clear_invalid:
#ifdef CONFIG_ITANIUM
{ .mii
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	cmp.lt pRecurse,p0=Nregs*8,in0
	add out0=-Nregs*8,in0
}{ .mfb
	add out1=1,in1
	nop.f 0
	nop.b 0
	;;
}{ .mfi
	mov loc1=0
	nop.f 0
	mov loc2=0
}{ .mib
	mov loc3=0
	mov loc4=0
(pRecurse) br.call.sptk.many b0=rse_clear_invalid

}{ .mfi
	mov loc5=0
	nop.f 0
	cmp.ne pReturn,p0=r0,in1
}{ .mib
	mov loc6=0
	mov loc7=0
(pReturn) br.ret.sptk.many b0
}
#else
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	cmp.lt pRecurse,p0=Nregs*8,in0
	add out0=-Nregs*8,in0
	add out1=1,in1
	mov loc1=0
	mov loc2=0
	;;
	mov loc3=0
	mov loc4=0
	mov loc5=0
	mov loc6=0
	mov loc7=0
(pRecurse) br.call.dptk.few b0=rse_clear_invalid
	;;
	mov loc8=0
	mov loc9=0
	cmp.ne pReturn,p0=r0,in1
	mov loc10=0
	mov loc11=0
(pReturn) br.ret.dptk.many b0
#endif
#	undef pRecurse
#	undef pReturn
	;;
	alloc r17=ar.pfs,0,0,0,0
	;;
	loadrs
	;;
skip_rbs_switch:
	mov ar.unat=r25
(pKStk)	extr.u r22=r22,21,1
(pLvSys)mov r19=r0
	;;
(pUStk)	mov ar.bspstore=r23
(pKStk)	dep r29=r22,r29,21,1
(pLvSys)mov r16=r0
	;;
	MOV_TO_IPSR(p0, r29, r25)
	mov ar.pfs=r26
(pLvSys)mov r17=r0

	MOV_TO_IFS(p9, r30, r25)
	mov b0=r21
(pLvSys)mov r18=r0

	mov ar.fpsr=r20
	MOV_TO_IIP(r28, r25)
	nop 0
	;;
(pUStk)	mov ar.rnat=r24
	nop 0
(pLvSys)mov r2=r0

	mov ar.rsc=r27
	mov pr=r31,-1
	RFI
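
	/*
	 * Deferred work before leaving the kernel.  On the syscall path
	 * the return values in r8/r10 are first saved back into pt_regs
	 * (.work_pending_syscall); then either preempt_schedule_irq() is
	 * called when a reschedule is needed or notify_resume_user() for
	 * pending signals, and finally the appropriate exit path is
	 * resumed (pLvSys distinguishes syscall exit from kernel exit).
	 */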
.work_pending_syscall:
	add r2=-8,r2
	add r3=-8,r3
	;;
	st8 [r2]=r8
	st8 [r3]=r10
.work_pending:
	tbit.z p6,p0=r31,TIF_NEED_RESCHED
(p6)	br.cond.sptk.few .notify
	br.call.spnt.many rp=preempt_schedule_irq
.ret9:	cmp.eq p6,p0=r0,r0
(pLvSys)br.cond.sptk.few ia64_work_pending_syscall_end
	br.cond.sptk.many .work_processed_kernel

.notify:
(pUStk)	br.call.spnt.many rp=notify_resume_user
.ret10:	cmp.ne p6,p0=r0,r0
(pLvSys)br.cond.sptk.few ia64_work_pending_syscall_end
	br.cond.sptk.many .work_processed_kernel

.global ia64_work_pending_syscall_end;
ia64_work_pending_syscall_end:
	adds r2=PT(R8)+16,r12
	adds r3=PT(R10)+16,r12
	;;
	ld8 r8=[r2]
	ld8 r10=[r3]
	br.cond.sptk.many ia64_work_processed_syscall
END(ia64_leave_kernel)
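
/*
 * handle_syscall_error: some system calls (e.g. ptrace, mmap) can legally
 * return "negative" values, so the saved pt_regs.r8 is used as the error
 * indicator.  If it is non-zero, convert the return value to the usual
 * r8 = -errno, r10 = -1 convention before leaving.
 */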
ENTRY(handle_syscall_error)
	PT_REGS_UNWIND_INFO(0)
	ld8 r3=[r2]
	;;
	cmp.eq p6,p7=r3,r0
	;;
(p7)	mov r10=-1
(p7)	sub r8=0,r8
	br.cond.sptk ia64_leave_syscall
END(handle_syscall_error)
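
/*
 * Stub to call schedule_tail() with the previous task (r8) as argument,
 * preserving the eight input registers of the interrupted frame.
 */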
GLOBAL_ENTRY(ia64_invoke_schedule_tail)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc loc1=ar.pfs,8,2,1,0
	mov loc0=rp
	mov out0=r8
	;;
	br.call.sptk.many rp=schedule_tail
.ret11:	mov ar.pfs=loc1
	mov rp=loc0
	br.ret.sptk.many rp
END(ia64_invoke_schedule_tail)
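
/*
 * notify_resume_user: build a small scratch frame (saving ar.unat and
 * ar.pfs) and call do_notify_resume_user().  All eight input registers
 * are preserved in case the interrupted system call must be restarted;
 * pSys/pNonSys tell the callee whether we are on a syscall path.
 */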
GLOBAL_ENTRY(notify_resume_user)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc loc1=ar.pfs,8,2,3,0
	mov r9=ar.unat
	mov loc0=rp
	mov out0=0
	adds out1=8,sp
(pSys)	mov out2=1
	;;
(pNonSys) mov out2=0
	.fframe 16
	.spillsp ar.unat, 16
	st8 [sp]=r9,-16
	st8 [out1]=loc1,-8
	.body
	br.call.sptk.many rp=do_notify_resume_user
.ret15:	.restore sp
	adds sp=16,sp
	;;
	ld8 r9=[sp]
	mov rp=loc0
	;;
	mov ar.unat=r9
	mov ar.pfs=loc1
	br.ret.sptk.many rp
END(notify_resume_user)
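
/*
 * sys_rt_sigreturn: mark this as a non-syscall (pNonSys) so the full
 * scratch state restored by ia64_rt_sigreturn() is preserved, spill the
 * scratch FP registers f6-f11 into pt_regs, and return through the
 * address handed back in r8 (ia64_leave_kernel) rather than the
 * streamlined syscall exit path.
 */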
ENTRY(sys_rt_sigreturn)
	PT_REGS_UNWIND_INFO(0)
	alloc r2=ar.pfs,8,0,1,0
	.prologue
	PT_REGS_SAVES(16)
	adds sp=-16,sp
	.body
	cmp.eq pNonSys,pSys=r0,r0
	;;

	adds r16=PT(F6)+32,sp
	adds r17=PT(F7)+32,sp
	;;
	stf.spill [r16]=f6,32
	stf.spill [r17]=f7,32
	;;
	stf.spill [r16]=f8,32
	stf.spill [r17]=f9,32
	;;
	stf.spill [r16]=f10
	stf.spill [r17]=f11
	adds out0=16,sp
	br.call.sptk.many rp=ia64_rt_sigreturn
.ret19:	.restore sp,0
	adds sp=16,sp
	;;
	ld8 r9=[sp]
	mov.sptk b7=r8,ia64_leave_kernel
	;;
	mov ar.unat=r9
	br.many b7
END(sys_rt_sigreturn)
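
/*
 * ia64_prepare_handle_unaligned: save a switch_stack frame so that the
 * unaligned-access handler can inspect and modify preserved registers,
 * call ia64_handle_unaligned(), and reload the state on return.
 */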
GLOBAL_ENTRY(ia64_prepare_handle_unaligned)
	.prologue
	mov r16=r0
	DO_SAVE_SWITCH_STACK
	br.call.sptk.many rp=ia64_handle_unaligned
.ret21:	.body
	DO_LOAD_SWITCH_STACK
	br.cond.sptk.many rp
END(ia64_prepare_handle_unaligned)
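
/*
 * unw_init_running(void (*callback)(struct unw_frame_info *, void *), void *arg)
 *	Build a switch_stack and an unwind frame-info structure for the
 *	current task on the stack and invoke the callback with them; the
 *	first argument is a function descriptor (ip, gp).
 */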
#	define EXTRA_FRAME_SIZE	((UNW_FRAME_INFO_SIZE+15)&~15)

GLOBAL_ENTRY(unw_init_running)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
	alloc loc1=ar.pfs,2,3,3,0
	;;
	ld8 loc2=[in0],8
	mov loc0=rp
	mov r16=loc1
	DO_SAVE_SWITCH_STACK
	.body

	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
	.fframe IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE
	SWITCH_STACK_SAVES(EXTRA_FRAME_SIZE)
	adds sp=-EXTRA_FRAME_SIZE,sp
	.body
	;;
	adds out0=16,sp
	mov out1=r13
	adds out2=16+EXTRA_FRAME_SIZE,sp
	br.call.sptk.many rp=unw_init_frame_info
1:	adds out0=16,sp
	mov b6=loc2
	mov loc2=gp
	;;
	ld8 gp=[in0]
	mov out1=in1
	br.call.sptk.many rp=b6
1:	mov gp=loc2

	.restore sp
	adds sp=IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE,sp

	mov ar.pfs=loc1
	mov rp=loc0
	br.ret.sptk.many rp
END(unw_init_running)
EXPORT_SYMBOL(unw_init_running)
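
/*
 * Function tracer entry points.  With CONFIG_DYNAMIC_FTRACE, _mcount is
 * patched at runtime and ftrace_caller/ftrace_call is the live call
 * site; otherwise _mcount compares ftrace_trace_function against
 * ftrace_stub and only takes the slow path when a tracer is installed.
 */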
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
GLOBAL_ENTRY(_mcount)
	br ftrace_stub
END(_mcount)
EXPORT_SYMBOL(_mcount)

.here:
	br.ret.sptk.many b0

GLOBAL_ENTRY(ftrace_caller)
	alloc out0 = ar.pfs, 8, 0, 4, 0
	mov out3 = r0
	;;
	mov out2 = b0
	add r3 = 0x20, r3
	mov out1 = r1;
	br.call.sptk.many b0 = ftrace_patch_gp

ftrace_patch_gp:
	movl gp=__gp
	mov b0 = r3
	;;
.global ftrace_call;
ftrace_call:
{
	.mlx
	nop.m 0x0
	movl r3 = .here;;
}
	alloc loc0 = ar.pfs, 4, 4, 2, 0
	;;
	mov loc1 = b0
	mov out0 = b0
	mov loc2 = r8
	mov loc3 = r15
	;;
	adds out0 = -MCOUNT_INSN_SIZE, out0
	mov out1 = in2
	mov b6 = r3

	br.call.sptk.many b0 = b6
	;;
	mov ar.pfs = loc0
	mov b0 = loc1
	mov r8 = loc2
	mov r15 = loc3
	br ftrace_stub
	;;
END(ftrace_caller)

#else
GLOBAL_ENTRY(_mcount)
	movl r2 = ftrace_stub
	movl r3 = ftrace_trace_function;;
	ld8 r3 = [r3];;
	ld8 r3 = [r3];;
	cmp.eq p7,p0 = r2, r3
(p7)	br.sptk.many ftrace_stub
	;;

	alloc loc0 = ar.pfs, 4, 4, 2, 0
	;;
	mov loc1 = b0
	mov out0 = b0
	mov loc2 = r8
	mov loc3 = r15
	;;
	adds out0 = -MCOUNT_INSN_SIZE, out0
	mov out1 = in2
	mov b6 = r3

	br.call.sptk.many b0 = b6
	;;
	mov ar.pfs = loc0
	mov b0 = loc1
	mov r8 = loc2
	mov r15 = loc3
	br ftrace_stub
	;;
END(_mcount)
#endif

GLOBAL_ENTRY(ftrace_stub)
	mov r3 = b0
	movl r2 = _mcount_ret_helper
	;;
	mov b6 = r2
	mov b7 = r3
	br.ret.sptk.many b6

_mcount_ret_helper:
	mov b0 = r42
	mov r1 = r41
	mov ar.pfs = r40
	br b7
END(ftrace_stub)

#endif
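
/*
 * The system-call table: one 8-byte entry (data8) per system call,
 * generated from <asm/syscall_table.h> via the __SYSCALL macro.
 */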
#define __SYSCALL(nr, entry, nargs) data8 entry
	.rodata
	.align 8
	.globl sys_call_table
sys_call_table:
#include <asm/syscall_table.h>
#undef __SYSCALL