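/* SPDX-License-Identifier: GPL-2.0 */
/*
 * 32-bit x86 low-level entry code: system call (SYSENTER / INT 0x80),
 * interrupt, exception and NMI entry points, plus the stack-switching
 * and CR3-switching helpers used when page-table isolation (PTI) is
 * enabled.
 */
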
#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/frame.h>
#include <asm/nospec-branch.h>

#include "calling.h"

	.section .entry.text, "ax"

#ifdef CONFIG_PREEMPTION
# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)		# interrupts off?
	jz	1f
	TRACE_IRQS_ON
1:
#endif
.endm

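/*
 * CR3 bit that selects the user copy of the page tables when PTI is
 * enabled: SWITCH_TO_USER_CR3 sets it, SWITCH_TO_KERNEL_CR3 clears it.
 */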
#define PTI_SWITCH_MASK		(1 << PAGE_SHIFT)
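
/*
 * User %gs save/restore.  With CONFIG_X86_32_LAZY_GS the user's %gs is
 * left alone on kernel entry and only a zero placeholder is saved in
 * pt_regs; otherwise %gs is eagerly switched to the stack-protector
 * canary segment (__KERNEL_STACK_CANARY) and saved/restored around
 * every entry.
 */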
#ifdef CONFIG_X86_32_LAZY_GS

.macro PUSH_GS
	pushl	$0
.endm
.macro POP_GS pop=0
	addl	$(4 + \pop), %esp
.endm
.macro POP_GS_EX
.endm

.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else

.macro PUSH_GS
	pushl	%gs
.endm

.macro POP_GS pop=0
98:	popl	%gs
  .if \pop <> 0
	add	$\pop, %esp
  .endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, (%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro PTGS_TO_GS
98:	mov	PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, PT_GS(%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro GS_TO_REG reg
	movl	%gs, \reg
.endm
.macro REG_TO_PTGS reg
	movl	\reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
	movl	$(__KERNEL_STACK_CANARY), \reg
	movl	\reg, %gs
.endm

#endif

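/* Switch CR3 to the user page tables; no-op unless X86_FEATURE_PTI is set. */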
.macro SWITCH_TO_USER_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI

	movl	%cr3, \scratch_reg
	orl	$PTI_SWITCH_MASK, \scratch_reg
	movl	\scratch_reg, %cr3
.Lend_\@:
.endm

.macro BUG_IF_WRONG_CR3 no_user_check=0
#ifdef CONFIG_DEBUG_ENTRY
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	.if \no_user_check == 0
	testl	$USER_SEGMENT_RPL_MASK, PT_CS(%esp)
	jz	.Lend_\@
	.endif
	movl	%cr3, %eax
	testl	$PTI_SWITCH_MASK, %eax
	jnz	.Lend_\@
	ud2
.Lend_\@:
#endif
.endm

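/*
 * Switch CR3 to the kernel page tables if we entered on the user copy;
 * no-op without X86_FEATURE_PTI.  On return \scratch_reg has
 * PTI_SWITCH_MASK set if and only if the CR3 in use at entry was the
 * user copy; SWITCH_TO_KERNEL_STACK relies on that.
 */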
.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	movl	%cr3, \scratch_reg
	testl	$PTI_SWITCH_MASK, \scratch_reg
	jz	.Lend_\@
	andl	$(~PTI_SWITCH_MASK), \scratch_reg
	movl	\scratch_reg, %cr3
	orl	$PTI_SWITCH_MASK, \scratch_reg
.Lend_\@:
.endm

#define CS_FROM_ENTRY_STACK	(1 << 31)
#define CS_FROM_USER_CR3	(1 << 30)
#define CS_FROM_KERNEL		(1 << 29)
#define CS_FROM_ESPFIX		(1 << 28)

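/*
 * The high bits of the saved CS slot carry the CS_FROM_* flags above;
 * clear them first.  If the exception came from kernel mode, rebuild a
 * complete, user-entry-like pt_regs frame below the original one
 * (copying fs/gs/orig_eax/ip/cs/flags and synthesizing sp/ss) and mark
 * it with CS_FROM_KERNEL so IRET_FRAME can undo the transformation on
 * the way out.
 */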
.macro FIXUP_FRAME
	andl	$0x0000ffff, 4*4(%esp)

#ifdef CONFIG_VM86
	testl	$X86_EFLAGS_VM, 5*4(%esp)
	jnz	.Lfrom_usermode_no_fixup_\@
#endif
	testl	$USER_SEGMENT_RPL_MASK, 4*4(%esp)
	jnz	.Lfrom_usermode_no_fixup_\@

	orl	$CS_FROM_KERNEL, 4*4(%esp)

	pushl	%ss		# ss
	pushl	%esp		# sp (points at ss)
	addl	$7*4, (%esp)	# point sp back at the previous context
	pushl	7*4(%esp)	# flags
	pushl	7*4(%esp)	# cs
	pushl	7*4(%esp)	# ip
	pushl	7*4(%esp)	# orig_eax
	pushl	7*4(%esp)	# gs / function
	pushl	7*4(%esp)	# fs
.Lfrom_usermode_no_fixup_\@:
.endm

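/*
 * Undo FIXUP_FRAME before returning with IRET: if CS_FROM_KERNEL is set,
 * copy flags/cs/ip (and the scratch eax) back just below the original
 * regs->sp and point %esp there, so the final IRET uses the original
 * in-kernel frame.
 */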
.macro IRET_FRAME
	testl	$CS_FROM_KERNEL, 1*4(%esp)
	jz	.Lfinished_frame_\@

	pushl	%eax
	pushl	%ecx
	movl	5*4(%esp), %eax		# (modified) regs->sp

	movl	4*4(%esp), %ecx		# flags
	movl	%ecx, %ss:-1*4(%eax)

	movl	3*4(%esp), %ecx		# cs
	andl	$0x0000ffff, %ecx
	movl	%ecx, %ss:-2*4(%eax)

	movl	2*4(%esp), %ecx		# ip
	movl	%ecx, %ss:-3*4(%eax)

	movl	1*4(%esp), %ecx		# eax
	movl	%ecx, %ss:-4*4(%eax)

	popl	%ecx
	lea	-4*4(%eax), %esp
	popl	%eax
.Lfinished_frame_\@:
.endm

.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0 unwind_espfix=0
	cld
.if \skip_gs == 0
	PUSH_GS
.endif
	pushl	%fs

	pushl	%eax
	movl	$(__KERNEL_PERCPU), %eax
	movl	%eax, %fs
.if \unwind_espfix > 0
	UNWIND_ESPFIX_STACK
.endif
	popl	%eax

	FIXUP_FRAME
	pushl	%es
	pushl	%ds
	pushl	\pt_regs_ax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	movl	$(__USER_DS), %edx
	movl	%edx, %ds
	movl	%edx, %es
.if \skip_gs == 0
	SET_KERNEL_GS %edx
.endif

.if \switch_stacks > 0
	SWITCH_TO_KERNEL_STACK
.endif
.endm

.macro SAVE_ALL_NMI cr3_reg:req unwind_espfix=0
	SAVE_ALL unwind_espfix=\unwind_espfix

	BUG_IF_WRONG_CR3

	SWITCH_TO_KERNEL_CR3 scratch_reg=\cr3_reg

.Lend_\@:
.endm

.macro RESTORE_INT_REGS
	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl	%ds
2:	popl	%es
3:	popl	%fs
	POP_GS \pop
	IRET_FRAME
.pushsection .fixup, "ax"
4:	movl	$0, (%esp)
	jmp	1b
5:	movl	$0, (%esp)
	jmp	2b
6:	movl	$0, (%esp)
	jmp	3b
.popsection
	_ASM_EXTABLE(1b, 4b)
	_ASM_EXTABLE(2b, 5b)
	_ASM_EXTABLE(3b, 6b)
	POP_GS_EX
.endm

.macro RESTORE_ALL_NMI cr3_reg:req pop=0
	ALTERNATIVE "jmp .Lswitched_\@", "", X86_FEATURE_PTI

	testl	$PTI_SWITCH_MASK, \cr3_reg
	jz	.Lswitched_\@

	movl	\cr3_reg, %cr3

.Lswitched_\@:

	BUG_IF_WRONG_CR3

	RESTORE_REGS pop=\pop
.endm

.macro CHECK_AND_APPLY_ESPFIX
#ifdef CONFIG_X86_ESPFIX32
#define GDT_ESPFIX_OFFSET (GDT_ENTRY_ESPFIX_SS * 8)
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + GDT_ESPFIX_OFFSET

	ALTERNATIVE "jmp .Lend_\@", "", X86_BUG_ESPFIX

	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	movb	PT_OLDSS(%esp), %ah
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
	jne	.Lend_\@			# returning to user-space with LDT SS

	mov	%esp, %edx			# load kernel esp
	mov	PT_OLDESP(%esp), %eax		# load userspace esp
	mov	%dx, %ax			# eax: new kernel esp
	sub	%eax, %edx			# offset (low word is 0)
	shr	$16, %edx
	mov	%dl, GDT_ESPFIX_SS + 4		# bits 16..23
	mov	%dh, GDT_ESPFIX_SS + 7		# bits 24..31
	pushl	$__ESPFIX_SS
	pushl	%eax				# new kernel esp

	DISABLE_INTERRUPTS(CLBR_ANY)
	lss	(%esp), %esp			# switch to espfix segment
.Lend_\@:
#endif
.endm
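
/*
 * Entry-stack to task-stack switch, done right after SAVE_ALL on every
 * non-Xen-PV entry.  After switching to the kernel CR3 (scratch %eax),
 * check whether %esp still points into the cpu_entry_area entry stack;
 * if so, copy the saved register frame over to the task stack (for
 * entries from user or vm86 mode), or copy everything up to the top of
 * the entry stack while flagging the frame with CS_FROM_ENTRY_STACK and,
 * when we entered on the user CR3, CS_FROM_USER_CR3 (for entries that
 * interrupted the kernel on the entry stack).  %esp is then pointed at
 * the copy; PARANOID_EXIT_TO_KERNEL_MODE reverses the kernel-mode case.
 */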
.macro SWITCH_TO_KERNEL_STACK

	ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV

	BUG_IF_WRONG_CR3

	SWITCH_TO_KERNEL_CR3 scratch_reg=%eax

	movl	PER_CPU_VAR(cpu_entry_area), %ecx
	addl	$CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
	subl	%esp, %ecx
	cmpl	$SIZEOF_entry_stack, %ecx
	jae	.Lend_\@

	movl	%esp, %esi
	movl	%esi, %edi

	andl	$(MASK_entry_stack), %edi
	addl	$(SIZEOF_entry_stack), %edi

	movl	TSS_entry2task_stack(%edi), %edi

#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %ecx		# mix EFLAGS and CS
	movb	PT_CS(%esp), %cl
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %ecx
#else
	movl	PT_CS(%esp), %ecx
	andl	$SEGMENT_RPL_MASK, %ecx
#endif
	cmpl	$USER_RPL, %ecx
	jb	.Lentry_from_kernel_\@

	movl	$PTREGS_SIZE, %ecx

#ifdef CONFIG_VM86
	testl	$X86_EFLAGS_VM, PT_EFLAGS(%esi)
	jz	.Lcopy_pt_regs_\@

	addl	$(4 * 4), %ecx

#endif
.Lcopy_pt_regs_\@:

	subl	%ecx, %edi

	movl	%edi, %esp

	shrl	$2, %ecx
	cld
	rep movsl

	jmp	.Lend_\@

.Lentry_from_kernel_\@:

	movl	%esi, %ecx

	andl	$(MASK_entry_stack), %ecx
	addl	$(SIZEOF_entry_stack), %ecx

	sub	%esi, %ecx

	orl	$CS_FROM_ENTRY_STACK, PT_CS(%esp)

	testl	$PTI_SWITCH_MASK, %eax
	jz	.Lcopy_pt_regs_\@
	orl	$CS_FROM_USER_CR3, PT_CS(%esp)

	jmp	.Lcopy_pt_regs_\@

.Lend_\@:
.endm
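
/*
 * Counterpart of SWITCH_TO_KERNEL_STACK for the return-to-user path:
 * copy the pt_regs frame (plus the four extra vm86 segment slots when
 * returning to vm86 mode) from the task stack back onto the entry
 * stack below TSS.sp0 and switch %esp to it, so the final IRET runs on
 * the entry stack.  Not needed on Xen PV.
 */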
.macro SWITCH_TO_ENTRY_STACK

	ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV

	movl	$PTREGS_SIZE, %ecx

#ifdef CONFIG_VM86
	testl	$(X86_EFLAGS_VM), PT_EFLAGS(%esp)
	jz	.Lcopy_pt_regs_\@

	addl	$(4 * 4), %ecx

.Lcopy_pt_regs_\@:
#endif

	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
	subl	%ecx, %edi
	movl	%esp, %esi

	movl	%edi, %ebx

	shrl	$2, %ecx
	cld
	rep movsl

	movl	%ebx, %esp

.Lend_\@:
.endm
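
/*
 * Exit path for entries that interrupted the kernel while it was
 * running on the entry stack (CS_FROM_ENTRY_STACK set by
 * SWITCH_TO_KERNEL_STACK): copy the remaining task-stack contents
 * (everything between %esp and TSS.sp1) back onto the entry stack just
 * below TSS.sp0, switch %esp there, and if CS_FROM_USER_CR3 was
 * recorded also switch back to the user CR3.
 */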
.macro PARANOID_EXIT_TO_KERNEL_MODE

	testl	$CS_FROM_ENTRY_STACK, PT_CS(%esp)
	jz	.Lend_\@

	andl	$(~CS_FROM_ENTRY_STACK), PT_CS(%esp)

	movl	%esp, %esi
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi

	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp1), %ecx
	subl	%esi, %ecx

	subl	%ecx, %edi

	movl	%edi, %ebx

	shrl	$2, %ecx
	cld
	rep movsl

	movl	%ebx, %esp

	testl	$CS_FROM_USER_CR3, PT_CS(%esp)
	jz	.Lend_\@

	andl	$(~CS_FROM_USER_CR3), PT_CS(%esp)

	SWITCH_TO_USER_CR3 scratch_reg=%eax

.Lend_\@:
.endm
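
/*
 * Context-switch trampoline: %eax = prev task, %edx = next task.  Save
 * prev's callee-saved registers and flags, switch stacks via
 * task->thread.sp, update the per-CPU stack-protector canary and refill
 * the RSB (retpoline), then restore next's callee-saved state and
 * tail-call __switch_to().
 */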
ENTRY(__switch_to_asm)
	pushl	%ebp
	pushl	%ebx
	pushl	%edi
	pushl	%esi
	pushfl

	movl	%esp, TASK_threadsp(%eax)
	movl	TASK_threadsp(%edx), %esp

#ifdef CONFIG_STACKPROTECTOR
	movl	TASK_stack_canary(%edx), %ebx
	movl	%ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
#endif

#ifdef CONFIG_RETPOLINE
	FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif

	popfl
	popl	%esi
	popl	%edi
	popl	%ebx
	popl	%ebp

	jmp	__switch_to
END(__switch_to_asm)

ENTRY(schedule_tail_wrapper)
	FRAME_BEGIN

	pushl	%eax
	call	schedule_tail
	popl	%eax

	FRAME_END
	ret
ENDPROC(schedule_tail_wrapper)

ENTRY(ret_from_fork)
	call	schedule_tail_wrapper

	testl	%ebx, %ebx
	jnz	1f				# kernel thread?

2:
	/* When we fork, we trace the syscall return in the child, too. */
	movl	%esp, %eax
	call	syscall_return_slowpath
	STACKLEAK_ERASE
	jmp	restore_all

1:	movl	%edi, %eax
	CALL_NOSPEC %ebx

	/* Kernel thread returned; make it look like a syscall returning 0 */
	movl	$0, PT_EAX(%esp)
	jmp	2b
END(ret_from_fork)

	ALIGN
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else

	movl	PT_CS(%esp), %eax
	andl	$SEGMENT_RPL_MASK, %eax
#endif
	cmpl	$USER_RPL, %eax
	jb	restore_all_kernel		# not returning to v8086 or userspace

ENTRY(resume_userspace)
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	prepare_exit_to_usermode
	jmp	restore_all
END(ret_from_exception)

GLOBAL(__begin_SYSENTER_singlestep_region)
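/*
 * All code from here through __end_SYSENTER_singlestep_region is subject
 * to being single-stepped if a user program sets TF and executes SYSENTER;
 * there is nothing we can do to stop that, so keep this region as small
 * as possible.
 */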

#ifdef CONFIG_XEN_PV
/*
 * Xen PV does not enter through the hardware SYSENTER path; strip the
 * frame Xen provided and join the normal SYSENTER code.
 */
ENTRY(xen_sysenter_target)
	addl	$5*4, %esp			# remove the xen-provided frame
	jmp	.Lsysenter_past_esp
#endif

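
/*
 * 32-bit SYSENTER entry.  SYSENTER loads %esp from an MSR that points
 * into the cpu_entry_area and does not save the user %eip or %esp; user
 * space (the vDSO) stashes its stack pointer in %ebp, which is what gets
 * saved as pt_regs->sp below.  Switch to the kernel CR3 and the task
 * stack, build a pt_regs frame and call do_fast_syscall_32(); return via
 * opportunistic SYSEXIT when possible, otherwise through the common IRET
 * path (.Lsyscall_32_done -> restore_all).
 */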
ENTRY(entry_SYSENTER_32)
	/* Save flags and %eax so %eax can be used as scratch for the CR3 switch */
	pushfl
	pushl	%eax
	BUG_IF_WRONG_CR3 no_user_check=1
	SWITCH_TO_KERNEL_CR3 scratch_reg=%eax
	popl	%eax
	popfl

	/* Stack empty again, switch to the task stack */
	movl	TSS_entry2task_stack(%esp), %esp

.Lsysenter_past_esp:
	pushl	$__USER_DS		# pt_regs->ss
	pushl	%ebp			# pt_regs->sp (stashed in bp)
	pushfl				# pt_regs->flags (except IF = 0)
	orl	$X86_EFLAGS_IF, (%esp)	# Fix IF
	pushl	$__USER_CS		# pt_regs->cs
	pushl	$0			# pt_regs->ip = 0 (placeholder)
	pushl	%eax			# pt_regs->orig_ax
	SAVE_ALL pt_regs_ax=$-ENOSYS

	/*
	 * SYSENTER does not clear NT, AC or TF for us; if any of them is
	 * set, reset the live EFLAGS before continuing.
	 */
	testl	$X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
	jnz	.Lsysenter_fix_flags
.Lsysenter_flags_fixed:

	/* User mode is traced as though IRQs are on, and SYSENTER turned them off */
	TRACE_IRQS_OFF

	movl	%esp, %eax
	call	do_fast_syscall_32
	/* XEN PV guests always use the IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV

	STACKLEAK_ERASE

/* Opportunistic SYSEXIT */
	TRACE_IRQS_ON			# User mode traces as IRQs on

	/* Set up the entry stack: keep the pointer in %eax and switch late */
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %eax
	subl	$(2*4), %eax

	/* Copy eflags and eax to the entry stack */
	movl	PT_EFLAGS(%esp), %edi
	movl	PT_EAX(%esp), %esi
	movl	%edi, (%eax)
	movl	%esi, 4(%eax)

	/* Restore user registers and segments */
	movl	PT_EIP(%esp), %edx	# pt_regs->ip
	movl	PT_OLDESP(%esp), %ecx	# pt_regs->sp
1:	mov	PT_FS(%esp), %fs
	PTGS_TO_GS

	popl	%ebx			# pt_regs->bx
	addl	$2*4, %esp		# skip pt_regs->cx and pt_regs->dx
	popl	%esi			# pt_regs->si
	popl	%edi			# pt_regs->di
	popl	%ebp			# pt_regs->bp

	movl	%eax, %esp		# switch to the entry stack

	SWITCH_TO_USER_CR3 scratch_reg=%eax

	/*
	 * Restore all flags except IF; IF is restored by the STI below so
	 * that we get the one-instruction interrupt shadow before SYSEXIT.
	 */
	btrl	$X86_EFLAGS_IF_BIT, (%esp)
	BUG_IF_WRONG_CR3 no_user_check=1
	popfl
	popl	%eax

	/* SYSEXIT: %edx = user EIP, %ecx = user ESP */
	sti
	sysexit

.pushsection .fixup, "ax"
2:	movl	$0, PT_FS(%esp)
	jmp	1b
.popsection
	_ASM_EXTABLE(1b, 2b)
	PTGS_TO_GS_EX

.Lsysenter_fix_flags:
	pushl	$X86_EFLAGS_FIXED
	popfl
	jmp	.Lsysenter_flags_fixed
GLOBAL(__end_SYSENTER_singlestep_region)
ENDPROC(entry_SYSENTER_32)
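
/*
 * Legacy "int $0x80" system call entry.  The CPU has already pushed the
 * user IRET frame; push orig_eax, save the remaining registers (switching
 * to the task stack) and call do_int80_syscall_32().  The return path
 * below (restore_all / restore_all_kernel) is shared by all interrupt
 * and exception exits.
 */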
ENTRY(entry_INT80_32)
	ASM_CLAC
	pushl	%eax			# pt_regs->orig_ax

	SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1

	TRACE_IRQS_OFF

	movl	%esp, %eax
	call	do_int80_syscall_32
.Lsyscall_32_done:

	STACKLEAK_ERASE

restore_all:
	TRACE_IRQS_IRET
	SWITCH_TO_ENTRY_STACK
.Lrestore_all_notrace:
	CHECK_AND_APPLY_ESPFIX
.Lrestore_nocheck:
	/* Switch back to the user CR3 */
	SWITCH_TO_USER_CR3 scratch_reg=%eax

	BUG_IF_WRONG_CR3

	/* Restore user state */
	RESTORE_REGS pop=4			# skip orig_eax/error_code
.Lirq_return:
	INTERRUPT_RETURN

restore_all_kernel:
#ifdef CONFIG_PREEMPTION
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	.Lno_preempt
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz	.Lno_preempt
	call	preempt_schedule_irq
.Lno_preempt:
#endif
	TRACE_IRQS_IRET
	PARANOID_EXIT_TO_KERNEL_MODE
	BUG_IF_WRONG_CR3
	RESTORE_REGS 4
	jmp	.Lirq_return

.section .fixup, "ax"
ENTRY(iret_exc)
	pushl	$0				# no error code
	pushl	$do_iret_error

#ifdef CONFIG_DEBUG_ENTRY
	/*
	 * The frame that IRET faulted on is a return-to-user frame, but we
	 * are still on the kernel CR3 here because we came from the fixup
	 * code; switch to the user CR3 so BUG_IF_WRONG_CR3 in the exception
	 * path does not trigger.
	 */
	pushl	%eax
	SWITCH_TO_USER_CR3 scratch_reg=%eax
	popl	%eax
#endif

	jmp	common_exception
.previous
	_ASM_EXTABLE(.Lirq_return, iret_exc)
ENDPROC(entry_INT80_32)

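/*
 * FIXUP_ESPFIX_STACK: we are running on the 16-bit espfix stack segment
 * (__ESPFIX_SS).  Recover the linear stack address from the base bytes
 * recorded in the espfix GDT descriptor and the current %esp, and lss
 * back onto the normal kernel stack.  UNWIND_ESPFIX_STACK does this only
 * when %ss is actually __ESPFIX_SS; it may clobber %eax.
 */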
.macro FIXUP_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
	pushl	%ecx
	subl	$2*4, %esp
	sgdt	(%esp)
	movl	2(%esp), %ecx			# GDT base address

	mov	%cs:GDT_ESPFIX_OFFSET + 4(%ecx), %al	# base bits 16..23
	mov	%cs:GDT_ESPFIX_OFFSET + 7(%ecx), %ah	# base bits 24..31
	shl	$16, %eax
	addl	$2*4, %esp
	popl	%ecx
	addl	%esp, %eax			# the adjusted stack pointer
	pushl	$__KERNEL_DS
	pushl	%eax
	lss	(%esp), %esp			# switch to the normal stack segment
#endif
.endm

.macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
	movl	%ss, %eax
	/* see if on espfix stack */
	cmpw	$__ESPFIX_SS, %ax
	jne	.Lno_fixup_\@
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
.Lno_fixup_\@:
#endif
.endm

	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushl	$(~vector+0x80)			# Note: always in signed byte range
    vector=vector+1
	jmp	common_interrupt
	.align	8
    .endr
END(irq_entries_start)

#ifdef CONFIG_X86_LOCAL_APIC
	.align 8
ENTRY(spurious_entries_start)
    vector=FIRST_SYSTEM_VECTOR
    .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
	pushl	$(~vector+0x80)			# Note: always in signed byte range
    vector=vector+1
	jmp	common_spurious
	.align	8
    .endr
END(spurious_entries_start)

common_spurious:
	ASM_CLAC
	addl	$-0x80, (%esp)			# Adjust vector into the [-256, -1] range
	SAVE_ALL switch_stacks=1
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	smp_spurious_interrupt
	jmp	ret_from_intr
ENDPROC(common_spurious)
#endif

	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addl	$-0x80, (%esp)			# Adjust vector into the [-256, -1] range

	SAVE_ALL switch_stacks=1
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	do_IRQ
	jmp	ret_from_intr
ENDPROC(common_interrupt)

#define BUILD_INTERRUPT3(name, nr, fn)			\
ENTRY(name)						\
	ASM_CLAC;					\
	pushl	$~(nr);					\
	SAVE_ALL switch_stacks=1;			\
	ENCODE_FRAME_POINTER;				\
	TRACE_IRQS_OFF					\
	movl	%esp, %eax;				\
	call	fn;					\
	jmp	ret_from_intr;				\
ENDPROC(name)

#define BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(name, nr, smp_##name);	\

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

ENTRY(coprocessor_error)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_error
	jmp	common_exception
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	ASM_CLAC
	pushl	$0
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: INVD from userspace raises exception 19 instead of #GP */
	ALTERNATIVE "pushl	$do_general_protection",	\
		    "pushl	$do_simd_coprocessor_error",	\
		    X86_FEATURE_XMM
#else
	pushl	$do_simd_coprocessor_error
#endif
	jmp	common_exception
END(simd_coprocessor_error)

ENTRY(device_not_available)
	ASM_CLAC
	pushl	$-1				# mark this as an int
	pushl	$do_device_not_available
	jmp	common_exception
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
	_ASM_EXTABLE(native_iret, iret_exc)
END(native_iret)
#endif

ENTRY(overflow)
	ASM_CLAC
	pushl	$0
	pushl	$do_overflow
	jmp	common_exception
END(overflow)

ENTRY(bounds)
	ASM_CLAC
	pushl	$0
	pushl	$do_bounds
	jmp	common_exception
END(bounds)

ENTRY(invalid_op)
	ASM_CLAC
	pushl	$0
	pushl	$do_invalid_op
	jmp	common_exception
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_segment_overrun
	jmp	common_exception
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	ASM_CLAC
	pushl	$do_invalid_TSS
	jmp	common_exception
END(invalid_TSS)

ENTRY(segment_not_present)
	ASM_CLAC
	pushl	$do_segment_not_present
	jmp	common_exception
END(segment_not_present)

ENTRY(stack_segment)
	ASM_CLAC
	pushl	$do_stack_segment
	jmp	common_exception
END(stack_segment)

ENTRY(alignment_check)
	ASM_CLAC
	pushl	$do_alignment_check
	jmp	common_exception
END(alignment_check)

ENTRY(divide_error)
	ASM_CLAC
	pushl	$0				# no error code
	pushl	$do_divide_error
	jmp	common_exception
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	ASM_CLAC
	pushl	$0
	pushl	machine_check_vector
	jmp	common_exception
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	ASM_CLAC
	pushl	$0
	pushl	$do_spurious_interrupt_bug
	jmp	common_exception
END(spurious_interrupt_bug)

#ifdef CONFIG_XEN_PV
ENTRY(xen_hypervisor_callback)

	cmpl	$xen_iret_start_crit, (%esp)
	jb	1f
	cmpl	$xen_iret_end_crit, (%esp)
	jae	1f
	call	xen_iret_crit_fixup
1:
	pushl	$-1
	SAVE_ALL
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF
	mov	%esp, %eax
	call	xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPTION
	call	xen_maybe_preempt_hcall
#endif
	jmp	ret_from_intr
ENDPROC(xen_hypervisor_callback)

ENTRY(xen_failsafe_callback)
	pushl	%eax
	movl	$1, %eax
1:	mov	4(%esp), %ds
2:	mov	8(%esp), %es
3:	mov	12(%esp), %fs
4:	mov	16(%esp), %gs

	testl	%eax, %eax
	popl	%eax
	lea	16(%esp), %esp
	jz	5f
	jmp	iret_exc
5:	pushl	$-1
	SAVE_ALL
	ENCODE_FRAME_POINTER
	jmp	ret_from_exception

.section .fixup, "ax"
6:	xorl	%eax, %eax
	movl	%eax, 4(%esp)
	jmp	1b
7:	xorl	%eax, %eax
	movl	%eax, 8(%esp)
	jmp	2b
8:	xorl	%eax, %eax
	movl	%eax, 12(%esp)
	jmp	3b
9:	xorl	%eax, %eax
	movl	%eax, 16(%esp)
	jmp	4b
.previous
	_ASM_EXTABLE(1b, 6b)
	_ASM_EXTABLE(2b, 7b)
	_ASM_EXTABLE(3b, 8b)
	_ASM_EXTABLE(4b, 9b)
ENDPROC(xen_failsafe_callback)
#endif

#ifdef CONFIG_XEN_PVHVM
BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 xen_evtchn_do_upcall)
#endif

#if IS_ENABLED(CONFIG_HYPERV)

BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 hyperv_vector_handler)

BUILD_INTERRUPT3(hyperv_reenlightenment_vector, HYPERV_REENLIGHTENMENT_VECTOR,
		 hyperv_reenlightenment_intr)

BUILD_INTERRUPT3(hv_stimer0_callback_vector, HYPERV_STIMER0_VECTOR,
		 hv_stimer0_vector_handler)

#endif

ENTRY(page_fault)
	ASM_CLAC
	pushl	$do_page_fault
	jmp	common_exception_read_cr2
END(page_fault)

common_exception_read_cr2:
	/* the function address is in %gs's slot on the stack */
	SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1

	ENCODE_FRAME_POINTER

	GS_TO_REG %ecx
	movl	PT_GS(%esp), %edi
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx

	GET_CR2_INTO(%ecx)			# might clobber %eax

	movl	PT_ORIG_EAX(%esp), %edx		# get the error code
	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart

	TRACE_IRQS_OFF
	movl	%esp, %eax			# pt_regs pointer
	CALL_NOSPEC %edi
	jmp	ret_from_exception
END(common_exception_read_cr2)

common_exception:
	/* the function address is in %gs's slot on the stack */
	SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
	ENCODE_FRAME_POINTER

	GS_TO_REG %ecx
	movl	PT_GS(%esp), %edi		# get the function address
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx

	movl	PT_ORIG_EAX(%esp), %edx		# get the error code
	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart

	TRACE_IRQS_OFF
	movl	%esp, %eax			# pt_regs pointer
	CALL_NOSPEC %edi
	jmp	ret_from_exception
END(common_exception)

ENTRY(debug)
	/*
	 * Entry from sysenter is now handled in common_exception
	 */
	ASM_CLAC
	pushl	$-1				# mark this as an int
	pushl	$do_debug
	jmp	common_exception
END(debug)

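/*
 * NMI entry.  NMIs can hit while the kernel is on the entry stack or,
 * with CONFIG_X86_ESPFIX32, while running on the 16-bit espfix stack,
 * so this path does its own stack checks: it saves registers with
 * SAVE_ALL_NMI (which also switches CR3, remembering the entry CR3 in
 * %edi), runs do_nmi() on a safe stack, and returns via RESTORE_ALL_NMI,
 * restoring the original CR3.  The espfix case rebuilds a sane frame
 * first and is flagged with CS_FROM_ESPFIX so the exit path can switch
 * back.
 */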
ENTRY(nmi)
	ASM_CLAC

#ifdef CONFIG_X86_ESPFIX32
	/* Check whether we are on the espfix (16-bit SS) stack */
	pushl	%eax
	movl	%ss, %eax
	cmpw	$__ESPFIX_SS, %ax
	popl	%eax
	je	.Lnmi_espfix_stack
#endif

	pushl	%eax				# pt_regs->orig_ax
	SAVE_ALL_NMI cr3_reg=%edi
	ENCODE_FRAME_POINTER
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer

	/* Are we currently on the entry stack? */
	movl	PER_CPU_VAR(cpu_entry_area), %ecx
	addl	$CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
	subl	%eax, %ecx
	cmpl	$SIZEOF_entry_stack, %ecx
	jb	.Lnmi_from_sysenter_stack

	/* Not on the entry stack: handle the NMI right here */
	call	do_nmi
	jmp	.Lnmi_return

.Lnmi_from_sysenter_stack:
	/* We are on the entry stack: switch to the task stack for the call */
	movl	%esp, %ebx
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	call	do_nmi
	movl	%ebx, %esp

.Lnmi_return:
#ifdef CONFIG_X86_ESPFIX32
	testl	$CS_FROM_ESPFIX, PT_CS(%esp)
	jnz	.Lnmi_from_espfix
#endif

	CHECK_AND_APPLY_ESPFIX
	RESTORE_ALL_NMI cr3_reg=%edi pop=4
	jmp	.Lirq_return

#ifdef CONFIG_X86_ESPFIX32
.Lnmi_espfix_stack:
	/* Create the far pointer used by the final lss back to the espfix stack */
	pushl	%ss
	pushl	%esp
	addl	$4, (%esp)

	/* Copy the (short) IRET frame */
	pushl	4*4(%esp)			# flags
	pushl	4*4(%esp)			# cs
	pushl	4*4(%esp)			# ip

	pushl	%eax				# orig_ax

	SAVE_ALL_NMI cr3_reg=%edi unwind_espfix=1
	ENCODE_FRAME_POINTER

	/* Clear CS_FROM_KERNEL, set CS_FROM_ESPFIX in the saved CS */
	xorl	$(CS_FROM_ESPFIX | CS_FROM_KERNEL), PT_CS(%esp)

	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	jmp	.Lnmi_from_sysenter_stack

.Lnmi_from_espfix:
	RESTORE_ALL_NMI cr3_reg=%edi

	lss	(1+5+6)*4(%esp), %esp		# back to espfix stack
	jmp	.Lirq_return
#endif
END(nmi)

ENTRY(int3)
	ASM_CLAC
	pushl	$-1				# mark this as an int

	SAVE_ALL switch_stacks=1
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	call	do_int3
	jmp	ret_from_exception
END(int3)

ENTRY(general_protection)
	ASM_CLAC
	pushl	$do_general_protection
	jmp	common_exception
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	ASM_CLAC
	pushl	$do_async_page_fault
	jmp	common_exception_read_cr2
END(async_page_fault)
#endif

ENTRY(rewind_stack_do_exit)
	/* Prevent any naive code from trying to unwind to our caller. */
	xorl	%ebp, %ebp

	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esi
	leal	-TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp

	call	do_exit
1:	jmp	1b
END(rewind_stack_do_exit)