/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_ptrauth.h>

#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)

	.text
	.pushsection	.hyp.text, "ax"
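
/*
 * Store/load the callee-saved general-purpose registers (x19-x28),
 * the frame pointer (x29) and the link register to/from the gp_regs
 * area of the kvm_cpu_context pointed to by \ctxt.
 */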
.macro save_callee_saved_regs ctxt
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

/*
 * u64 __guest_enter(struct kvm_vcpu *vcpu,
 *		     struct kvm_cpu_context *host_ctxt);
 */
ENTRY(__guest_enter)
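	// x0: vcpu
	// x1: host context
	// x2-x17: clobbered by macros
	// x18: guest context

	// Store the host regs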
	save_callee_saved_regs x1
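
	// Now that the host state is stored, a pending RAS SError must
	// affect the host. If any asynchronous exception is pending we
	// defer the guest entry; the DSB isn't necessary before v8.2 as
	// any SError would be fatal.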
alternative_if ARM64_HAS_RAS_EXTN
	dsb	nshst
	isb
alternative_else_nop_endif
	mrs	x1, isr_el1
	cbz	x1, 1f
	mov	x0, #ARM_EXCEPTION_IRQ
	ret

1:
	add	x18, x0, #VCPU_CONTEXT
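
	// Macro ptrauth_switch_to_guest format:
	//	ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3)
	// Restoring the guest's keys is done in assembly rather than C,
	// as taking an exception while the keys are being switched could
	// otherwise cause pointer authentication signing mismatches in
	// kernel code.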
	ptrauth_switch_to_guest x18, x0, x1, x2
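
	// Restore guest regs x0-x17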
	ldp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	// Restore guest regs x19-x29, lr
	restore_callee_saved_regs x18

	// Restore guest reg x18 last, as until now it holds the pointer
	// to the guest context
	ldr	x18,      [x18, #CPU_XREG_OFFSET(18)]

	// Do not touch any register after this!
	eret
	sb	// Speculation barrier: no straight-line speculation past ERET
ENDPROC(__guest_enter)

ENTRY(__guest_exit)
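	// x0: return code
	// x1: vcpu
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack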

	add	x1, x1, #VCPU_CONTEXT

	ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
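
	// Store the guest regs x2 and x3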
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(2)]
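
	// Retrieve the guest regs x0-x1 from the stack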
	ldp	x2, x3, [sp], #16	// x0, x1
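
	// Store the guest regs x0-x1 and x4-x18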
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x1, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x1, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x1, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x1, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x1, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x1, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x1, #CPU_XREG_OFFSET(16)]
	str	x18,      [x1, #CPU_XREG_OFFSET(18)]

	// Store the guest regs x19-x29, lr
	save_callee_saved_regs x1

	get_host_ctxt	x2, x3
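
	// Macro ptrauth_switch_to_host format:
	//	ptrauth_switch_to_host(guest cxt, host cxt, tmp1, tmp2, tmp3)
	// As on entry, the key switch is done in assembly rather than C
	// to avoid pointer authentication signing mismatches while the
	// keys are inconsistent.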
	ptrauth_switch_to_host x1, x2, x3, x4, x5
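
	// Now restore the host regs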
	restore_callee_saved_regs x2

alternative_if ARM64_HAS_RAS_EXTN
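	// If we have the RAS extensions we can consume a pending error
	// without an unmask-SError and isb. The ESB instruction consumed
	// any pending guest error when we took the exception from the
	// guest.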
	mrs_s	x2, SYS_DISR_EL1
	str	x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
	cbz	x2, 1f
	msr_s	SYS_DISR_EL1, xzr
	orr	x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
1:	ret
alternative_else
	dsb	sy		// Synchronize against in-flight ld/st
	isb			// Prevent an early read of side-effect free ISR
	mrs	x2, isr_el1
	tbnz	x2, #8, 2f	// ISR_EL1.A
	ret
	nop
2:
alternative_endif
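	// We know we have a pending asynchronous abort, now is the
	// time to flush it out.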
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, spsr_el2
	mov	x5, x0

	msr	daifclr, #4	// Unmask aborts
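
	// This is our single instruction exception window. A pending
	// SError is guaranteed to occur at the earliest when we unmask
	// it, and at the latest just after the ISB.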
	.global	abort_guest_exit_start
abort_guest_exit_start:

	isb

	.global	abort_guest_exit_end
abort_guest_exit_end:

	msr	daifset, #4	// Mask aborts
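
	// If the SError fired, restore the EL2 exception context that it
	// clobbered, so that we can report some information, and merge
	// the exception code with the SError pending bit.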
	tbz	x0, #ARM_EXIT_WITH_SERROR_BIT, 1f
	msr	elr_el2, x2
	msr	esr_el2, x3
	msr	spsr_el2, x4
	orr	x0, x0, x5
1:	ret
ENDPROC(__guest_exit)