/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>

#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/memory.h>

#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
#define CPU_SPSR_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
#define CPU_SYSREG_OFFSET(x)	(CPU_SYSREGS + 8*x)
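
/*
 * These macros expand to byte offsets into struct kvm_cpu_context,
 * built from constants generated by asm-offsets.c. For example,
 * CPU_XREG_OFFSET(19) is expected to be the offset of regs.regs[19]
 * within the context, which is what lets save_common_regs below use
 * plain stp/ldp pairs from a single base register.
 */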

	.text
	.pushsection	.hyp.text, "ax"
	.align	PAGE_SHIFT

.macro save_common_regs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_XREG_OFFSET(19)
	stp	x19, x20, [x3]
	stp	x21, x22, [x3, #16]
	stp	x23, x24, [x3, #32]
	stp	x25, x26, [x3, #48]
	stp	x27, x28, [x3, #64]
	stp	x29, lr, [x3, #80]

	mrs	x19, sp_el0
	mrs	x20, elr_el2		// EL1 PC
	mrs	x21, spsr_el2		// EL1 pstate

	stp	x19, x20, [x3, #96]
	str	x21, [x3, #112]

	mrs	x22, sp_el1
	mrs	x23, elr_el1
	mrs	x24, spsr_el1

	str	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	str	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	str	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
.endm

.macro restore_common_regs
	// x2: base address for cpu context
	// x3: tmp register

	ldr	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	ldr	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	ldr	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]

	msr	sp_el1, x22
	msr	elr_el1, x23
	msr	spsr_el1, x24

	add	x3, x2, #CPU_XREG_OFFSET(31)    // SP_EL0
	ldp	x19, x20, [x3]
	ldr	x21, [x3, #16]

	msr	sp_el0, x19
	msr	elr_el2, x20 				// EL1 PC
	msr	spsr_el2, x21 				// EL1 pstate

	add	x3, x2, #CPU_XREG_OFFSET(19)
	ldp	x19, x20, [x3]
	ldp	x21, x22, [x3, #16]
	ldp	x23, x24, [x3, #32]
	ldp	x25, x26, [x3, #48]
	ldp	x27, x28, [x3, #64]
	ldp	x29, lr, [x3, #80]
.endm

.macro save_host_regs
	save_common_regs
.endm

.macro restore_host_regs
	restore_common_regs
.endm

.macro save_fpsimd
	// x2: cpu context address
	// x3, x4: tmp regs
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_save x3, 4
.endm

.macro restore_fpsimd
	// x2: cpu context address
	// x3, x4: tmp regs
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_restore x3, 4
.endm

.macro save_guest_regs
	// x0 is the vcpu address
	// x1 is the return code, do not corrupt!
	// x2 is the cpu context
	// x3 is a tmp register
	// Guest's x0-x3 are on the stack

	// Compute base to save registers
	add	x3, x2, #CPU_XREG_OFFSET(4)
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	str	x18, [x3, #112]

	pop	x6, x7			// x2, x3
	pop	x4, x5			// x0, x1

	add	x3, x2, #CPU_XREG_OFFSET(0)
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]

	save_common_regs
.endm

.macro restore_guest_regs
	// x0 is the vcpu address.
	// x2 is the cpu context
	// x3 is a tmp register

	// Prepare x0-x3 for later restore
	add	x3, x2, #CPU_XREG_OFFSET(0)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	push	x4, x5		// Push x0-x3 on the stack
	push	x6, x7

	// x4-x18
	ldp	x4, x5, [x3, #32]
	ldp	x6, x7, [x3, #48]
	ldp	x8, x9, [x3, #64]
	ldp	x10, x11, [x3, #80]
	ldp	x12, x13, [x3, #96]
	ldp	x14, x15, [x3, #112]
	ldp	x16, x17, [x3, #128]
	ldr	x18, [x3, #144]

	// x19-x29, lr, sp*, elr*, spsr*
	restore_common_regs

	// Last bits of the 64bit state
	pop	x2, x3
	pop	x0, x1

	// Do not touch any register after this!
.endm

/*
 * Macros to perform system register save/restore.
 *
 * Ordering here is absolutely critical, and must be kept consistent
 * in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
 * and in kvm_asm.h.
 *
 * In other words, don't touch any of these unless you know what
 * you are doing.
 */
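
/*
 * As an illustration of the constraint above: the mrs block in
 * save_sysregs stores vmpidr_el2 first because MPIDR_EL1 is assumed to
 * be the first sysreg index defined in kvm_asm.h, csselr_el1 second,
 * and so on down to mdscr_el1. Adding or reordering a register in one
 * of these lists without updating the others corrupts the context
 * silently.
 */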
.macro save_sysregs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	mrs	x4,	vmpidr_el2
	mrs	x5,	csselr_el1
	mrs	x6,	sctlr_el1
	mrs	x7,	actlr_el1
	mrs	x8,	cpacr_el1
	mrs	x9,	ttbr0_el1
	mrs	x10,	ttbr1_el1
	mrs	x11,	tcr_el1
	mrs	x12,	esr_el1
	mrs	x13,	afsr0_el1
	mrs	x14,	afsr1_el1
	mrs	x15,	far_el1
	mrs	x16,	mair_el1
	mrs	x17,	vbar_el1
	mrs	x18,	contextidr_el1
	mrs	x19,	tpidr_el0
	mrs	x20,	tpidrro_el0
	mrs	x21,	tpidr_el1
	mrs	x22,	amair_el1
	mrs	x23,	cntkctl_el1
	mrs	x24,	par_el1
	mrs	x25,	mdscr_el1

	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	stp	x18, x19, [x3, #112]
	stp	x20, x21, [x3, #128]
	stp	x22, x23, [x3, #144]
	stp	x24, x25, [x3, #160]
.endm

.macro save_debug
	// x2: base address for cpu context
	// x3: tmp register

	mrs	x26, id_aa64dfr0_el1
	ubfx	x24, x26, #12, #4	// Extract BRPs
	ubfx	x25, x26, #20, #4	// Extract WRPs
	mov	w26, #15
	sub	w24, w26, w24		// How many BPs to skip
	sub	w25, w26, w25		// How many WPs to skip

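	// The sequences below use a computed branch: each mrs/str slot
	// is one 4-byte instruction, so adding (16 - #implemented) << 2
	// to the label address skips the breakpoint/watchpoint registers
	// this CPU does not implement and starts at the highest one it
	// does.
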
	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)

	adr	x26, 1f
	add	x26, x26, x24, lsl #2
	br	x26
1:
	mrs	x20, dbgbcr15_el1
	mrs	x19, dbgbcr14_el1
	mrs	x18, dbgbcr13_el1
	mrs	x17, dbgbcr12_el1
	mrs	x16, dbgbcr11_el1
	mrs	x15, dbgbcr10_el1
	mrs	x14, dbgbcr9_el1
	mrs	x13, dbgbcr8_el1
	mrs	x12, dbgbcr7_el1
	mrs	x11, dbgbcr6_el1
	mrs	x10, dbgbcr5_el1
	mrs	x9, dbgbcr4_el1
	mrs	x8, dbgbcr3_el1
	mrs	x7, dbgbcr2_el1
	mrs	x6, dbgbcr1_el1
	mrs	x5, dbgbcr0_el1

	adr	x26, 1f
	add	x26, x26, x24, lsl #2
	br	x26

1:
	str	x20, [x3, #(15 * 8)]
	str	x19, [x3, #(14 * 8)]
	str	x18, [x3, #(13 * 8)]
	str	x17, [x3, #(12 * 8)]
	str	x16, [x3, #(11 * 8)]
	str	x15, [x3, #(10 * 8)]
	str	x14, [x3, #(9 * 8)]
	str	x13, [x3, #(8 * 8)]
	str	x12, [x3, #(7 * 8)]
	str	x11, [x3, #(6 * 8)]
	str	x10, [x3, #(5 * 8)]
	str	x9, [x3, #(4 * 8)]
	str	x8, [x3, #(3 * 8)]
	str	x7, [x3, #(2 * 8)]
	str	x6, [x3, #(1 * 8)]
	str	x5, [x3, #(0 * 8)]

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)

	adr	x26, 1f
	add	x26, x26, x24, lsl #2
	br	x26
1:
	mrs	x20, dbgbvr15_el1
	mrs	x19, dbgbvr14_el1
	mrs	x18, dbgbvr13_el1
	mrs	x17, dbgbvr12_el1
	mrs	x16, dbgbvr11_el1
	mrs	x15, dbgbvr10_el1
	mrs	x14, dbgbvr9_el1
	mrs	x13, dbgbvr8_el1
	mrs	x12, dbgbvr7_el1
	mrs	x11, dbgbvr6_el1
	mrs	x10, dbgbvr5_el1
	mrs	x9, dbgbvr4_el1
	mrs	x8, dbgbvr3_el1
	mrs	x7, dbgbvr2_el1
	mrs	x6, dbgbvr1_el1
	mrs	x5, dbgbvr0_el1

	adr	x26, 1f
	add	x26, x26, x24, lsl #2
	br	x26

1:
	str	x20, [x3, #(15 * 8)]
	str	x19, [x3, #(14 * 8)]
	str	x18, [x3, #(13 * 8)]
	str	x17, [x3, #(12 * 8)]
	str	x16, [x3, #(11 * 8)]
	str	x15, [x3, #(10 * 8)]
	str	x14, [x3, #(9 * 8)]
	str	x13, [x3, #(8 * 8)]
	str	x12, [x3, #(7 * 8)]
	str	x11, [x3, #(6 * 8)]
	str	x10, [x3, #(5 * 8)]
	str	x9, [x3, #(4 * 8)]
	str	x8, [x3, #(3 * 8)]
	str	x7, [x3, #(2 * 8)]
	str	x6, [x3, #(1 * 8)]
	str	x5, [x3, #(0 * 8)]

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)

	adr	x26, 1f
	add	x26, x26, x25, lsl #2
	br	x26
1:
	mrs	x20, dbgwcr15_el1
	mrs	x19, dbgwcr14_el1
	mrs	x18, dbgwcr13_el1
	mrs	x17, dbgwcr12_el1
	mrs	x16, dbgwcr11_el1
	mrs	x15, dbgwcr10_el1
	mrs	x14, dbgwcr9_el1
	mrs	x13, dbgwcr8_el1
	mrs	x12, dbgwcr7_el1
	mrs	x11, dbgwcr6_el1
	mrs	x10, dbgwcr5_el1
	mrs	x9, dbgwcr4_el1
	mrs	x8, dbgwcr3_el1
	mrs	x7, dbgwcr2_el1
	mrs	x6, dbgwcr1_el1
	mrs	x5, dbgwcr0_el1

	adr	x26, 1f
	add	x26, x26, x25, lsl #2
	br	x26

1:
	str	x20, [x3, #(15 * 8)]
	str	x19, [x3, #(14 * 8)]
	str	x18, [x3, #(13 * 8)]
	str	x17, [x3, #(12 * 8)]
	str	x16, [x3, #(11 * 8)]
	str	x15, [x3, #(10 * 8)]
	str	x14, [x3, #(9 * 8)]
	str	x13, [x3, #(8 * 8)]
	str	x12, [x3, #(7 * 8)]
	str	x11, [x3, #(6 * 8)]
	str	x10, [x3, #(5 * 8)]
	str	x9, [x3, #(4 * 8)]
	str	x8, [x3, #(3 * 8)]
	str	x7, [x3, #(2 * 8)]
	str	x6, [x3, #(1 * 8)]
	str	x5, [x3, #(0 * 8)]

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)

	adr	x26, 1f
	add	x26, x26, x25, lsl #2
	br	x26
1:
	mrs	x20, dbgwvr15_el1
	mrs	x19, dbgwvr14_el1
	mrs	x18, dbgwvr13_el1
	mrs	x17, dbgwvr12_el1
	mrs	x16, dbgwvr11_el1
	mrs	x15, dbgwvr10_el1
	mrs	x14, dbgwvr9_el1
	mrs	x13, dbgwvr8_el1
	mrs	x12, dbgwvr7_el1
	mrs	x11, dbgwvr6_el1
	mrs	x10, dbgwvr5_el1
	mrs	x9, dbgwvr4_el1
	mrs	x8, dbgwvr3_el1
	mrs	x7, dbgwvr2_el1
	mrs	x6, dbgwvr1_el1
	mrs	x5, dbgwvr0_el1

	adr	x26, 1f
	add	x26, x26, x25, lsl #2
	br	x26

1:
	str	x20, [x3, #(15 * 8)]
	str	x19, [x3, #(14 * 8)]
	str	x18, [x3, #(13 * 8)]
	str	x17, [x3, #(12 * 8)]
	str	x16, [x3, #(11 * 8)]
	str	x15, [x3, #(10 * 8)]
	str	x14, [x3, #(9 * 8)]
	str	x13, [x3, #(8 * 8)]
	str	x12, [x3, #(7 * 8)]
	str	x11, [x3, #(6 * 8)]
	str	x10, [x3, #(5 * 8)]
	str	x9, [x3, #(4 * 8)]
	str	x8, [x3, #(3 * 8)]
	str	x7, [x3, #(2 * 8)]
	str	x6, [x3, #(1 * 8)]
	str	x5, [x3, #(0 * 8)]

	mrs	x21, mdccint_el1
	str	x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
.endm

.macro restore_sysregs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	ldp	x8, x9, [x3, #32]
	ldp	x10, x11, [x3, #48]
	ldp	x12, x13, [x3, #64]
	ldp	x14, x15, [x3, #80]
	ldp	x16, x17, [x3, #96]
	ldp	x18, x19, [x3, #112]
	ldp	x20, x21, [x3, #128]
	ldp	x22, x23, [x3, #144]
	ldp	x24, x25, [x3, #160]

	msr	vmpidr_el2,	x4
	msr	csselr_el1,	x5
	msr	sctlr_el1,	x6
	msr	actlr_el1,	x7
	msr	cpacr_el1,	x8
	msr	ttbr0_el1,	x9
	msr	ttbr1_el1,	x10
	msr	tcr_el1,	x11
	msr	esr_el1,	x12
	msr	afsr0_el1,	x13
	msr	afsr1_el1,	x14
	msr	far_el1,	x15
	msr	mair_el1,	x16
	msr	vbar_el1,	x17
	msr	contextidr_el1,	x18
	msr	tpidr_el0,	x19
	msr	tpidrro_el0,	x20
	msr	tpidr_el1,	x21
	msr	amair_el1,	x22
	msr	cntkctl_el1,	x23
	msr	par_el1,	x24
	msr	mdscr_el1,	x25
.endm

.macro restore_debug
	// x2: base address for cpu context
	// x3: tmp register

	mrs	x26, id_aa64dfr0_el1
	ubfx	x24, x26, #12, #4	// Extract BRPs
	ubfx	x25, x26, #20, #4	// Extract WRPs
	mov	w26, #15
	sub	w24, w26, w24		// How many BPs to skip
	sub	w25, w26, w25		// How many WPs to skip
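
	// Same computed-branch layout as in save_debug: skip the
	// unimplemented breakpoint/watchpoint slots, then fall through
	// the remaining 4-byte ldr/msr instructions.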

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)

	adr	x26, 1f
	add	x26, x26, x24, lsl #2
	br	x26
1:
	ldr	x20, [x3, #(15 * 8)]
	ldr	x19, [x3, #(14 * 8)]
	ldr	x18, [x3, #(13 * 8)]
	ldr	x17, [x3, #(12 * 8)]
	ldr	x16, [x3, #(11 * 8)]
	ldr	x15, [x3, #(10 * 8)]
	ldr	x14, [x3, #(9 * 8)]
	ldr	x13, [x3, #(8 * 8)]
	ldr	x12, [x3, #(7 * 8)]
	ldr	x11, [x3, #(6 * 8)]
	ldr	x10, [x3, #(5 * 8)]
	ldr	x9, [x3, #(4 * 8)]
	ldr	x8, [x3, #(3 * 8)]
	ldr	x7, [x3, #(2 * 8)]
	ldr	x6, [x3, #(1 * 8)]
	ldr	x5, [x3, #(0 * 8)]

	adr	x26, 1f
	add	x26, x26, x24, lsl #2
	br	x26
1:
	msr	dbgbcr15_el1, x20
	msr	dbgbcr14_el1, x19
	msr	dbgbcr13_el1, x18
	msr	dbgbcr12_el1, x17
	msr	dbgbcr11_el1, x16
	msr	dbgbcr10_el1, x15
	msr	dbgbcr9_el1, x14
	msr	dbgbcr8_el1, x13
	msr	dbgbcr7_el1, x12
	msr	dbgbcr6_el1, x11
	msr	dbgbcr5_el1, x10
	msr	dbgbcr4_el1, x9
	msr	dbgbcr3_el1, x8
	msr	dbgbcr2_el1, x7
	msr	dbgbcr1_el1, x6
	msr	dbgbcr0_el1, x5

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)

	adr	x26, 1f
	add	x26, x26, x24, lsl #2
	br	x26
1:
	ldr	x20, [x3, #(15 * 8)]
	ldr	x19, [x3, #(14 * 8)]
	ldr	x18, [x3, #(13 * 8)]
	ldr	x17, [x3, #(12 * 8)]
	ldr	x16, [x3, #(11 * 8)]
	ldr	x15, [x3, #(10 * 8)]
	ldr	x14, [x3, #(9 * 8)]
	ldr	x13, [x3, #(8 * 8)]
	ldr	x12, [x3, #(7 * 8)]
	ldr	x11, [x3, #(6 * 8)]
	ldr	x10, [x3, #(5 * 8)]
	ldr	x9, [x3, #(4 * 8)]
	ldr	x8, [x3, #(3 * 8)]
	ldr	x7, [x3, #(2 * 8)]
	ldr	x6, [x3, #(1 * 8)]
	ldr	x5, [x3, #(0 * 8)]

	adr	x26, 1f
	add	x26, x26, x24, lsl #2
	br	x26
1:
	msr	dbgbvr15_el1, x20
	msr	dbgbvr14_el1, x19
	msr	dbgbvr13_el1, x18
	msr	dbgbvr12_el1, x17
	msr	dbgbvr11_el1, x16
	msr	dbgbvr10_el1, x15
	msr	dbgbvr9_el1, x14
	msr	dbgbvr8_el1, x13
	msr	dbgbvr7_el1, x12
	msr	dbgbvr6_el1, x11
	msr	dbgbvr5_el1, x10
	msr	dbgbvr4_el1, x9
	msr	dbgbvr3_el1, x8
	msr	dbgbvr2_el1, x7
	msr	dbgbvr1_el1, x6
	msr	dbgbvr0_el1, x5

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)

	adr	x26, 1f
	add	x26, x26, x25, lsl #2
	br	x26
1:
	ldr	x20, [x3, #(15 * 8)]
	ldr	x19, [x3, #(14 * 8)]
	ldr	x18, [x3, #(13 * 8)]
	ldr	x17, [x3, #(12 * 8)]
	ldr	x16, [x3, #(11 * 8)]
	ldr	x15, [x3, #(10 * 8)]
	ldr	x14, [x3, #(9 * 8)]
	ldr	x13, [x3, #(8 * 8)]
	ldr	x12, [x3, #(7 * 8)]
	ldr	x11, [x3, #(6 * 8)]
	ldr	x10, [x3, #(5 * 8)]
	ldr	x9, [x3, #(4 * 8)]
	ldr	x8, [x3, #(3 * 8)]
	ldr	x7, [x3, #(2 * 8)]
	ldr	x6, [x3, #(1 * 8)]
	ldr	x5, [x3, #(0 * 8)]

	adr	x26, 1f
	add	x26, x26, x25, lsl #2
	br	x26
1:
	msr	dbgwcr15_el1, x20
	msr	dbgwcr14_el1, x19
	msr	dbgwcr13_el1, x18
	msr	dbgwcr12_el1, x17
	msr	dbgwcr11_el1, x16
	msr	dbgwcr10_el1, x15
	msr	dbgwcr9_el1, x14
	msr	dbgwcr8_el1, x13
	msr	dbgwcr7_el1, x12
	msr	dbgwcr6_el1, x11
	msr	dbgwcr5_el1, x10
	msr	dbgwcr4_el1, x9
	msr	dbgwcr3_el1, x8
	msr	dbgwcr2_el1, x7
	msr	dbgwcr1_el1, x6
	msr	dbgwcr0_el1, x5

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)

	adr	x26, 1f
	add	x26, x26, x25, lsl #2
	br	x26
1:
	ldr	x20, [x3, #(15 * 8)]
	ldr	x19, [x3, #(14 * 8)]
	ldr	x18, [x3, #(13 * 8)]
	ldr	x17, [x3, #(12 * 8)]
	ldr	x16, [x3, #(11 * 8)]
	ldr	x15, [x3, #(10 * 8)]
	ldr	x14, [x3, #(9 * 8)]
	ldr	x13, [x3, #(8 * 8)]
	ldr	x12, [x3, #(7 * 8)]
	ldr	x11, [x3, #(6 * 8)]
	ldr	x10, [x3, #(5 * 8)]
	ldr	x9, [x3, #(4 * 8)]
	ldr	x8, [x3, #(3 * 8)]
	ldr	x7, [x3, #(2 * 8)]
	ldr	x6, [x3, #(1 * 8)]
	ldr	x5, [x3, #(0 * 8)]

	adr	x26, 1f
	add	x26, x26, x25, lsl #2
	br	x26
1:
	msr	dbgwvr15_el1, x20
	msr	dbgwvr14_el1, x19
	msr	dbgwvr13_el1, x18
	msr	dbgwvr12_el1, x17
	msr	dbgwvr11_el1, x16
	msr	dbgwvr10_el1, x15
	msr	dbgwvr9_el1, x14
	msr	dbgwvr8_el1, x13
	msr	dbgwvr7_el1, x12
	msr	dbgwvr6_el1, x11
	msr	dbgwvr5_el1, x10
	msr	dbgwvr4_el1, x9
	msr	dbgwvr3_el1, x8
	msr	dbgwvr2_el1, x7
	msr	dbgwvr1_el1, x6
	msr	dbgwvr0_el1, x5

	ldr	x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
	msr	mdccint_el1, x21
.endm

.macro skip_32bit_state tmp, target
	// Skip 32bit state if not needed
	mrs	\tmp, hcr_el2
	tbnz	\tmp, #HCR_RW_SHIFT, \target
.endm

.macro skip_tee_state tmp, target
	// Skip ThumbEE state if not needed
	mrs	\tmp, id_pfr0_el1
	tbz	\tmp, #12, \target
.endm

.macro skip_debug_state tmp, target
	ldr	\tmp, [x0, #VCPU_DEBUG_FLAGS]
	tbz	\tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target
.endm

.macro compute_debug_state target
	// Compute debug state: If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY
	// is set, we do a full save/restore cycle and disable trapping.
	add	x25, x0, #VCPU_CONTEXT

	// Check the state of MDSCR_EL1
	ldr	x25, [x25, #CPU_SYSREG_OFFSET(MDSCR_EL1)]
	and	x26, x25, #DBG_MDSCR_KDE
	and	x25, x25, #DBG_MDSCR_MDE
	adds	xzr, x25, x26
	b.eq	9998f		// Nothing to see there

	// If any interesting bits were set, we must set the flag
	mov	x26, #KVM_ARM64_DEBUG_DIRTY
	str	x26, [x0, #VCPU_DEBUG_FLAGS]
	b	9999f		// Don't skip restore

9998:
	// Otherwise load the flags from memory in case we recently
	// trapped
	skip_debug_state x25, \target
9999:
.endm

.macro save_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	mrs	x4, spsr_abt
	mrs	x5, spsr_und
	mrs	x6, spsr_irq
	mrs	x7, spsr_fiq
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
	mrs	x4, dacr32_el2
	mrs	x5, ifsr32_el2
	mrs	x6, fpexc32_el2
	stp	x4, x5, [x3]
	str	x6, [x3, #16]

	skip_debug_state x8, 2f
	mrs	x7, dbgvcr32_el2
	str	x7, [x3, #24]
2:
	skip_tee_state x8, 1f

	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
	mrs	x4, teecr32_el1
	mrs	x5, teehbr32_el1
	stp	x4, x5, [x3]
1:
.endm

.macro restore_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	msr	spsr_abt, x4
	msr	spsr_und, x5
	msr	spsr_irq, x6
	msr	spsr_fiq, x7

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
	ldp	x4, x5, [x3]
	ldr	x6, [x3, #16]
	msr	dacr32_el2, x4
	msr	ifsr32_el2, x5
	msr	fpexc32_el2, x6

	skip_debug_state x8, 2f
	ldr	x7, [x3, #24]
	msr	dbgvcr32_el2, x7
2:
	skip_tee_state x8, 1f

	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
	ldp	x4, x5, [x3]
	msr	teecr32_el1, x4
	msr	teehbr32_el1, x5
1:
.endm

.macro activate_traps
	ldr     x2, [x0, #VCPU_HCR_EL2]
	msr     hcr_el2, x2
	mov	x2, #CPTR_EL2_TTA
	msr	cptr_el2, x2

	mov	x2, #(1 << 15)	// Trap CP15 Cr=15
	msr	hstr_el2, x2

	mrs	x2, mdcr_el2
	and	x2, x2, #MDCR_EL2_HPMN_MASK
	orr	x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
	orr	x2, x2, #(MDCR_EL2_TDRA | MDCR_EL2_TDOSA)

	// Check for KVM_ARM64_DEBUG_DIRTY, and set debug to trap
	// if not dirty.
	ldr	x3, [x0, #VCPU_DEBUG_FLAGS]
	tbnz	x3, #KVM_ARM64_DEBUG_DIRTY_SHIFT, 1f
	orr	x2, x2, #MDCR_EL2_TDA
1:
	msr	mdcr_el2, x2
.endm

.macro deactivate_traps
	mov	x2, #HCR_RW
	msr	hcr_el2, x2
	msr	cptr_el2, xzr
	msr	hstr_el2, xzr

	mrs	x2, mdcr_el2
	and	x2, x2, #MDCR_EL2_HPMN_MASK
	msr	mdcr_el2, x2
.endm

.macro activate_vm
	ldr	x1, [x0, #VCPU_KVM]
	kern_hyp_va	x1
	ldr	x2, [x1, #KVM_VTTBR]
	msr	vttbr_el2, x2
.endm

.macro deactivate_vm
	msr	vttbr_el2, xzr
.endm

/*
 * Call into the vgic backend for state saving
 */
.macro save_vgic_state
	adr	x24, __vgic_sr_vectors
	ldr	x24, [x24, VGIC_SAVE_FN]
	kern_hyp_va	x24
	blr	x24
	mrs	x24, hcr_el2
	mov	x25, #HCR_INT_OVERRIDE
	neg	x25, x25
	and	x24, x24, x25
	msr	hcr_el2, x24
.endm

/*
 * Call into the vgic backend for state restoring
 */
.macro restore_vgic_state
	mrs	x24, hcr_el2
	ldr	x25, [x0, #VCPU_IRQ_LINES]
	orr	x24, x24, #HCR_INT_OVERRIDE
	orr	x24, x24, x25
	msr	hcr_el2, x24
	adr	x24, __vgic_sr_vectors
	ldr	x24, [x24, #VGIC_RESTORE_FN]
	kern_hyp_va	x24
	blr	x24
.endm

.macro save_timer_state
	// x0: vcpu pointer
	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va x2
	ldr	w3, [x2, #KVM_TIMER_ENABLED]
	cbz	w3, 1f

	mrs	x3, cntv_ctl_el0
	and	x3, x3, #3
	str	w3, [x0, #VCPU_TIMER_CNTV_CTL]

	isb

	mrs	x3, cntv_cval_el0
	str	x3, [x0, #VCPU_TIMER_CNTV_CVAL]

1:
	// Disable the virtual timer
	msr	cntv_ctl_el0, xzr

	// Allow physical timer/counter access for the host
	mrs	x2, cnthctl_el2
	orr	x2, x2, #3
	msr	cnthctl_el2, x2

	// Clear cntvoff for the host
	msr	cntvoff_el2, xzr
.endm

.macro restore_timer_state
	// x0: vcpu pointer
	// Disallow physical timer access for the guest
	// Physical counter access is allowed
	mrs	x2, cnthctl_el2
	orr	x2, x2, #1
	bic	x2, x2, #2
	msr	cnthctl_el2, x2

	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va x2
	ldr	w3, [x2, #KVM_TIMER_ENABLED]
	cbz	w3, 1f

	ldr	x3, [x2, #KVM_TIMER_CNTVOFF]
	msr	cntvoff_el2, x3
	ldr	x2, [x0, #VCPU_TIMER_CNTV_CVAL]
	msr	cntv_cval_el0, x2
	isb

	ldr	w2, [x0, #VCPU_TIMER_CNTV_CTL]
	and	x2, x2, #3
	msr	cntv_ctl_el0, x2
1:
.endm

__save_sysregs:
	save_sysregs
	ret

__restore_sysregs:
	restore_sysregs
	ret

__save_debug:
	save_debug
	ret

__restore_debug:
	restore_debug
	ret

__save_fpsimd:
	save_fpsimd
	ret

__restore_fpsimd:
	restore_fpsimd
	ret

/*
 * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 *
 * This is the world switch. The first half of the function
 * deals with entering the guest, and anything from __kvm_vcpu_return
 * to the end of the function deals with reentering the host.
 * On the enter path, only x0 (vcpu pointer) must be preserved until
 * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
 * code) must both be preserved until the epilogue.
 * In both cases, x2 points to the CPU context we're saving/restoring from/to.
 */
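
/*
 * The usual entry point is expected to be kvm_call_hyp(__kvm_vcpu_run,
 * vcpu) from the common vcpu run loop, which lands here through the
 * HVC path in el1_sync below (the exact call site varies by kernel
 * version).
 */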
ENTRY(__kvm_vcpu_run)
	kern_hyp_va	x0
	msr	tpidr_el2, x0	// Save the vcpu register

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	save_host_regs
	bl __save_fpsimd
	bl __save_sysregs

	compute_debug_state 1f
	bl	__save_debug
1:
	activate_traps
	activate_vm

	restore_vgic_state
	restore_timer_state

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	// We must restore the 32-bit state before the sysregs, thanks
	// to Cortex-A57 erratum #852523.
	restore_guest_32bit_state
	bl __restore_sysregs
	bl __restore_fpsimd

	skip_debug_state x3, 1f
	bl	__restore_debug
1:
	restore_guest_regs

	// That's it, no more messing around.
	eret

__kvm_vcpu_return:
	// Assume x0 is the vcpu pointer, x1 the return code
	// Guest's x0-x3 are on the stack

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	save_guest_regs
	bl __save_fpsimd
	bl __save_sysregs

	skip_debug_state x3, 1f
	bl	__save_debug
1:
	save_guest_32bit_state

	save_timer_state
	save_vgic_state

	deactivate_traps
	deactivate_vm

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	bl __restore_sysregs
	bl __restore_fpsimd

	skip_debug_state x3, 1f
	// Clear the dirty flag for the next run, as all the state has
	// already been saved. Note that we nuke the whole 64bit word.
	// If we ever add more flags, we'll have to be more careful...
	str	xzr, [x0, #VCPU_DEBUG_FLAGS]
	bl	__restore_debug
1:
	restore_host_regs

	mov	x0, x1
	ret
END(__kvm_vcpu_run)

// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
ENTRY(__kvm_tlb_flush_vmid_ipa)
	dsb	ishst

	kern_hyp_va	x0
	ldr	x2, [x0, #KVM_VTTBR]
	msr	vttbr_el2, x2
	isb

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	lsr	x1, x1, #12
	tlbi	ipas2e1is, x1
	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb	ish
	tlbi	vmalle1is
	dsb	ish
	isb

	msr	vttbr_el2, xzr
	ret
ENDPROC(__kvm_tlb_flush_vmid_ipa)
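
/*
 * Illustrative call site (a sketch, not part of this file): the
 * Stage-2 MMU code is expected to invoke this as
 * kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa), i.e. x0 = kvm
 * pointer, x1 = IPA, both forwarded through the HVC path.
 */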

/**
 * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs
 * @kvm: pointer to kvm structure
 *
 * Invalidates all Stage 1 and 2 TLB entries for current VMID.
 */
ENTRY(__kvm_tlb_flush_vmid)
	dsb     ishst

	kern_hyp_va     x0
	ldr     x2, [x0, #KVM_VTTBR]
	msr     vttbr_el2, x2
	isb

	tlbi    vmalls12e1is
	dsb     ish
	isb

	msr     vttbr_el2, xzr
	ret
ENDPROC(__kvm_tlb_flush_vmid)

ENTRY(__kvm_flush_vm_context)
	dsb	ishst
	tlbi	alle1is
	ic	ialluis
	dsb	ish
	ret
ENDPROC(__kvm_flush_vm_context)

	// struct vgic_sr_vectors __vgic_sr_vectors;
	.align 3
ENTRY(__vgic_sr_vectors)
	.skip	VGIC_SR_VECTOR_SZ
ENDPROC(__vgic_sr_vectors)

__kvm_hyp_panic:
	// Guess the context by looking at VTTBR:
	// If zero, then we're already a host.
	// Otherwise restore a minimal host context before panicking.
	mrs	x0, vttbr_el2
	cbz	x0, 1f

	mrs	x0, tpidr_el2

	deactivate_traps
	deactivate_vm

	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	bl __restore_sysregs

1:	adr	x0, __hyp_panic_str
	adr	x1, 2f
	ldp	x2, x3, [x1]
	sub	x0, x0, x2
	add	x0, x0, x3
	mrs	x1, spsr_el2
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, far_el2
	mrs	x5, hpfar_el2
	mrs	x6, par_el1
	mrs	x7, tpidr_el2

	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret

	.align	3
2:	.quad	HYP_PAGE_OFFSET
	.quad	PAGE_OFFSET
ENDPROC(__kvm_hyp_panic)

__hyp_panic_str:
	.ascii	"HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"

	.align	2

/*
 * u64 kvm_call_hyp(void *hypfn, ...);
 *
 * This is not really a variadic function in the classic C-way and care must
 * be taken when calling this to ensure parameters are passed in registers
 * only, since the stack will change between the caller and the callee.
 *
 * Call the function with the first argument containing a pointer to the
 * function you wish to call in Hyp mode, and subsequent arguments will be
 * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
 * function pointer can be passed).  The function being called must be mapped
 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  The return value
 * is passed back in x0.
 *
 * A function pointer with a value of 0 has a special meaning, and is
 * used to implement __hyp_get_vectors in the same way as in
 * arch/arm64/kernel/hyp_stub.S.
 */
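
/*
 * Illustrative usage (a sketch, not part of this file):
 *
 *	ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 *
 * The hypfn pointer arrives in x0, is translated with kern_hyp_va in
 * the el1_sync handler below, and is then branched to with the
 * remaining arguments shuffled down into x0-x2.
 */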
ENTRY(kvm_call_hyp)
	hvc	#0
	ret
ENDPROC(kvm_call_hyp)

.macro invalid_vector	label, target
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2t_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_error_invalid, __kvm_hyp_panic
	invalid_vector	el2h_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2h_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_error_invalid, __kvm_hyp_panic
	invalid_vector	el1_sync_invalid, __kvm_hyp_panic
	invalid_vector	el1_irq_invalid, __kvm_hyp_panic
	invalid_vector	el1_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el1_error_invalid, __kvm_hyp_panic

el1_sync:					// Guest trapped into EL2
	push	x0, x1
	push	x2, x3

	mrs	x1, esr_el2
	lsr	x2, x1, #ESR_ELx_EC_SHIFT

	cmp	x2, #ESR_ELx_EC_HVC64
	b.ne	el1_trap

	mrs	x3, vttbr_el2			// If vttbr is valid, the 64bit guest
	cbnz	x3, el1_trap			// called HVC

	/* Here, we're pretty sure the host called HVC. */
	pop	x2, x3
	pop	x0, x1

	/* Check for __hyp_get_vectors */
	cbnz	x0, 1f
	mrs	x0, vbar_el2
	b	2f

1:	push	lr, xzr

	/*
	 * Compute the function address in EL2, and shuffle the parameters.
	 */
	kern_hyp_va	x0
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr

	pop	lr, xzr
2:	eret

el1_trap:
	/*
	 * x1: ESR
	 * x2: ESR_EC
	 */
	cmp	x2, #ESR_ELx_EC_DABT_LOW
	mov	x0, #ESR_ELx_EC_IABT_LOW
	ccmp	x2, x0, #4, ne
	b.ne	1f		// Not an abort we care about

	/* This is an abort. Check for permission fault */
	and	x2, x1, #ESR_ELx_FSC_TYPE
	cmp	x2, #FSC_PERM
	b.ne	1f		// Not a permission fault

	/*
	 * Check for Stage-1 page table walk, which is guaranteed
	 * to give a valid HPFAR_EL2.
	 */
	tbnz	x1, #7, 1f	// S1PTW is set

	/* Preserve PAR_EL1 */
	mrs	x3, par_el1
	push	x3, xzr

	/*
	 * Permission fault, HPFAR_EL2 is invalid.
	 * Resolve the IPA the hard way using the guest VA.
	 * Stage-1 translation already validated the memory access rights.
	 * As such, we can use the EL1 translation regime, and don't have
	 * to distinguish between EL0 and EL1 access.
	 */
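	/*
	 * Bit layout assumed from the ARMv8 ARM for the conversion
	 * below: a successful AT S1E1R leaves the stage-1 output
	 * address (the IPA here) in PAR_EL1[47:12], while HPFAR_EL2
	 * carries IPA[47:12] in bits [39:4]. Hence the ubfx #12, #36
	 * followed by lsl #4 further down.
	 */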
	mrs	x2, far_el2
	at	s1e1r, x2
	isb

	/* Read result */
	mrs	x3, par_el1
	pop	x0, xzr			// Restore PAR_EL1 from the stack
	msr	par_el1, x0
	tbnz	x3, #0, 3f		// Bail out if we failed the translation
	ubfx	x3, x3, #12, #36	// Extract IPA
	lsl	x3, x3, #4		// and present it like HPFAR
	b	2f

1:	mrs	x3, hpfar_el2
	mrs	x2, far_el2

2:	mrs	x0, tpidr_el2
	str	w1, [x0, #VCPU_ESR_EL2]
	str	x2, [x0, #VCPU_FAR_EL2]
	str	x3, [x0, #VCPU_HPFAR_EL2]

	mov	x1, #ARM_EXCEPTION_TRAP
	b	__kvm_vcpu_return

	/*
	 * Translation failed. Just return to the guest and
	 * let it fault again. Another CPU is probably playing
	 * behind our back.
	 */
3:	pop	x2, x3
	pop	x0, x1

	eret

el1_irq:
	push	x0, x1
	push	x2, x3
	mrs	x0, tpidr_el2
	mov	x1, #ARM_EXCEPTION_IRQ
	b	__kvm_vcpu_return

	.ltorg

	.align 11

ENTRY(__kvm_hyp_vector)
	ventry	el2t_sync_invalid		// Synchronous EL2t
	ventry	el2t_irq_invalid		// IRQ EL2t
	ventry	el2t_fiq_invalid		// FIQ EL2t
	ventry	el2t_error_invalid		// Error EL2t

	ventry	el2h_sync_invalid		// Synchronous EL2h
	ventry	el2h_irq_invalid		// IRQ EL2h
	ventry	el2h_fiq_invalid		// FIQ EL2h
	ventry	el2h_error_invalid		// Error EL2h

	ventry	el1_sync			// Synchronous 64-bit EL1
	ventry	el1_irq				// IRQ 64-bit EL1
	ventry	el1_fiq_invalid			// FIQ 64-bit EL1
	ventry	el1_error_invalid		// Error 64-bit EL1

	ventry	el1_sync			// Synchronous 32-bit EL1
	ventry	el1_irq				// IRQ 32-bit EL1
	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
	ventry	el1_error_invalid		// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)

	.popsection