1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License.  See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Main entry point for the guest, exception handling.
7 *
8 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
10 */
11
12#include <asm/asm.h>
13#include <asm/asmmacro.h>
14#include <asm/regdef.h>
15#include <asm/mipsregs.h>
16#include <asm/stackframe.h>
17#include <asm/asm-offsets.h>
18
/* Helper macros: plain label pass-through and mips32_ name prefixing */
#define _C_LABEL(x)     x
#define MIPSX(name)     mips32_ ## name
/* o32 minimal call frame (16B arg save area + padding), kept 8-byte aligned */
#define CALLFRAME_SIZ   32

/*
 * VECTOR
 *  exception vector entrypoint
 */
#define VECTOR(x, regmask)      \
    .ent    _C_LABEL(x),0;      \
    EXPORT(x);

#define VECTOR_END(x)      \
    EXPORT(x);

/* Overload, Danger Will Robinson!! */
/*
 * The BadVAddr and EPC slots of the host pt_regs frame are unused on this
 * path, so they are reused to stash the host ASID and host DDATA_LO value.
 */
#define PT_HOST_ASID        PT_BVADDR
#define PT_HOST_USERLOCAL   PT_EPC

/* CP0 register, select pairs not provided by mipsregs.h */
#define CP0_DDATA_LO        $28,3
#define CP0_CONFIG3         $16,3
#define CP0_CONFIG5         $16,5
#define CP0_EBASE           $15,1

#define CP0_INTCTL          $12,1
#define CP0_SRSCTL          $12,2
#define CP0_SRSMAP          $12,3
#define CP0_HWRENA          $7,0
53
54/*
55 * __kvm_mips_vcpu_run: entry point to the guest
56 * a0: run
57 * a1: vcpu
58 */
59	.set	noreorder
60	.set	noat
61
62FEXPORT(__kvm_mips_vcpu_run)
63	/* k0/k1 not being used in host kernel context */
64	INT_ADDIU k1, sp, -PT_SIZE
65	LONG_S	$0, PT_R0(k1)
66	LONG_S	$1, PT_R1(k1)
67	LONG_S	$2, PT_R2(k1)
68	LONG_S	$3, PT_R3(k1)
69
70	LONG_S	$4, PT_R4(k1)
71	LONG_S	$5, PT_R5(k1)
72	LONG_S	$6, PT_R6(k1)
73	LONG_S	$7, PT_R7(k1)
74
75	LONG_S	$8,  PT_R8(k1)
76	LONG_S	$9,  PT_R9(k1)
77	LONG_S	$10, PT_R10(k1)
78	LONG_S	$11, PT_R11(k1)
79	LONG_S	$12, PT_R12(k1)
80	LONG_S	$13, PT_R13(k1)
81	LONG_S	$14, PT_R14(k1)
82	LONG_S	$15, PT_R15(k1)
83	LONG_S	$16, PT_R16(k1)
84	LONG_S	$17, PT_R17(k1)
85
86	LONG_S	$18, PT_R18(k1)
87	LONG_S	$19, PT_R19(k1)
88	LONG_S	$20, PT_R20(k1)
89	LONG_S	$21, PT_R21(k1)
90	LONG_S	$22, PT_R22(k1)
91	LONG_S	$23, PT_R23(k1)
92	LONG_S	$24, PT_R24(k1)
93	LONG_S	$25, PT_R25(k1)
94
95	/*
96	 * XXXKYMA k0/k1 not saved, not being used if we got here through
97	 * an ioctl()
98	 */
99
100	LONG_S	$28, PT_R28(k1)
101	LONG_S	$29, PT_R29(k1)
102	LONG_S	$30, PT_R30(k1)
103	LONG_S	$31, PT_R31(k1)
104
105	/* Save hi/lo */
106	mflo	v0
107	LONG_S	v0, PT_LO(k1)
108	mfhi	v1
109	LONG_S	v1, PT_HI(k1)
110
111	/* Save host status */
112	mfc0	v0, CP0_STATUS
113	LONG_S	v0, PT_STATUS(k1)
114
115	/* Save host ASID, shove it into the BVADDR location */
116	mfc0	v1, CP0_ENTRYHI
117	andi	v1, 0xff
118	LONG_S	v1, PT_HOST_ASID(k1)
119
120	/* Save DDATA_LO, will be used to store pointer to vcpu */
121	mfc0	v1, CP0_DDATA_LO
122	LONG_S	v1, PT_HOST_USERLOCAL(k1)
123
124	/* DDATA_LO has pointer to vcpu */
125	mtc0	a1, CP0_DDATA_LO
126
127	/* Offset into vcpu->arch */
128	INT_ADDIU k1, a1, VCPU_HOST_ARCH
129
130	/*
131	 * Save the host stack to VCPU, used for exception processing
132	 * when we exit from the Guest
133	 */
134	LONG_S	sp, VCPU_HOST_STACK(k1)
135
136	/* Save the kernel gp as well */
137	LONG_S	gp, VCPU_HOST_GP(k1)
138
139	/*
140	 * Setup status register for running the guest in UM, interrupts
141	 * are disabled
142	 */
143	li	k0, (ST0_EXL | KSU_USER | ST0_BEV)
144	mtc0	k0, CP0_STATUS
145	ehb
146
147	/* load up the new EBASE */
148	LONG_L	k0, VCPU_GUEST_EBASE(k1)
149	mtc0	k0, CP0_EBASE
150
151	/*
152	 * Now that the new EBASE has been loaded, unset BEV, set
153	 * interrupt mask as it was but make sure that timer interrupts
154	 * are enabled
155	 */
156	li	k0, (ST0_EXL | KSU_USER | ST0_IE)
157	andi	v0, v0, ST0_IM
158	or	k0, k0, v0
159	mtc0	k0, CP0_STATUS
160	ehb
161
162	/* Set Guest EPC */
163	LONG_L	t0, VCPU_PC(k1)
164	mtc0	t0, CP0_EPC
165
166FEXPORT(__kvm_mips_load_asid)
167	/* Set the ASID for the Guest Kernel */
168	PTR_L	t0, VCPU_COP0(k1)
169	LONG_L	t0, COP0_STATUS(t0)
170	andi	t0, KSU_USER | ST0_ERL | ST0_EXL
171	xori	t0, KSU_USER
172	bnez	t0, 1f		/* If kernel */
173	 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
174	INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID    /* else user */
1751:
176	/* t1: contains the base of the ASID array, need to get the cpu id */
177	LONG_L	t2, TI_CPU($28)             /* smp_processor_id */
178	INT_SLL	t2, t2, 2                   /* x4 */
179	REG_ADDU t3, t1, t2
180	LONG_L	k0, (t3)
181	andi	k0, k0, 0xff
182	mtc0	k0, CP0_ENTRYHI
183	ehb
184
185	/* Disable RDHWR access */
186	mtc0	zero, CP0_HWRENA
187
188	/* Now load up the Guest Context from VCPU */
189	LONG_L	$1, VCPU_R1(k1)
190	LONG_L	$2, VCPU_R2(k1)
191	LONG_L	$3, VCPU_R3(k1)
192
193	LONG_L	$4, VCPU_R4(k1)
194	LONG_L	$5, VCPU_R5(k1)
195	LONG_L	$6, VCPU_R6(k1)
196	LONG_L	$7, VCPU_R7(k1)
197
198	LONG_L	$8, VCPU_R8(k1)
199	LONG_L	$9, VCPU_R9(k1)
200	LONG_L	$10, VCPU_R10(k1)
201	LONG_L	$11, VCPU_R11(k1)
202	LONG_L	$12, VCPU_R12(k1)
203	LONG_L	$13, VCPU_R13(k1)
204	LONG_L	$14, VCPU_R14(k1)
205	LONG_L	$15, VCPU_R15(k1)
206	LONG_L	$16, VCPU_R16(k1)
207	LONG_L	$17, VCPU_R17(k1)
208	LONG_L	$18, VCPU_R18(k1)
209	LONG_L	$19, VCPU_R19(k1)
210	LONG_L	$20, VCPU_R20(k1)
211	LONG_L	$21, VCPU_R21(k1)
212	LONG_L	$22, VCPU_R22(k1)
213	LONG_L	$23, VCPU_R23(k1)
214	LONG_L	$24, VCPU_R24(k1)
215	LONG_L	$25, VCPU_R25(k1)
216
217	/* k0/k1 loaded up later */
218
219	LONG_L	$28, VCPU_R28(k1)
220	LONG_L	$29, VCPU_R29(k1)
221	LONG_L	$30, VCPU_R30(k1)
222	LONG_L	$31, VCPU_R31(k1)
223
224	/* Restore hi/lo */
225	LONG_L	k0, VCPU_LO(k1)
226	mtlo	k0
227
228	LONG_L	k0, VCPU_HI(k1)
229	mthi	k0
230
231FEXPORT(__kvm_mips_load_k0k1)
232	/* Restore the guest's k0/k1 registers */
233	LONG_L	k0, VCPU_R26(k1)
234	LONG_L	k1, VCPU_R27(k1)
235
236	/* Jump to guest */
237	eret
238
VECTOR(MIPSX(exception), unknown)
/* Find out what mode we came from and jump to the proper handler. */
/*
 * Tiny trampoline installed at the guest exception vector.  Only k0/k1 may
 * be touched here: k0 is parked in ErrorEPC, k1 is spilled into the EBASE
 * page at a fixed offset (0x3000); the real handler lives at EBASE+0x2000.
 */
	mtc0	k0, CP0_ERROREPC	#01: Save guest k0
	ehb				#02:

	mfc0	k0, CP0_EBASE		#02: Get EBASE
	INT_SRL	k0, k0, 10		#03: Get rid of CPUNum
	INT_SLL	k0, k0, 10		#04
	LONG_S	k1, 0x3000(k0)		#05: Save k1 @ offset 0x3000
	INT_ADDIU k0, k0, 0x2000	#06: Exception handler is
					#    installed @ offset 0x2000
	j	k0			#07: jump to the function
	 nop				#08: branch delay slot
VECTOR_END(MIPSX(exceptionEnd))
.end MIPSX(exception)
254
255/*
256 * Generic Guest exception handler. We end up here when the guest
257 * does something that causes a trap to kernel mode.
258 */
259NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
260	/* Get the VCPU pointer from DDTATA_LO */
261	mfc0	k1, CP0_DDATA_LO
262	INT_ADDIU k1, k1, VCPU_HOST_ARCH
263
264	/* Start saving Guest context to VCPU */
265	LONG_S	$0, VCPU_R0(k1)
266	LONG_S	$1, VCPU_R1(k1)
267	LONG_S	$2, VCPU_R2(k1)
268	LONG_S	$3, VCPU_R3(k1)
269	LONG_S	$4, VCPU_R4(k1)
270	LONG_S	$5, VCPU_R5(k1)
271	LONG_S	$6, VCPU_R6(k1)
272	LONG_S	$7, VCPU_R7(k1)
273	LONG_S	$8, VCPU_R8(k1)
274	LONG_S	$9, VCPU_R9(k1)
275	LONG_S	$10, VCPU_R10(k1)
276	LONG_S	$11, VCPU_R11(k1)
277	LONG_S	$12, VCPU_R12(k1)
278	LONG_S	$13, VCPU_R13(k1)
279	LONG_S	$14, VCPU_R14(k1)
280	LONG_S	$15, VCPU_R15(k1)
281	LONG_S	$16, VCPU_R16(k1)
282	LONG_S	$17, VCPU_R17(k1)
283	LONG_S	$18, VCPU_R18(k1)
284	LONG_S	$19, VCPU_R19(k1)
285	LONG_S	$20, VCPU_R20(k1)
286	LONG_S	$21, VCPU_R21(k1)
287	LONG_S	$22, VCPU_R22(k1)
288	LONG_S	$23, VCPU_R23(k1)
289	LONG_S	$24, VCPU_R24(k1)
290	LONG_S	$25, VCPU_R25(k1)
291
292	/* Guest k0/k1 saved later */
293
294	LONG_S	$28, VCPU_R28(k1)
295	LONG_S	$29, VCPU_R29(k1)
296	LONG_S	$30, VCPU_R30(k1)
297	LONG_S	$31, VCPU_R31(k1)
298
299	/* We need to save hi/lo and restore them on the way out */
300	mfhi	t0
301	LONG_S	t0, VCPU_HI(k1)
302
303	mflo	t0
304	LONG_S	t0, VCPU_LO(k1)
305
306	/* Finally save guest k0/k1 to VCPU */
307	mfc0	t0, CP0_ERROREPC
308	LONG_S	t0, VCPU_R26(k1)
309
310	/* Get GUEST k1 and save it in VCPU */
311	PTR_LI	t1, ~0x2ff
312	mfc0	t0, CP0_EBASE
313	and	t0, t0, t1
314	LONG_L	t0, 0x3000(t0)
315	LONG_S	t0, VCPU_R27(k1)
316
317	/* Now that context has been saved, we can use other registers */
318
319	/* Restore vcpu */
320	mfc0	a1, CP0_DDATA_LO
321	move	s1, a1
322
323	/* Restore run (vcpu->run) */
324	LONG_L	a0, VCPU_RUN(a1)
325	/* Save pointer to run in s0, will be saved by the compiler */
326	move	s0, a0
327
328	/*
329	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to
330	 * process the exception
331	 */
332	mfc0	k0,CP0_EPC
333	LONG_S	k0, VCPU_PC(k1)
334
335	mfc0	k0, CP0_BADVADDR
336	LONG_S	k0, VCPU_HOST_CP0_BADVADDR(k1)
337
338	mfc0	k0, CP0_CAUSE
339	LONG_S	k0, VCPU_HOST_CP0_CAUSE(k1)
340
341	mfc0	k0, CP0_ENTRYHI
342	LONG_S	k0, VCPU_HOST_ENTRYHI(k1)
343
344	/* Now restore the host state just enough to run the handlers */
345
346	/* Swtich EBASE to the one used by Linux */
347	/* load up the host EBASE */
348	mfc0	v0, CP0_STATUS
349
350	.set	at
351	or	k0, v0, ST0_BEV
352	.set	noat
353
354	mtc0	k0, CP0_STATUS
355	ehb
356
357	LONG_L	k0, VCPU_HOST_EBASE(k1)
358	mtc0	k0,CP0_EBASE
359
360	/*
361	 * If FPU is enabled, save FCR31 and clear it so that later ctc1's don't
362	 * trigger FPE for pending exceptions.
363	 */
364	.set	at
365	and	v1, v0, ST0_CU1
366	beqz	v1, 1f
367	 nop
368	.set	push
369	SET_HARDFLOAT
370	cfc1	t0, fcr31
371	sw	t0, VCPU_FCR31(k1)
372	ctc1	zero,fcr31
373	.set	pop
374	.set	noat
3751:
376
377#ifdef CONFIG_CPU_HAS_MSA
378	/*
379	 * If MSA is enabled, save MSACSR and clear it so that later
380	 * instructions don't trigger MSAFPE for pending exceptions.
381	 */
382	mfc0	t0, CP0_CONFIG3
383	ext	t0, t0, 28, 1 /* MIPS_CONF3_MSAP */
384	beqz	t0, 1f
385	 nop
386	mfc0	t0, CP0_CONFIG5
387	ext	t0, t0, 27, 1 /* MIPS_CONF5_MSAEN */
388	beqz	t0, 1f
389	 nop
390	_cfcmsa	t0, MSA_CSR
391	sw	t0, VCPU_MSA_CSR(k1)
392	_ctcmsa	MSA_CSR, zero
3931:
394#endif
395
396	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
397	.set	at
398	and	v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
399	or	v0, v0, ST0_CU0
400	.set	noat
401	mtc0	v0, CP0_STATUS
402	ehb
403
404	/* Load up host GP */
405	LONG_L	gp, VCPU_HOST_GP(k1)
406
407	/* Need a stack before we can jump to "C" */
408	LONG_L	sp, VCPU_HOST_STACK(k1)
409
410	/* Saved host state */
411	INT_ADDIU sp, sp, -PT_SIZE
412
413	/*
414	 * XXXKYMA do we need to load the host ASID, maybe not because the
415	 * kernel entries are marked GLOBAL, need to verify
416	 */
417
418	/* Restore host DDATA_LO */
419	LONG_L	k0, PT_HOST_USERLOCAL(sp)
420	mtc0	k0, CP0_DDATA_LO
421
422	/* Restore RDHWR access */
423	PTR_LI	k0, 0x2000000F
424	mtc0	k0, CP0_HWRENA
425
426	/* Jump to handler */
427FEXPORT(__kvm_mips_jump_to_handler)
428	/*
429	 * XXXKYMA: not sure if this is safe, how large is the stack??
430	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
431	 * with this in the kernel
432	 */
433	PTR_LA	t9, kvm_mips_handle_exit
434	jalr.hb	t9
435	 INT_ADDIU sp, sp, -CALLFRAME_SIZ           /* BD Slot */
436
437	/* Return from handler Make sure interrupts are disabled */
438	di
439	ehb
440
441	/*
442	 * XXXKYMA: k0/k1 could have been blown away if we processed
443	 * an exception while we were handling the exception from the
444	 * guest, reload k1
445	 */
446
447	move	k1, s1
448	INT_ADDIU k1, k1, VCPU_HOST_ARCH
449
450	/*
451	 * Check return value, should tell us if we are returning to the
452	 * host (handle I/O etc)or resuming the guest
453	 */
454	andi	t0, v0, RESUME_HOST
455	bnez	t0, __kvm_mips_return_to_host
456	 nop
457
__kvm_mips_return_to_guest:
	/* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
	mtc0	s1, CP0_DDATA_LO

	/* Load up the Guest EBASE to minimize the window where BEV is set */
	LONG_L	t0, VCPU_GUEST_EBASE(k1)

	/* Switch EBASE back to the one used by KVM */
	mfc0	v1, CP0_STATUS
	.set	at
	or	k0, v1, ST0_BEV
	.set	noat
	mtc0	k0, CP0_STATUS
	ehb
	mtc0	t0, CP0_EBASE

	/* Setup status register for running guest in UM */
	.set	at
	or	v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
	and	v1, v1, ~(ST0_CU0 | ST0_MX)
	.set	noat
	mtc0	v1, CP0_STATUS
	ehb

	/* Set Guest EPC; eret below will resume the guest here */
	LONG_L	t0, VCPU_PC(k1)
	mtc0	t0, CP0_EPC

	/* Set the ASID for the Guest Kernel */
	/* Guest mode (kernel vs user) is derived from the guest COP0 Status */
	PTR_L	t0, VCPU_COP0(k1)
	LONG_L	t0, COP0_STATUS(t0)
	andi	t0, KSU_USER | ST0_ERL | ST0_EXL
	xori	t0, KSU_USER
	bnez	t0, 1f		/* If kernel */
	 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
	INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID    /* else user */
1:
	/* t1: contains the base of the ASID array, need to get the cpu id  */
	LONG_L	t2, TI_CPU($28)		/* smp_processor_id */
	INT_SLL	t2, t2, 2		/* x4 */
	REG_ADDU t3, t1, t2
	LONG_L	k0, (t3)
	andi	k0, k0, 0xff
	mtc0	k0,CP0_ENTRYHI
	ehb

	/* Disable RDHWR access so the guest cannot read host HW registers */
	mtc0    zero,  CP0_HWRENA

	/* load the guest context from VCPU and return */
	LONG_L	$0, VCPU_R0(k1)
	LONG_L	$1, VCPU_R1(k1)
	LONG_L	$2, VCPU_R2(k1)
	LONG_L	$3, VCPU_R3(k1)
	LONG_L	$4, VCPU_R4(k1)
	LONG_L	$5, VCPU_R5(k1)
	LONG_L	$6, VCPU_R6(k1)
	LONG_L	$7, VCPU_R7(k1)
	LONG_L	$8, VCPU_R8(k1)
	LONG_L	$9, VCPU_R9(k1)
	LONG_L	$10, VCPU_R10(k1)
	LONG_L	$11, VCPU_R11(k1)
	LONG_L	$12, VCPU_R12(k1)
	LONG_L	$13, VCPU_R13(k1)
	LONG_L	$14, VCPU_R14(k1)
	LONG_L	$15, VCPU_R15(k1)
	LONG_L	$16, VCPU_R16(k1)
	LONG_L	$17, VCPU_R17(k1)
	LONG_L	$18, VCPU_R18(k1)
	LONG_L	$19, VCPU_R19(k1)
	LONG_L	$20, VCPU_R20(k1)
	LONG_L	$21, VCPU_R21(k1)
	LONG_L	$22, VCPU_R22(k1)
	LONG_L	$23, VCPU_R23(k1)
	LONG_L	$24, VCPU_R24(k1)
	LONG_L	$25, VCPU_R25(k1)

	/* k0/k1 loaded later */
	LONG_L	$28, VCPU_R28(k1)
	LONG_L	$29, VCPU_R29(k1)
	LONG_L	$30, VCPU_R30(k1)
	LONG_L	$31, VCPU_R31(k1)

FEXPORT(__kvm_mips_skip_guest_restore)
	LONG_L	k0, VCPU_HI(k1)
	mthi	k0

	LONG_L	k0, VCPU_LO(k1)
	mtlo	k0

	/* Guest k0/k1 last: k1 is unusable as a base pointer afterwards */
	LONG_L	k0, VCPU_R26(k1)
	LONG_L	k1, VCPU_R27(k1)

	eret
552
__kvm_mips_return_to_host:
	/* EBASE is already pointing to Linux */
	/* k1 = host pt_regs frame saved by __kvm_mips_vcpu_run */
	LONG_L	k1, VCPU_HOST_STACK(k1)
	INT_ADDIU k1,k1, -PT_SIZE

	/* Restore host DDATA_LO */
	LONG_L	k0, PT_HOST_USERLOCAL(k1)
	mtc0	k0, CP0_DDATA_LO

	/*
	 * Restore host ASID.  Address the frame through k1, not sp: sp is
	 * still offset by -CALLFRAME_SIZ from the call to
	 * kvm_mips_handle_exit() (delay-slot adjustment that was never
	 * undone), so a sp-relative access would read the wrong slot.
	 */
	LONG_L	k0, PT_HOST_ASID(k1)
	andi	k0, 0xff
	mtc0	k0,CP0_ENTRYHI
	ehb

	/* Load context saved on the host stack */
	LONG_L	$0, PT_R0(k1)
	LONG_L	$1, PT_R1(k1)

	/*
	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
	 * to recover the err code
	 */
	INT_SRA	k0, v0, 2
	move	$2, k0

	LONG_L	$3, PT_R3(k1)
	LONG_L	$4, PT_R4(k1)
	LONG_L	$5, PT_R5(k1)
	LONG_L	$6, PT_R6(k1)
	LONG_L	$7, PT_R7(k1)
	LONG_L	$8, PT_R8(k1)
	LONG_L	$9, PT_R9(k1)
	LONG_L	$10, PT_R10(k1)
	LONG_L	$11, PT_R11(k1)
	LONG_L	$12, PT_R12(k1)
	LONG_L	$13, PT_R13(k1)
	LONG_L	$14, PT_R14(k1)
	LONG_L	$15, PT_R15(k1)
	LONG_L	$16, PT_R16(k1)
	LONG_L	$17, PT_R17(k1)
	LONG_L	$18, PT_R18(k1)
	LONG_L	$19, PT_R19(k1)
	LONG_L	$20, PT_R20(k1)
	LONG_L	$21, PT_R21(k1)
	LONG_L	$22, PT_R22(k1)
	LONG_L	$23, PT_R23(k1)
	LONG_L	$24, PT_R24(k1)
	LONG_L	$25, PT_R25(k1)

	/* Host k0/k1 were not saved */

	LONG_L	$28, PT_R28(k1)
	LONG_L	$29, PT_R29(k1)
	LONG_L	$30, PT_R30(k1)

	LONG_L	k0, PT_HI(k1)
	mthi	k0

	LONG_L	k0, PT_LO(k1)
	mtlo	k0

	/* Restore RDHWR access */
	PTR_LI	k0, 0x2000000F
	mtc0	k0,  CP0_HWRENA

	/* Restore RA, which is the address we will return to */
	LONG_L  ra, PT_R31(k1)
	j       ra
	 nop

VECTOR_END(MIPSX(GuestExceptionEnd))
.end MIPSX(GuestException)
626
MIPSX(exceptions):
	####
	##### The exception handlers.
	##### One entry per CP0 Cause.ExcCode value (0..31); every cause is
	##### currently routed to the common GuestException handler.
	#####
	.word _C_LABEL(MIPSX(GuestException))	#  0
	.word _C_LABEL(MIPSX(GuestException))	#  1
	.word _C_LABEL(MIPSX(GuestException))	#  2
	.word _C_LABEL(MIPSX(GuestException))	#  3
	.word _C_LABEL(MIPSX(GuestException))	#  4
	.word _C_LABEL(MIPSX(GuestException))	#  5
	.word _C_LABEL(MIPSX(GuestException))	#  6
	.word _C_LABEL(MIPSX(GuestException))	#  7
	.word _C_LABEL(MIPSX(GuestException))	#  8
	.word _C_LABEL(MIPSX(GuestException))	#  9
	.word _C_LABEL(MIPSX(GuestException))	# 10
	.word _C_LABEL(MIPSX(GuestException))	# 11
	.word _C_LABEL(MIPSX(GuestException))	# 12
	.word _C_LABEL(MIPSX(GuestException))	# 13
	.word _C_LABEL(MIPSX(GuestException))	# 14
	.word _C_LABEL(MIPSX(GuestException))	# 15
	.word _C_LABEL(MIPSX(GuestException))	# 16
	.word _C_LABEL(MIPSX(GuestException))	# 17
	.word _C_LABEL(MIPSX(GuestException))	# 18
	.word _C_LABEL(MIPSX(GuestException))	# 19
	.word _C_LABEL(MIPSX(GuestException))	# 20
	.word _C_LABEL(MIPSX(GuestException))	# 21
	.word _C_LABEL(MIPSX(GuestException))	# 22
	.word _C_LABEL(MIPSX(GuestException))	# 23
	.word _C_LABEL(MIPSX(GuestException))	# 24
	.word _C_LABEL(MIPSX(GuestException))	# 25
	.word _C_LABEL(MIPSX(GuestException))	# 26
	.word _C_LABEL(MIPSX(GuestException))	# 27
	.word _C_LABEL(MIPSX(GuestException))	# 28
	.word _C_LABEL(MIPSX(GuestException))	# 29
	.word _C_LABEL(MIPSX(GuestException))	# 30
	.word _C_LABEL(MIPSX(GuestException))	# 31
663