/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include <linux/kvm_host.h>

#include "opcode.h"
#include "interrupt.h"

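/**
 * kvm_trap_emul_gva_to_gpa_cb() - Translate a guest virtual address.
 * @gva:	Guest virtual address.
 *
 * Addresses in (C)KSEG0/(C)KSEG1 are translated by simply masking off the
 * segment bits; anything else cannot be resolved here and yields
 * KVM_INVALID_ADDR.
 */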
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
	gpa_t gpa;
	uint32_t kseg = KSEGX(gva);

	if ((kseg == CKSEG0) || (kseg == CKSEG1))
		gpa = CPHYSADDR(gva);
	else {
		kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
		kvm_mips_dump_host_tlbs();
		gpa = KVM_INVALID_ADDR;
	}

	kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);

	return gpa;
}

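/**
 * kvm_trap_emul_handle_cop_unusable() - Guest used an unusable coprocessor.
 * @vcpu:	Virtual CPU context.
 *
 * If the guest used COP1 (the FPU), either deliver a Coprocessor Unusable
 * exception to the guest (no FPU, or CU1 clear) or restore the host FPU
 * state; all other coprocessor accesses are fed to the instruction emulator.
 */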
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
		/* FPU Unusable */
		if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
		    (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
			/*
			 * Unusable/no FPU in guest:
			 * deliver guest COP1 Unusable Exception
			 */
			er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
		} else {
			/* Restore FPU state */
			kvm_own_fpu(vcpu);
			er = EMULATE_DONE;
		}
	} else {
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
	}

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	case EMULATE_WAIT:
		run->exit_reason = KVM_EXIT_INTR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}

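/**
 * kvm_trap_emul_handle_tlb_mod() - Guest took a TLB modified exception.
 * @vcpu:	Virtual CPU context.
 *
 * TLB modified faults on useg/kseg23 addresses are emulated; faults anywhere
 * else are reported to userland as internal errors.
 */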
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
	    || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);
		er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);

		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		/*
		 * XXXKYMA: The guest kernel does not expect to get this fault
		 * when we are not using HIGHMEM. Need to address this in a
		 * HIGHMEM kernel
		 */
		kvm_err("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	} else {
		kvm_err("Illegal TLB Mod fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

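/**
 * kvm_trap_emul_handle_tlb_st_miss() - Guest took a TLB miss on a store.
 * @vcpu:	Virtual CPU context.
 *
 * Commpage faults are handled directly, useg/kseg23 misses go through the
 * guest TLB, and KSEG0 faults are mapped straight into the shadow host TLB.
 */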
static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER ADDR TLB ST fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);
		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		/*
		 * All KSEG0 faults are handled by KVM, as the guest kernel does
		 * not expect to ever get them
		 */
		if (kvm_mips_handle_kseg0_tlb_fault
		    (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Illegal TLB ST fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

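/**
 * kvm_trap_emul_handle_tlb_ld_miss() - Guest took a TLB miss on a load.
 * @vcpu:	Virtual CPU context.
 *
 * Handled the same way as a store miss: commpage faults directly, useg/kseg23
 * misses via the guest TLB, and KSEG0 faults via the shadow host TLB.
 */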
static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);

		/*
		 * User Address (UA) fault. This could happen if:
		 * (1) The TLB entry is not present/valid in both the Guest and
		 *     shadow host TLBs; in this case we pass the fault on to
		 *     the guest kernel and let it handle it.
		 * (2) The TLB entry is present in the Guest TLB but not in the
		 *     shadow host TLB; in this case we inject the entry from
		 *     the Guest TLB into the shadow host TLB.
		 */

		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		if (kvm_mips_handle_kseg0_tlb_fault
		    (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Illegal TLB LD fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

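/**
 * kvm_trap_emul_handle_addr_err_st() - Guest took a store address error.
 * @vcpu:	Virtual CPU context.
 *
 * Stores from guest kernel mode to the unmapped (C)KSEG0/(C)KSEG1 segments
 * are treated as MMIO and emulated; anything else is an internal error.
 */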
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (KVM_GUEST_KERNEL_MODE(vcpu)
	    && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
		kvm_debug("Emulate Store to MMIO space\n");
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Emulate Store to MMIO space failed\n");
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		} else {
			run->exit_reason = KVM_EXIT_MMIO;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

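/**
 * kvm_trap_emul_handle_addr_err_ld() - Guest took a load address error.
 * @vcpu:	Virtual CPU context.
 *
 * Loads from the unmapped (C)KSEG0/(C)KSEG1 segments are treated as MMIO and
 * emulated; anything else is an internal error.
 */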
static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
		kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Emulate Load from MMIO space failed\n");
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		} else {
			run->exit_reason = KVM_EXIT_MMIO;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

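/**
 * kvm_trap_emul_handle_syscall() - Guest executed a SYSCALL instruction.
 * @vcpu:	Virtual CPU context.
 *
 * Deliver the system call exception to the guest kernel.
 */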
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

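/**
 * kvm_trap_emul_handle_res_inst() - Guest executed a reserved instruction.
 * @vcpu:	Virtual CPU context.
 *
 * Let kvm_mips_handle_ri() either emulate the instruction or deliver a
 * Reserved Instruction exception to the guest kernel.
 */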
static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_handle_ri(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

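/**
 * kvm_trap_emul_handle_break() - Guest executed a BREAK instruction.
 * @vcpu:	Virtual CPU context.
 *
 * Deliver the breakpoint exception to the guest kernel.
 */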
static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

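/**
 * kvm_trap_emul_handle_trap() - Guest executed a trap instruction.
 * @vcpu:	Virtual CPU context.
 *
 * Deliver the trap exception to the guest kernel.
 */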
static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

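/**
 * kvm_trap_emul_handle_msa_fpe() - Guest took an MSA floating point exception.
 * @vcpu:	Virtual CPU context.
 *
 * Deliver the MSA floating point exception to the guest kernel.
 */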
static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

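/**
 * kvm_trap_emul_handle_fpe() - Guest took a floating point exception.
 * @vcpu:	Virtual CPU context.
 *
 * Deliver the floating point exception to the guest kernel.
 */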
static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

/**
 * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled.
 */
static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
	    (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
		/*
		 * No MSA in guest, or FPU enabled and not in FR=1 mode,
		 * guest reserved instruction exception
		 */
		er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
	} else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
		/* MSA disabled by guest, guest MSA disabled exception */
		er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
	} else {
		/* Restore MSA/FPU state */
		kvm_own_msa(vcpu);
		er = EMULATE_DONE;
	}

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}

static int kvm_trap_emul_vm_init(struct kvm *kvm)
{
	return 0;
}

static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

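/**
 * kvm_trap_emul_vcpu_setup() - Initialize the guest CP0 state.
 * @vcpu:	Virtual CPU context.
 *
 * Set up the guest's config registers so that it comes up as a MIPS 24kc
 * style core with a KVM_MIPS_GUEST_TLB_SIZE entry software-managed TLB.
 */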
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t config1;
	int vcpu_id = vcpu->vcpu_id;

	/*
	 * Arch specific stuff, set up config registers properly so that the
	 * guest will come up as expected; for now we simulate a MIPS 24kc
	 */
	kvm_write_c0_guest_prid(cop0, 0x00019300);
	/* Have config1, Cacheable, noncoherent, write-back, write allocate */
	kvm_write_c0_guest_config(cop0, MIPS_CONF_M | (0x3 << CP0C0_K0) |
				  (0x1 << CP0C0_AR) |
				  (MMU_TYPE_R4000 << CP0C0_MT));

	/* Read the cache characteristics from the host Config1 Register */
	config1 = (read_c0_config1() & ~0x7f);

	/* Set up MMU size */
	config1 &= ~(0x3f << 25);
	config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

	/* We unset some bits that we aren't emulating */
	config1 &=
	    ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
	      (1 << CP0C1_WR) | (1 << CP0C1_CA));
	kvm_write_c0_guest_config1(cop0, config1);

	/* Have config3, no tertiary/secondary caches implemented */
	kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
	/* MIPS_CONF_M | (read_c0_config2() & 0xfff) */

	/* Have config4, UserLocal */
	kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);

	/* Have config5 */
	kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);

	/* No config6 */
	kvm_write_c0_guest_config5(cop0, 0);

	/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
	kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

	/*
	 * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
	 */
	kvm_write_c0_guest_intctl(cop0, 0xFC000000);

	/* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
	kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));

	return 0;
}

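/**
 * kvm_trap_emul_get_one_reg() - Read a trap & emulate specific register.
 * @vcpu:	Virtual CPU context.
 * @reg:	Register description from userland.
 * @v:		Where to store the register value.
 *
 * Only the timer registers (Count and the COUNT_CTL/COUNT_RESUME/COUNT_HZ
 * pseudo-registers) are handled here; anything else returns -EINVAL.
 */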
static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 *v)
{
	switch (reg->id) {
	case KVM_REG_MIPS_CP0_COUNT:
		*v = kvm_mips_read_count(vcpu);
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		*v = vcpu->arch.count_ctl;
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		*v = ktime_to_ns(vcpu->arch.count_resume);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		*v = vcpu->arch.count_hz;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

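/**
 * kvm_trap_emul_set_one_reg() - Write a trap & emulate specific register.
 * @vcpu:	Virtual CPU context.
 * @reg:	Register description from userland.
 * @v:		Value to write.
 *
 * Writes to the timer registers keep Cause.DC changes atomic with respect to
 * the timer, and writes to the Config registers are filtered through their
 * writability masks; anything else returns -EINVAL.
 */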
static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int ret = 0;
	unsigned int cur, change;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v, false);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * If the timer is stopped or started (DC bit) it must look
		 * atomic with changes to the interrupt pending bits (TI, IRQ5).
		 * A timer interrupt should not happen in between.
		 */
		if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
			if (v & CAUSEF_DC) {
				/* disable timer first */
				kvm_mips_count_disable_cause(vcpu);
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
			} else {
				/* enable timer last */
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			kvm_write_c0_guest_cause(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		cur = kvm_read_c0_guest_config1(cop0);
		change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config1(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		cur = kvm_read_c0_guest_config3(cop0);
		change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config3(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		cur = kvm_read_c0_guest_config4(cop0);
		change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config4(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		cur = kvm_read_c0_guest_config5(cop0);
		change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config5(cop0, v);
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}

static int kvm_trap_emul_vcpu_get_regs(struct kvm_vcpu *vcpu)
{
	kvm_lose_fpu(vcpu);

	return 0;
}

static int kvm_trap_emul_vcpu_set_regs(struct kvm_vcpu *vcpu)
{
	return 0;
}

static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
	/* exit handlers */
	.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
	.handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
	.handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
	.handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
	.handle_syscall = kvm_trap_emul_handle_syscall,
	.handle_res_inst = kvm_trap_emul_handle_res_inst,
	.handle_break = kvm_trap_emul_handle_break,
	.handle_trap = kvm_trap_emul_handle_trap,
	.handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
	.handle_fpe = kvm_trap_emul_handle_fpe,
	.handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,

	.vm_init = kvm_trap_emul_vm_init,
	.vcpu_init = kvm_trap_emul_vcpu_init,
	.vcpu_setup = kvm_trap_emul_vcpu_setup,
	.gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
	.queue_timer_int = kvm_mips_queue_timer_int_cb,
	.dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
	.queue_io_int = kvm_mips_queue_io_int_cb,
	.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
	.irq_deliver = kvm_mips_irq_deliver_cb,
	.irq_clear = kvm_mips_irq_clear_cb,
	.get_one_reg = kvm_trap_emul_get_one_reg,
	.set_one_reg = kvm_trap_emul_set_one_reg,
	.vcpu_get_regs = kvm_trap_emul_vcpu_get_regs,
	.vcpu_set_regs = kvm_trap_emul_vcpu_set_regs,
};

int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	*install_callbacks = &kvm_trap_emul_callbacks;
	return 0;
}