Lines Matching refs:arch

67 vcpu->arch.guest_kernel_asid[i] = 0; in kvm_mips_reset_vcpu()
68 vcpu->arch.guest_user_asid[i] = 0; in kvm_mips_reset_vcpu()
80 return !!(vcpu->arch.pending_exceptions); in kvm_arch_vcpu_runnable()
114 kvm->arch.commpage_tlb = wired; in kvm_mips_init_tlbs()
117 kvm->arch.commpage_tlb); in kvm_mips_init_tlbs()
146 for (i = 0; i < kvm->arch.guest_pmap_npages; i++) { in kvm_mips_free_vcpus()
147 if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE) in kvm_mips_free_vcpus()
148 kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]); in kvm_mips_free_vcpus()
150 kfree(kvm->arch.guest_pmap); in kvm_mips_free_vcpus()
220 if (!kvm->arch.guest_pmap) { in kvm_arch_commit_memory_region()
225 kvm->arch.guest_pmap_npages = npages; in kvm_arch_commit_memory_region()
226 kvm->arch.guest_pmap = in kvm_arch_commit_memory_region()
229 if (!kvm->arch.guest_pmap) { in kvm_arch_commit_memory_region()
235 npages, kvm->arch.guest_pmap); in kvm_arch_commit_memory_region()
239 kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE; in kvm_arch_commit_memory_region()
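
The hits at lines 220-239 trace the guest physical page map (guest_pmap) being sized, allocated, and poisoned with KVM_INVALID_PAGE when a memory region is committed. A minimal sketch of that pattern follows; since the listing only shows the lines containing "arch", the allocator call that continues line 226 and the error path are assumptions:

    /* Sketch of kvm_arch_commit_memory_region()'s allocation step.
     * The kcalloc() call and the early return are assumed; only the
     * "arch" lines of the real function appear in this listing. */
    if (!kvm->arch.guest_pmap) {
            kvm->arch.guest_pmap_npages = npages;
            kvm->arch.guest_pmap =
                    kcalloc(npages, sizeof(unsigned long), GFP_KERNEL);
            if (!kvm->arch.guest_pmap) {
                    kvm_err("Failed to allocate guest pmap\n");
                    return;
            }
            /* Mark every slot unmapped until a fault populates it. */
            for (i = 0; i < npages; i++)
                    kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
    }
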
274 vcpu->arch.host_ebase = (void *)read_c0_ebase(); in kvm_arch_vcpu_create()
286 vcpu->arch.guest_ebase = gebase; in kvm_arch_vcpu_create()
323 vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL); in kvm_arch_vcpu_create()
325 if (!vcpu->arch.kseg0_commpage) { in kvm_arch_vcpu_create()
330 kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage); in kvm_arch_vcpu_create()
334 vcpu->arch.last_sched_cpu = -1; in kvm_arch_vcpu_create()
356 hrtimer_cancel(&vcpu->arch.comparecount_timer); in kvm_arch_vcpu_free()
362 kfree(vcpu->arch.guest_ebase); in kvm_arch_vcpu_free()
363 kfree(vcpu->arch.kseg0_commpage); in kvm_arch_vcpu_free()
397 kvm_read_c0_guest_cause(vcpu->arch.cop0)); in kvm_arch_vcpu_ioctl_run()
444 dvcpu->arch.wait = 0; in kvm_vcpu_ioctl_interrupt()
533 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_get_reg()
534 struct mips_fpu_struct *fpu = &vcpu->arch.fpu; in kvm_mips_get_reg()
543 v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0]; in kvm_mips_get_reg()
546 v = (long)vcpu->arch.hi; in kvm_mips_get_reg()
549 v = (long)vcpu->arch.lo; in kvm_mips_get_reg()
552 v = (long)vcpu->arch.pc; in kvm_mips_get_reg()
557 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_get_reg()
567 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_get_reg()
576 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_get_reg()
581 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_get_reg()
588 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_get_reg()
605 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_get_reg()
610 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_get_reg()
712 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_set_reg()
713 struct mips_fpu_struct *fpu = &vcpu->arch.fpu; in kvm_mips_set_reg()
744 vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v; in kvm_mips_set_reg()
747 vcpu->arch.hi = v; in kvm_mips_set_reg()
750 vcpu->arch.lo = v; in kvm_mips_set_reg()
753 vcpu->arch.pc = v; in kvm_mips_set_reg()
758 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_set_reg()
768 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_set_reg()
777 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_set_reg()
782 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_set_reg()
789 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_set_reg()
803 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_set_reg()
808 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_set_reg()
884 vcpu->arch.fpu_enabled = true; in kvm_vcpu_ioctl_enable_cap()
887 vcpu->arch.msa_enabled = true; in kvm_vcpu_ioctl_enable_cap()
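
Lines 884 and 887 are where the KVM_ENABLE_CAP ioctl flips the per-vcpu FPU/MSA enables. A hypothetical userspace caller might look like the following; the vcpu_fd plumbing is assumed, while KVM_ENABLE_CAP and KVM_CAP_MIPS_FPU are standard KVM interfaces:

    /* Hypothetical userspace side of lines 884/887: enabling the FPU
     * capability on a vcpu fd obtained from KVM_CREATE_VCPU. */
    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>

    static int enable_guest_fpu(int vcpu_fd)
    {
            struct kvm_enable_cap cap;

            memset(&cap, 0, sizeof(cap));
            cap.cap = KVM_CAP_MIPS_FPU;     /* ends up setting vcpu->arch.fpu_enabled */
            return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
    }
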
1116 kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc); in kvm_arch_vcpu_dump_regs()
1117 kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions); in kvm_arch_vcpu_dump_regs()
1121 vcpu->arch.gprs[i], in kvm_arch_vcpu_dump_regs()
1122 vcpu->arch.gprs[i + 1], in kvm_arch_vcpu_dump_regs()
1123 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); in kvm_arch_vcpu_dump_regs()
1125 kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi); in kvm_arch_vcpu_dump_regs()
1126 kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo); in kvm_arch_vcpu_dump_regs()
1128 cop0 = vcpu->arch.cop0; in kvm_arch_vcpu_dump_regs()
1142 for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) in kvm_arch_vcpu_ioctl_set_regs()
1143 vcpu->arch.gprs[i] = regs->gpr[i]; in kvm_arch_vcpu_ioctl_set_regs()
1144 vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ in kvm_arch_vcpu_ioctl_set_regs()
1145 vcpu->arch.hi = regs->hi; in kvm_arch_vcpu_ioctl_set_regs()
1146 vcpu->arch.lo = regs->lo; in kvm_arch_vcpu_ioctl_set_regs()
1147 vcpu->arch.pc = regs->pc; in kvm_arch_vcpu_ioctl_set_regs()
1156 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) in kvm_arch_vcpu_ioctl_get_regs()
1157 regs->gpr[i] = vcpu->arch.gprs[i]; in kvm_arch_vcpu_ioctl_get_regs()
1159 regs->hi = vcpu->arch.hi; in kvm_arch_vcpu_ioctl_get_regs()
1160 regs->lo = vcpu->arch.lo; in kvm_arch_vcpu_ioctl_get_regs()
1161 regs->pc = vcpu->arch.pc; in kvm_arch_vcpu_ioctl_get_regs()
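
Lines 1142-1161 show the register copy-in/copy-out behind the KVM_SET_REGS and KVM_GET_REGS ioctls, with gprs[0] forced back to zero because the MIPS $zero register is architecturally hardwired. A hypothetical userspace counterpart (the helper name is invented for illustration):

    /* Hypothetical caller of the set_regs path above: read-modify-write
     * of struct kvm_regs so only pc changes (mirrored into vcpu->arch.pc). */
    static int set_guest_pc(int vcpu_fd, __u64 pc)
    {
            struct kvm_regs regs;

            if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0)
                    return -1;
            regs.pc = pc;
            /* Writing regs.gpr[0] would be ignored: line 1144 pins it to 0. */
            return ioctl(vcpu_fd, KVM_SET_REGS, &regs);
    }
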
1172 vcpu->arch.wait = 0; in kvm_mips_comparecount_func()
1182 vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer); in kvm_mips_comparecount_wakeup()
1190 hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC, in kvm_arch_vcpu_init()
1192 vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup; in kvm_arch_vcpu_init()
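
Lines 1182-1192 are the standard embedded-hrtimer pattern: the timer lives inside vcpu->arch, so the callback uses container_of() to recover the enclosing kvm_vcpu. A minimal sketch; the timer mode and the restart decision are assumptions, since only the "arch" lines appear above:

    /* Sketch of the wakeup path: container_of() maps the timer member
     * back to its enclosing struct kvm_vcpu. */
    static enum hrtimer_restart comparecount_wakeup(struct hrtimer *timer)
    {
            struct kvm_vcpu *vcpu =
                    container_of(timer, struct kvm_vcpu, arch.comparecount_timer);

            vcpu->arch.wait = 0;            /* as at line 1172 */
            return HRTIMER_NORESTART;       /* assumed; the real code may rearm */
    }

    /* Registration, as at lines 1190-1192 (HRTIMER_MODE_REL assumed): */
    hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
                 HRTIMER_MODE_REL);
    vcpu->arch.comparecount_timer.function = comparecount_wakeup;
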
1224 uint32_t cause = vcpu->arch.host_cp0_cause; in kvm_mips_handle_exit()
1226 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; in kvm_mips_handle_exit()
1227 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; in kvm_mips_handle_exit()
1294 cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc, in kvm_mips_handle_exit()
1368 kvm_read_c0_guest_status(vcpu->arch.cop0)); in kvm_mips_handle_exit()
1402 if (kvm_mips_guest_has_fpu(&vcpu->arch) && in kvm_mips_handle_exit()
1404 __kvm_restore_fcsr(&vcpu->arch); in kvm_mips_handle_exit()
1406 if (kvm_mips_guest_has_msa(&vcpu->arch) && in kvm_mips_handle_exit()
1408 __kvm_restore_msacsr(&vcpu->arch); in kvm_mips_handle_exit()
1420 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_own_fpu()
1438 vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) in kvm_own_fpu()
1453 if (!(vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)) { in kvm_own_fpu()
1454 __kvm_restore_fpu(&vcpu->arch); in kvm_own_fpu()
1455 vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU; in kvm_own_fpu()
1465 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_own_msa()
1474 if (kvm_mips_guest_has_fpu(&vcpu->arch)) { in kvm_own_msa()
1482 (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | in kvm_own_msa()
1497 switch (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) { in kvm_own_msa()
1502 __kvm_restore_msa_upper(&vcpu->arch); in kvm_own_msa()
1503 vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA; in kvm_own_msa()
1507 __kvm_restore_msa(&vcpu->arch); in kvm_own_msa()
1508 vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA; in kvm_own_msa()
1509 if (kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_own_msa()
1510 vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU; in kvm_own_msa()
1524 if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) { in kvm_drop_fpu()
1526 vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_MSA; in kvm_drop_fpu()
1528 if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) { in kvm_drop_fpu()
1530 vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU; in kvm_drop_fpu()
1546 if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) { in kvm_lose_fpu()
1550 __kvm_save_msa(&vcpu->arch); in kvm_lose_fpu()
1554 if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) in kvm_lose_fpu()
1556 vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA); in kvm_lose_fpu()
1557 } else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) { in kvm_lose_fpu()
1561 __kvm_save_fpu(&vcpu->arch); in kvm_lose_fpu()
1562 vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU; in kvm_lose_fpu()
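
The block from kvm_own_fpu() through kvm_lose_fpu() (lines 1420-1562) manages lazy FPU/MSA context switching via the fpu_inuse bitmask: guest context is restored into hardware on first use and saved back only when the host reclaims the unit. A condensed sketch of that lifecycle, assuming the flag values (the real definitions live in a header not shown in this listing):

    /* Assumed flag values; the listing uses the names but not the header. */
    #define KVM_MIPS_FPU_FPU        0x1     /* guest FPU regs live in hardware */
    #define KVM_MIPS_FPU_MSA        0x2     /* guest MSA regs live in hardware */

    /* First guest FPU use: restore lazily, once (cf. lines 1453-1455). */
    if (!(vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)) {
            __kvm_restore_fpu(&vcpu->arch);
            vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
    }

    /* Host reclaims the unit: save whichever context is live, then clear
     * both flags (cf. lines 1546-1562). The MSA vector registers overlap
     * the FPU registers, so saving MSA covers both contexts. */
    if (cpu_has_msa && (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA))
            __kvm_save_msa(&vcpu->arch);
    else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
            __kvm_save_fpu(&vcpu->arch);
    vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA);
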