Lines Matching refs:arch
52 return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK; in kvm_mips_get_kernel_asid()
57 return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK; in kvm_mips_get_user_asid()
62 return vcpu->kvm->arch.commpage_tlb; in kvm_mips_get_commpage_asid()
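The three getters above (lines 52-62) supply the hardware ASIDs that shadow TLB entries are tagged with: per-CPU guest kernel and guest user ASIDs cached in vcpu->arch, plus one reserved ASID for the commpage. A minimal sketch of the choice the fault handlers below make between kernel and user (the helper name is illustrative, not in the source):

    /* Illustrative helper only: the mapped-segment fault handler makes
     * this same choice inline when it builds EntryHi. */
    static unsigned long kvm_mips_current_guest_asid(struct kvm_vcpu *vcpu)
    {
            if (KVM_GUEST_KERNEL_MODE(vcpu))
                    return kvm_mips_get_kernel_asid(vcpu); /* guest kernel mappings */
            return kvm_mips_get_user_asid(vcpu);           /* guest user mappings */
    }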
118 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_dump_guest_tlbs()
126 tlb = vcpu->arch.guest_tlb[i]; in kvm_mips_dump_guest_tlbs()
149 if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE) in kvm_mips_map_page()
161 kvm->arch.guest_pmap[gfn] = pfn; in kvm_mips_map_page()
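Lines 149 and 161 bracket kvm_mips_map_page(): guest_pmap[] is a flat gfn-to-host-pfn table, filled lazily on first touch. A sketch of that path, assuming kvm_mips_gfn_to_pfn() wraps the generic gfn_to_pfn() as it does elsewhere in this tree:

    if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
            return 0;                            /* already mapped */

    pfn = kvm_mips_gfn_to_pfn(kvm, gfn);         /* pins the host page */
    if (kvm_mips_is_error_pfn(pfn))
            return -EFAULT;

    kvm->arch.guest_pmap[gfn] = pfn;
    return 0;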
183 if (gfn >= kvm->arch.guest_pmap_npages) { in kvm_mips_translate_guest_kseg0_to_hpa()
192 return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset; in kvm_mips_translate_guest_kseg0_to_hpa()
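Once guest_pmap[] is populated, the KSEG0 translation at lines 183-192 is pure arithmetic: strip the segment bits, index the table with the guest frame number, and re-attach the page offset. Roughly:

    gfn = KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT;
    if (gfn >= kvm->arch.guest_pmap_npages)
            return KVM_INVALID_PAGE;             /* beyond guest RAM */

    offset = gva & ~PAGE_MASK;
    hpa = (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;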
234 vcpu->arch.pc, idx, read_c0_entryhi(), in kvm_mips_host_tlb_write()
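Line 234 is the trace point in kvm_mips_host_tlb_write(); the function itself is the usual MIPS probe-then-write sequence on the CP0 registers, condensed here as a sketch:

    local_irq_save(flags);
    old_entryhi = read_c0_entryhi();

    write_c0_entryhi(entryhi);           /* VPN2 | ASID to (re)map */
    mtc0_tlbw_hazard();
    tlb_probe();                         /* is this VPN2/ASID already present? */
    tlb_probe_hazard();
    idx = read_c0_index();

    write_c0_entrylo0(entrylo0);
    write_c0_entrylo1(entrylo1);
    mtc0_tlbw_hazard();
    if (idx < 0)
            tlb_write_random();          /* miss: take any slot */
    else
            tlb_write_indexed();         /* hit: overwrite in place */
    tlbw_use_hazard();

    write_c0_entryhi(old_entryhi);       /* restore the host ASID */
    local_irq_restore(flags);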
279 if (gfn >= kvm->arch.guest_pmap_npages) { in kvm_mips_handle_kseg0_tlb_fault()
295 pfn0 = kvm->arch.guest_pmap[gfn]; in kvm_mips_handle_kseg0_tlb_fault()
296 pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1]; in kvm_mips_handle_kseg0_tlb_fault()
298 pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1]; in kvm_mips_handle_kseg0_tlb_fault()
299 pfn1 = kvm->arch.guest_pmap[gfn]; in kvm_mips_handle_kseg0_tlb_fault()
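The gfn ^ 0x1 at lines 295-299 is there because one MIPS TLB entry maps an even/odd pair of virtual pages through EntryLo0/EntryLo1, so the handler fetches the faulting frame and its buddy in the right order. The pair is then packed as elsewhere in this file:

    /* C=3 (cacheable) in bits 5:3, D in bit 2, V in bit 1 */
    entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
               (1 << 2) | (1 << 1);
    entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
               (1 << 2) | (1 << 1);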
320 pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT; in kvm_mips_handle_commpage_tlb_fault()
343 vcpu->arch.pc, read_c0_index(), read_c0_entryhi(), in kvm_mips_handle_commpage_tlb_fault()
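Lines 320 and 343 are from the commpage handler: kseg0_commpage is a host kernel page shared with the guest, wired into the TLB under the reserved ASID from line 62 so it can never alias a guest mapping. Its core, approximately (only the even half of the pair is made valid):

    pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
    entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
               (1 << 2) | (1 << 1);
    entrylo1 = 0;                                /* odd page stays invalid */

    vaddr = badvaddr & (PAGE_MASK << 1);         /* align to the page pair */
    write_c0_entryhi(vaddr | kvm_mips_get_commpage_asid(vcpu));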
377 pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT]; in kvm_mips_handle_mapped_seg_tlb_fault()
379 pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT]; in kvm_mips_handle_mapped_seg_tlb_fault()
398 kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, in kvm_mips_handle_mapped_seg_tlb_fault()
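For mapped segments (lines 377-398) the shadow entry is derived from the guest's own TLB entry: its output addresses index guest_pmap[] (the two lines above), while EntryHi keeps the guest VPN2 but swaps in the host-side kernel or user ASID, and EntryLo inherits the guest's D and V bits. A sketch of that assembly:

    entryhi = (tlb->tlb_hi & VPN2_MASK) |
              (KVM_GUEST_KERNEL_MODE(vcpu) ? kvm_mips_get_kernel_asid(vcpu)
                                           : kvm_mips_get_user_asid(vcpu));
    entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
               (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
    entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
               (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);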
410 struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb; in kvm_mips_guest_tlb_lookup()
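kvm_mips_guest_tlb_lookup() (line 410) walks the software copy of the guest TLB, masking VPN2 with each entry's page mask and accepting either a global entry or an exact ASID match. Approximately:

    for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
            if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) ==
                 ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
                (TLB_IS_GLOBAL(tlb[i]) ||
                 TLB_ASID(tlb[i]) == (entryhi & ASID_MASK))) {
                    index = i;           /* first match wins */
                    break;
            }
    }
    return index;                        /* -1 if nothing matched */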
660 if (hrtimer_cancel(&vcpu->arch.comparecount_timer)) in kvm_mips_migrate_count()
661 hrtimer_restart(&vcpu->arch.comparecount_timer); in kvm_mips_migrate_count()
676 if (((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) & ASID_VERSION_MASK)) { in kvm_arch_vcpu_load()
678 kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu); in kvm_arch_vcpu_load()
679 vcpu->arch.guest_kernel_asid[cpu] = in kvm_arch_vcpu_load()
680 vcpu->arch.guest_kernel_mm.context.asid[cpu]; in kvm_arch_vcpu_load()
681 kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu); in kvm_arch_vcpu_load()
682 vcpu->arch.guest_user_asid[cpu] = in kvm_arch_vcpu_load()
683 vcpu->arch.guest_user_mm.context.asid[cpu]; in kvm_arch_vcpu_load()
689 cpu, vcpu->arch.guest_kernel_asid[cpu]); in kvm_arch_vcpu_load()
691 vcpu->arch.guest_user_asid[cpu]); in kvm_arch_vcpu_load()
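Lines 676-683 are one wrapped condition plus its body: when the version bits of the cached guest ASID no longer match this CPU's asid_cache, both guest MMU contexts are regenerated and the per-CPU copies refreshed. Reassembled from the matches above:

    if (((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
         ASID_VERSION_MASK)) {
            kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
            vcpu->arch.guest_kernel_asid[cpu] =
                    vcpu->arch.guest_kernel_mm.context.asid[cpu];
            kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
            vcpu->arch.guest_user_asid[cpu] =
                    vcpu->arch.guest_user_mm.context.asid[cpu];
            newasid++;
    }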
694 if (vcpu->arch.last_sched_cpu != cpu) { in kvm_arch_vcpu_load()
696 vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id); in kvm_arch_vcpu_load()
711 write_c0_entryhi(vcpu->arch.preempt_entryhi & ASID_MASK); in kvm_arch_vcpu_load()
725 write_c0_entryhi(vcpu->arch.guest_kernel_asid[cpu] & ASID_MASK); in kvm_arch_vcpu_load()
729 write_c0_entryhi(vcpu->arch.guest_user_asid[cpu] & ASID_MASK); in kvm_arch_vcpu_load()
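The three write_c0_entryhi() calls at lines 711, 725 and 729 handle re-entry after preemption: with no new ASID the saved preempt_entryhi is reloaded as-is; with a fresh ASID, EntryHi is rebuilt for whichever mode the guest was in. In outline (newasid and the PF_VCPU check come from the surrounding function):

    if (!newasid) {
            if (current->flags & PF_VCPU) {      /* preempted while in guest */
                    write_c0_entryhi(vcpu->arch.preempt_entryhi & ASID_MASK);
                    ehb();
            }
    } else if (current->flags & PF_VCPU) {
            if (KVM_GUEST_KERNEL_MODE(vcpu))
                    write_c0_entryhi(vcpu->arch.guest_kernel_asid[cpu] &
                                     ASID_MASK);
            else
                    write_c0_entryhi(vcpu->arch.guest_user_asid[cpu] &
                                     ASID_MASK);
            ehb();
    }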
754 vcpu->arch.preempt_entryhi = read_c0_entryhi(); in kvm_arch_vcpu_put()
755 vcpu->arch.last_sched_cpu = cpu; in kvm_arch_vcpu_put()
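Lines 754-755 are the counterpart in kvm_arch_vcpu_put(): the live EntryHi (and with it the active ASID) is stashed and the CPU recorded, so the next kvm_arch_vcpu_load() can detect a migration (line 694) and move the compare-count hrtimer (lines 660-661). Sketch:

    local_irq_save(flags);
    cpu = smp_processor_id();
    vcpu->arch.preempt_entryhi = read_c0_entryhi();
    vcpu->arch.last_sched_cpu = cpu;
    local_irq_restore(flags);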
775 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_get_inst()
798 &vcpu->arch.guest_tlb[index], in kvm_get_inst()
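kvm_get_inst() (lines 775-798) must read the guest instruction that trapped; when the host TLB has no mapping for opc it resolves the address through the guest TLB, faults the mapping in, then fetches. Condensed, with the wrapped argument from line 798 completed:

    vpn2 = (unsigned long)opc & VPN2_MASK;
    asid = kvm_read_c0_guest_entryhi(cop0) & ASID_MASK;
    index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
    if (index < 0)
            return KVM_INVALID_INST;             /* no guest mapping either */

    kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
                                         &vcpu->arch.guest_tlb[index],
                                         NULL, NULL);
    inst = *opc;                                 /* mapped now: fetch */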