Lines Matching refs:vcpu
175 void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, in kvmppc_map_vrma() argument
186 struct kvm *kvm = vcpu->kvm; in kvmppc_map_vrma()
246 static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu) in kvmppc_mmu_book3s_64_hv_reset_msr() argument
248 unsigned long msr = vcpu->arch.intr_msr; in kvmppc_mmu_book3s_64_hv_reset_msr()
251 if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr)) in kvmppc_mmu_book3s_64_hv_reset_msr()
254 msr |= vcpu->arch.shregs.msr & MSR_TS_MASK; in kvmppc_mmu_book3s_64_hv_reset_msr()
255 kvmppc_set_msr(vcpu, msr); in kvmppc_mmu_book3s_64_hv_reset_msr()
278 static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu, in kvmppc_mmu_book3s_hv_find_slbe() argument
284 for (i = 0; i < vcpu->arch.slb_nr; i++) { in kvmppc_mmu_book3s_hv_find_slbe()
285 if (!(vcpu->arch.slb[i].orige & SLB_ESID_V)) in kvmppc_mmu_book3s_hv_find_slbe()
288 if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T) in kvmppc_mmu_book3s_hv_find_slbe()
293 if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0) in kvmppc_mmu_book3s_hv_find_slbe()
294 return &vcpu->arch.slb[i]; in kvmppc_mmu_book3s_hv_find_slbe()
308 static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, in kvmppc_mmu_book3s_64_hv_xlate() argument
311 struct kvm *kvm = vcpu->kvm; in kvmppc_mmu_book3s_64_hv_xlate()
318 int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR); in kvmppc_mmu_book3s_64_hv_xlate()
322 slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr); in kvmppc_mmu_book3s_64_hv_xlate()
328 slb_v = vcpu->kvm->arch.vrma_slb_v; in kvmppc_mmu_book3s_64_hv_xlate()
351 key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS; in kvmppc_mmu_book3s_64_hv_xlate()
361 int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr); in kvmppc_mmu_book3s_64_hv_xlate()
391 static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu, in kvmppc_hv_emulate_mmio() argument
399 if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) != in kvmppc_hv_emulate_mmio()
431 vcpu->arch.paddr_accessed = gpa; in kvmppc_hv_emulate_mmio()
432 vcpu->arch.vaddr_accessed = ea; in kvmppc_hv_emulate_mmio()
433 return kvmppc_emulate_mmio(run, vcpu); in kvmppc_hv_emulate_mmio()
436 int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, in kvmppc_book3s_hv_page_fault() argument
439 struct kvm *kvm = vcpu->kvm; in kvmppc_book3s_hv_page_fault()
461 if (ea != vcpu->arch.pgfault_addr) in kvmppc_book3s_hv_page_fault()
463 index = vcpu->arch.pgfault_index; in kvmppc_book3s_hv_page_fault()
475 if (hpte[0] != vcpu->arch.pgfault_hpte[0] || in kvmppc_book3s_hv_page_fault()
476 hpte[1] != vcpu->arch.pgfault_hpte[1]) in kvmppc_book3s_hv_page_fault()
487 trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr); in kvmppc_book3s_hv_page_fault()
491 return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, in kvmppc_book3s_hv_page_fault()
599 if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) { in kvmppc_book3s_hv_page_fault()
628 trace_kvm_page_fault_exit(vcpu, hpte, ret); in kvmppc_book3s_hv_page_fault()
1033 struct kvm_vcpu *vcpu; in kvmppc_hv_get_dirty_log() local
1052 kvm_for_each_vcpu(i, vcpu, kvm) { in kvmppc_hv_get_dirty_log()
1053 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_hv_get_dirty_log()
1054 harvest_vpa_dirty(&vcpu->arch.vpa, memslot, map); in kvmppc_hv_get_dirty_log()
1055 harvest_vpa_dirty(&vcpu->arch.dtl, memslot, map); in kvmppc_hv_get_dirty_log()
1056 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_hv_get_dirty_log()
1634 void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu) in kvmppc_mmu_book3s_hv_init() argument
1636 struct kvmppc_mmu *mmu = &vcpu->arch.mmu; in kvmppc_mmu_book3s_hv_init()
1638 vcpu->arch.slb_nr = 32; /* POWER7/POWER8 */ in kvmppc_mmu_book3s_hv_init()
1643 vcpu->arch.hflags |= BOOK3S_HFLAG_SLB; in kvmppc_mmu_book3s_hv_init()
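
The matches at lines 284-294 (kvmppc_mmu_book3s_hv_find_slbe) show the guest SLB lookup: walk vcpu->arch.slb, skip entries without the valid bit, choose the ESID mask from the segment size (256MB vs 1TB), and compare the entry's ESID bits against the effective address. Below is a minimal stand-alone sketch of that matching step only; the guest_slbe struct, the find_slbe() helper and the SLBE_*/ESID_MASK_* constants are simplified stand-ins written for illustration, not the kernel's kvmppc_slb or SLB_* definitions.

/*
 * Minimal user-space sketch of the SLB-entry match listed at lines 284-294.
 * The types and constants are simplified stand-ins, not the kernel's own.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define SLBE_VALID      (1ULL << 27)            /* stand-in for SLB_ESID_V    */
#define SLBE_B_1T       (1ULL << 62)            /* stand-in for SLB_VSID_B_1T */
#define ESID_MASK_256M  0xfffffffff0000000ULL   /* 256MB segment: esid = ea >> 28 */
#define ESID_MASK_1TB   0xffffff0000000000ULL   /* 1TB segment:   esid = ea >> 40 */

struct guest_slbe {
	uint64_t orige;   /* ESID word: effective segment id + valid bit     */
	uint64_t origv;   /* VSID word: virtual segment id + segment size    */
};

/* Return the SLB entry whose segment covers eaddr, or NULL if none match. */
static struct guest_slbe *find_slbe(struct guest_slbe *slb, int nr,
				    uint64_t eaddr)
{
	for (int i = 0; i < nr; i++) {
		uint64_t mask;

		if (!(slb[i].orige & SLBE_VALID))
			continue;                 /* entry not valid */

		/* Segment size selects how many EA bits form the ESID. */
		mask = (slb[i].origv & SLBE_B_1T) ? ESID_MASK_1TB
						  : ESID_MASK_256M;

		/* Match if the ESID bits of the entry equal those of eaddr. */
		if (((slb[i].orige ^ eaddr) & mask) == 0)
			return &slb[i];
	}
	return NULL;
}

int main(void)
{
	struct guest_slbe slb[2] = {
		{ .orige = 0x0000000010000000ULL | SLBE_VALID, .origv = 0 },
		{ .orige = 0x0000010000000000ULL | SLBE_VALID, .origv = SLBE_B_1T },
	};

	printf("256M hit: %p\n", (void *)find_slbe(slb, 2, 0x0000000010123456ULL));
	printf("1T   hit: %p\n", (void *)find_slbe(slb, 2, 0x0000010012345678ULL));
	printf("miss:     %p\n", (void *)find_slbe(slb, 2, 0x0000000020000000ULL));
	return 0;
}

In the listed kernel code a NULL result only matters when the guest is running with translation on; when the fault happens in real mode, the xlate path at line 328 substitutes the per-VM VRMA SLB value (vcpu->kvm->arch.vrma_slb_v) instead of consulting a guest SLB entry.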