Lines matching refs: vcpu

174 void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,  in kvmppc_map_vrma()  argument
185 struct kvm *kvm = vcpu->kvm; in kvmppc_map_vrma()
245 static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu) in kvmppc_mmu_book3s_64_hv_reset_msr() argument
247 unsigned long msr = vcpu->arch.intr_msr; in kvmppc_mmu_book3s_64_hv_reset_msr()
250 if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr)) in kvmppc_mmu_book3s_64_hv_reset_msr()
253 msr |= vcpu->arch.shregs.msr & MSR_TS_MASK; in kvmppc_mmu_book3s_64_hv_reset_msr()
254 kvmppc_set_msr(vcpu, msr); in kvmppc_mmu_book3s_64_hv_reset_msr()
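
The reset_msr fragments (lines 245-254) rebuild the MSR used when an interrupt is delivered to the vcpu, starting from vcpu->arch.intr_msr and carrying the transactional-state bits across. A minimal sketch of how those fragments plausibly fit together; the MSR_TS_S branch, which forces an active transaction into suspended state, fills in the elided lines and is an assumption rather than something shown in the listing:

static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
{
        unsigned long msr = vcpu->arch.intr_msr;

        /*
         * Preserve the guest's transactional state across interrupt
         * delivery: a transactional guest is moved to suspended state
         * (assumed), otherwise the existing TS bits are carried over.
         */
        if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
                msr |= MSR_TS_S;
        else
                msr |= vcpu->arch.shregs.msr & MSR_TS_MASK;
        kvmppc_set_msr(vcpu, msr);
}
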
277 static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu, in kvmppc_mmu_book3s_hv_find_slbe() argument
283 for (i = 0; i < vcpu->arch.slb_nr; i++) { in kvmppc_mmu_book3s_hv_find_slbe()
284 if (!(vcpu->arch.slb[i].orige & SLB_ESID_V)) in kvmppc_mmu_book3s_hv_find_slbe()
287 if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T) in kvmppc_mmu_book3s_hv_find_slbe()
292 if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0) in kvmppc_mmu_book3s_hv_find_slbe()
293 return &vcpu->arch.slb[i]; in kvmppc_mmu_book3s_hv_find_slbe()
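
The find_slbe fragments (lines 277-293) show nearly the whole guest SLB search: walk vcpu->arch.slb, skip invalid entries, and compare the effective address against each entry's ESID under a mask chosen by the segment size. A sketch of the complete loop, assuming the usual ESID_MASK / ESID_MASK_1T masks for 256MB and 1TB segments:

static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
                                                         gva_t eaddr)
{
        int i;
        u64 mask;

        for (i = 0; i < vcpu->arch.slb_nr; i++) {
                if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
                        continue;       /* skip invalid SLB entries */

                /* 1TB vs 256MB segment decides how many EA bits to compare */
                if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
                        mask = ESID_MASK_1T;
                else
                        mask = ESID_MASK;

                if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
                        return &vcpu->arch.slb[i];
        }
        return NULL;
}
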
307 static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, in kvmppc_mmu_book3s_64_hv_xlate() argument
310 struct kvm *kvm = vcpu->kvm; in kvmppc_mmu_book3s_64_hv_xlate()
317 int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR); in kvmppc_mmu_book3s_64_hv_xlate()
321 slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr); in kvmppc_mmu_book3s_64_hv_xlate()
327 slb_v = vcpu->kvm->arch.vrma_slb_v; in kvmppc_mmu_book3s_64_hv_xlate()
350 key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS; in kvmppc_mmu_book3s_64_hv_xlate()
360 int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr); in kvmppc_mmu_book3s_64_hv_xlate()
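
The xlate fragments (lines 307-360) outline the slow-path address translation: pick a segment (a guest SLB entry in virtual mode, the VRMA descriptor in real mode), look the address up in the hashed page table, then derive permissions from the PP bits, the Ks/Kp key selected by MSR_PR, and the storage key checked against vcpu->arch.amr. A heavily trimmed sketch of that flow; the HPT lookup and the permission math are elided and only marked in comments, and the exact parameter list is an assumption:

static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
                        struct kvmppc_pte *gpte, bool data, bool iswrite)
{
        struct kvm *kvm = vcpu->kvm;    /* used by the elided HPT lookup */
        struct kvmppc_slb *slbe;
        unsigned long slb_v, key;
        int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);

        /* Pick the segment: guest SLB in virtual mode, VRMA in real mode */
        if (virtmode) {
                slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
                if (!slbe)
                        return -EINVAL;
                slb_v = slbe->origv;
        } else {
                slb_v = vcpu->kvm->arch.vrma_slb_v;
        }

        /* ... HPT lookup elided in this sketch ... */

        /* Problem state checks against the Kp key, privileged state against Ks */
        key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;

        /*
         * ... permission calculation elided; for data accesses the storage
         * key is additionally checked via hpte_get_skey_perm(gr,
         * vcpu->arch.amr) to veto read and/or write permission ...
         */
        return 0;
}
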
390 static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu, in kvmppc_hv_emulate_mmio() argument
398 if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) != in kvmppc_hv_emulate_mmio()
430 vcpu->arch.paddr_accessed = gpa; in kvmppc_hv_emulate_mmio()
431 vcpu->arch.vaddr_accessed = ea; in kvmppc_hv_emulate_mmio()
432 return kvmppc_emulate_mmio(run, vcpu); in kvmppc_hv_emulate_mmio()
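
The emulate_mmio fragments (lines 390-432) show the hand-off to the generic MMIO emulator: fetch the faulting instruction, then record the guest physical and effective addresses of the access before calling kvmppc_emulate_mmio(). A sketch of that path with the intermediate checks elided; the trailing is_store parameter is assumed from the truncated call at line 490:

static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                  unsigned long gpa, gva_t ea, int is_store)
{
        u32 last_inst;

        /* Fetch the instruction that faulted so the emulator can decode it */
        if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
                        EMULATE_DONE)
                return RESUME_GUEST;    /* couldn't fetch it: re-enter and retry */

        /* ... checks elided (sketch): confirm this gpa really is emulated MMIO ... */

        /* Record where the access hit; kvmppc_emulate_mmio() reads these back */
        vcpu->arch.paddr_accessed = gpa;
        vcpu->arch.vaddr_accessed = ea;
        return kvmppc_emulate_mmio(run, vcpu);
}
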
435 int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, in kvmppc_book3s_hv_page_fault() argument
438 struct kvm *kvm = vcpu->kvm; in kvmppc_book3s_hv_page_fault()
460 if (ea != vcpu->arch.pgfault_addr) in kvmppc_book3s_hv_page_fault()
462 index = vcpu->arch.pgfault_index; in kvmppc_book3s_hv_page_fault()
474 if (hpte[0] != vcpu->arch.pgfault_hpte[0] || in kvmppc_book3s_hv_page_fault()
475 hpte[1] != vcpu->arch.pgfault_hpte[1]) in kvmppc_book3s_hv_page_fault()
486 trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr); in kvmppc_book3s_hv_page_fault()
490 return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, in kvmppc_book3s_hv_page_fault()
598 if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) { in kvmppc_book3s_hv_page_fault()
627 trace_kvm_page_fault_exit(vcpu, hpte, ret); in kvmppc_book3s_hv_page_fault()
1026 struct kvm_vcpu *vcpu; in kvmppc_hv_get_dirty_log() local
1045 kvm_for_each_vcpu(i, vcpu, kvm) { in kvmppc_hv_get_dirty_log()
1046 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_hv_get_dirty_log()
1047 harvest_vpa_dirty(&vcpu->arch.vpa, memslot, map); in kvmppc_hv_get_dirty_log()
1048 harvest_vpa_dirty(&vcpu->arch.dtl, memslot, map); in kvmppc_hv_get_dirty_log()
1049 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_hv_get_dirty_log()
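
The dirty-log fragments (lines 1026-1049) show the per-vcpu half of kvmppc_hv_get_dirty_log(): each vcpu's VPA and dispatch trace log (dtl) areas are harvested into the dirty bitmap while vpa_update_lock is held, so the registrations cannot change mid-scan. The matched lines already form the complete loop; it is repeated here only to spell out the locking rationale in comments:

        /* Harvest dirtiness from each vcpu's per-vcpu shared areas */
        kvm_for_each_vcpu(i, vcpu, kvm) {
                /* Hold the lock so the VPA/DTL areas can't be re-registered under us */
                spin_lock(&vcpu->arch.vpa_update_lock);
                harvest_vpa_dirty(&vcpu->arch.vpa, memslot, map);
                harvest_vpa_dirty(&vcpu->arch.dtl, memslot, map);
                spin_unlock(&vcpu->arch.vpa_update_lock);
        }
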
1627 void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu) in kvmppc_mmu_book3s_hv_init() argument
1629 struct kvmppc_mmu *mmu = &vcpu->arch.mmu; in kvmppc_mmu_book3s_hv_init()
1631 vcpu->arch.slb_nr = 32; /* POWER7/POWER8 */ in kvmppc_mmu_book3s_hv_init()
1636 vcpu->arch.hflags |= BOOK3S_HFLAG_SLB; in kvmppc_mmu_book3s_hv_init()
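
The init fragments (lines 1627-1636) show the per-vcpu MMU setup: 32 software SLB entries (the POWER7/POWER8 size) and the BOOK3S_HFLAG_SLB flag. A sketch of the whole initializer; the two callback assignments in the middle are an assumption, inferred from the xlate and reset_msr functions matched above:

void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
{
        struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

        vcpu->arch.slb_nr = 32;         /* POWER7/POWER8 */

        /* Hook up the HV translation and MSR-reset callbacks (assumed) */
        mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
        mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;

        vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}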