Lines matching refs: vcpu (KVM PPC Book3S 64-bit MMU emulation, kvmppc_mmu_book3s_64_*)
39 static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu) in kvmppc_mmu_book3s_64_reset_msr() argument
41 kvmppc_set_msr(vcpu, vcpu->arch.intr_msr); in kvmppc_mmu_book3s_64_reset_msr()
45 struct kvm_vcpu *vcpu, in kvmppc_mmu_book3s_64_find_slbe() argument
52 for (i = 0; i < vcpu->arch.slb_nr; i++) { in kvmppc_mmu_book3s_64_find_slbe()
55 if (!vcpu->arch.slb[i].valid) in kvmppc_mmu_book3s_64_find_slbe()
58 if (vcpu->arch.slb[i].tb) in kvmppc_mmu_book3s_64_find_slbe()
61 if (vcpu->arch.slb[i].esid == cmp_esid) in kvmppc_mmu_book3s_64_find_slbe()
62 return &vcpu->arch.slb[i]; in kvmppc_mmu_book3s_64_find_slbe()
67 for (i = 0; i < vcpu->arch.slb_nr; i++) { in kvmppc_mmu_book3s_64_find_slbe()
68 if (vcpu->arch.slb[i].vsid) in kvmppc_mmu_book3s_64_find_slbe()
70 vcpu->arch.slb[i].valid ? 'v' : ' ', in kvmppc_mmu_book3s_64_find_slbe()
71 vcpu->arch.slb[i].large ? 'l' : ' ', in kvmppc_mmu_book3s_64_find_slbe()
72 vcpu->arch.slb[i].tb ? 't' : ' ', in kvmppc_mmu_book3s_64_find_slbe()
73 vcpu->arch.slb[i].esid, in kvmppc_mmu_book3s_64_find_slbe()
74 vcpu->arch.slb[i].vsid); in kvmppc_mmu_book3s_64_find_slbe()
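
kvmppc_mmu_book3s_64_find_slbe() walks the guest's software SLB and compares the effective segment ID of the faulting address against each valid entry, honouring the 1TB-segment bit (tb); the trailing loop over the entries just dumps the table when nothing matches. A minimal standalone sketch of that matching logic (the struct layout below is an illustrative assumption, not the kernel's definition, though 256MB and 1TB segments do correspond to shifts of 28 and 40):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative SLB entry; field names mirror the listing, exact layout is assumed. */
struct slb_entry {
        bool     valid;
        bool     tb;     /* 1 = 1TB segment, 0 = 256MB segment */
        uint64_t esid;   /* effective segment id, stored pre-shifted */
        uint64_t vsid;
};

#define SID_SHIFT     28  /* 256MB segment: ESID is EA >> 28 */
#define SID_SHIFT_1T  40  /* 1TB segment:   ESID is EA >> 40 */

static struct slb_entry *find_slbe(struct slb_entry *slb, size_t nr, uint64_t eaddr)
{
        for (size_t i = 0; i < nr; i++) {
                uint64_t cmp_esid = eaddr >> (slb[i].tb ? SID_SHIFT_1T : SID_SHIFT);

                if (slb[i].valid && slb[i].esid == cmp_esid)
                        return &slb[i];
        }
        return NULL;  /* no valid entry covers eaddr; the caller raises a segment fault */
}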
98 static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, in kvmppc_mmu_book3s_64_ea_to_vp() argument
103 slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr); in kvmppc_mmu_book3s_64_ea_to_vp()
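
kvmppc_mmu_book3s_64_ea_to_vp() turns an effective address into a virtual page number by combining the matching SLB entry's VSID with the page index inside the segment. A sketch of that arithmetic, assuming 4 KiB pages (the kernel also handles the large-page case, omitted here):

#include <stdbool.h>
#include <stdint.h>

#define SID_SHIFT_256M  28
#define SID_SHIFT_1T    40
#define PAGE_SHIFT_4K   12

/* VPN: the segment's VSID in the high bits, the 4K page index within the segment below it. */
static uint64_t ea_to_vp(uint64_t vsid, bool one_tb_segment, uint64_t eaddr)
{
        unsigned int sid_shift = one_tb_segment ? SID_SHIFT_1T : SID_SHIFT_256M;
        uint64_t page_idx = (eaddr & ((1ULL << sid_shift) - 1)) >> PAGE_SHIFT_4K;

        return (vsid << (sid_shift - PAGE_SHIFT_4K)) | page_idx;
}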
133 static hva_t kvmppc_mmu_book3s_64_get_pteg(struct kvm_vcpu *vcpu, in kvmppc_mmu_book3s_64_get_pteg() argument
137 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); in kvmppc_mmu_book3s_64_get_pteg()
162 if (vcpu->arch.papr_enabled) in kvmppc_mmu_book3s_64_get_pteg()
165 r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT); in kvmppc_mmu_book3s_64_get_pteg()
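
kvmppc_mmu_book3s_64_get_pteg() computes the guest-physical address of the PTE group for a (VSID, page) pair and then converts it to a host virtual address with gfn_to_hva(), as the last line above shows. A standalone sketch of the classic HPT primary/secondary hash it is built on (SDR1 decoding and the PAPR case are simplified away; treat the details as assumptions):

#include <stdbool.h>
#include <stdint.h>

#define PTEG_SIZE 128  /* 8 HPTEs x 16 bytes each */

/*
 * Primary/secondary hash into the guest hashed page table.  vsid and
 * page_idx are assumed to come from the SLB entry and the EA; htab_base
 * and htab_hash_mask stand in for the guest's SDR1 contents.
 */
static uint64_t pteg_addr(uint64_t vsid, uint64_t page_idx, bool secondary,
                          uint64_t htab_base, uint64_t htab_hash_mask)
{
        uint64_t hash = vsid ^ page_idx;

        if (secondary)
                hash = ~hash;

        return htab_base + (hash & htab_hash_mask) * PTEG_SIZE;
}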
208 static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, in kvmppc_mmu_book3s_64_xlate() argument
224 ulong mp_ea = vcpu->arch.magic_page_ea; in kvmppc_mmu_book3s_64_xlate()
229 !(kvmppc_get_msr(vcpu) & MSR_PR)) { in kvmppc_mmu_book3s_64_xlate()
231 gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data); in kvmppc_mmu_book3s_64_xlate()
232 gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff); in kvmppc_mmu_book3s_64_xlate()
242 slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr); in kvmppc_mmu_book3s_64_xlate()
260 mutex_lock(&vcpu->kvm->arch.hpt_mutex); in kvmppc_mmu_book3s_64_xlate()
263 ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu, slbe, eaddr, second); in kvmppc_mmu_book3s_64_xlate()
272 if ((kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Kp) in kvmppc_mmu_book3s_64_xlate()
274 else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Ks) in kvmppc_mmu_book3s_64_xlate()
285 (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) { in kvmppc_mmu_book3s_64_xlate()
310 gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data); in kvmppc_mmu_book3s_64_xlate()
316 if (unlikely(vcpu->arch.disable_kernel_nx) && in kvmppc_mmu_book3s_64_xlate()
317 !(kvmppc_get_msr(vcpu) & MSR_PR)) in kvmppc_mmu_book3s_64_xlate()
363 mutex_unlock(&vcpu->kvm->arch.hpt_mutex); in kvmppc_mmu_book3s_64_xlate()
370 mutex_unlock(&vcpu->kvm->arch.hpt_mutex); in kvmppc_mmu_book3s_64_xlate()
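
kvmppc_mmu_book3s_64_xlate() is the full guest translation path: magic-page shortcut, SLB lookup, PTEG search under hpt_mutex, then the protection check that picks Kp or Ks from the SLB entry depending on MSR_PR. A sketch of that key/PP protection evaluation under the usual Book3S encoding (treat the table as an assumption and check the ISA if it matters):

#include <stdbool.h>

enum hpte_access { ACCESS_NONE, ACCESS_RO, ACCESS_RW };

/*
 * key is the SLB entry's Kp bit when the guest is in problem state
 * (MSR_PR set) and Ks otherwise; pp are the HPTE protection bits.
 */
static enum hpte_access hpte_prot(bool key, unsigned int pp)
{
        if (!key)
                return (pp == 3) ? ACCESS_RO : ACCESS_RW;

        switch (pp) {
        case 0:  return ACCESS_NONE;
        case 2:  return ACCESS_RW;
        default: return ACCESS_RO;   /* pp == 1 or pp == 3 */
        }
}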
378 static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb) in kvmppc_mmu_book3s_64_slbmte() argument
387 vcpu_book3s = to_book3s(vcpu); in kvmppc_mmu_book3s_64_slbmte()
393 if (slb_nr > vcpu->arch.slb_nr) in kvmppc_mmu_book3s_64_slbmte()
396 slbe = &vcpu->arch.slb[slb_nr]; in kvmppc_mmu_book3s_64_slbmte()
410 if (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE) { in kvmppc_mmu_book3s_64_slbmte()
427 kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT); in kvmppc_mmu_book3s_64_slbmte()
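
kvmppc_mmu_book3s_64_slbmte() decodes the slbmte operands: RB carries the ESID, valid bit and slot index, RS carries the VSID plus the Ks/Kp/N/L/C flags; the new entry is then pushed into the shadow MMU via kvmppc_mmu_map_segment(). A standalone decoding sketch (bit positions follow the usual SLB layout but should be treated as assumptions):

#include <stdbool.h>
#include <stdint.h>

/* Decoded software-SLB entry, mirroring the fields used in the listing. */
struct slb_decode {
        uint64_t index;           /* SLB slot number, low bits of RB */
        bool     valid;           /* V bit in RB */
        bool     tb;              /* 1TB segment (B field in RS) */
        bool     Ks, Kp, large;   /* protection keys and large-page bit from RS */
        uint64_t esid, vsid;
};

static struct slb_decode slbmte_decode(uint64_t rs, uint64_t rb)
{
        struct slb_decode d = {
                .index = rb & 0xfff,
                .valid = (rb >> 27) & 1,
                .tb    = (rs >> 62) & 1,
                .vsid  = rs >> 12,      /* 256MB case; 1TB entries use a wider field */
                .Ks    = (rs >> 11) & 1,
                .Kp    = (rs >> 10) & 1,
                .large = (rs >> 8) & 1,
        };

        d.esid = rb >> (d.tb ? 40 : 28);
        return d;
}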
430 static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr) in kvmppc_mmu_book3s_64_slbmfee() argument
434 if (slb_nr > vcpu->arch.slb_nr) in kvmppc_mmu_book3s_64_slbmfee()
437 slbe = &vcpu->arch.slb[slb_nr]; in kvmppc_mmu_book3s_64_slbmfee()
442 static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr) in kvmppc_mmu_book3s_64_slbmfev() argument
446 if (slb_nr > vcpu->arch.slb_nr) in kvmppc_mmu_book3s_64_slbmfev()
449 slbe = &vcpu->arch.slb[slb_nr]; in kvmppc_mmu_book3s_64_slbmfev()
454 static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea) in kvmppc_mmu_book3s_64_slbie() argument
461 slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea); in kvmppc_mmu_book3s_64_slbie()
473 kvmppc_mmu_flush_segment(vcpu, ea & ~(seg_size - 1), seg_size); in kvmppc_mmu_book3s_64_slbie()
476 static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu) in kvmppc_mmu_book3s_64_slbia() argument
482 for (i = 1; i < vcpu->arch.slb_nr; i++) { in kvmppc_mmu_book3s_64_slbia()
483 vcpu->arch.slb[i].valid = false; in kvmppc_mmu_book3s_64_slbia()
484 vcpu->arch.slb[i].orige = 0; in kvmppc_mmu_book3s_64_slbia()
485 vcpu->arch.slb[i].origv = 0; in kvmppc_mmu_book3s_64_slbia()
488 if (kvmppc_get_msr(vcpu) & MSR_IR) { in kvmppc_mmu_book3s_64_slbia()
489 kvmppc_mmu_flush_segments(vcpu); in kvmppc_mmu_book3s_64_slbia()
490 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); in kvmppc_mmu_book3s_64_slbia()
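
kvmppc_mmu_book3s_64_slbia() clears every software SLB entry except slot 0, which the slbia instruction preserves by architecture, and then, if instruction relocation is on, flushes the shadow segments and remaps the segment containing the current PC. The invalidation loop in isolation:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct soft_slb { bool valid; uint64_t orige, origv; };

/* Slot 0 is left alone; everything else is marked invalid and wiped. */
static void slbia(struct soft_slb *slb, size_t nr)
{
        for (size_t i = 1; i < nr; i++) {
                slb[i].valid = false;
                slb[i].orige = 0;
                slb[i].origv = 0;
        }
}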
494 static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum, in kvmppc_mmu_book3s_64_mtsrin() argument
531 kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb); in kvmppc_mmu_book3s_64_mtsrin()
534 static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va, in kvmppc_mmu_book3s_64_tlbie() argument
549 if (vcpu->arch.hflags & BOOK3S_HFLAG_NEW_TLBIE) { in kvmppc_mmu_book3s_64_tlbie()
563 kvm_for_each_vcpu(i, v, vcpu->kvm) in kvmppc_mmu_book3s_64_tlbie()
568 static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid) in segment_contains_magic_page() argument
570 ulong mp_ea = vcpu->arch.magic_page_ea; in segment_contains_magic_page()
572 return mp_ea && !(kvmppc_get_msr(vcpu) & MSR_PR) && in segment_contains_magic_page()
577 static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, in kvmppc_mmu_book3s_64_esid_to_vsid() argument
583 ulong mp_ea = vcpu->arch.magic_page_ea; in kvmppc_mmu_book3s_64_esid_to_vsid()
585 u64 msr = kvmppc_get_msr(vcpu); in kvmppc_mmu_book3s_64_esid_to_vsid()
588 slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea); in kvmppc_mmu_book3s_64_esid_to_vsid()
629 !segment_contains_magic_page(vcpu, esid)) in kvmppc_mmu_book3s_64_esid_to_vsid()
633 if (kvmppc_get_msr(vcpu) & MSR_PR) in kvmppc_mmu_book3s_64_esid_to_vsid()
643 !(kvmppc_get_msr(vcpu) & MSR_PR)) { in kvmppc_mmu_book3s_64_esid_to_vsid()
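
kvmppc_mmu_book3s_64_esid_to_vsid() maps a guest ESID to the VSID used for the host-side shadow mapping; the MSR bits matter because real-mode, split-relocation and problem-state accesses must land in disjoint VSID spaces so they never alias. A heavily simplified sketch of that idea (the offsets and the VSID_SPLIT name are made up for illustration; the kernel keeps separate spaces for IR-off and DR-off and also special-cases the magic page):

#include <stdint.h>

/* Made-up, disjoint offsets per guest translation mode. */
#define VSID_REAL   (1ULL << 61)   /* MSR_IR = MSR_DR = 0: addresses are guest-real */
#define VSID_SPLIT  (2ULL << 61)   /* only one of IR/DR enabled */
#define VSID_PR     (1ULL << 60)   /* problem state (user mode) */

static uint64_t guest_vsid_to_host(uint64_t gvsid, int msr_ir, int msr_dr, int msr_pr)
{
        if (!msr_ir && !msr_dr)
                gvsid |= VSID_REAL;
        else if (!msr_ir || !msr_dr)
                gvsid |= VSID_SPLIT;

        if (msr_pr)
                gvsid |= VSID_PR;

        return gvsid;
}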
651 static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu) in kvmppc_mmu_book3s_64_is_dcbz32() argument
653 return (to_book3s(vcpu)->hid[5] & 0x80); in kvmppc_mmu_book3s_64_is_dcbz32()
656 void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu) in kvmppc_mmu_book3s_64_init() argument
658 struct kvmppc_mmu *mmu = &vcpu->arch.mmu; in kvmppc_mmu_book3s_64_init()
674 vcpu->arch.hflags |= BOOK3S_HFLAG_SLB; in kvmppc_mmu_book3s_64_init()
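
kvmppc_mmu_book3s_64_init() fills the per-vcpu MMU callback table with the handlers listed above and sets BOOK3S_HFLAG_SLB so the rest of the code knows this guest has an SLB rather than segment registers and BATs. A standalone sketch of that dispatch-table pattern (the struct and stub names are illustrative, not the kernel's struct kvmppc_mmu):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative ops table; the real struct kvmppc_mmu carries more callbacks than this. */
struct mmu_ops {
        void     (*slbmte)(void *vcpu, uint64_t rs, uint64_t rb);
        void     (*slbie)(void *vcpu, uint64_t ea);
        void     (*slbia)(void *vcpu);
        uint64_t (*ea_to_vp)(void *vcpu, uint64_t eaddr, bool data);
};

/* Stubs stand in for the kvmppc_mmu_book3s_64_* handlers from the listing. */
static void     stub_slbmte(void *vcpu, uint64_t rs, uint64_t rb) { (void)vcpu; (void)rs; (void)rb; }
static void     stub_slbie(void *vcpu, uint64_t ea) { (void)vcpu; (void)ea; }
static void     stub_slbia(void *vcpu) { (void)vcpu; }
static uint64_t stub_ea_to_vp(void *vcpu, uint64_t eaddr, bool data) { (void)vcpu; (void)data; return eaddr >> 12; }

/* Init just wires the table and flags that this vcpu uses an SLB. */
static void book3s_64_mmu_init(struct mmu_ops *mmu, uint64_t *hflags, uint64_t hflag_slb)
{
        mmu->slbmte   = stub_slbmte;
        mmu->slbie    = stub_slbie;
        mmu->slbia    = stub_slbia;
        mmu->ea_to_vp = stub_ea_to_vp;
        *hflags      |= hflag_slb;
}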