Lines Matching refs:vcpu
35 void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) in kvmppc_mmu_invalidate_pte() argument
44 static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid) in kvmppc_sid_hash() argument
57 static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid) in find_sid_vsid() argument
62 if (kvmppc_get_msr(vcpu) & MSR_PR) in find_sid_vsid()
65 sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); in find_sid_vsid()
66 map = &to_book3s(vcpu)->sid_map[sid_map_mask]; in find_sid_vsid()
72 map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask]; in find_sid_vsid()
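The find_sid_vsid() lines above show a two-slot probe into the per-vCPU SID map: the guest VSID is hashed (line 65), the primary slot is checked (line 66), and on a miss the mirrored slot at SID_MAP_MASK - sid_map_mask is tried (line 72). Below is a minimal, standalone C sketch of that probe pattern; the table size, the VSID_VALID tag, the toy hash and the entry layout are illustrative assumptions rather than the kernel's definitions, and the MSR_PR handling from line 62 (presumably separating problem-state from supervisor-state mappings) is left out.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; sizes, flags and the hash are not the kernel's. */
#define SID_MAP_BITS	9
#define SID_MAP_NUM	(1 << SID_MAP_BITS)
#define SID_MAP_MASK	(SID_MAP_NUM - 1)
#define VSID_VALID	(1ULL << 63)	/* assumed "entry in use" tag */

struct sid_map_entry {
	uint64_t guest_vsid;	/* guest-visible VSID, tagged with VSID_VALID */
	uint64_t host_vsid;	/* VSID actually used on the host side */
};

static struct sid_map_entry sid_map[SID_MAP_NUM];

/* Toy hash folding the guest VSID into a table index. */
static uint16_t sid_hash(uint64_t gvsid)
{
	return (uint16_t)((gvsid ^ (gvsid >> SID_MAP_BITS) ^
			   (gvsid >> (2 * SID_MAP_BITS))) & SID_MAP_MASK);
}

/* Two-slot probe: primary slot first, then the mirrored slot. */
static struct sid_map_entry *find_sid_vsid(uint64_t gvsid)
{
	uint16_t idx = sid_hash(gvsid);
	struct sid_map_entry *map = &sid_map[idx];

	if (map->guest_vsid == (gvsid | VSID_VALID))
		return map;

	map = &sid_map[SID_MAP_MASK - idx];
	if (map->guest_vsid == (gvsid | VSID_VALID))
		return map;

	return NULL;	/* caller has to create a mapping first */
}

int main(void)
{
	uint64_t gvsid = 0x12345;

	sid_map[sid_hash(gvsid)].guest_vsid = gvsid | VSID_VALID;
	sid_map[sid_hash(gvsid)].host_vsid = 0xabcd;

	struct sid_map_entry *hit = find_sid_vsid(gvsid);
	printf("lookup %s, host vsid 0x%llx\n", hit ? "hit" : "miss",
	       hit ? (unsigned long long)hit->host_vsid : 0ULL);
	return 0;
}

Probing a second, mirrored slot gives each guest VSID two possible homes for a single hash computation, which keeps the lookup O(1) while reducing the chance that two hot VSIDs keep evicting each other.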
82 int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte, in kvmppc_mmu_map_page() argument
98 struct kvm *kvm = vcpu->kvm; in kvmppc_mmu_map_page()
108 pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable); in kvmppc_mmu_map_page()
118 vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid); in kvmppc_mmu_map_page()
119 map = find_sid_vsid(vcpu, vsid); in kvmppc_mmu_map_page()
121 ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr); in kvmppc_mmu_map_page()
123 map = find_sid_vsid(vcpu, vsid); in kvmppc_mmu_map_page()
139 mark_page_dirty(vcpu->kvm, gfn); in kvmppc_mmu_map_page()
159 cpte = kvmppc_mmu_hpte_cache_next(vcpu); in kvmppc_mmu_map_page()
203 kvmppc_mmu_hpte_cache_map(vcpu, cpte); in kvmppc_mmu_map_page()
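Taken together, the kvmppc_mmu_map_page() references outline the fault path: resolve the guest physical address to a host pfn (line 108), translate the faulting effective segment to a guest VSID (line 118), look that VSID up in the SID map and, if the segment is not shadowed yet, map the segment and retry the lookup (lines 119-123), mark the backing page dirty where appropriate (line 139), and finally install the entry in the shadow HPTE cache (lines 159 and 203). The standalone sketch below models only the lookup-or-map-segment retry; every helper in it is a toy stand-in for the kernel function noted in its comment, not the real implementation.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool segment_shadowed;	/* "a shadow segment already exists" */
static uint64_t shadowed_vsid;

/* Stands in for vcpu->arch.mmu.esid_to_vsid(): toy segment-to-VSID translation. */
static int esid_to_vsid(uint64_t eaddr, uint64_t *vsid)
{
	*vsid = eaddr >> 28;
	return 0;
}

/* Stands in for find_sid_vsid(): is this guest VSID already mapped? */
static bool vsid_is_shadowed(uint64_t gvsid)
{
	return segment_shadowed && shadowed_vsid == gvsid;
}

/* Stands in for kvmppc_mmu_map_segment(): create the shadow segment. */
static int map_segment(uint64_t eaddr)
{
	uint64_t vsid;

	if (esid_to_vsid(eaddr, &vsid))
		return -1;
	segment_shadowed = true;
	shadowed_vsid = vsid;
	return 0;
}

/* Retry pattern as around lines 118-123: miss -> map the segment -> retry once. */
static int lookup_or_map(uint64_t eaddr)
{
	uint64_t vsid;

	if (esid_to_vsid(eaddr, &vsid))
		return -1;

	if (!vsid_is_shadowed(vsid)) {
		if (map_segment(eaddr))
			return -1;
		if (!vsid_is_shadowed(vsid))
			return -1;
	}
	return 0;
}

int main(void)
{
	printf("first fault on 0x123456789: %s\n",
	       lookup_or_map(0x123456789ULL) == 0 ? "mapped" : "failed");
	return 0;
}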
217 void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte) in kvmppc_mmu_unmap_page() argument
222 vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid); in kvmppc_mmu_unmap_page()
225 kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask); in kvmppc_mmu_unmap_page()
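Unmapping is driven by virtual page and mask: kvmppc_mmu_unmap_page() recovers the VSID for the address (line 222) and then has the shadow PTE cache drop every entry whose virtual page matches under that mask (line 225). A small sketch of such a masked flush, with an invented flat-array layout standing in for the kernel's HPTE cache:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define N_SHADOW_PTES	64	/* illustrative cache size */

struct shadow_pte { uint64_t vpage; bool valid; };
static struct shadow_pte pte_cache[N_SHADOW_PTES];

/* Invalidate every cached entry whose vpage matches guest_vp under mask. */
static void pte_vflush(uint64_t guest_vp, uint64_t mask)
{
	for (size_t i = 0; i < N_SHADOW_PTES; i++)
		if (pte_cache[i].valid &&
		    (pte_cache[i].vpage & mask) == (guest_vp & mask))
			pte_cache[i].valid = false;
}

int main(void)
{
	pte_cache[0] = (struct shadow_pte){ .vpage = 0x1234, .valid = true };
	pte_vflush(0x1000, ~0xfffULL);	/* drops the whole matching group */
	return pte_cache[0].valid;	/* 0 after the flush */
}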
228 static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) in create_sid_map() argument
231 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); in create_sid_map()
235 if (kvmppc_get_msr(vcpu) & MSR_PR) in create_sid_map()
241 sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); in create_sid_map()
245 map = &to_book3s(vcpu)->sid_map[sid_map_mask]; in create_sid_map()
255 kvmppc_mmu_pte_flush(vcpu, 0, 0); in create_sid_map()
256 kvmppc_mmu_flush_segments(vcpu); in create_sid_map()
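create_sid_map() fills the same hashed table that find_sid_vsid() probes (lines 241-245), and lines 255-256 show that when mappings have to be recycled it flushes both the shadow PTE cache and the shadow segments, so nothing stale can keep referring to a reused host VSID. The fragment below sketches only that flush-before-reuse idea; the exhaustion condition and the toy VSID allocator are assumptions, not the kernel's logic.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SID_MAP_NUM	512	/* illustrative size */
#define N_SHADOW_PTES	64	/* illustrative cache size */

struct sid_map_entry { uint64_t guest_vsid, host_vsid; };
struct shadow_pte    { uint64_t vpage; bool valid; };

static struct sid_map_entry sid_map[SID_MAP_NUM];
static struct shadow_pte    pte_cache[N_SHADOW_PTES];
static bool                 segments_valid;

/* Host VSIDs come out of a finite range (toy allocator). */
static uint64_t next_host_vsid, max_host_vsid = 16;

static uint64_t alloc_host_vsid(void)
{
	if (next_host_vsid == max_host_vsid) {
		/*
		 * Out of mappings: start over.  In the spirit of lines
		 * 255-256, throw away everything that could still refer to
		 * an old VSID -- the whole SID map, every cached shadow
		 * PTE, and all shadow segments.
		 */
		memset(sid_map, 0, sizeof(sid_map));
		memset(pte_cache, 0, sizeof(pte_cache));
		segments_valid = false;
		next_host_vsid = 0;
	}
	return next_host_vsid++;
}

int main(void)
{
	segments_valid = true;		/* pretend shadow segments exist */

	for (int i = 0; i < 40; i++)
		alloc_host_vsid();	/* wraps twice, flushing everything */

	printf("segments valid: %d, next host vsid: %llu\n",
	       segments_valid, (unsigned long long)next_host_vsid);
	return 0;
}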
268 static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid) in kvmppc_mmu_next_segment() argument
270 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); in kvmppc_mmu_next_segment()
299 kvmppc_mmu_flush_segments(vcpu); in kvmppc_mmu_next_segment()
309 int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr) in kvmppc_mmu_map_segment() argument
311 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); in kvmppc_mmu_map_segment()
320 slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK); in kvmppc_mmu_map_segment()
322 if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) { in kvmppc_mmu_map_segment()
329 map = find_sid_vsid(vcpu, gvsid); in kvmppc_mmu_map_segment()
331 map = create_sid_map(vcpu, gvsid); in kvmppc_mmu_map_segment()
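kvmppc_mmu_map_segment() ties the pieces together: it picks the next shadow segment slot (line 320), asks the guest MMU for the guest VSID behind the effective address (line 322), and then either finds an existing SID-map entry or creates one (lines 329-331). A compact, standalone sketch of that find-or-create step, using hypothetical helpers in place of the kernel ones (and a linear scan instead of the hashed map, purely for brevity):

#include <stdint.h>
#include <stdio.h>

#define MAP_SLOTS	8	/* illustrative */

struct sid_map_entry { uint64_t guest_vsid, host_vsid; int used; };
static struct sid_map_entry sid_map[MAP_SLOTS];
static uint64_t next_host_vsid = 1;

static struct sid_map_entry *find_sid_vsid(uint64_t gvsid)
{
	for (int i = 0; i < MAP_SLOTS; i++)
		if (sid_map[i].used && sid_map[i].guest_vsid == gvsid)
			return &sid_map[i];
	return NULL;
}

static struct sid_map_entry *create_sid_map(uint64_t gvsid)
{
	for (int i = 0; i < MAP_SLOTS; i++) {
		if (!sid_map[i].used) {
			sid_map[i].used = 1;
			sid_map[i].guest_vsid = gvsid;
			sid_map[i].host_vsid = next_host_vsid++;
			return &sid_map[i];
		}
	}
	return NULL;	/* a real implementation would evict and flush here */
}

/* Find-or-create, as in lines 329-331 of the listing. */
static int map_segment(uint64_t gvsid, uint64_t *host_vsid)
{
	struct sid_map_entry *map = find_sid_vsid(gvsid);

	if (!map)
		map = create_sid_map(gvsid);
	if (!map)
		return -1;

	*host_vsid = map->host_vsid;
	return 0;
}

int main(void)
{
	uint64_t hv;

	if (map_segment(0x42, &hv) == 0)
		printf("guest vsid 0x42 -> host vsid 0x%llx\n",
		       (unsigned long long)hv);
	return 0;
}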
355 void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size) in kvmppc_mmu_flush_segment() argument
357 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); in kvmppc_mmu_flush_segment()
372 void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu) in kvmppc_mmu_flush_segments() argument
374 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); in kvmppc_mmu_flush_segments()
380 void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu) in kvmppc_mmu_destroy_pr() argument
382 kvmppc_mmu_hpte_destroy(vcpu); in kvmppc_mmu_destroy_pr()
383 __destroy_context(to_book3s(vcpu)->context_id[0]); in kvmppc_mmu_destroy_pr()
386 int kvmppc_mmu_init(struct kvm_vcpu *vcpu) in kvmppc_mmu_init() argument
388 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); in kvmppc_mmu_init()
401 kvmppc_mmu_hpte_init(vcpu); in kvmppc_mmu_init()