Lines matching refs: vcpu

34 void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)  in kvmppc_mmu_invalidate_pte()  argument
43 static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid) in kvmppc_sid_hash() argument
56 static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid) in find_sid_vsid() argument
61 if (kvmppc_get_msr(vcpu) & MSR_PR) in find_sid_vsid()
64 sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); in find_sid_vsid()
65 map = &to_book3s(vcpu)->sid_map[sid_map_mask]; in find_sid_vsid()
71 map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask]; in find_sid_vsid()
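The fragments on lines 61-71 suggest a two-slot lookup: the guest VSID is hashed, the primary slot is probed, and on a miss the mirrored slot at SID_MAP_MASK - hash is tried. Below is a minimal standalone sketch of that lookup, assuming illustrative table geometry, field names (valid, guest_vsid, host_vsid) and a stand-in hash; none of these are the kernel's definitions.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Table geometry assumed for illustration only. */
#define SID_MAP_BITS 9
#define SID_MAP_NUM  (1 << SID_MAP_BITS)
#define SID_MAP_MASK (SID_MAP_NUM - 1)

struct sid_map {
        uint64_t guest_vsid;
        uint64_t host_vsid;
        bool     valid;
};

/* Stand-in for kvmppc_sid_hash(): fold the guest VSID into SID_MAP_BITS. */
static uint16_t sid_hash(uint64_t gvsid)
{
        return (uint16_t)((gvsid ^ (gvsid >> SID_MAP_BITS)) & SID_MAP_MASK);
}

static struct sid_map *find_sid_vsid(struct sid_map *table, uint64_t gvsid)
{
        uint16_t idx = sid_hash(gvsid);
        struct sid_map *map = &table[idx];

        /* Primary slot (line 65). */
        if (map->valid && map->guest_vsid == gvsid)
                return map;

        /* Mirrored slot at the other end of the table (line 71). */
        map = &table[SID_MAP_MASK - idx];
        if (map->valid && map->guest_vsid == gvsid)
                return map;

        return NULL;    /* caller falls back to creating a mapping */
}

The MSR_PR test on line 61 suggests the guest VSID is first tagged differently when the guest runs in problem state, so user-mode and kernel-mode segments occupy separate entries; that tagging is omitted from the sketch.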
81 int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte, in kvmppc_mmu_map_page() argument
97 struct kvm *kvm = vcpu->kvm; in kvmppc_mmu_map_page()
107 pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable); in kvmppc_mmu_map_page()
117 vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid); in kvmppc_mmu_map_page()
118 map = find_sid_vsid(vcpu, vsid); in kvmppc_mmu_map_page()
120 ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr); in kvmppc_mmu_map_page()
122 map = find_sid_vsid(vcpu, vsid); in kvmppc_mmu_map_page()
138 mark_page_dirty(vcpu->kvm, gfn); in kvmppc_mmu_map_page()
158 cpte = kvmppc_mmu_hpte_cache_next(vcpu); in kvmppc_mmu_map_page()
202 kvmppc_mmu_hpte_cache_map(vcpu, cpte); in kvmppc_mmu_map_page()
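Taken together, lines 97-202 outline the page-fault mapping path: translate the guest real address to a host pfn, resolve the effective address to a guest VSID, find (or lazily map) the shadow segment, dirty the guest page when the mapping is writable, and cache the resulting shadow HPTE so it can be invalidated later. The sketch below models only that control flow; every helper is an illustrative stub, and none of the stub names or return conventions are the kernel's.

#include <stdbool.h>
#include <stdint.h>

#define SID_SHIFT  28   /* 256 MiB segments, as on Book3S-64 */
#define PAGE_SHIFT 12

struct guest_pte { uint64_t eaddr, raddr, vpage; };

/* Illustrative stubs standing in for the real KVM helpers. */
static int gpa_to_pfn(uint64_t raddr, bool iswrite, bool *writable)
{
        (void)raddr;
        *writable = iswrite;
        return 0;
}
static int esid_to_vsid(uint64_t esid, uint64_t *vsid) { *vsid = esid; return 0; }
static bool sid_map_present(uint64_t vsid) { (void)vsid; return true; }
static int map_segment(uint64_t eaddr) { (void)eaddr; return 0; }
static void mark_page_dirty(uint64_t gfn) { (void)gfn; }
static void hpte_cache_insert(const struct guest_pte *pte) { (void)pte; }

static int map_page(struct guest_pte *orig_pte, bool iswrite)
{
        bool writable;
        uint64_t vsid;

        /* 1. Guest physical address -> host pfn (line 107). */
        if (gpa_to_pfn(orig_pte->raddr, iswrite, &writable) < 0)
                return -1;

        /* 2. Guest effective address -> guest VSID (line 117). */
        esid_to_vsid(orig_pte->eaddr >> SID_SHIFT, &vsid);

        /* 3. Shadow SID-map lookup; on a miss, map the segment and retry
         *    (lines 118-122). */
        if (!sid_map_present(vsid)) {
                if (map_segment(orig_pte->eaddr) < 0)
                        return -1;
                if (!sid_map_present(vsid))
                        return -1;
        }

        /* 4. A writable mapping dirties the guest page (line 138). */
        if (writable)
                mark_page_dirty(orig_pte->raddr >> PAGE_SHIFT);

        /* 5. Install the shadow HPTE and remember it in the per-vCPU cache
         *    (lines 158 and 202). */
        hpte_cache_insert(orig_pte);
        return 0;
}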
216 void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte) in kvmppc_mmu_unmap_page() argument
221 vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid); in kvmppc_mmu_unmap_page()
224 kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask); in kvmppc_mmu_unmap_page()
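Lines 221-224 indicate that unmapping is expressed in guest virtual pages rather than host PTEs: the effective address is converted back to a VSID and the shadow-PTE cache is flushed by vpage under a mask. A short sketch of that flush-by-mask idea, modelled over a plain array (the real cache layout and the mask value are not shown in the listing and are assumed here):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct cached_hpte { uint64_t vpage; bool valid; };

/* Invalidate every cached shadow PTE whose masked virtual page matches the
 * requested one, the idea behind kvmppc_mmu_pte_vflush() on line 224. */
static void pte_vflush(struct cached_hpte *cache, size_t n,
                       uint64_t guest_vp, uint64_t mask)
{
        for (size_t i = 0; i < n; i++)
                if (cache[i].valid &&
                    (cache[i].vpage & mask) == (guest_vp & mask))
                        cache[i].valid = false;
}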
227 static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) in create_sid_map() argument
230 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); in create_sid_map()
234 if (kvmppc_get_msr(vcpu) & MSR_PR) in create_sid_map()
240 sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); in create_sid_map()
244 map = &to_book3s(vcpu)->sid_map[sid_map_mask]; in create_sid_map()
254 kvmppc_mmu_pte_flush(vcpu, 0, 0); in create_sid_map()
255 kvmppc_mmu_flush_segments(vcpu); in create_sid_map()
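Lines 230-255 show the allocation side of the SID map: when find_sid_vsid() misses, a slot is chosen from the same hash table (the sketch alternates between the primary and mirrored slot, one plausible reading of why lines 65 and 71 probe both), and when the supply of host VSIDs runs out the shadow state is flushed (lines 254-255) and allocation restarts. The sketch below is self-contained; the vsid_* allocator fields and the flush helper are assumptions introduced for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define SID_MAP_BITS 9
#define SID_MAP_NUM  (1 << SID_MAP_BITS)
#define SID_MAP_MASK (SID_MAP_NUM - 1)

struct sid_map { uint64_t guest_vsid, host_vsid; bool valid; };

struct book3s_shadow {
        struct sid_map sid_map[SID_MAP_NUM];
        uint64_t vsid_next, vsid_first, vsid_max;  /* host VSID allocator */
};

static uint16_t sid_hash(uint64_t gvsid)
{
        return (uint16_t)((gvsid ^ (gvsid >> SID_MAP_BITS)) & SID_MAP_MASK);
}

/* Stands in for kvmppc_mmu_pte_flush() + kvmppc_mmu_flush_segments() on
 * lines 254-255: every cached translation must go before host VSIDs are
 * reused. */
static void flush_all_shadow_state(struct book3s_shadow *s)
{
        memset(s->sid_map, 0, sizeof(s->sid_map));
}

static struct sid_map *create_sid_map(struct book3s_shadow *s, uint64_t gvsid)
{
        static bool backwards;        /* alternate slots to spread collisions */
        uint16_t idx = sid_hash(gvsid);
        struct sid_map *map;

        if (backwards)
                idx = SID_MAP_MASK - idx;
        backwards = !backwards;
        map = &s->sid_map[idx];

        /* Out of host VSIDs: wipe everything and restart the allocator. */
        if (s->vsid_next == s->vsid_max) {
                s->vsid_next = s->vsid_first;
                flush_all_shadow_state(s);
        }

        map->guest_vsid = gvsid;
        map->host_vsid  = s->vsid_next++;
        map->valid      = true;
        return map;
}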
267 static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid) in kvmppc_mmu_next_segment() argument
269 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); in kvmppc_mmu_next_segment()
298 kvmppc_mmu_flush_segments(vcpu); in kvmppc_mmu_next_segment()
308 int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr) in kvmppc_mmu_map_segment() argument
310 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); in kvmppc_mmu_map_segment()
319 slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK); in kvmppc_mmu_map_segment()
321 if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) { in kvmppc_mmu_map_segment()
328 map = find_sid_vsid(vcpu, gvsid); in kvmppc_mmu_map_segment()
330 map = create_sid_map(vcpu, gvsid); in kvmppc_mmu_map_segment()
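Lines 310-330 outline segment mapping: pick the next free shadow SLB slot (kvmppc_mmu_next_segment(), lines 269-298, flushing all segments when none is free), translate the ESID to a guest VSID, then reuse or create a SID-map entry and program the slot. A short sketch of that sequence, with the SLB entry reduced to the two fields the listing implies and the VSID translation collapsed into trivial stand-ins:

#include <stdbool.h>
#include <stdint.h>

#define SID_SHIFT 28
#define ESID_MASK 0xfffffffff0000000ULL
#define SLB_NUM   64            /* shadow SLB size, assumed for illustration */

struct shadow_slbe { uint64_t esid, vsid; bool valid; };

/* Trivial stand-ins for esid_to_vsid() and find_sid_vsid()/create_sid_map(). */
static int guest_esid_to_vsid(uint64_t esid, uint64_t *gvsid)
{
        *gvsid = esid;
        return 0;
}
static uint64_t host_vsid_for(uint64_t gvsid) { return gvsid ^ 0x123456; }

/* Pick a free (or already matching) slot, mirroring kvmppc_mmu_next_segment();
 * the real code flushes all segments and retries when the SLB is full
 * (line 298). */
static int next_segment(struct shadow_slbe *slb, uint64_t esid)
{
        for (int i = 0; i < SLB_NUM; i++)
                if (!slb[i].valid || slb[i].esid == esid)
                        return i;
        return -1;
}

static int map_segment(struct shadow_slbe *slb, uint64_t eaddr)
{
        uint64_t esid = eaddr & ESID_MASK;
        uint64_t gvsid;
        int slot = next_segment(slb, esid);

        if (slot < 0)
                return -1;

        /* Guest ESID -> guest VSID (line 321). */
        if (guest_esid_to_vsid(eaddr >> SID_SHIFT, &gvsid))
                return -1;

        /* Guest VSID -> host VSID via the SID map (lines 328-330),
         * collapsed into a single call here. */
        slb[slot].esid  = esid;
        slb[slot].vsid  = host_vsid_for(gvsid);
        slb[slot].valid = true;
        return 0;
}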
354 void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size) in kvmppc_mmu_flush_segment() argument
356 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); in kvmppc_mmu_flush_segment()
371 void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu) in kvmppc_mmu_flush_segments() argument
373 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); in kvmppc_mmu_flush_segments()
379 void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu) in kvmppc_mmu_destroy_pr() argument
381 kvmppc_mmu_hpte_destroy(vcpu); in kvmppc_mmu_destroy_pr()
382 __destroy_context(to_book3s(vcpu)->context_id[0]); in kvmppc_mmu_destroy_pr()
385 int kvmppc_mmu_init(struct kvm_vcpu *vcpu) in kvmppc_mmu_init() argument
387 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); in kvmppc_mmu_init()
400 kvmppc_mmu_hpte_init(vcpu); in kvmppc_mmu_init()
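Lines 381-400 pair teardown with setup: kvmppc_mmu_destroy_pr() releases the shadow-HPTE cache and the host MMU context recorded in context_id[0], so kvmppc_mmu_init() is where that context and the HPTE cache are established. A toy sketch of the same pairing; the field names, sizes, and the way the context id is obtained are illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct cached_hpte { uint64_t vpage; bool valid; };

/* Illustrative per-vCPU shadow-MMU state: one host context id (freed on
 * line 382) plus the shadow-HPTE cache set up on line 400. */
struct shadow_mmu {
        int context_id;
        struct cached_hpte *hpte_cache;
        size_t hpte_cache_size;
};

static int shadow_mmu_init(struct shadow_mmu *m)
{
        m->context_id = 1;      /* stands in for allocating a host context */
        m->hpte_cache_size = 128;
        m->hpte_cache = calloc(m->hpte_cache_size, sizeof(*m->hpte_cache));
        return m->hpte_cache ? 0 : -1;
}

static void shadow_mmu_destroy(struct shadow_mmu *m)
{
        free(m->hpte_cache);    /* cf. kvmppc_mmu_hpte_destroy(), line 381 */
        m->hpte_cache = NULL;
        m->context_id = 0;      /* cf. __destroy_context(context_id[0]), line 382 */
}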