Lines matching refs: vcpu_e500
40 struct kvmppc_vcpu_e500 *vcpu_e500) in gtlb0_get_next_victim() argument
44 victim = vcpu_e500->gtlb_nv[0]++; in gtlb0_get_next_victim()
45 if (unlikely(vcpu_e500->gtlb_nv[0] >= vcpu_e500->gtlb_params[0].ways)) in gtlb0_get_next_victim()
46 vcpu_e500->gtlb_nv[0] = 0; in gtlb0_get_next_victim()
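
The gtlb0_get_next_victim() lines above implement round-robin victim selection for guest TLB0: hand out the current next-victim way, then advance the counter, wrapping at the associativity. A minimal standalone sketch of the same idiom (names are illustrative, not the kernel's):

/*
 * Round-robin next-victim selection, as in gtlb0_get_next_victim().
 * Illustrative sketch only; the kernel keeps the counter in
 * vcpu_e500->gtlb_nv[0].
 */
static unsigned int next_victim_way(unsigned int *nv, unsigned int ways)
{
        unsigned int victim = *nv;

        if (++*nv >= ways)
                *nv = 0;
        return victim;
}
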
61 static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr) in gtlb0_set_base() argument
63 return tlb0_set_base(addr, vcpu_e500->gtlb_params[0].sets, in gtlb0_set_base()
64 vcpu_e500->gtlb_params[0].ways); in gtlb0_set_base()
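
gtlb0_set_base() maps a guest virtual address to the flat index of its TLB0 set's first entry. The callee tlb0_set_base() is outside this listing, so the sketch below assumes the conventional power-of-two set hash on the page number:

/*
 * Assumed shape of the computation behind gtlb0_set_base(): the page
 * number selects the set; scaling by 'ways' yields the flat index of
 * that set's first entry in the guest TLB0 array.
 */
#define SKETCH_PAGE_SHIFT 12    /* 4 KiB pages; illustrative only */

static int tlb0_set_base_sketch(unsigned long addr, int sets, int ways)
{
        int set = (addr >> SKETCH_PAGE_SHIFT) & (sets - 1);

        return set * ways;
}
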
69 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in get_tlb_esel() local
73 esel &= vcpu_e500->gtlb_params[0].ways - 1; in get_tlb_esel()
74 esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2); in get_tlb_esel()
76 esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1; in get_tlb_esel()
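
get_tlb_esel() then narrows a MAS0 ESEL value to a valid flat index: for TLB0 it is a way within the set addressed by MAS2, for the fully associative TLB1 a direct array index. A sketch with illustrative types, reusing SKETCH_PAGE_SHIFT from the previous sketch (the kernel reads the geometry from gtlb_params[]):

/* Illustrative geometry type; not the kernel's struct. */
struct tlb_geom { int entries, ways, sets; };

static int decode_esel_sketch(int esel, int tlbsel, unsigned long mas2,
                              const struct tlb_geom *g)
{
        if (tlbsel == 0) {
                /* Way within the set addressed by MAS2's page number. */
                int set_base = ((mas2 >> SKETCH_PAGE_SHIFT) & (g[0].sets - 1))
                               * g[0].ways;
                return (esel & (g[0].ways - 1)) + set_base;
        }
        /* TLB1 is fully associative: index the whole array. */
        return esel & (g[1].entries - 1);
}
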
83 static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500, in kvmppc_e500_tlb_index() argument
86 int size = vcpu_e500->gtlb_params[tlbsel].entries; in kvmppc_e500_tlb_index()
91 set_base = gtlb0_set_base(vcpu_e500, eaddr); in kvmppc_e500_tlb_index()
92 size = vcpu_e500->gtlb_params[0].ways; in kvmppc_e500_tlb_index()
94 if (eaddr < vcpu_e500->tlb1_min_eaddr || in kvmppc_e500_tlb_index()
95 eaddr > vcpu_e500->tlb1_max_eaddr) in kvmppc_e500_tlb_index()
100 offset = vcpu_e500->gtlb_offset[tlbsel]; in kvmppc_e500_tlb_index()
104 &vcpu_e500->gtlb_arch[offset + set_base + i]; in kvmppc_e500_tlb_index()
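
kvmppc_e500_tlb_index() scans a candidate slice of the flat guest-TLB array: one set (size = ways) for TLB0, the whole array for TLB1, with an early out when the address falls outside the cached [tlb1_min_eaddr, tlb1_max_eaddr] window. The match predicate (EPN/TID/TS comparison) is not part of this listing, so the sketch abstracts it behind a callback:

/*
 * Sketch of the lookup loop: return the first matching flat index in
 * the candidate slice, or -1. 'match' stands in for the EPN/TID/TS
 * comparison the kernel performs on each entry.
 */
static int tlb_index_sketch(int set_base, int size, int (*match)(int esel))
{
        int i;

        for (i = 0; i < size; i++)
                if (match(set_base + i))
                        return set_base + i;
        return -1;
}
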
132 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvmppc_e500_deliver_tlb_miss() local
138 victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0; in kvmppc_e500_deliver_tlb_miss()
142 | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); in kvmppc_e500_deliver_tlb_miss()
154 static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500) in kvmppc_recalc_tlb1map_range() argument
156 int size = vcpu_e500->gtlb_params[1].entries; in kvmppc_recalc_tlb1map_range()
161 vcpu_e500->tlb1_min_eaddr = ~0UL; in kvmppc_recalc_tlb1map_range()
162 vcpu_e500->tlb1_max_eaddr = 0; in kvmppc_recalc_tlb1map_range()
163 offset = vcpu_e500->gtlb_offset[1]; in kvmppc_recalc_tlb1map_range()
167 &vcpu_e500->gtlb_arch[offset + i]; in kvmppc_recalc_tlb1map_range()
173 vcpu_e500->tlb1_min_eaddr = in kvmppc_recalc_tlb1map_range()
174 min(vcpu_e500->tlb1_min_eaddr, eaddr); in kvmppc_recalc_tlb1map_range()
177 vcpu_e500->tlb1_max_eaddr = in kvmppc_recalc_tlb1map_range()
178 max(vcpu_e500->tlb1_max_eaddr, eaddr); in kvmppc_recalc_tlb1map_range()
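
kvmppc_recalc_tlb1map_range() rebuilds that window by walking every TLB1 entry, starting from the empty range (~0UL, 0). A sketch assuming per-entry start/end values have already been extracted (the kernel derives them from the entry's EPN and page size):

/*
 * Recompute the min/max effective-address window over all valid
 * entries, as in kvmppc_recalc_tlb1map_range(). Illustrative entry
 * type; invalid entries are skipped, mirroring the kernel loop.
 */
struct range_entry { int valid; unsigned long start, end; };

static void recalc_range_sketch(const struct range_entry *e, int n,
                                unsigned long *min, unsigned long *max)
{
        int i;

        *min = ~0UL;
        *max = 0;
        for (i = 0; i < n; i++) {
                if (!e[i].valid)
                        continue;
                if (e[i].start < *min)
                        *min = e[i].start;
                if (e[i].end > *max)
                        *max = e[i].end;
        }
}
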
182 static int kvmppc_need_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500, in kvmppc_need_recalc_tlb1map_range() argument
191 return vcpu_e500->tlb1_min_eaddr == start || in kvmppc_need_recalc_tlb1map_range()
192 vcpu_e500->tlb1_max_eaddr == end; in kvmppc_need_recalc_tlb1map_range()
200 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvmppc_set_tlb1map_range() local
209 vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start); in kvmppc_set_tlb1map_range()
210 vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, end); in kvmppc_set_tlb1map_range()
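
The two helpers above keep the window maintenance cheap: kvmppc_set_tlb1map_range() only widens the cached bounds on insert, and a full rescan is needed solely when an invalidated entry sat exactly on a boundary, which is what kvmppc_need_recalc_tlb1map_range() tests. Sketched:

/* True only if the removed entry defined a cached boundary. */
static int need_recalc_sketch(unsigned long min, unsigned long max,
                              unsigned long start, unsigned long end)
{
        return min == start || max == end;
}

/* On insert, widening the window never requires a rescan. */
static void widen_range_sketch(unsigned long *min, unsigned long *max,
                               unsigned long start, unsigned long end)
{
        if (start < *min)
                *min = start;
        if (end > *max)
                *max = end;
}
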
214 struct kvmppc_vcpu_e500 *vcpu_e500, in kvmppc_e500_gtlbe_invalidate() argument
218 get_entry(vcpu_e500, tlbsel, esel); in kvmppc_e500_gtlbe_invalidate()
223 if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe)) in kvmppc_e500_gtlbe_invalidate()
224 kvmppc_recalc_tlb1map_range(vcpu_e500); in kvmppc_e500_gtlbe_invalidate()
231 int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value) in kvmppc_e500_emul_mt_mmucsr0() argument
236 for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++) in kvmppc_e500_emul_mt_mmucsr0()
237 kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel); in kvmppc_e500_emul_mt_mmucsr0()
239 for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++) in kvmppc_e500_emul_mt_mmucsr0()
240 kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel); in kvmppc_e500_emul_mt_mmucsr0()
243 kvmppc_core_flush_tlb(&vcpu_e500->vcpu); in kvmppc_e500_emul_mt_mmucsr0()
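
kvmppc_e500_emul_mt_mmucsr0() clears the guest arrays entry by entry and then flushes the host shadow TLB once via kvmppc_core_flush_tlb(), rather than per entry. The MMUCSR0 flash-invalidate bits that gate each loop are not part of this listing; the sketch below shows only the loop shape:

/*
 * Invalidate every guest entry of one TLB array; 'invalidate' stands
 * in for kvmppc_e500_gtlbe_invalidate(). A single host-side flush
 * follows in the caller.
 */
static void flush_gtlb_array_sketch(void (*invalidate)(int tlbsel, int esel),
                                    int tlbsel, int entries)
{
        int esel;

        for (esel = 0; esel < entries; esel++)
                invalidate(tlbsel, esel);
}
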
250 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvmppc_e500_emul_tlbivax() local
261 for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; in kvmppc_e500_emul_tlbivax()
263 kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); in kvmppc_e500_emul_tlbivax()
266 esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, in kvmppc_e500_emul_tlbivax()
269 kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); in kvmppc_e500_emul_tlbivax()
273 kvmppc_core_flush_tlb(&vcpu_e500->vcpu); in kvmppc_e500_emul_tlbivax()
278 static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, in tlbilx_all() argument
285 for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) { in tlbilx_all()
286 tlbe = get_entry(vcpu_e500, tlbsel, esel); in tlbilx_all()
289 inval_gtlbe_on_host(vcpu_e500, tlbsel, esel); in tlbilx_all()
290 kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); in tlbilx_all()
295 static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid, in tlbilx_one() argument
301 esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1); in tlbilx_one()
303 inval_gtlbe_on_host(vcpu_e500, tlbsel, esel); in tlbilx_one()
304 kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); in tlbilx_one()
312 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvmppc_e500_emul_tlbilx() local
316 tlbilx_all(vcpu_e500, 0, pid, type); in kvmppc_e500_emul_tlbilx()
317 tlbilx_all(vcpu_e500, 1, pid, type); in kvmppc_e500_emul_tlbilx()
319 tlbilx_one(vcpu_e500, pid, ea); in kvmppc_e500_emul_tlbilx()
327 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvmppc_e500_emul_tlbre() local
334 gtlbe = get_entry(vcpu_e500, tlbsel, esel); in kvmppc_e500_emul_tlbre()
336 vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); in kvmppc_e500_emul_tlbre()
346 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvmppc_e500_emul_tlbsx() local
353 esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as); in kvmppc_e500_emul_tlbsx()
355 gtlbe = get_entry(vcpu_e500, tlbsel, esel); in kvmppc_e500_emul_tlbsx()
361 esel &= vcpu_e500->gtlb_params[tlbsel].ways - 1; in kvmppc_e500_emul_tlbsx()
364 | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); in kvmppc_e500_emul_tlbsx()
373 victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0; in kvmppc_e500_emul_tlbsx()
377 | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); in kvmppc_e500_emul_tlbsx()
395 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvmppc_e500_emul_tlbwe() local
404 gtlbe = get_entry(vcpu_e500, tlbsel, esel); in kvmppc_e500_emul_tlbwe()
407 inval_gtlbe_on_host(vcpu_e500, tlbsel, esel); in kvmppc_e500_emul_tlbwe()
409 kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe)) in kvmppc_e500_emul_tlbwe()
429 kvmppc_recalc_tlb1map_range(vcpu_e500); in kvmppc_e500_emul_tlbwe()
459 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvmppc_e500_tlb_search() local
463 esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as); in kvmppc_e500_tlb_search()
529 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvmppc_mmu_xlate() local
533 gtlbe = get_entry(vcpu_e500, tlbsel_of(index), esel_of(index)); in kvmppc_mmu_xlate()
545 static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500) in free_gtlb() argument
549 kvmppc_core_flush_tlb(&vcpu_e500->vcpu); in free_gtlb()
550 kfree(vcpu_e500->g2h_tlb1_map); in free_gtlb()
551 kfree(vcpu_e500->gtlb_priv[0]); in free_gtlb()
552 kfree(vcpu_e500->gtlb_priv[1]); in free_gtlb()
554 if (vcpu_e500->shared_tlb_pages) { in free_gtlb()
555 vfree((void *)(round_down((uintptr_t)vcpu_e500->gtlb_arch, in free_gtlb()
558 for (i = 0; i < vcpu_e500->num_shared_tlb_pages; i++) { in free_gtlb()
559 set_page_dirty_lock(vcpu_e500->shared_tlb_pages[i]); in free_gtlb()
560 put_page(vcpu_e500->shared_tlb_pages[i]); in free_gtlb()
563 vcpu_e500->num_shared_tlb_pages = 0; in free_gtlb()
565 kfree(vcpu_e500->shared_tlb_pages); in free_gtlb()
566 vcpu_e500->shared_tlb_pages = NULL; in free_gtlb()
568 kfree(vcpu_e500->gtlb_arch); in free_gtlb()
571 vcpu_e500->gtlb_arch = NULL; in free_gtlb()
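
free_gtlb() tears down two possible backings for the guest TLB array: plain kernel memory, or user-supplied shared pages that were mapped contiguously into the kernel. A kernel-context sketch (needs linux/mm.h, linux/vmalloc.h, linux/slab.h; shapes only, locking and error paths omitted):

/*
 * Two backing modes, as in free_gtlb(): with shared pages, drop the
 * kernel mapping, mark each page dirty and release it, then free the
 * page-pointer array; otherwise the array is ordinary kmalloc memory.
 */
static void free_backing_sketch(void *gtlb_arch, struct page **pages,
                                int num_pages)
{
        int i;

        if (pages) {
                /* gtlb_arch may start mid-page; vfree the page-aligned base. */
                vfree((void *)round_down((uintptr_t)gtlb_arch, PAGE_SIZE));
                for (i = 0; i < num_pages; i++) {
                        set_page_dirty_lock(pages[i]);
                        put_page(pages[i]);
                }
                kfree(pages);
        } else {
                kfree(gtlb_arch);
        }
}
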
741 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvm_vcpu_ioctl_config_tlb() local
819 free_gtlb(vcpu_e500); in kvm_vcpu_ioctl_config_tlb()
821 vcpu_e500->gtlb_priv[0] = privs[0]; in kvm_vcpu_ioctl_config_tlb()
822 vcpu_e500->gtlb_priv[1] = privs[1]; in kvm_vcpu_ioctl_config_tlb()
823 vcpu_e500->g2h_tlb1_map = g2h_bitmap; in kvm_vcpu_ioctl_config_tlb()
825 vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *) in kvm_vcpu_ioctl_config_tlb()
828 vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0]; in kvm_vcpu_ioctl_config_tlb()
829 vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1]; in kvm_vcpu_ioctl_config_tlb()
831 vcpu_e500->gtlb_offset[0] = 0; in kvm_vcpu_ioctl_config_tlb()
832 vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0]; in kvm_vcpu_ioctl_config_tlb()
837 vcpu_e500->shared_tlb_pages = pages; in kvm_vcpu_ioctl_config_tlb()
838 vcpu_e500->num_shared_tlb_pages = num_pages; in kvm_vcpu_ioctl_config_tlb()
840 vcpu_e500->gtlb_params[0].ways = params.tlb_ways[0]; in kvm_vcpu_ioctl_config_tlb()
841 vcpu_e500->gtlb_params[0].sets = sets; in kvm_vcpu_ioctl_config_tlb()
843 vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1]; in kvm_vcpu_ioctl_config_tlb()
844 vcpu_e500->gtlb_params[1].sets = 1; in kvm_vcpu_ioctl_config_tlb()
846 kvmppc_recalc_tlb1map_range(vcpu_e500); in kvm_vcpu_ioctl_config_tlb()
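
kvm_vcpu_ioctl_config_tlb() installs both guest arrays in one contiguous buffer, TLB0 first, so gtlb_offset[1] is simply TLB0's entry count, and TLB1 is configured fully associative (ways = entries, sets = 1). The resulting indexing rule, sketched:

/*
 * Flat layout of the shared guest-TLB buffer: entry (tlbsel, esel)
 * lives at gtlb_arch[gtlb_offset[tlbsel] + esel], with
 * gtlb_offset[0] == 0 and gtlb_offset[1] == TLB0's entry count.
 */
static inline int flat_index_sketch(const int *gtlb_offset,
                                    int tlbsel, int esel)
{
        return gtlb_offset[tlbsel] + esel;
}
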
865 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvm_vcpu_ioctl_dirty_tlb() local
866 kvmppc_recalc_tlb1map_range(vcpu_e500); in kvm_vcpu_ioctl_dirty_tlb()
904 int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500) in kvmppc_e500_tlb_init() argument
906 struct kvm_vcpu *vcpu = &vcpu_e500->vcpu; in kvmppc_e500_tlb_init()
910 if (e500_mmu_host_init(vcpu_e500)) in kvmppc_e500_tlb_init()
913 vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE; in kvmppc_e500_tlb_init()
914 vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE; in kvmppc_e500_tlb_init()
916 vcpu_e500->gtlb_params[0].ways = KVM_E500_TLB0_WAY_NUM; in kvmppc_e500_tlb_init()
917 vcpu_e500->gtlb_params[0].sets = in kvmppc_e500_tlb_init()
920 vcpu_e500->gtlb_params[1].ways = KVM_E500_TLB1_SIZE; in kvmppc_e500_tlb_init()
921 vcpu_e500->gtlb_params[1].sets = 1; in kvmppc_e500_tlb_init()
923 vcpu_e500->gtlb_arch = kmalloc(entries * entry_size, GFP_KERNEL); in kvmppc_e500_tlb_init()
924 if (!vcpu_e500->gtlb_arch) in kvmppc_e500_tlb_init()
927 vcpu_e500->gtlb_offset[0] = 0; in kvmppc_e500_tlb_init()
928 vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE; in kvmppc_e500_tlb_init()
930 vcpu_e500->gtlb_priv[0] = kzalloc(sizeof(struct tlbe_ref) * in kvmppc_e500_tlb_init()
931 vcpu_e500->gtlb_params[0].entries, in kvmppc_e500_tlb_init()
933 if (!vcpu_e500->gtlb_priv[0]) in kvmppc_e500_tlb_init()
936 vcpu_e500->gtlb_priv[1] = kzalloc(sizeof(struct tlbe_ref) * in kvmppc_e500_tlb_init()
937 vcpu_e500->gtlb_params[1].entries, in kvmppc_e500_tlb_init()
939 if (!vcpu_e500->gtlb_priv[1]) in kvmppc_e500_tlb_init()
942 vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(u64) * in kvmppc_e500_tlb_init()
943 vcpu_e500->gtlb_params[1].entries, in kvmppc_e500_tlb_init()
945 if (!vcpu_e500->g2h_tlb1_map) in kvmppc_e500_tlb_init()
948 vcpu_mmu_init(vcpu, vcpu_e500->gtlb_params); in kvmppc_e500_tlb_init()
950 kvmppc_recalc_tlb1map_range(vcpu_e500); in kvmppc_e500_tlb_init()
954 free_gtlb(vcpu_e500); in kvmppc_e500_tlb_init()
958 void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) in kvmppc_e500_tlb_uninit() argument
960 free_gtlb(vcpu_e500); in kvmppc_e500_tlb_uninit()
961 e500_mmu_host_uninit(vcpu_e500); in kvmppc_e500_tlb_uninit()