vcpu_e500 120 arch/powerpc/kvm/e500.c static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
vcpu_e500 122 arch/powerpc/kvm/e500.c vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
vcpu_e500 123 arch/powerpc/kvm/e500.c return vcpu_e500->idt;
vcpu_e500 126 arch/powerpc/kvm/e500.c static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
vcpu_e500 128 arch/powerpc/kvm/e500.c kfree(vcpu_e500->idt);
vcpu_e500 129 arch/powerpc/kvm/e500.c vcpu_e500->idt = NULL;
vcpu_e500 136 arch/powerpc/kvm/e500.c static void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
vcpu_e500 139 arch/powerpc/kvm/e500.c vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
vcpu_e500 140 arch/powerpc/kvm/e500.c get_cur_as(&vcpu_e500->vcpu),
vcpu_e500 141 arch/powerpc/kvm/e500.c get_cur_pid(&vcpu_e500->vcpu),
vcpu_e500 142 arch/powerpc/kvm/e500.c get_cur_pr(&vcpu_e500->vcpu), 1);
vcpu_e500 143 arch/powerpc/kvm/e500.c vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
vcpu_e500 144 arch/powerpc/kvm/e500.c get_cur_as(&vcpu_e500->vcpu), 0,
vcpu_e500 145 arch/powerpc/kvm/e500.c get_cur_pr(&vcpu_e500->vcpu), 1);
vcpu_e500 150 arch/powerpc/kvm/e500.c static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
vcpu_e500 152 arch/powerpc/kvm/e500.c memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));
vcpu_e500 155 arch/powerpc/kvm/e500.c kvmppc_e500_recalc_shadow_pid(vcpu_e500);
vcpu_e500 160 arch/powerpc/kvm/e500.c struct kvmppc_vcpu_e500 *vcpu_e500,
vcpu_e500 163 arch/powerpc/kvm/e500.c struct vcpu_id_table *idt = vcpu_e500->idt;
vcpu_e500 173 arch/powerpc/kvm/e500.c kvmppc_e500_recalc_shadow_pid(vcpu_e500);
vcpu_e500 185 arch/powerpc/kvm/e500.c unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
vcpu_e500 189 arch/powerpc/kvm/e500.c struct vcpu_id_table *idt = vcpu_e500->idt;
vcpu_e500 208 arch/powerpc/kvm/e500.c kvmppc_e500_recalc_shadow_pid(vcpu_e500);
vcpu_e500 223 arch/powerpc/kvm/e500.c struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu_e500 226 arch/powerpc/kvm/e500.c vcpu_e500->pid[0] = vcpu->arch.pid = pid;
vcpu_e500 227 arch/powerpc/kvm/e500.c kvmppc_e500_recalc_shadow_pid(vcpu_e500);
vcpu_e500 232 arch/powerpc/kvm/e500.c void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
vcpu_e500 235 arch/powerpc/kvm/e500.c struct vcpu_id_table *idt = vcpu_e500->idt;
vcpu_e500 259 arch/powerpc/kvm/e500.c kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
vcpu_e500 288 arch/powerpc/kvm/e500.c void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
vcpu_e500 290 arch/powerpc/kvm/e500.c kvmppc_e500_id_table_reset_all(vcpu_e500);
vcpu_e500 329 arch/powerpc/kvm/e500.c static void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
vcpu_e500 334 arch/powerpc/kvm/e500.c tlbe = get_entry(vcpu_e500, 1, 0);
vcpu_e500 340 arch/powerpc/kvm/e500.c tlbe = get_entry(vcpu_e500, 1, 1);
vcpu_e500 348 arch/powerpc/kvm/e500.c struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu_e500 350 arch/powerpc/kvm/e500.c kvmppc_e500_tlb_setup(vcpu_e500);
vcpu_e500 354 arch/powerpc/kvm/e500.c vcpu_e500->svr = mfspr(SPRN_SVR);
vcpu_e500 364 arch/powerpc/kvm/e500.c struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu_e500 371 arch/powerpc/kvm/e500.c sregs->u.e.impl.fsl.svr = vcpu_e500->svr;
vcpu_e500 372 arch/powerpc/kvm/e500.c sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
vcpu_e500 373 arch/powerpc/kvm/e500.c sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;
vcpu_e500 389 arch/powerpc/kvm/e500.c struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
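
The e500.c lines above (120-227) show the guest-PID handling on this core: an ID table hangs off vcpu_e500->idt, and kvmppc_e500_recalc_shadow_pid() asks kvmppc_e500_get_sid() for a shadow ID for the current (AS, PID, PR) and again with the PID forced to 0. A minimal user-space sketch of that lookup shape follows; the table layout, allocation policy, and ID handout scheme are illustrative assumptions, not the kernel's actual struct vcpu_id_table.

    #include <stdlib.h>

    /* Hypothetical stand-in for the kernel's vcpu_id_table: one shadow id per
     * (address space, guest pid, privilege) triple. */
    struct id_table {
        unsigned char sid[2][256][2];   /* [as][pid][pr]; 0 means "no shadow id yet" */
        unsigned char next_sid;
    };

    static struct id_table *id_table_alloc(void)
    {
        /* the kernel uses kzalloc(..., GFP_KERNEL); calloc is the analogue here */
        return calloc(1, sizeof(struct id_table));
    }

    static unsigned int get_sid(struct id_table *idt, int as, int pid, int pr)
    {
        if (!idt->sid[as][pid][pr])
            idt->sid[as][pid][pr] = ++idt->next_sid;    /* hand out a fresh id */
        return idt->sid[as][pid][pr];
    }

    static void recalc_shadow_pid(struct id_table *idt, int as, int pid, int pr,
                                  unsigned int *shadow_pid, unsigned int *shadow_pid1)
    {
        *shadow_pid  = get_sid(idt, as, pid, pr);
        *shadow_pid1 = get_sid(idt, as, 0, pr);         /* the PID-forced-to-0 variant */
    }
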
vcpu_e500 393 arch/powerpc/kvm/e500.c vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
vcpu_e500 394 arch/powerpc/kvm/e500.c vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0;
vcpu_e500 395 arch/powerpc/kvm/e500.c vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
vcpu_e500 439 arch/powerpc/kvm/e500.c struct kvmppc_vcpu_e500 *vcpu_e500;
vcpu_e500 446 arch/powerpc/kvm/e500.c vcpu_e500 = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
vcpu_e500 447 arch/powerpc/kvm/e500.c if (!vcpu_e500) {
vcpu_e500 452 arch/powerpc/kvm/e500.c vcpu = &vcpu_e500->vcpu;
vcpu_e500 457 arch/powerpc/kvm/e500.c if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL) {
vcpu_e500 462 arch/powerpc/kvm/e500.c err = kvmppc_e500_tlb_init(vcpu_e500);
vcpu_e500 475 arch/powerpc/kvm/e500.c kvmppc_e500_tlb_uninit(vcpu_e500);
vcpu_e500 477 arch/powerpc/kvm/e500.c kvmppc_e500_id_table_free(vcpu_e500);
vcpu_e500 481 arch/powerpc/kvm/e500.c kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
vcpu_e500 488 arch/powerpc/kvm/e500.c struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu_e500 491 arch/powerpc/kvm/e500.c kvmppc_e500_tlb_uninit(vcpu_e500);
vcpu_e500 492 arch/powerpc/kvm/e500.c kvmppc_e500_id_table_free(vcpu_e500);
vcpu_e500 494 arch/powerpc/kvm/e500.c kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
vcpu_e500 125 arch/powerpc/kvm/e500.h int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500,
vcpu_e500 132 arch/powerpc/kvm/e500.h int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500);
vcpu_e500 133 arch/powerpc/kvm/e500.h void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500);
vcpu_e500 144 arch/powerpc/kvm/e500.h unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
vcpu_e500 276 arch/powerpc/kvm/e500.h struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry)
vcpu_e500 278 arch/powerpc/kvm/e500.h int offset = vcpu_e500->gtlb_offset[tlbsel];
vcpu_e500 279 arch/powerpc/kvm/e500.h return &vcpu_e500->gtlb_arch[offset + entry];
vcpu_e500 282 arch/powerpc/kvm/e500.h void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
vcpu_e500 284 arch/powerpc/kvm/e500.h void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500);
vcpu_e500 315 arch/powerpc/kvm/e500.h struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu_e500 318 arch/powerpc/kvm/e500.h return vcpu_e500->pid[tidseld];
vcpu_e500 108 arch/powerpc/kvm/e500_emulate.c struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu_e500 111 arch/powerpc/kvm/e500_emulate.c vcpu_e500->l1csr0 |= L1CSR0_CUL;
vcpu_e500 207 arch/powerpc/kvm/e500_emulate.c struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu_e500 218 arch/powerpc/kvm/e500_emulate.c vcpu_e500->pid[1] = spr_val;
vcpu_e500 223 arch/powerpc/kvm/e500_emulate.c vcpu_e500->pid[2] = spr_val;
vcpu_e500 250 arch/powerpc/kvm/e500_emulate.c vcpu_e500->l1csr0 = spr_val;
vcpu_e500 251 arch/powerpc/kvm/e500_emulate.c vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC);
vcpu_e500 254 arch/powerpc/kvm/e500_emulate.c vcpu_e500->l1csr1 = spr_val;
vcpu_e500 255 arch/powerpc/kvm/e500_emulate.c vcpu_e500->l1csr1 &= ~(L1CSR1_ICFI | L1CSR1_ICLFR);
vcpu_e500 258 arch/powerpc/kvm/e500_emulate.c vcpu_e500->hid0 = spr_val;
vcpu_e500 261 arch/powerpc/kvm/e500_emulate.c vcpu_e500->hid1 = spr_val;
vcpu_e500 265 arch/powerpc/kvm/e500_emulate.c emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
vcpu_e500 324 arch/powerpc/kvm/e500_emulate.c struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu_e500 330 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu_e500->pid[0];
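
The e500.h fragments at lines 276-279 show that both guest TLBs live in one flat gtlb_arch array, with gtlb_offset[tlbsel] recording where each TLB begins. A simplified, self-contained model of that indexing (the entry type is a placeholder, not the kernel's struct kvm_book3e_206_tlb_entry):

    /* Simplified model of the layout implied by get_entry() in e500.h. */
    struct tlb_entry {
        unsigned long mas1, mas2, mas7_3;
    };

    struct guest_tlb {
        struct tlb_entry *arch;  /* one allocation: TLB0 entries followed by TLB1 */
        int offset[2];           /* offset[0] = 0, offset[1] = number of TLB0 entries */
    };

    static struct tlb_entry *get_entry(struct guest_tlb *g, int tlbsel, int entry)
    {
        return &g->arch[g->offset[tlbsel] + entry];
    }
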
vcpu_e500 333 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu_e500->pid[1];
vcpu_e500 336 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu_e500->pid[2];
vcpu_e500 380 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu_e500->l1csr0;
vcpu_e500 383 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu_e500->l1csr1;
vcpu_e500 386 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu_e500->hid0;
vcpu_e500 389 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu_e500->hid1;
vcpu_e500 392 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu_e500->svr;
vcpu_e500 37 arch/powerpc/kvm/e500_mmu.c struct kvmppc_vcpu_e500 *vcpu_e500)
vcpu_e500 41 arch/powerpc/kvm/e500_mmu.c victim = vcpu_e500->gtlb_nv[0]++;
vcpu_e500 42 arch/powerpc/kvm/e500_mmu.c if (unlikely(vcpu_e500->gtlb_nv[0] >= vcpu_e500->gtlb_params[0].ways))
vcpu_e500 43 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_nv[0] = 0;
vcpu_e500 58 arch/powerpc/kvm/e500_mmu.c static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr)
vcpu_e500 60 arch/powerpc/kvm/e500_mmu.c return tlb0_set_base(addr, vcpu_e500->gtlb_params[0].sets,
vcpu_e500 61 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_params[0].ways);
vcpu_e500 66 arch/powerpc/kvm/e500_mmu.c struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu_e500 70 arch/powerpc/kvm/e500_mmu.c esel &= vcpu_e500->gtlb_params[0].ways - 1;
vcpu_e500 71 arch/powerpc/kvm/e500_mmu.c esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2);
vcpu_e500 73 arch/powerpc/kvm/e500_mmu.c esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1;
vcpu_e500 80 arch/powerpc/kvm/e500_mmu.c static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
vcpu_e500 83 arch/powerpc/kvm/e500_mmu.c int size = vcpu_e500->gtlb_params[tlbsel].entries;
vcpu_e500 88 arch/powerpc/kvm/e500_mmu.c set_base = gtlb0_set_base(vcpu_e500, eaddr);
vcpu_e500 89 arch/powerpc/kvm/e500_mmu.c size = vcpu_e500->gtlb_params[0].ways;
vcpu_e500 91 arch/powerpc/kvm/e500_mmu.c if (eaddr < vcpu_e500->tlb1_min_eaddr ||
vcpu_e500 92 arch/powerpc/kvm/e500_mmu.c eaddr > vcpu_e500->tlb1_max_eaddr)
vcpu_e500 97 arch/powerpc/kvm/e500_mmu.c offset = vcpu_e500->gtlb_offset[tlbsel];
vcpu_e500 101 arch/powerpc/kvm/e500_mmu.c &vcpu_e500->gtlb_arch[offset + set_base + i];
vcpu_e500 129 arch/powerpc/kvm/e500_mmu.c struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu_e500 135 arch/powerpc/kvm/e500_mmu.c victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
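
Lines 37-43 of e500_mmu.c show gtlb0_get_next_victim(): gtlb_nv[0] is a per-vcpu next-victim way counter for TLB0 that wraps at gtlb_params[0].ways. A standalone model of that round-robin choice:

    /* Round-robin victim selection, as in gtlb0_get_next_victim(). */
    struct tlb0_replacement {
        unsigned int next_victim;   /* corresponds to vcpu_e500->gtlb_nv[0] */
        unsigned int ways;          /* corresponds to gtlb_params[0].ways */
    };

    static unsigned int gtlb0_next_victim(struct tlb0_replacement *t)
    {
        unsigned int victim = t->next_victim++;

        if (t->next_victim >= t->ways)
            t->next_victim = 0;     /* wrap back to way 0 */
        return victim;
    }
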
vcpu_e500 139 arch/powerpc/kvm/e500_mmu.c | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
vcpu_e500 151 arch/powerpc/kvm/e500_mmu.c static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500)
vcpu_e500 153 arch/powerpc/kvm/e500_mmu.c int size = vcpu_e500->gtlb_params[1].entries;
vcpu_e500 158 arch/powerpc/kvm/e500_mmu.c vcpu_e500->tlb1_min_eaddr = ~0UL;
vcpu_e500 159 arch/powerpc/kvm/e500_mmu.c vcpu_e500->tlb1_max_eaddr = 0;
vcpu_e500 160 arch/powerpc/kvm/e500_mmu.c offset = vcpu_e500->gtlb_offset[1];
vcpu_e500 164 arch/powerpc/kvm/e500_mmu.c &vcpu_e500->gtlb_arch[offset + i];
vcpu_e500 170 arch/powerpc/kvm/e500_mmu.c vcpu_e500->tlb1_min_eaddr =
vcpu_e500 171 arch/powerpc/kvm/e500_mmu.c min(vcpu_e500->tlb1_min_eaddr, eaddr);
vcpu_e500 174 arch/powerpc/kvm/e500_mmu.c vcpu_e500->tlb1_max_eaddr =
vcpu_e500 175 arch/powerpc/kvm/e500_mmu.c max(vcpu_e500->tlb1_max_eaddr, eaddr);
vcpu_e500 179 arch/powerpc/kvm/e500_mmu.c static int kvmppc_need_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500,
vcpu_e500 188 arch/powerpc/kvm/e500_mmu.c return vcpu_e500->tlb1_min_eaddr == start ||
vcpu_e500 189 arch/powerpc/kvm/e500_mmu.c vcpu_e500->tlb1_max_eaddr == end;
vcpu_e500 197 arch/powerpc/kvm/e500_mmu.c struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu_e500 206 arch/powerpc/kvm/e500_mmu.c vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start);
vcpu_e500 207 arch/powerpc/kvm/e500_mmu.c vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, end);
vcpu_e500 211 arch/powerpc/kvm/e500_mmu.c struct kvmppc_vcpu_e500 *vcpu_e500,
vcpu_e500 215 arch/powerpc/kvm/e500_mmu.c get_entry(vcpu_e500, tlbsel, esel);
vcpu_e500 220 arch/powerpc/kvm/e500_mmu.c if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
vcpu_e500 221 arch/powerpc/kvm/e500_mmu.c kvmppc_recalc_tlb1map_range(vcpu_e500);
vcpu_e500 228 arch/powerpc/kvm/e500_mmu.c int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
vcpu_e500 233 arch/powerpc/kvm/e500_mmu.c for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++)
vcpu_e500 234 arch/powerpc/kvm/e500_mmu.c kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
vcpu_e500 236 arch/powerpc/kvm/e500_mmu.c for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++)
vcpu_e500 237 arch/powerpc/kvm/e500_mmu.c kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);
vcpu_e500 240 arch/powerpc/kvm/e500_mmu.c kvmppc_core_flush_tlb(&vcpu_e500->vcpu);
vcpu_e500 247 arch/powerpc/kvm/e500_mmu.c struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu_e500 258 arch/powerpc/kvm/e500_mmu.c for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries;
vcpu_e500 260 arch/powerpc/kvm/e500_mmu.c kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
vcpu_e500 263 arch/powerpc/kvm/e500_mmu.c esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
vcpu_e500 266 arch/powerpc/kvm/e500_mmu.c kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
vcpu_e500 270 arch/powerpc/kvm/e500_mmu.c kvmppc_core_flush_tlb(&vcpu_e500->vcpu);
vcpu_e500 275 arch/powerpc/kvm/e500_mmu.c static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
vcpu_e500 282 arch/powerpc/kvm/e500_mmu.c for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) {
vcpu_e500 283 arch/powerpc/kvm/e500_mmu.c tlbe = get_entry(vcpu_e500, tlbsel, esel);
vcpu_e500 286 arch/powerpc/kvm/e500_mmu.c inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
vcpu_e500 287 arch/powerpc/kvm/e500_mmu.c kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
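
Lines 151-175 show kvmppc_recalc_tlb1map_range(): every valid TLB1 entry is scanned and the lowest start and highest end effective address are kept, so kvmppc_e500_tlb_index() can reject TLB1 lookups outside [tlb1_min_eaddr, tlb1_max_eaddr] without a scan (lines 91-92). A sketch of that recalculation under assumed entry fields (the kernel derives the range from the MAS values rather than explicit start/end members):

    struct tlb1_entry_model {
        int valid;
        unsigned long start, end;   /* effective address range the entry maps */
    };

    static void recalc_tlb1_range(const struct tlb1_entry_model *e, int n,
                                  unsigned long *min_ea, unsigned long *max_ea)
    {
        *min_ea = ~0UL;             /* same reset values as lines 158-159 */
        *max_ea = 0;

        for (int i = 0; i < n; i++) {
            if (!e[i].valid)
                continue;
            if (e[i].start < *min_ea)
                *min_ea = e[i].start;
            if (e[i].end > *max_ea)
                *max_ea = e[i].end;
        }
    }
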
vcpu_e500 292 arch/powerpc/kvm/e500_mmu.c static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid,
vcpu_e500 298 arch/powerpc/kvm/e500_mmu.c esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1);
vcpu_e500 300 arch/powerpc/kvm/e500_mmu.c inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
vcpu_e500 301 arch/powerpc/kvm/e500_mmu.c kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
vcpu_e500 309 arch/powerpc/kvm/e500_mmu.c struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu_e500 313 arch/powerpc/kvm/e500_mmu.c tlbilx_all(vcpu_e500, 0, pid, type);
vcpu_e500 314 arch/powerpc/kvm/e500_mmu.c tlbilx_all(vcpu_e500, 1, pid, type);
vcpu_e500 316 arch/powerpc/kvm/e500_mmu.c tlbilx_one(vcpu_e500, pid, ea);
vcpu_e500 324 arch/powerpc/kvm/e500_mmu.c struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu_e500 331 arch/powerpc/kvm/e500_mmu.c gtlbe = get_entry(vcpu_e500, tlbsel, esel);
vcpu_e500 333 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
vcpu_e500 343 arch/powerpc/kvm/e500_mmu.c struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu_e500 350 arch/powerpc/kvm/e500_mmu.c esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
vcpu_e500 352 arch/powerpc/kvm/e500_mmu.c gtlbe = get_entry(vcpu_e500, tlbsel, esel);
vcpu_e500 358 arch/powerpc/kvm/e500_mmu.c esel &= vcpu_e500->gtlb_params[tlbsel].ways - 1;
vcpu_e500 361 arch/powerpc/kvm/e500_mmu.c | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
vcpu_e500 370 arch/powerpc/kvm/e500_mmu.c victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
vcpu_e500 374 arch/powerpc/kvm/e500_mmu.c | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
vcpu_e500 392 arch/powerpc/kvm/e500_mmu.c struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu_e500 401 arch/powerpc/kvm/e500_mmu.c gtlbe = get_entry(vcpu_e500, tlbsel, esel);
vcpu_e500 404 arch/powerpc/kvm/e500_mmu.c inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
vcpu_e500 406 arch/powerpc/kvm/e500_mmu.c kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
vcpu_e500 426 arch/powerpc/kvm/e500_mmu.c kvmppc_recalc_tlb1map_range(vcpu_e500);
vcpu_e500 456 arch/powerpc/kvm/e500_mmu.c struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu_e500 460 arch/powerpc/kvm/e500_mmu.c esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
vcpu_e500 526 arch/powerpc/kvm/e500_mmu.c struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu_e500 530 arch/powerpc/kvm/e500_mmu.c gtlbe = get_entry(vcpu_e500, tlbsel_of(index), esel_of(index));
vcpu_e500 542 arch/powerpc/kvm/e500_mmu.c static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
vcpu_e500 546 arch/powerpc/kvm/e500_mmu.c kvmppc_core_flush_tlb(&vcpu_e500->vcpu);
vcpu_e500 547 arch/powerpc/kvm/e500_mmu.c kfree(vcpu_e500->g2h_tlb1_map);
vcpu_e500 548 arch/powerpc/kvm/e500_mmu.c kfree(vcpu_e500->gtlb_priv[0]);
vcpu_e500 549 arch/powerpc/kvm/e500_mmu.c kfree(vcpu_e500->gtlb_priv[1]);
vcpu_e500 551 arch/powerpc/kvm/e500_mmu.c if (vcpu_e500->shared_tlb_pages) {
vcpu_e500 552 arch/powerpc/kvm/e500_mmu.c vfree((void *)(round_down((uintptr_t)vcpu_e500->gtlb_arch,
vcpu_e500 555 arch/powerpc/kvm/e500_mmu.c for (i = 0; i < vcpu_e500->num_shared_tlb_pages; i++) {
vcpu_e500 556 arch/powerpc/kvm/e500_mmu.c set_page_dirty_lock(vcpu_e500->shared_tlb_pages[i]);
vcpu_e500 557 arch/powerpc/kvm/e500_mmu.c put_page(vcpu_e500->shared_tlb_pages[i]);
vcpu_e500 560 arch/powerpc/kvm/e500_mmu.c vcpu_e500->num_shared_tlb_pages = 0;
vcpu_e500 562 arch/powerpc/kvm/e500_mmu.c kfree(vcpu_e500->shared_tlb_pages);
vcpu_e500 563 arch/powerpc/kvm/e500_mmu.c vcpu_e500->shared_tlb_pages = NULL;
vcpu_e500 565 arch/powerpc/kvm/e500_mmu.c kfree(vcpu_e500->gtlb_arch);
vcpu_e500 568 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_arch = NULL;
vcpu_e500 738 arch/powerpc/kvm/e500_mmu.c struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu_e500 819 arch/powerpc/kvm/e500_mmu.c free_gtlb(vcpu_e500);
vcpu_e500 821 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_priv[0] = privs[0];
vcpu_e500 822 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_priv[1] = privs[1];
vcpu_e500 823 arch/powerpc/kvm/e500_mmu.c vcpu_e500->g2h_tlb1_map = g2h_bitmap;
vcpu_e500 825 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *)
vcpu_e500 828 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0];
vcpu_e500 829 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1];
vcpu_e500 831 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_offset[0] = 0;
vcpu_e500 832 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0];
vcpu_e500 837 arch/powerpc/kvm/e500_mmu.c vcpu_e500->shared_tlb_pages = pages;
vcpu_e500 838 arch/powerpc/kvm/e500_mmu.c vcpu_e500->num_shared_tlb_pages = num_pages;
vcpu_e500 840 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_params[0].ways = params.tlb_ways[0];
vcpu_e500 841 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_params[0].sets = sets;
vcpu_e500 843 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1];
vcpu_e500 844 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_params[1].sets = 1;
vcpu_e500 846 arch/powerpc/kvm/e500_mmu.c kvmppc_recalc_tlb1map_range(vcpu_e500);
vcpu_e500 863 arch/powerpc/kvm/e500_mmu.c struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu_e500 864 arch/powerpc/kvm/e500_mmu.c kvmppc_recalc_tlb1map_range(vcpu_e500);
vcpu_e500 902 arch/powerpc/kvm/e500_mmu.c int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
vcpu_e500 904 arch/powerpc/kvm/e500_mmu.c struct kvm_vcpu *vcpu = &vcpu_e500->vcpu;
vcpu_e500 906 arch/powerpc/kvm/e500_mmu.c if (e500_mmu_host_init(vcpu_e500))
vcpu_e500 909 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE;
vcpu_e500 910 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE;
vcpu_e500 912 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_params[0].ways = KVM_E500_TLB0_WAY_NUM;
vcpu_e500 913 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_params[0].sets =
vcpu_e500 916 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_params[1].ways = KVM_E500_TLB1_SIZE;
vcpu_e500 917 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_params[1].sets = 1;
vcpu_e500 919 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_arch = kmalloc_array(KVM_E500_TLB0_SIZE +
vcpu_e500 921 arch/powerpc/kvm/e500_mmu.c sizeof(*vcpu_e500->gtlb_arch),
vcpu_e500 923 arch/powerpc/kvm/e500_mmu.c if (!vcpu_e500->gtlb_arch)
vcpu_e500 926 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_offset[0] = 0;
vcpu_e500 927 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE;
vcpu_e500 929 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_priv[0] = kcalloc(vcpu_e500->gtlb_params[0].entries,
vcpu_e500 932 arch/powerpc/kvm/e500_mmu.c if (!vcpu_e500->gtlb_priv[0])
vcpu_e500 935 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_priv[1] = kcalloc(vcpu_e500->gtlb_params[1].entries,
vcpu_e500 938 arch/powerpc/kvm/e500_mmu.c if (!vcpu_e500->gtlb_priv[1])
vcpu_e500 941 arch/powerpc/kvm/e500_mmu.c vcpu_e500->g2h_tlb1_map = kcalloc(vcpu_e500->gtlb_params[1].entries,
vcpu_e500 942 arch/powerpc/kvm/e500_mmu.c sizeof(*vcpu_e500->g2h_tlb1_map),
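
free_gtlb() (e500_mmu.c lines 542-568) has to release gtlb_arch in one of two ways: either it was kmalloc'ed by kvmppc_e500_tlb_init() (lines 919-923), or it maps user-supplied pages installed by the KVM_CAP_SW_TLB config path (shared_tlb_pages / num_shared_tlb_pages, lines 825-838). A user-space model of that split; the two stubs below stand in for the kernel's vfree() and set_page_dirty_lock()/put_page() steps and are not kernel APIs:

    #include <stdlib.h>

    struct gtlb_backing {
        void *arch;            /* the flat TLB-entry array */
        void **shared_pages;   /* non-NULL when arch maps user-supplied pages */
        int num_shared_pages;
    };

    /* Stand-ins for the kernel's unmap and page-release steps. */
    static void unmap_shared_array(void *arch) { (void)arch; }
    static void release_user_page(void *page)  { (void)page; }

    static void free_backing(struct gtlb_backing *b)
    {
        if (b->shared_pages) {
            unmap_shared_array(b->arch);
            for (int i = 0; i < b->num_shared_pages; i++)
                release_user_page(b->shared_pages[i]);
            b->num_shared_pages = 0;
            free(b->shared_pages);
            b->shared_pages = NULL;
        } else {
            free(b->arch);     /* the kmalloc'ed case */
        }
        b->arch = NULL;
    }
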
vcpu_e500 944 arch/powerpc/kvm/e500_mmu.c if (!vcpu_e500->g2h_tlb1_map)
vcpu_e500 947 arch/powerpc/kvm/e500_mmu.c vcpu_mmu_init(vcpu, vcpu_e500->gtlb_params);
vcpu_e500 949 arch/powerpc/kvm/e500_mmu.c kvmppc_recalc_tlb1map_range(vcpu_e500);
vcpu_e500 952 arch/powerpc/kvm/e500_mmu.c free_gtlb(vcpu_e500);
vcpu_e500 956 arch/powerpc/kvm/e500_mmu.c void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
vcpu_e500 958 arch/powerpc/kvm/e500_mmu.c free_gtlb(vcpu_e500);
vcpu_e500 959 arch/powerpc/kvm/e500_mmu.c e500_mmu_host_uninit(vcpu_e500);
vcpu_e500 124 arch/powerpc/kvm/e500_mmu_host.c static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
vcpu_e500 131 arch/powerpc/kvm/e500_mmu_host.c __write_host_tlbe(stlbe, mas0, vcpu_e500->vcpu.kvm->arch.lpid);
vcpu_e500 136 arch/powerpc/kvm/e500_mmu_host.c vcpu_e500->vcpu.kvm->arch.lpid);
vcpu_e500 141 arch/powerpc/kvm/e500_mmu_host.c static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
vcpu_e500 149 arch/powerpc/kvm/e500_mmu_host.c stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);
vcpu_e500 152 arch/powerpc/kvm/e500_mmu_host.c write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
vcpu_e500 160 arch/powerpc/kvm/e500_mmu_host.c struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu_e500 170 arch/powerpc/kvm/e500_mmu_host.c stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);
vcpu_e500 184 arch/powerpc/kvm/e500_mmu_host.c void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
vcpu_e500 188 arch/powerpc/kvm/e500_mmu_host.c get_entry(vcpu_e500, tlbsel, esel);
vcpu_e500 189 arch/powerpc/kvm/e500_mmu_host.c struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;
vcpu_e500 195 arch/powerpc/kvm/e500_mmu_host.c WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
vcpu_e500 199 arch/powerpc/kvm/e500_mmu_host.c u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
vcpu_e500 211 arch/powerpc/kvm/e500_mmu_host.c vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
vcpu_e500 215 arch/powerpc/kvm/e500_mmu_host.c vcpu_e500->g2h_tlb1_map[esel] = 0;
vcpu_e500 225 arch/powerpc/kvm/e500_mmu_host.c kvmppc_e500_tlbil_all(vcpu_e500);
vcpu_e500 234 arch/powerpc/kvm/e500_mmu_host.c kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);
vcpu_e500 271 arch/powerpc/kvm/e500_mmu_host.c static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
vcpu_e500 273 arch/powerpc/kvm/e500_mmu_host.c if (vcpu_e500->g2h_tlb1_map)
vcpu_e500 274 arch/powerpc/kvm/e500_mmu_host.c memset(vcpu_e500->g2h_tlb1_map, 0,
vcpu_e500 275 arch/powerpc/kvm/e500_mmu_host.c sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
vcpu_e500 276 arch/powerpc/kvm/e500_mmu_host.c if (vcpu_e500->h2g_tlb1_rmap)
vcpu_e500 277 arch/powerpc/kvm/e500_mmu_host.c memset(vcpu_e500->h2g_tlb1_rmap, 0,
vcpu_e500 281 arch/powerpc/kvm/e500_mmu_host.c static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
vcpu_e500 287 arch/powerpc/kvm/e500_mmu_host.c for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
vcpu_e500 289 arch/powerpc/kvm/e500_mmu_host.c &vcpu_e500->gtlb_priv[tlbsel][i].ref;
vcpu_e500 297 arch/powerpc/kvm/e500_mmu_host.c struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu_e500 298 arch/powerpc/kvm/e500_mmu_host.c kvmppc_e500_tlbil_all(vcpu_e500);
vcpu_e500 299 arch/powerpc/kvm/e500_mmu_host.c clear_tlb_privs(vcpu_e500);
vcpu_e500 300 arch/powerpc/kvm/e500_mmu_host.c clear_tlb1_bitmap(vcpu_e500);
vcpu_e500 322 arch/powerpc/kvm/e500_mmu_host.c static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
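
Lines 271-300 of e500_mmu_host.c outline the flush path: kvmppc_core_flush_tlb() invalidates every host shadow entry, clears the per-entry refs (clear_tlb_privs()), and zeroes the TLB1 guest-to-host bitmap and host-to-guest rmap (clear_tlb1_bitmap()). A sketch of that reset; the rmap element size and array lengths are assumptions, since the second memset's size argument is not visible in the listing:

    #include <stdint.h>
    #include <string.h>

    struct shadow_tlb1_maps {
        uint64_t *g2h_tlb1_map;       /* per guest TLB1 entry: bitmap of host TLB1 slots */
        unsigned int *h2g_tlb1_rmap;  /* per host TLB1 slot: owning guest entry + 1 */
        int gtlb1_entries;
        int host_tlb1_entries;
    };

    /* Stand-in for kvmppc_e500_tlbil_all(): drop all host shadow entries. */
    static void invalidate_all_host_entries(void) { }

    static void flush_shadow_tlb(struct shadow_tlb1_maps *s)
    {
        invalidate_all_host_entries();
        if (s->g2h_tlb1_map)
            memset(s->g2h_tlb1_map, 0, sizeof(uint64_t) * s->gtlb1_entries);
        if (s->h2g_tlb1_rmap)
            memset(s->h2g_tlb1_rmap, 0, sizeof(unsigned int) * s->host_tlb1_entries);
    }
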
vcpu_e500 334 arch/powerpc/kvm/e500_mmu_host.c struct kvm *kvm = vcpu_e500->vcpu.kvm;
vcpu_e500 353 arch/powerpc/kvm/e500_mmu_host.c slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
vcpu_e500 469 arch/powerpc/kvm/e500_mmu_host.c pgdir = vcpu_e500->vcpu.arch.pgdir;
vcpu_e500 495 arch/powerpc/kvm/e500_mmu_host.c kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
vcpu_e500 511 arch/powerpc/kvm/e500_mmu_host.c static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel,
vcpu_e500 520 arch/powerpc/kvm/e500_mmu_host.c gtlbe = get_entry(vcpu_e500, 0, esel);
vcpu_e500 521 arch/powerpc/kvm/e500_mmu_host.c ref = &vcpu_e500->gtlb_priv[0][esel].ref;
vcpu_e500 523 arch/powerpc/kvm/e500_mmu_host.c r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
vcpu_e500 529 arch/powerpc/kvm/e500_mmu_host.c write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);
vcpu_e500 534 arch/powerpc/kvm/e500_mmu_host.c static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
vcpu_e500 538 arch/powerpc/kvm/e500_mmu_host.c unsigned int sesel = vcpu_e500->host_tlb1_nv++;
vcpu_e500 540 arch/powerpc/kvm/e500_mmu_host.c if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
vcpu_e500 541 arch/powerpc/kvm/e500_mmu_host.c vcpu_e500->host_tlb1_nv = 0;
vcpu_e500 543 arch/powerpc/kvm/e500_mmu_host.c if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
vcpu_e500 544 arch/powerpc/kvm/e500_mmu_host.c unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
vcpu_e500 545 arch/powerpc/kvm/e500_mmu_host.c vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
vcpu_e500 548 arch/powerpc/kvm/e500_mmu_host.c vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
vcpu_e500 549 arch/powerpc/kvm/e500_mmu_host.c vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
vcpu_e500 550 arch/powerpc/kvm/e500_mmu_host.c vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
vcpu_e500 559 arch/powerpc/kvm/e500_mmu_host.c static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
vcpu_e500 563 arch/powerpc/kvm/e500_mmu_host.c struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
vcpu_e500 567 arch/powerpc/kvm/e500_mmu_host.c r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
vcpu_e500 574 arch/powerpc/kvm/e500_mmu_host.c vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0;
vcpu_e500 575 arch/powerpc/kvm/e500_mmu_host.c write_stlbe(vcpu_e500, gtlbe, stlbe, 0, 0);
vcpu_e500 580 arch/powerpc/kvm/e500_mmu_host.c sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
vcpu_e500 581 arch/powerpc/kvm/e500_mmu_host.c write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);
vcpu_e500 589 arch/powerpc/kvm/e500_mmu_host.c struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu_e500 595 arch/powerpc/kvm/e500_mmu_host.c gtlbe = get_entry(vcpu_e500, tlbsel, esel);
vcpu_e500 599 arch/powerpc/kvm/e500_mmu_host.c priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
vcpu_e500 603 arch/powerpc/kvm/e500_mmu_host.c kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
vcpu_e500 607 arch/powerpc/kvm/e500_mmu_host.c write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0);
vcpu_e500 613 arch/powerpc/kvm/e500_mmu_host.c kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe,
vcpu_e500 766 arch/powerpc/kvm/e500_mmu_host.c int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
vcpu_e500 799 arch/powerpc/kvm/e500_mmu_host.c vcpu_e500->h2g_tlb1_rmap = kcalloc(host_tlb_params[1].entries,
vcpu_e500 800 arch/powerpc/kvm/e500_mmu_host.c sizeof(*vcpu_e500->h2g_tlb1_rmap),
vcpu_e500 802 arch/powerpc/kvm/e500_mmu_host.c if (!vcpu_e500->h2g_tlb1_rmap)
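
Lines 534-550 show the bookkeeping in kvmppc_e500_tlb1_map_tlb1(): a host TLB1 slot is picked round-robin (host_tlb1_nv), the guest entry that previously owned the slot is unlinked from g2h_tlb1_map, and the new link is recorded both in the per-guest-entry bitmap and in the host-to-guest rmap, which stores "guest index + 1" so zero can mean free. A standalone model of that double bookkeeping (the array sizes are arbitrary here; one 64-bit bitmap per guest entry limits max_slots to 64):

    #include <stdint.h>

    struct tlb1_maps {
        uint64_t g2h[64];        /* per guest TLB1 entry: bitmap of host slots backing it */
        unsigned int h2g[64];    /* per host slot: owning guest entry + 1, 0 = free */
        unsigned int next_slot;  /* corresponds to vcpu_e500->host_tlb1_nv */
        unsigned int max_slots;  /* must not exceed 64 in this model */
    };

    static unsigned int map_guest_to_host_slot(struct tlb1_maps *m, unsigned int esel)
    {
        unsigned int sesel = m->next_slot++;

        if (m->next_slot >= m->max_slots)
            m->next_slot = 0;

        if (m->h2g[sesel]) {                   /* slot already in use: unlink old owner */
            unsigned int old = m->h2g[sesel] - 1;
            m->g2h[old] &= ~(1ULL << sesel);
        }

        m->g2h[esel] |= (uint64_t)1 << sesel;  /* record the new guest -> host link */
        m->h2g[sesel] = esel + 1;              /* and the reverse link */
        return sesel;
    }
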
vcpu_e500 808 arch/powerpc/kvm/e500_mmu_host.c void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
vcpu_e500 810 arch/powerpc/kvm/e500_mmu_host.c kfree(vcpu_e500->h2g_tlb1_rmap);
vcpu_e500 9 arch/powerpc/kvm/e500_mmu_host.h void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
vcpu_e500 12 arch/powerpc/kvm/e500_mmu_host.h int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500);
vcpu_e500 13 arch/powerpc/kvm/e500_mmu_host.h void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500);
vcpu_e500 55 arch/powerpc/kvm/e500mc.c void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
vcpu_e500 73 arch/powerpc/kvm/e500mc.c mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu));
vcpu_e500 89 arch/powerpc/kvm/e500mc.c void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
vcpu_e500 94 arch/powerpc/kvm/e500mc.c mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu));
vcpu_e500 114 arch/powerpc/kvm/e500mc.c struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu_e500 144 arch/powerpc/kvm/e500mc.c kvmppc_e500_tlbil_all(vcpu_e500);
vcpu_e500 197 arch/powerpc/kvm/e500mc.c struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu_e500 207 arch/powerpc/kvm/e500mc.c vcpu_e500->svr = mfspr(SPRN_SVR);
vcpu_e500 217 arch/powerpc/kvm/e500mc.c struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu_e500 224 arch/powerpc/kvm/e500mc.c sregs->u.e.impl.fsl.svr = vcpu_e500->svr;
vcpu_e500 225 arch/powerpc/kvm/e500mc.c sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
vcpu_e500 226 arch/powerpc/kvm/e500mc.c sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;
vcpu_e500 241 arch/powerpc/kvm/e500mc.c struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu_e500 245 arch/powerpc/kvm/e500mc.c vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
vcpu_e500 246 arch/powerpc/kvm/e500mc.c vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0;
vcpu_e500 247 arch/powerpc/kvm/e500mc.c vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
vcpu_e500 307 arch/powerpc/kvm/e500mc.c struct kvmppc_vcpu_e500 *vcpu_e500;
vcpu_e500 311 arch/powerpc/kvm/e500mc.c vcpu_e500 = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
vcpu_e500 312 arch/powerpc/kvm/e500mc.c if (!vcpu_e500) {
vcpu_e500 316 arch/powerpc/kvm/e500mc.c vcpu = &vcpu_e500->vcpu;
vcpu_e500 325 arch/powerpc/kvm/e500mc.c err = kvmppc_e500_tlb_init(vcpu_e500);
vcpu_e500 338 arch/powerpc/kvm/e500mc.c kvmppc_e500_tlb_uninit(vcpu_e500);
vcpu_e500 343 arch/powerpc/kvm/e500mc.c kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
vcpu_e500 350 arch/powerpc/kvm/e500mc.c struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu_e500 353 arch/powerpc/kvm/e500mc.c kvmppc_e500_tlb_uninit(vcpu_e500);
vcpu_e500 355 arch/powerpc/kvm/e500mc.c kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
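
Both vcpu-create paths listed above (e500.c lines 446-481 and e500mc.c lines 311-343) follow the same shape: zero-allocate the kvmppc_vcpu_e500 container, initialise the TLB state, and unwind in reverse order on failure or teardown. A simplified user-space sketch of that ordering; the init/uninit stubs stand in for kvmppc_e500_tlb_init() and kvmppc_e500_tlb_uninit():

    #include <stdlib.h>

    struct vcpu_e500_model { int dummy; };

    static int tlb_init(struct vcpu_e500_model *v)    { (void)v; return 0; }
    static void tlb_uninit(struct vcpu_e500_model *v) { (void)v; }

    static struct vcpu_e500_model *vcpu_create(void)
    {
        struct vcpu_e500_model *v = calloc(1, sizeof(*v)); /* kernel: kmem_cache_zalloc() */
        if (!v)
            return NULL;
        if (tlb_init(v)) {      /* on failure, free what was allocated so far */
            free(v);            /* kernel: kmem_cache_free() */
            return NULL;
        }
        return v;
    }

    static void vcpu_free(struct vcpu_e500_model *v)
    {
        tlb_uninit(v);          /* mirror of init, in reverse order */
        free(v);
    }
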