tlbsel 113 arch/powerpc/kvm/e500.h #define index_of(tlbsel, esel) (((tlbsel) << 16) | ((esel) & 0xFFFF))
tlbsel 276 arch/powerpc/kvm/e500.h struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry)
tlbsel 278 arch/powerpc/kvm/e500.h int offset = vcpu_e500->gtlb_offset[tlbsel];
tlbsel 64 arch/powerpc/kvm/e500_mmu.c static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel)
tlbsel 69 arch/powerpc/kvm/e500_mmu.c if (tlbsel == 0) {
tlbsel 73 arch/powerpc/kvm/e500_mmu.c esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1;
tlbsel 81 arch/powerpc/kvm/e500_mmu.c gva_t eaddr, int tlbsel, unsigned int pid, int as)
tlbsel 83 arch/powerpc/kvm/e500_mmu.c int size = vcpu_e500->gtlb_params[tlbsel].entries;
tlbsel 87 arch/powerpc/kvm/e500_mmu.c if (tlbsel == 0) {
tlbsel 97 arch/powerpc/kvm/e500_mmu.c offset = vcpu_e500->gtlb_offset[tlbsel];
tlbsel 131 arch/powerpc/kvm/e500_mmu.c int tlbsel;
tlbsel 134 arch/powerpc/kvm/e500_mmu.c tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;
tlbsel 135 arch/powerpc/kvm/e500_mmu.c victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
tlbsel 138 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
tlbsel 139 arch/powerpc/kvm/e500_mmu.c | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
tlbsel 212 arch/powerpc/kvm/e500_mmu.c int tlbsel, int esel)
tlbsel 215 arch/powerpc/kvm/e500_mmu.c get_entry(vcpu_e500, tlbsel, esel);
tlbsel 220 arch/powerpc/kvm/e500_mmu.c if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
tlbsel 249 arch/powerpc/kvm/e500_mmu.c int esel, tlbsel;
tlbsel 254 arch/powerpc/kvm/e500_mmu.c tlbsel = (ea >> 3) & 0x1;
tlbsel 258 arch/powerpc/kvm/e500_mmu.c for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries;
tlbsel 260 arch/powerpc/kvm/e500_mmu.c kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
tlbsel 263 arch/powerpc/kvm/e500_mmu.c esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
tlbsel 266 arch/powerpc/kvm/e500_mmu.c kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
tlbsel 275 arch/powerpc/kvm/e500_mmu.c static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
tlbsel 282 arch/powerpc/kvm/e500_mmu.c for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) {
tlbsel 283 arch/powerpc/kvm/e500_mmu.c tlbe = get_entry(vcpu_e500, tlbsel, esel);
tlbsel 286 arch/powerpc/kvm/e500_mmu.c inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
tlbsel 287 arch/powerpc/kvm/e500_mmu.c kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
tlbsel 295 arch/powerpc/kvm/e500_mmu.c int tlbsel, esel;
tlbsel 297 arch/powerpc/kvm/e500_mmu.c for (tlbsel = 0; tlbsel < 2; tlbsel++) {
tlbsel 298 arch/powerpc/kvm/e500_mmu.c esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1);
tlbsel 300 arch/powerpc/kvm/e500_mmu.c inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
tlbsel 301 arch/powerpc/kvm/e500_mmu.c kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
tlbsel 325 arch/powerpc/kvm/e500_mmu.c int tlbsel, esel;
tlbsel 328 arch/powerpc/kvm/e500_mmu.c tlbsel = get_tlb_tlbsel(vcpu);
tlbsel 329 arch/powerpc/kvm/e500_mmu.c esel = get_tlb_esel(vcpu, tlbsel);
tlbsel 331 arch/powerpc/kvm/e500_mmu.c gtlbe = get_entry(vcpu_e500, tlbsel, esel);
tlbsel 333 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
tlbsel 346 arch/powerpc/kvm/e500_mmu.c int esel, tlbsel;
tlbsel 349 arch/powerpc/kvm/e500_mmu.c for (tlbsel = 0; tlbsel < 2; tlbsel++) {
tlbsel 350 arch/powerpc/kvm/e500_mmu.c esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
tlbsel 352 arch/powerpc/kvm/e500_mmu.c gtlbe = get_entry(vcpu_e500, tlbsel, esel);
tlbsel 358 arch/powerpc/kvm/e500_mmu.c esel &= vcpu_e500->gtlb_params[tlbsel].ways - 1;
tlbsel 360 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
tlbsel 361 arch/powerpc/kvm/e500_mmu.c | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
tlbsel 369 arch/powerpc/kvm/e500_mmu.c tlbsel = vcpu->arch.shared->mas4 >> 28 & 0x1;
tlbsel 370 arch/powerpc/kvm/e500_mmu.c victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
tlbsel 372 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel)
tlbsel 374 arch/powerpc/kvm/e500_mmu.c | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
tlbsel 394 arch/powerpc/kvm/e500_mmu.c int tlbsel, esel;
tlbsel 398 arch/powerpc/kvm/e500_mmu.c tlbsel = get_tlb_tlbsel(vcpu);
tlbsel 399 arch/powerpc/kvm/e500_mmu.c esel = get_tlb_esel(vcpu, tlbsel);
tlbsel 401 arch/powerpc/kvm/e500_mmu.c gtlbe = get_entry(vcpu_e500, tlbsel, esel);
tlbsel 404 arch/powerpc/kvm/e500_mmu.c inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
tlbsel 405 arch/powerpc/kvm/e500_mmu.c if ((tlbsel == 1) &&
tlbsel 419 arch/powerpc/kvm/e500_mmu.c if (tlbsel == 1) {
tlbsel 438 arch/powerpc/kvm/e500_mmu.c if (tlbsel == 0) {
tlbsel 444 arch/powerpc/kvm/e500_mmu.c kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel));
tlbsel 457 arch/powerpc/kvm/e500_mmu.c int esel, tlbsel;
tlbsel 459 arch/powerpc/kvm/e500_mmu.c for (tlbsel = 0; tlbsel < 2; tlbsel++) {
tlbsel 460 arch/powerpc/kvm/e500_mmu.c esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
tlbsel 462 arch/powerpc/kvm/e500_mmu.c return index_of(tlbsel, esel);
tlbsel 125 arch/powerpc/kvm/e500_mmu_host.c int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
tlbsel 129 arch/powerpc/kvm/e500_mmu_host.c if (tlbsel == 0) {
tlbsel 184 arch/powerpc/kvm/e500_mmu_host.c void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
tlbsel 188 arch/powerpc/kvm/e500_mmu_host.c get_entry(vcpu_e500, tlbsel, esel);
tlbsel 189 arch/powerpc/kvm/e500_mmu_host.c struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;
tlbsel 195 arch/powerpc/kvm/e500_mmu_host.c WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
tlbsel 198 arch/powerpc/kvm/e500_mmu_host.c if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
tlbsel 220 arch/powerpc/kvm/e500_mmu_host.c if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) {
tlbsel 283 arch/powerpc/kvm/e500_mmu_host.c int tlbsel;
tlbsel 286 arch/powerpc/kvm/e500_mmu_host.c for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
tlbsel 287 arch/powerpc/kvm/e500_mmu_host.c for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
tlbsel 289 arch/powerpc/kvm/e500_mmu_host.c &vcpu_e500->gtlb_priv[tlbsel][i].ref;
tlbsel 324 arch/powerpc/kvm/e500_mmu_host.c int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
tlbsel 356 arch/powerpc/kvm/e500_mmu_host.c if (tlbsel == 1) {
tlbsel 592 arch/powerpc/kvm/e500_mmu_host.c int tlbsel = tlbsel_of(index);
tlbsel 595 arch/powerpc/kvm/e500_mmu_host.c gtlbe = get_entry(vcpu_e500, tlbsel, esel);
tlbsel 597 arch/powerpc/kvm/e500_mmu_host.c switch (tlbsel) {
tlbsel 599 arch/powerpc/kvm/e500_mmu_host.c priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
tlbsel 9 arch/powerpc/kvm/e500_mmu_host.h void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
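The listing shows that guest TLB entries are addressed by a (tlbsel, esel) pair: tlbsel selects the guest TLB array (0 or 1, see the loops at e500_mmu.c:297 and :349) and esel selects the entry within it. The two values are packed into a single index by index_of() (e500.h:113) when kvmppc_mmu_map() is called (e500_mmu.c:444, :462) and split apart again on the host side via tlbsel_of() (e500_mmu_host.c:592). A minimal sketch of that round trip follows; index_of() is copied from the listing, while tlbsel_of() and esel_of() are assumed extraction macros, since their definitions do not appear above.

    /* Sketch of the (tlbsel, esel) index packing used at the call sites above.
     * index_of() is copied from e500.h:113; tlbsel_of() and esel_of() are
     * assumed companions (only the use of tlbsel_of() at e500_mmu_host.c:592
     * is visible in the listing, not its definition).
     */
    #include <assert.h>

    #define index_of(tlbsel, esel) (((tlbsel) << 16) | ((esel) & 0xFFFF))
    #define tlbsel_of(index)       ((index) >> 16)     /* assumed */
    #define esel_of(index)         ((index) & 0xFFFF)  /* assumed */

    int main(void)
    {
            int tlbsel = 1;   /* guest TLB1; TLB0 would be 0 */
            int esel = 42;    /* entry number within that TLB */
            int index = index_of(tlbsel, esel);

            /* The packed index survives the round trip back to (tlbsel, esel). */
            assert(tlbsel_of(index) == tlbsel);
            assert(esel_of(index) == esel);
            return 0;
    }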