Lines Matching refs:arch

54 if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu) in global_invalidates()
62 cpumask_setall(&kvm->arch.need_tlb_flush); in global_invalidates()
64 &kvm->arch.need_tlb_flush); in global_invalidates()
82 head = &kvm->arch.revmap[i]; in kvmppc_add_revmap_chain()
85 tail = &kvm->arch.revmap[head->back]; in kvmppc_add_revmap_chain()
128 rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]); in revmap_for_hpte()
150 next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]); in remove_revmap_chain()
151 prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]); in remove_revmap_chain()
215 rmap = &memslot->arch.rmap[slot_fn]; in kvmppc_do_h_enter()
284 if (pte_index >= kvm->arch.hpt_npte) in kvmppc_do_h_enter()
288 hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4)); in kvmppc_do_h_enter()
319 hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4)); in kvmppc_do_h_enter()
336 rev = &kvm->arch.revmap[pte_index]; in kvmppc_do_h_enter()
380 vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]); in kvmppc_h_enter()
413 while (!try_lock_tlbie(&kvm->arch.tlbie_lock)) in do_tlbies()
419 "r" (rbvalues[i]), "r" (kvm->arch.lpid)); in do_tlbies()
421 kvm->arch.tlbie_lock = 0; in do_tlbies()
440 if (pte_index >= kvm->arch.hpt_npte) in kvmppc_do_h_remove()
442 hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4)); in kvmppc_do_h_remove()
453 rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); in kvmppc_do_h_remove()
487 &vcpu->arch.gpr[4]); in kvmppc_h_remove()
493 unsigned long *args = &vcpu->arch.gpr[4]; in kvmppc_h_bulk_remove()
518 pte_index >= kvm->arch.hpt_npte) { in kvmppc_h_bulk_remove()
524 hp = (__be64 *) (kvm->arch.hpt_virt + (pte_index << 4)); in kvmppc_h_bulk_remove()
556 rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); in kvmppc_h_bulk_remove()
610 if (pte_index >= kvm->arch.hpt_npte) in kvmppc_h_protect()
613 hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4)); in kvmppc_h_protect()
631 rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); in kvmppc_h_protect()
673 if (pte_index >= kvm->arch.hpt_npte) in kvmppc_h_read()
679 rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); in kvmppc_h_read()
681 hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4)); in kvmppc_h_read()
692 vcpu->arch.gpr[4 + i * 2] = v; in kvmppc_h_read()
693 vcpu->arch.gpr[5 + i * 2] = r; in kvmppc_h_read()
708 if (pte_index >= kvm->arch.hpt_npte) in kvmppc_h_clear_ref()
711 rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); in kvmppc_h_clear_ref()
712 hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4)); in kvmppc_h_clear_ref()
737 vcpu->arch.gpr[4] = gr; in kvmppc_h_clear_ref()
754 if (pte_index >= kvm->arch.hpt_npte) in kvmppc_h_clear_mod()
757 rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); in kvmppc_h_clear_mod()
758 hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4)); in kvmppc_h_clear_mod()
790 vcpu->arch.gpr[4] = gr; in kvmppc_h_clear_mod()
864 hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt_mask; in kvmppc_hv_find_lock_hpte()
875 hpte = (__be64 *)(kvm->arch.hpt_virt + (hash << 7)); in kvmppc_hv_find_lock_hpte()
905 hash = hash ^ kvm->arch.hpt_mask; in kvmppc_hv_find_lock_hpte()
944 hpte = (__be64 *)(kvm->arch.hpt_virt + (index << 4)); in kvmppc_hpte_hv_fault()
947 rev = real_vmalloc_addr(&kvm->arch.revmap[index]); in kvmppc_hpte_hv_fault()
958 key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS; in kvmppc_hpte_hv_fault()
975 if (data && (vcpu->arch.shregs.msr & MSR_DR)) { in kvmppc_hpte_hv_fault()
976 unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr); in kvmppc_hpte_hv_fault()
984 vcpu->arch.pgfault_addr = addr; in kvmppc_hpte_hv_fault()
985 vcpu->arch.pgfault_index = index; in kvmppc_hpte_hv_fault()
986 vcpu->arch.pgfault_hpte[0] = v; in kvmppc_hpte_hv_fault()
987 vcpu->arch.pgfault_hpte[1] = r; in kvmppc_hpte_hv_fault()
990 if (data && (vcpu->arch.shregs.msr & MSR_IR) && in kvmppc_hpte_hv_fault()
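A recurring pattern in the matches above is the address arithmetic on kvm->arch.hpt_virt: an HPTE is two 64-bit doublewords (16 bytes), so "pte_index << 4" selects one entry, an 8-entry PTE group is 128 bytes (hence "hash << 7" in kvmppc_hv_find_lock_hpte()), and kvm->arch.hpt_npte bounds-checks the index. The sketch below is illustrative only and is not taken from the file; the struct and function names (fake_kvm_arch, hpte_at) are hypothetical stand-ins for the real kvm->arch fields.

/*
 * Minimal sketch of the HPTE indexing seen in the matches above,
 * e.g. "hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4))".
 * Hypothetical names; assumes only that each HPTE is 16 bytes.
 */
#include <stdint.h>
#include <stddef.h>

struct fake_kvm_arch {
	unsigned long hpt_virt;   /* base address of the guest hashed page table */
	unsigned long hpt_npte;   /* number of HPTEs in the table */
};

/* Return a pointer to HPTE 'pte_index', or NULL if the index is out of range. */
static uint64_t *hpte_at(struct fake_kvm_arch *arch, unsigned long pte_index)
{
	if (pte_index >= arch->hpt_npte)
		return NULL;                            /* same check as the hpt_npte tests above */
	return (uint64_t *)(arch->hpt_virt + (pte_index << 4));  /* 16 bytes per HPTE */
}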