Lines Matching refs:arch

64 kvm->arch.hpt_cma_alloc = 0; in kvmppc_alloc_hpt()
69 kvm->arch.hpt_cma_alloc = 1; in kvmppc_alloc_hpt()
83 kvm->arch.hpt_virt = hpt; in kvmppc_alloc_hpt()
84 kvm->arch.hpt_order = order; in kvmppc_alloc_hpt()
86 kvm->arch.hpt_npte = 1ul << (order - 4); in kvmppc_alloc_hpt()
88 kvm->arch.hpt_mask = (1ul << (order - 7)) - 1; in kvmppc_alloc_hpt()
91 rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt_npte); in kvmppc_alloc_hpt()
96 kvm->arch.revmap = rev; in kvmppc_alloc_hpt()
97 kvm->arch.sdr1 = __pa(hpt) | (order - 18); in kvmppc_alloc_hpt()
100 hpt, order, kvm->arch.lpid); in kvmppc_alloc_hpt()
107 if (kvm->arch.hpt_cma_alloc) in kvmppc_alloc_hpt()
120 if (kvm->arch.hpte_setup_done) { in kvmppc_alloc_reset_hpt()
121 kvm->arch.hpte_setup_done = 0; in kvmppc_alloc_reset_hpt()
124 if (atomic_read(&kvm->arch.vcpus_running)) { in kvmppc_alloc_reset_hpt()
125 kvm->arch.hpte_setup_done = 1; in kvmppc_alloc_reset_hpt()
129 if (kvm->arch.hpt_virt) { in kvmppc_alloc_reset_hpt()
130 order = kvm->arch.hpt_order; in kvmppc_alloc_reset_hpt()
132 memset((void *)kvm->arch.hpt_virt, 0, 1ul << order); in kvmppc_alloc_reset_hpt()
138 cpumask_setall(&kvm->arch.need_tlb_flush); in kvmppc_alloc_reset_hpt()
152 kvmppc_free_lpid(kvm->arch.lpid); in kvmppc_free_hpt()
153 vfree(kvm->arch.revmap); in kvmppc_free_hpt()
154 if (kvm->arch.hpt_cma_alloc) in kvmppc_free_hpt()
155 kvm_release_hpt(virt_to_page(kvm->arch.hpt_virt), in kvmppc_free_hpt()
156 1 << (kvm->arch.hpt_order - PAGE_SHIFT)); in kvmppc_free_hpt()
158 free_pages(kvm->arch.hpt_virt, in kvmppc_free_hpt()
159 kvm->arch.hpt_order - PAGE_SHIFT); in kvmppc_free_hpt()
194 if (npages > kvm->arch.hpt_mask + 1) in kvmppc_map_vrma()
195 npages = kvm->arch.hpt_mask + 1; in kvmppc_map_vrma()
205 hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvm->arch.hpt_mask; in kvmppc_map_vrma()
247 unsigned long msr = vcpu->arch.intr_msr; in kvmppc_mmu_book3s_64_hv_reset_msr()
250 if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr)) in kvmppc_mmu_book3s_64_hv_reset_msr()
253 msr |= vcpu->arch.shregs.msr & MSR_TS_MASK; in kvmppc_mmu_book3s_64_hv_reset_msr()
283 for (i = 0; i < vcpu->arch.slb_nr; i++) { in kvmppc_mmu_book3s_hv_find_slbe()
284 if (!(vcpu->arch.slb[i].orige & SLB_ESID_V)) in kvmppc_mmu_book3s_hv_find_slbe()
287 if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T) in kvmppc_mmu_book3s_hv_find_slbe()
292 if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0) in kvmppc_mmu_book3s_hv_find_slbe()
293 return &vcpu->arch.slb[i]; in kvmppc_mmu_book3s_hv_find_slbe()
317 int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR); in kvmppc_mmu_book3s_64_hv_xlate()
327 slb_v = vcpu->kvm->arch.vrma_slb_v; in kvmppc_mmu_book3s_64_hv_xlate()
338 hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4)); in kvmppc_mmu_book3s_64_hv_xlate()
340 gr = kvm->arch.revmap[index].guest_rpte; in kvmppc_mmu_book3s_64_hv_xlate()
350 key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS; in kvmppc_mmu_book3s_64_hv_xlate()
360 int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr); in kvmppc_mmu_book3s_64_hv_xlate()
430 vcpu->arch.paddr_accessed = gpa; in kvmppc_hv_emulate_mmio()
431 vcpu->arch.vaddr_accessed = ea; in kvmppc_hv_emulate_mmio()
460 if (ea != vcpu->arch.pgfault_addr) in kvmppc_book3s_hv_page_fault()
462 index = vcpu->arch.pgfault_index; in kvmppc_book3s_hv_page_fault()
463 hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4)); in kvmppc_book3s_hv_page_fault()
464 rev = &kvm->arch.revmap[index]; in kvmppc_book3s_hv_page_fault()
474 if (hpte[0] != vcpu->arch.pgfault_hpte[0] || in kvmppc_book3s_hv_page_fault()
475 hpte[1] != vcpu->arch.pgfault_hpte[1]) in kvmppc_book3s_hv_page_fault()
593 rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn]; in kvmppc_book3s_hv_page_fault()
659 memset(memslot->arch.rmap, 0, in kvmppc_rmap_reset()
660 memslot->npages * sizeof(*memslot->arch.rmap)); in kvmppc_rmap_reset()
697 ret = handler(kvm, &memslot->arch.rmap[gfn_offset], gfn); in kvm_handle_hva_range()
715 struct revmap_entry *rev = kvm->arch.revmap; in kvm_unmap_rmapp()
733 hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4)); in kvm_unmap_rmapp()
794 rmapp = memslot->arch.rmap; in kvmppc_core_flush_memslot_hv()
813 struct revmap_entry *rev = kvm->arch.revmap; in kvm_age_rmapp()
831 hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4)); in kvm_age_rmapp()
871 struct revmap_entry *rev = kvm->arch.revmap; in kvm_test_age_rmapp()
886 hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4)); in kvm_test_age_rmapp()
911 return atomic_read(&kvm->arch.vcpus_running) != 0; in vcpus_running()
920 struct revmap_entry *rev = kvm->arch.revmap; in kvm_test_clear_dirty_npages()
941 hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4)); in kvm_test_clear_dirty_npages()
1029 rmapp = memslot->arch.rmap; in kvmppc_hv_get_dirty_log()
1046 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_hv_get_dirty_log()
1047 harvest_vpa_dirty(&vcpu->arch.vpa, memslot, map); in kvmppc_hv_get_dirty_log()
1048 harvest_vpa_dirty(&vcpu->arch.dtl, memslot, map); in kvmppc_hv_get_dirty_log()
1049 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_hv_get_dirty_log()
1105 rmap = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvmppc_unpin_guest_page()
1248 hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE)); in kvm_htab_read()
1249 revp = kvm->arch.revmap + i; in kvm_htab_read()
1264 while (i < kvm->arch.hpt_npte && in kvm_htab_read()
1274 while (i < kvm->arch.hpt_npte && in kvm_htab_read()
1290 while (i < kvm->arch.hpt_npte && in kvm_htab_read()
1311 if (i >= kvm->arch.hpt_npte) { in kvm_htab_read()
1343 hpte_setup = kvm->arch.hpte_setup_done; in kvm_htab_write()
1345 kvm->arch.hpte_setup_done = 0; /* temporarily */ in kvm_htab_write()
1348 if (atomic_read(&kvm->arch.vcpus_running)) { in kvm_htab_write()
1349 kvm->arch.hpte_setup_done = 1; in kvm_htab_write()
1370 if (i >= kvm->arch.hpt_npte || in kvm_htab_write()
1371 i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt_npte) in kvm_htab_write()
1374 hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE)); in kvm_htab_write()
1407 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | in kvm_htab_write()
1429 kvm->arch.hpte_setup_done = hpte_setup; in kvm_htab_write()
1443 atomic_dec(&ctx->kvm->arch.hpte_mod_interest); in kvm_htab_release()
1483 atomic_inc(&kvm->arch.hpte_mod_interest); in kvm_vm_ioctl_get_htab_fd()
1561 hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE)); in debugfs_htab_read()
1562 for (; len != 0 && i < kvm->arch.hpt_npte; ++i, hptp += 2) { in debugfs_htab_read()
1572 gr = kvm->arch.revmap[i].guest_rpte; in debugfs_htab_read()
1622 kvm->arch.htab_dentry = debugfs_create_file("htab", 0400, in kvmppc_mmu_debugfs_init()
1623 kvm->arch.debugfs_dir, kvm, in kvmppc_mmu_debugfs_init()
1629 struct kvmppc_mmu *mmu = &vcpu->arch.mmu; in kvmppc_mmu_book3s_hv_init()
1631 vcpu->arch.slb_nr = 32; /* POWER7/POWER8 */ in kvmppc_mmu_book3s_hv_init()
1636 vcpu->arch.hflags |= BOOK3S_HFLAG_SLB; in kvmppc_mmu_book3s_hv_init()
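
The sizing relationships used in kvmppc_alloc_hpt() above (lines 86, 88 and 97) follow from the POWER hashed-page-table layout: each HPTE is 16 bytes, HPTEs are grouped eight to a 128-byte HPTE group (so the hash mask covers order - 7 bits), and SDR1's HTABSIZE field encodes the table size relative to the 256 KiB (2^18) minimum. A minimal standalone sketch of those relationships, assuming an example order of 24 (a 16 MiB HPT) that is not taken from the source:

    /* Standalone sketch, not kernel code: HPT geometry for a given order. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long order = 24;                          /* example: 16 MiB HPT */
        unsigned long hpt_npte = 1ul << (order - 4);       /* 16-byte HPTEs */
        unsigned long hpt_mask = (1ul << (order - 7)) - 1; /* 128-byte, 8-entry groups */
        unsigned long htabsize = order - 18;               /* SDR1 HTABSIZE encoding */

        /* index << 4, as in the hptep computations above, is the byte
         * offset of HPTE 'index' within the table. */
        printf("npte=%lu groups=%lu htabsize=%lu\n",
               hpt_npte, hpt_mask + 1, htabsize);
        return 0;
    }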