Lines matching refs: kvm
46 static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
49 static void kvmppc_rmap_reset(struct kvm *kvm);
51 long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp) in kvmppc_alloc_hpt() argument
64 kvm->arch.hpt_cma_alloc = 0; in kvmppc_alloc_hpt()
69 kvm->arch.hpt_cma_alloc = 1; in kvmppc_alloc_hpt()
84 kvm->arch.hpt_virt = hpt; in kvmppc_alloc_hpt()
85 kvm->arch.hpt_order = order; in kvmppc_alloc_hpt()
87 kvm->arch.hpt_npte = 1ul << (order - 4); in kvmppc_alloc_hpt()
89 kvm->arch.hpt_mask = (1ul << (order - 7)) - 1; in kvmppc_alloc_hpt()
92 rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt_npte); in kvmppc_alloc_hpt()
97 kvm->arch.revmap = rev; in kvmppc_alloc_hpt()
98 kvm->arch.sdr1 = __pa(hpt) | (order - 18); in kvmppc_alloc_hpt()
101 hpt, order, kvm->arch.lpid); in kvmppc_alloc_hpt()
108 if (kvm->arch.hpt_cma_alloc) in kvmppc_alloc_hpt()
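The hpt_npte, hpt_mask and sdr1 assignments above all fall out of the single HPT order: an HPTE is 16 bytes (hence order - 4), a PTE group is 8 HPTEs or 128 bytes (hence order - 7), and SDR1's HTABSIZE field encodes log2(size) - 18, the minimum HPT being 256 KiB. A minimal standalone sketch of that arithmetic; the helper name and the printed summary are illustrative only, not kernel code:

/* Illustrative only: how an HPT of 2^order bytes breaks down. */
#include <stdio.h>

static void hpt_geometry(unsigned int order)
{
        unsigned long npte = 1ul << (order - 4);        /* number of 16-byte HPTEs */
        unsigned long mask = (1ul << (order - 7)) - 1;  /* PTE-group index mask */
        unsigned int htabsize = order - 18;             /* SDR1 HTABSIZE field */

        printf("order %u: %lu HPTEs, hash mask 0x%lx, HTABSIZE %u\n",
               order, npte, mask, htabsize);
}

int main(void)
{
        hpt_geometry(24);       /* example: a 16 MiB hashed page table */
        return 0;
}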
115 long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp) in kvmppc_alloc_reset_hpt() argument
120 mutex_lock(&kvm->lock); in kvmppc_alloc_reset_hpt()
121 if (kvm->arch.hpte_setup_done) { in kvmppc_alloc_reset_hpt()
122 kvm->arch.hpte_setup_done = 0; in kvmppc_alloc_reset_hpt()
125 if (atomic_read(&kvm->arch.vcpus_running)) { in kvmppc_alloc_reset_hpt()
126 kvm->arch.hpte_setup_done = 1; in kvmppc_alloc_reset_hpt()
130 if (kvm->arch.hpt_virt) { in kvmppc_alloc_reset_hpt()
131 order = kvm->arch.hpt_order; in kvmppc_alloc_reset_hpt()
133 memset((void *)kvm->arch.hpt_virt, 0, 1ul << order); in kvmppc_alloc_reset_hpt()
137 kvmppc_rmap_reset(kvm); in kvmppc_alloc_reset_hpt()
139 cpumask_setall(&kvm->arch.need_tlb_flush); in kvmppc_alloc_reset_hpt()
143 err = kvmppc_alloc_hpt(kvm, htab_orderp); in kvmppc_alloc_reset_hpt()
147 mutex_unlock(&kvm->lock); in kvmppc_alloc_reset_hpt()
151 void kvmppc_free_hpt(struct kvm *kvm) in kvmppc_free_hpt() argument
153 kvmppc_free_lpid(kvm->arch.lpid); in kvmppc_free_hpt()
154 vfree(kvm->arch.revmap); in kvmppc_free_hpt()
155 if (kvm->arch.hpt_cma_alloc) in kvmppc_free_hpt()
156 kvm_release_hpt(virt_to_page(kvm->arch.hpt_virt), in kvmppc_free_hpt()
157 1 << (kvm->arch.hpt_order - PAGE_SHIFT)); in kvmppc_free_hpt()
159 free_pages(kvm->arch.hpt_virt, in kvmppc_free_hpt()
160 kvm->arch.hpt_order - PAGE_SHIFT); in kvmppc_free_hpt()
186 struct kvm *kvm = vcpu->kvm; in kvmppc_map_vrma() local
195 if (npages > kvm->arch.hpt_mask + 1) in kvmppc_map_vrma()
196 npages = kvm->arch.hpt_mask + 1; in kvmppc_map_vrma()
206 hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvm->arch.hpt_mask; in kvmppc_map_vrma()
216 ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, hash, hp_v, hp_r, in kvmppc_map_vrma()
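The hash computed above is the usual Power 1T-segment primary hash, (vsid ^ (vsid << 25) ^ page), reduced to a PTE-group index by hpt_mask. A standalone sketch of that formula, using a placeholder VSID constant rather than the kernel's VRMA_VSID definition:

/* Illustrative only: primary hash for a 1T segment, masked to a PTEG index. */
#include <stdio.h>

#define EXAMPLE_VSID    0x1ffffffULL    /* placeholder, not the kernel's VRMA_VSID */

static unsigned long long vrma_hash(unsigned long long page,
                                    unsigned long long hpt_mask)
{
        return (page ^ (EXAMPLE_VSID ^ (EXAMPLE_VSID << 25))) & hpt_mask;
}

int main(void)
{
        unsigned long long mask = (1ULL << (24 - 7)) - 1;       /* 16 MiB HPT */

        printf("page 0 -> group 0x%llx\n", vrma_hash(0, mask));
        printf("page 1 -> group 0x%llx\n", vrma_hash(1, mask));
        return 0;
}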
258 long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags, in kvmppc_virtmode_do_h_enter() argument
266 ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel, in kvmppc_virtmode_do_h_enter()
311 struct kvm *kvm = vcpu->kvm; in kvmppc_mmu_book3s_64_hv_xlate() local
328 slb_v = vcpu->kvm->arch.vrma_slb_v; in kvmppc_mmu_book3s_64_hv_xlate()
333 index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v, in kvmppc_mmu_book3s_64_hv_xlate()
339 hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4)); in kvmppc_mmu_book3s_64_hv_xlate()
341 gr = kvm->arch.revmap[index].guest_rpte; in kvmppc_mmu_book3s_64_hv_xlate()
439 struct kvm *kvm = vcpu->kvm; in kvmppc_book3s_hv_page_fault() local
464 hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4)); in kvmppc_book3s_hv_page_fault()
465 rev = &kvm->arch.revmap[index]; in kvmppc_book3s_hv_page_fault()
485 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_book3s_hv_page_fault()
502 mmu_seq = kvm->mmu_notifier_seq; in kvmppc_book3s_hv_page_fault()
599 if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) { in kvmppc_book3s_hv_page_fault()
612 kvmppc_invalidate_hpte(kvm, hptep, index); in kvmppc_book3s_hv_page_fault()
616 kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0); in kvmppc_book3s_hv_page_fault()
647 static void kvmppc_rmap_reset(struct kvm *kvm) in kvmppc_rmap_reset() argument
653 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_rmap_reset()
654 slots = kvm_memslots(kvm); in kvmppc_rmap_reset()
663 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_rmap_reset()
666 static int kvm_handle_hva_range(struct kvm *kvm, in kvm_handle_hva_range() argument
669 int (*handler)(struct kvm *kvm, in kvm_handle_hva_range() argument
678 slots = kvm_memslots(kvm); in kvm_handle_hva_range()
698 ret = handler(kvm, &memslot->arch.rmap[gfn_offset], gfn); in kvm_handle_hva_range()
706 static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, in kvm_handle_hva() argument
707 int (*handler)(struct kvm *kvm, unsigned long *rmapp, in kvm_handle_hva() argument
710 return kvm_handle_hva_range(kvm, hva, hva + 1, handler); in kvm_handle_hva()
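kvm_handle_hva_range and kvm_handle_hva above follow a walker-plus-callback shape: the range variant iterates the memslots covering [start, end) and invokes a handler per rmap entry, while the single-address variant simply passes the one-byte range [hva, hva + 1). A made-up, compilable sketch of that shape; none of these names come from the kernel, only the calling pattern does:

/* Illustrative only: the shape of the hva walkers listed above. */
struct fake_kvm;        /* stand-in for struct kvm */

typedef int (*rmap_handler_t)(struct fake_kvm *kvm, unsigned long *rmapp,
                              unsigned long gfn);

int walk_range(struct fake_kvm *kvm, unsigned long start,
               unsigned long end, rmap_handler_t handler)
{
        /* The real code walks the memslots overlapping [start, end) and
         * calls handler(kvm, &slot->arch.rmap[gfn_offset], gfn) for each
         * guest page; the sketch omits that walk. */
        (void)kvm; (void)start; (void)end; (void)handler;
        return 0;
}

int handle_one(struct fake_kvm *kvm, unsigned long hva, rmap_handler_t handler)
{
        return walk_range(kvm, hva, hva + 1, handler);
}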
713 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, in kvm_unmap_rmapp() argument
716 struct revmap_entry *rev = kvm->arch.revmap; in kvm_unmap_rmapp()
734 hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4)); in kvm_unmap_rmapp()
761 kvmppc_invalidate_hpte(kvm, hptep, i); in kvm_unmap_rmapp()
769 note_hpte_modification(kvm, &rev[i]); in kvm_unmap_rmapp()
778 int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva) in kvm_unmap_hva_hv() argument
780 kvm_handle_hva(kvm, hva, kvm_unmap_rmapp); in kvm_unmap_hva_hv()
784 int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end) in kvm_unmap_hva_range_hv() argument
786 kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp); in kvm_unmap_hva_range_hv()
790 void kvmppc_core_flush_memslot_hv(struct kvm *kvm, in kvmppc_core_flush_memslot_hv() argument
807 kvm_unmap_rmapp(kvm, rmapp, gfn); in kvmppc_core_flush_memslot_hv()
813 static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, in kvm_age_rmapp() argument
816 struct revmap_entry *rev = kvm->arch.revmap; in kvm_age_rmapp()
834 hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4)); in kvm_age_rmapp()
852 kvmppc_clear_ref_hpte(kvm, hptep, i); in kvm_age_rmapp()
855 note_hpte_modification(kvm, &rev[i]); in kvm_age_rmapp()
866 int kvm_age_hva_hv(struct kvm *kvm, unsigned long start, unsigned long end) in kvm_age_hva_hv() argument
868 return kvm_handle_hva_range(kvm, start, end, kvm_age_rmapp); in kvm_age_hva_hv()
871 static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp, in kvm_test_age_rmapp() argument
874 struct revmap_entry *rev = kvm->arch.revmap; in kvm_test_age_rmapp()
889 hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4)); in kvm_test_age_rmapp()
902 int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva) in kvm_test_age_hva_hv() argument
904 return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp); in kvm_test_age_hva_hv()
907 void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte) in kvm_set_spte_hva_hv() argument
909 kvm_handle_hva(kvm, hva, kvm_unmap_rmapp); in kvm_set_spte_hva_hv()
912 static int vcpus_running(struct kvm *kvm) in vcpus_running() argument
914 return atomic_read(&kvm->arch.vcpus_running) != 0; in vcpus_running()
921 static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp) in kvm_test_clear_dirty_npages() argument
923 struct revmap_entry *rev = kvm->arch.revmap; in kvm_test_clear_dirty_npages()
948 hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4)); in kvm_test_clear_dirty_npages()
967 (!hpte_is_writable(hptep1) || vcpus_running(kvm))) in kvm_test_clear_dirty_npages()
986 kvmppc_invalidate_hpte(kvm, hptep, i); in kvm_test_clear_dirty_npages()
993 note_hpte_modification(kvm, &rev[i]); in kvm_test_clear_dirty_npages()
1028 long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot, in kvmppc_hv_get_dirty_log() argument
1038 int npages = kvm_test_clear_dirty_npages(kvm, rmapp); in kvmppc_hv_get_dirty_log()
1052 kvm_for_each_vcpu(i, vcpu, kvm) { in kvmppc_hv_get_dirty_log()
1062 void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa, in kvmppc_pin_guest_page() argument
1072 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_pin_guest_page()
1073 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_pin_guest_page()
1081 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_pin_guest_page()
1089 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_pin_guest_page()
1093 void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa, in kvmppc_unpin_guest_page() argument
1109 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_unpin_guest_page()
1110 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_unpin_guest_page()
1117 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_unpin_guest_page()
1139 struct kvm *kvm; member
1237 struct kvm *kvm = ctx->kvm; in kvm_htab_read() local
1255 hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE)); in kvm_htab_read()
1256 revp = kvm->arch.revmap + i; in kvm_htab_read()
1271 while (i < kvm->arch.hpt_npte && in kvm_htab_read()
1281 while (i < kvm->arch.hpt_npte && in kvm_htab_read()
1297 while (i < kvm->arch.hpt_npte && in kvm_htab_read()
1318 if (i >= kvm->arch.hpt_npte) { in kvm_htab_read()
1334 struct kvm *kvm = ctx->kvm; in kvm_htab_write() local
1349 mutex_lock(&kvm->lock); in kvm_htab_write()
1350 hpte_setup = kvm->arch.hpte_setup_done; in kvm_htab_write()
1352 kvm->arch.hpte_setup_done = 0; /* temporarily */ in kvm_htab_write()
1355 if (atomic_read(&kvm->arch.vcpus_running)) { in kvm_htab_write()
1356 kvm->arch.hpte_setup_done = 1; in kvm_htab_write()
1357 mutex_unlock(&kvm->lock); in kvm_htab_write()
1377 if (i >= kvm->arch.hpt_npte || in kvm_htab_write()
1378 i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt_npte) in kvm_htab_write()
1381 hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE)); in kvm_htab_write()
1400 kvmppc_do_h_remove(kvm, 0, i, 0, tmp); in kvm_htab_write()
1402 ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r, in kvm_htab_write()
1414 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | in kvm_htab_write()
1417 kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD); in kvm_htab_write()
1426 kvmppc_do_h_remove(kvm, 0, i, 0, tmp); in kvm_htab_write()
1436 kvm->arch.hpte_setup_done = hpte_setup; in kvm_htab_write()
1437 mutex_unlock(&kvm->lock); in kvm_htab_write()
1450 atomic_dec(&ctx->kvm->arch.hpte_mod_interest); in kvm_htab_release()
1451 kvm_put_kvm(ctx->kvm); in kvm_htab_release()
1463 int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf) in kvm_vm_ioctl_get_htab_fd() argument
1475 kvm_get_kvm(kvm); in kvm_vm_ioctl_get_htab_fd()
1476 ctx->kvm = kvm; in kvm_vm_ioctl_get_htab_fd()
1484 kvm_put_kvm(kvm); in kvm_vm_ioctl_get_htab_fd()
1489 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_htab_fd()
1490 atomic_inc(&kvm->arch.hpte_mod_interest); in kvm_vm_ioctl_get_htab_fd()
1492 synchronize_srcu_expedited(&kvm->srcu); in kvm_vm_ioctl_get_htab_fd()
1493 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_htab_fd()
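kvm_vm_ioctl_get_htab_fd is the kernel side of the KVM_PPC_GET_HTAB_FD VM ioctl; the fd it returns is the one served by kvm_htab_read() and kvm_htab_write() above. A hedged sketch of the userspace call, assuming powerpc KVM headers and an already-created VM fd; open_htab_fd is a made-up helper name:

/* Illustrative only: obtaining the HPT dump/restore fd from userspace. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int open_htab_fd(int vm_fd, int for_write)
{
        struct kvm_get_htab_fd ghf;

        memset(&ghf, 0, sizeof(ghf));
        ghf.flags = for_write ? KVM_GET_HTAB_WRITE : 0;
        ghf.start_index = 0;            /* dump/restore from the first HPTE */

        /* Returns a new fd: read() it to dump the HPT, write() to restore. */
        return ioctl(vm_fd, KVM_PPC_GET_HTAB_FD, &ghf);
}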
1500 struct kvm *kvm; member
1510 struct kvm *kvm = inode->i_private; in debugfs_htab_open() local
1517 kvm_get_kvm(kvm); in debugfs_htab_open()
1518 p->kvm = kvm; in debugfs_htab_open()
1529 kvm_put_kvm(p->kvm); in debugfs_htab_release()
1541 struct kvm *kvm; in debugfs_htab_read() local
1566 kvm = p->kvm; in debugfs_htab_read()
1568 hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE)); in debugfs_htab_read()
1569 for (; len != 0 && i < kvm->arch.hpt_npte; ++i, hptp += 2) { in debugfs_htab_read()
1579 gr = kvm->arch.revmap[i].guest_rpte; in debugfs_htab_read()
1627 void kvmppc_mmu_debugfs_init(struct kvm *kvm) in kvmppc_mmu_debugfs_init() argument
1629 kvm->arch.htab_dentry = debugfs_create_file("htab", 0400, in kvmppc_mmu_debugfs_init()
1630 kvm->arch.debugfs_dir, kvm, in kvmppc_mmu_debugfs_init()