Lines matching refs: memslot (kernel source line number, matching line, enclosing function)
174 void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, in kvmppc_map_vrma() argument
188 npages = memslot->npages >> (porder - PAGE_SHIFT); in kvmppc_map_vrma()
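In kvmppc_map_vrma() (line 188), memslot->npages counts base pages of size 1 << PAGE_SHIFT, so shifting right by (porder - PAGE_SHIFT) yields the number of large pages of order porder covering the slot. A minimal standalone sketch of that arithmetic (the helper name and the 4 KiB PAGE_SHIFT value are illustrative assumptions):

#define PAGE_SHIFT 12	/* assumed 4 KiB base pages */

/* Number of large pages of order `porder` needed to cover a slot of
 * `npages` base pages, mirroring line 188 above. */
static unsigned long large_pages_in_slot(unsigned long npages,
					 unsigned int porder)
{
	return npages >> (porder - PAGE_SHIFT);
}

/* e.g. a 16 MiB slot is 4096 base pages; with 16 MiB large pages
 * (porder = 24) that is 4096 >> 12 == 1 large page. */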
444 struct kvm_memory_slot *memslot; in kvmppc_book3s_hv_page_fault() local
484 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_book3s_hv_page_fault()
486 trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr); in kvmppc_book3s_hv_page_fault()
489 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) in kvmppc_book3s_hv_page_fault()
497 if (gfn_base < memslot->base_gfn) in kvmppc_book3s_hv_page_fault()
512 hva = gfn_to_hva_memslot(memslot, gfn); in kvmppc_book3s_hv_page_fault()
593 rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn]; in kvmppc_book3s_hv_page_fault()
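Lines 444-593 show the fault path's canonical slot lookup: resolve the faulting gfn to a memslot, reject a missing or invalidated slot, translate to a host virtual address, and index the per-slot reverse map by the slot-relative gfn. A hedged sketch of that pattern (the helper name is hypothetical; the accessors are the ones used above):

#include <linux/kvm_host.h>

/* Hypothetical helper condensing the lookup pattern from the fault
 * handler: returns the rmap entry for `gfn`, or NULL if no usable
 * slot backs it; the host virtual address comes back via *hva_out. */
static unsigned long *slot_rmap_for_gfn(struct kvm *kvm, gfn_t gfn,
					unsigned long *hva_out)
{
	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);

	/* A NULL slot, or one marked invalid while being deleted or
	 * moved, must not be used to service the fault. */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		return NULL;

	*hva_out = gfn_to_hva_memslot(memslot, gfn);

	/* arch.rmap has one entry per base page, indexed by the gfn
	 * offset within the slot. */
	return &memslot->arch.rmap[gfn - memslot->base_gfn];
}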
649 struct kvm_memory_slot *memslot; in kvmppc_rmap_reset() local
654 kvm_for_each_memslot(memslot, slots) { in kvmppc_rmap_reset()
659 memset(memslot->arch.rmap, 0, in kvmppc_rmap_reset()
660 memslot->npages * sizeof(*memslot->arch.rmap)); in kvmppc_rmap_reset()
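kvmppc_rmap_reset() (lines 649-660) simply walks every slot and zeroes its rmap array in one memset. A sketch of the loop, assuming the caller already holds whatever locks kvm_memslots() requires:

#include <linux/kvm_host.h>
#include <linux/string.h>

static void rmap_reset_all_slots(struct kvm *kvm)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *memslot;

	kvm_for_each_memslot(memslot, slots) {
		/* One rmap entry per base page in the slot. */
		memset(memslot->arch.rmap, 0,
		       memslot->npages * sizeof(*memslot->arch.rmap));
	}
}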
675 struct kvm_memory_slot *memslot; in kvm_handle_hva_range() local
678 kvm_for_each_memslot(memslot, slots) { in kvm_handle_hva_range()
682 hva_start = max(start, memslot->userspace_addr); in kvm_handle_hva_range()
683 hva_end = min(end, memslot->userspace_addr + in kvm_handle_hva_range()
684 (memslot->npages << PAGE_SHIFT)); in kvm_handle_hva_range()
691 gfn = hva_to_gfn_memslot(hva_start, memslot); in kvm_handle_hva_range()
692 gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); in kvm_handle_hva_range()
695 gfn_t gfn_offset = gfn - memslot->base_gfn; in kvm_handle_hva_range()
697 ret = handler(kvm, &memslot->arch.rmap[gfn_offset], gfn); in kvm_handle_hva_range()
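kvm_handle_hva_range() (lines 675-697) intersects the requested [start, end) host-virtual range with each slot's userspace mapping, then converts the clamped bounds back to guest frame numbers; adding PAGE_SIZE - 1 before converting hva_end rounds up, so a page only partially inside the range is still visited. A sketch of the walk (the handler typedef and the omitted return-value plumbing are assumptions):

#include <linux/kvm_host.h>

typedef int (*hva_handler_fn)(struct kvm *kvm, unsigned long *rmapp,
			      gfn_t gfn);

static void handle_hva_range(struct kvm *kvm, unsigned long start,
			     unsigned long end, hva_handler_fn handler)
{
	struct kvm_memory_slot *memslot;

	kvm_for_each_memslot(memslot, kvm_memslots(kvm)) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		/* Intersect [start, end) with the slot's hva range. */
		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
				   (memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;	/* no overlap with this slot */

		/* Round hva_end up so a partially covered final page
		 * still maps to a gfn in [gfn, gfn_end). */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1,
					     memslot);

		for (; gfn < gfn_end; ++gfn)
			handler(kvm,
				&memslot->arch.rmap[gfn - memslot->base_gfn],
				gfn);
	}
}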
788 struct kvm_memory_slot *memslot) in kvmppc_core_flush_memslot_hv() argument
794 rmapp = memslot->arch.rmap; in kvmppc_core_flush_memslot_hv()
795 gfn = memslot->base_gfn; in kvmppc_core_flush_memslot_hv()
796 for (n = memslot->npages; n; --n) { in kvmppc_core_flush_memslot_hv()
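kvmppc_core_flush_memslot_hv() (lines 788-796) walks one slot's rmap array front to back while tracking the matching absolute gfn. A minimal sketch of that traversal; the per-entry HPTE teardown itself is elided:

static void flush_memslot_rmap(struct kvm *kvm,
			       struct kvm_memory_slot *memslot)
{
	unsigned long *rmapp = memslot->arch.rmap;
	unsigned long gfn = memslot->base_gfn;
	unsigned long n;

	for (n = memslot->npages; n; --n, ++rmapp, ++gfn) {
		/* ... tear down any HPTEs chained off *rmapp for
		 * this gfn (elided) ... */
	}
}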
1004 struct kvm_memory_slot *memslot, in harvest_vpa_dirty() argument
1012 if (gfn < memslot->base_gfn || in harvest_vpa_dirty()
1013 gfn >= memslot->base_gfn + memslot->npages) in harvest_vpa_dirty()
1018 __set_bit_le(gfn - memslot->base_gfn, map); in harvest_vpa_dirty()
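harvest_vpa_dirty() (lines 1004-1018) only sets a dirty bit when the gfn actually falls inside the slot being queried; the dirty bitmap is little-endian (__set_bit_le) and indexed by the slot-relative gfn. A sketch of that core check (the function name here is an illustrative stand-in; the VPA validity checks from the real function are elided):

#include <linux/kvm_host.h>

static void mark_gfn_dirty_in_slot(struct kvm_memory_slot *memslot,
				   gfn_t gfn, unsigned long *map)
{
	/* The VPA may live in a different slot than the one whose
	 * dirty log is being read; if so, there is nothing to mark. */
	if (gfn < memslot->base_gfn ||
	    gfn >= memslot->base_gfn + memslot->npages)
		return;

	/* Dirty bitmaps are little-endian and indexed by the gfn
	 * offset from the start of the slot. */
	__set_bit_le(gfn - memslot->base_gfn, map);
}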
1021 long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot, in kvmppc_hv_get_dirty_log() argument
1029 rmapp = memslot->arch.rmap; in kvmppc_hv_get_dirty_log()
1030 for (i = 0; i < memslot->npages; ++i) { in kvmppc_hv_get_dirty_log()
1047 harvest_vpa_dirty(&vcpu->arch.vpa, memslot, map); in kvmppc_hv_get_dirty_log()
1048 harvest_vpa_dirty(&vcpu->arch.dtl, memslot, map); in kvmppc_hv_get_dirty_log()
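kvmppc_hv_get_dirty_log() (lines 1021-1048) assembles the dirty log in two phases: scan every rmap entry for page dirty state, then fold in each vcpu's VPA and DTL buffers, which the hypervisor writes behind the MMU's back. A sketch of that shape (rmap_entry_dirty() is a hypothetical stand-in for the real per-entry test-and-clear; the harvest_vpa_dirty() calls match lines 1047-1048 above):

#include <linux/kvm_host.h>

/* Hypothetical stand-in for the real test-and-clear of an rmap
 * entry's dirty state. */
static bool rmap_entry_dirty(struct kvm *kvm, unsigned long *rmapp);

static void get_dirty_log_sketch(struct kvm *kvm,
				 struct kvm_memory_slot *memslot,
				 unsigned long *map)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	/* Phase 1: per-page dirty state tracked through the rmap. */
	for (i = 0; i < memslot->npages; ++i)
		if (rmap_entry_dirty(kvm, &memslot->arch.rmap[i]))
			__set_bit_le(i, map);

	/* Phase 2: VPA and DTL buffers are dirtied by the hypervisor
	 * itself, so harvest them per vcpu, as in the file above. */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		harvest_vpa_dirty(&vcpu->arch.vpa, memslot, map);
		harvest_vpa_dirty(&vcpu->arch.dtl, memslot, map);
	}
}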
1058 struct kvm_memory_slot *memslot; in kvmppc_pin_guest_page() local
1066 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_pin_guest_page()
1067 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) in kvmppc_pin_guest_page()
1069 hva = gfn_to_hva_memslot(memslot, gfn); in kvmppc_pin_guest_page()
1090 struct kvm_memory_slot *memslot; in kvmppc_unpin_guest_page() local
1103 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_unpin_guest_page()
1104 if (memslot) { in kvmppc_unpin_guest_page()
1105 rmap = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvmppc_unpin_guest_page()
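kvmppc_pin_guest_page() and kvmppc_unpin_guest_page() (lines 1058-1105) are near-mirror images: both resolve a gfn to its memslot, but pin needs the hva (to fault in and hold the backing page) and therefore rejects invalid slots, while unpin only needs the rmap entry and tolerates a slot that has since vanished. A condensed, hedged sketch of just the two lookups (function names are illustrative; the actual pinning and rmap updates are elided):

#include <linux/kvm_host.h>

/* Pin-side lookup: a missing or invalidated slot is an error. */
static unsigned long pin_lookup_hva(struct kvm *kvm, unsigned long gpa)
{
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);

	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		return KVM_HVA_ERR_BAD;
	return gfn_to_hva_memslot(memslot, gfn);
}

/* Unpin-side lookup: a vanished slot just means there is no rmap
 * entry left to update. */
static unsigned long *unpin_lookup_rmap(struct kvm *kvm, unsigned long gpa)
{
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);

	if (!memslot)
		return NULL;
	return &memslot->arch.rmap[gfn - memslot->base_gfn];
}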