Lines matching references to memslot
Each match shows the source line number, the line itself, and the enclosing function; "argument" and "local" mark whether memslot is a parameter or a local variable there.

175 void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,  in kvmppc_map_vrma()  argument
189 npages = memslot->npages >> (porder - PAGE_SHIFT); in kvmppc_map_vrma()
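The shift on line 189 converts the slot's size from base pages to large pages of order porder: each large page covers 1 << (porder - PAGE_SHIFT) base pages. A standalone model of that arithmetic (the PAGE_SHIFT value and the example numbers are assumptions for illustration):

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* 4 KiB base pages, a common config */

    /* Large pages of size (1 << porder) needed to cover a memslot
     * of npages base pages. */
    static unsigned long vrma_large_pages(unsigned long npages,
                                          unsigned int porder)
    {
        return npages >> (porder - PAGE_SHIFT);
    }

    int main(void)
    {
        /* A 64 MiB slot (16384 base pages) mapped with 16 MiB
         * (order-24) pages needs 4 large pages. */
        printf("%lu\n", vrma_large_pages(16384, 24));
        return 0;
    }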
445 struct kvm_memory_slot *memslot; in kvmppc_book3s_hv_page_fault() local
485 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_book3s_hv_page_fault()
487 trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr); in kvmppc_book3s_hv_page_fault()
490 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) in kvmppc_book3s_hv_page_fault()
498 if (gfn_base < memslot->base_gfn) in kvmppc_book3s_hv_page_fault()
513 hva = gfn_to_hva_memslot(memslot, gfn); in kvmppc_book3s_hv_page_fault()
594 rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn]; in kvmppc_book3s_hv_page_fault()
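Taken together, lines 485-594 show the canonical memslot pattern on the HV page-fault path: look the slot up by gfn, fall back to MMIO emulation when no valid slot backs the address, sanity-check the base gfn of the (possibly large) page, translate to a host virtual address, and finally locate the per-slot rmap chain by slot-relative index. A condensed paraphrase of those lines (error paths, locking, and call arguments elided):

    memslot = gfn_to_memslot(kvm, gfn);
    /* No valid slot: the access is to emulated MMIO, not RAM. */
    if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
        return kvmppc_hv_emulate_mmio(/* args elided */);
    /* The first gfn of a large page must still lie in the slot. */
    if (gfn_base < memslot->base_gfn)
        return -EFAULT;
    hva = gfn_to_hva_memslot(memslot, gfn);
    /* rmap is a per-slot array indexed by slot-relative gfn. */
    rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn];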
650 struct kvm_memory_slot *memslot; in kvmppc_rmap_reset() local
655 kvm_for_each_memslot(memslot, slots) { in kvmppc_rmap_reset()
660 memset(memslot->arch.rmap, 0, in kvmppc_rmap_reset()
661 memslot->npages * sizeof(*memslot->arch.rmap)); in kvmppc_rmap_reset()
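Lines 655-661 are nearly the whole of kvmppc_rmap_reset(): every slot's rmap array is simply zeroed. Condensed, with the locking around the slot walk elided:

    struct kvm_memory_slot *memslot;

    kvm_for_each_memslot(memslot, slots) {
        /* Drop all reverse-map state for the slot's pages. */
        memset(memslot->arch.rmap, 0,
               memslot->npages * sizeof(*memslot->arch.rmap));
    }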
676 struct kvm_memory_slot *memslot; in kvm_handle_hva_range() local
679 kvm_for_each_memslot(memslot, slots) { in kvm_handle_hva_range()
683 hva_start = max(start, memslot->userspace_addr); in kvm_handle_hva_range()
684 hva_end = min(end, memslot->userspace_addr + in kvm_handle_hva_range()
685 (memslot->npages << PAGE_SHIFT)); in kvm_handle_hva_range()
692 gfn = hva_to_gfn_memslot(hva_start, memslot); in kvm_handle_hva_range()
693 gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); in kvm_handle_hva_range()
696 gfn_t gfn_offset = gfn - memslot->base_gfn; in kvm_handle_hva_range()
698 ret = handler(kvm, &memslot->arch.rmap[gfn_offset], gfn); in kvm_handle_hva_range()
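Lines 683-693 clamp the caller's hva range to the part actually backed by each slot, then convert both ends to gfns; adding PAGE_SIZE - 1 to hva_end makes the end gfn exclusive while still covering a partial final page. A standalone model of that arithmetic (struct and constants simplified; the real code uses hva_to_gfn_memslot()):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    typedef unsigned long gfn_t;

    struct slot {
        unsigned long userspace_addr;   /* hva of the slot's first page */
        unsigned long npages;
        gfn_t base_gfn;
    };

    /* Intersect [start, end) with the slot and return the matching
     * [*gfn, *gfn_end) range; 0 means no overlap with this slot. */
    static int hva_range_to_gfns(const struct slot *s,
                                 unsigned long start, unsigned long end,
                                 gfn_t *gfn, gfn_t *gfn_end)
    {
        unsigned long hva_start, hva_end, limit;

        limit = s->userspace_addr + (s->npages << PAGE_SHIFT);
        hva_start = start > s->userspace_addr ? start : s->userspace_addr;
        hva_end = end < limit ? end : limit;
        if (hva_start >= hva_end)
            return 0;
        *gfn = s->base_gfn +
            ((hva_start - s->userspace_addr) >> PAGE_SHIFT);
        /* Round up so a partial final page is still visited. */
        *gfn_end = s->base_gfn +
            ((hva_end + PAGE_SIZE - 1 - s->userspace_addr) >> PAGE_SHIFT);
        return 1;
    }

    int main(void)
    {
        struct slot s = { 0x10000000UL, 256, 0x1000 };
        gfn_t g, ge;

        if (hva_range_to_gfns(&s, 0x10003000UL, 0x10005001UL, &g, &ge))
            printf("gfn %#lx..%#lx\n", g, ge);  /* 0x1003..0x1006 */
        return 0;
    }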
791 struct kvm_memory_slot *memslot) in kvmppc_core_flush_memslot_hv() argument
797 rmapp = memslot->arch.rmap; in kvmppc_core_flush_memslot_hv()
798 gfn = memslot->base_gfn; in kvmppc_core_flush_memslot_hv()
799 for (n = memslot->npages; n; --n) { in kvmppc_core_flush_memslot_hv()
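Lines 797-799 walk the slot's rmap array and its gfns in lockstep: one rmap entry per page, starting at base_gfn. A condensed paraphrase (what the flush does with each entry is elided, and the lockstep advance shown here is an assumption about the loop body):

    rmapp = memslot->arch.rmap;
    gfn = memslot->base_gfn;
    for (n = memslot->npages; n; --n) {
        /* ... tear down HPTEs chained off *rmapp for this gfn ... */
        ++rmapp;
        ++gfn;
    }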
1011 struct kvm_memory_slot *memslot, in harvest_vpa_dirty() argument
1019 if (gfn < memslot->base_gfn || in harvest_vpa_dirty()
1020 gfn >= memslot->base_gfn + memslot->npages) in harvest_vpa_dirty()
1025 __set_bit_le(gfn - memslot->base_gfn, map); in harvest_vpa_dirty()
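Lines 1019-1025 are the whole memslot interaction of harvest_vpa_dirty(): ignore a gfn outside the slot, otherwise set the slot-relative bit in the dirty bitmap. A standalone model using a plain byte bitmap in place of the kernel's __set_bit_le():

    #include <stdio.h>

    typedef unsigned long gfn_t;

    struct slot {
        gfn_t base_gfn;
        unsigned long npages;
    };

    /* Mark gfn dirty in map if this slot backs it. */
    static void mark_dirty(const struct slot *s, gfn_t gfn,
                           unsigned char *map)
    {
        unsigned long bit;

        if (gfn < s->base_gfn || gfn >= s->base_gfn + s->npages)
            return;                 /* not our slot, nothing to record */
        bit = gfn - s->base_gfn;    /* dirty bitmap is slot-relative */
        map[bit / 8] |= 1u << (bit % 8);
    }

    int main(void)
    {
        struct slot s = { 0x1000, 64 };
        unsigned char map[8] = { 0 };

        mark_dirty(&s, 0x1005, map);
        printf("%#x\n", map[0]);    /* bit 5 set -> 0x20 */
        return 0;
    }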
1028 long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot, in kvmppc_hv_get_dirty_log() argument
1036 rmapp = memslot->arch.rmap; in kvmppc_hv_get_dirty_log()
1037 for (i = 0; i < memslot->npages; ++i) { in kvmppc_hv_get_dirty_log()
1054 harvest_vpa_dirty(&vcpu->arch.vpa, memslot, map); in kvmppc_hv_get_dirty_log()
1055 harvest_vpa_dirty(&vcpu->arch.dtl, memslot, map); in kvmppc_hv_get_dirty_log()
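Lines 1036-1055 give the shape of kvmppc_hv_get_dirty_log(): one pass over the slot's rmap entries to collect per-page dirty state, after which the per-vcpu shared areas that the hypervisor itself writes (VPA, DTL) are folded into the same map. Condensed (the rmap test-and-clear and any further harvested areas are elided):

    rmapp = memslot->arch.rmap;
    for (i = 0; i < memslot->npages; ++i) {
        /* ... harvest dirty state recorded via rmapp[i] ... */
    }
    kvm_for_each_vcpu(i, vcpu, kvm) {
        harvest_vpa_dirty(&vcpu->arch.vpa, memslot, map);
        harvest_vpa_dirty(&vcpu->arch.dtl, memslot, map);
    }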
1065 struct kvm_memory_slot *memslot; in kvmppc_pin_guest_page() local
1073 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_pin_guest_page()
1074 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) in kvmppc_pin_guest_page()
1076 hva = gfn_to_hva_memslot(memslot, gfn); in kvmppc_pin_guest_page()
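Lines 1073-1076 repeat the lookup-and-validate idiom before a page can be pinned, but unlike the fault path there is no MMIO fallback: an invalid or missing slot means the gpa is not ordinary guest RAM and the pin simply fails. Condensed (the actual pinning of the page behind hva is elided):

    memslot = gfn_to_memslot(kvm, gfn);
    if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
        goto err;   /* no RAM backing: refuse to pin */
    hva = gfn_to_hva_memslot(memslot, gfn);
    /* ... pin the page mapped at hva ... */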
1097 struct kvm_memory_slot *memslot; in kvmppc_unpin_guest_page() local
1110 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_unpin_guest_page()
1111 if (memslot) { in kvmppc_unpin_guest_page()
1112 rmap = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvmppc_unpin_guest_page()
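On the unpin side (lines 1110-1112) the slot is looked up again and the page's rmap chain is found by the same slot-relative indexing, so that dirty state can be recorded for the page being released. Condensed (what is done under *rmap is elided):

    memslot = gfn_to_memslot(kvm, gfn);
    if (memslot) {
        rmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
        /* ... mark the just-unpinned page dirty via *rmap ... */
    }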