memslot 224 arch/mips/kvm/mips.c struct kvm_memory_slot *memslot,
memslot 996 arch/mips/kvm/mips.c struct kvm_memory_slot *memslot;
memslot 1006 arch/mips/kvm/mips.c memslot = id_to_memslot(slots, log->slot);
memslot 1009 arch/mips/kvm/mips.c kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
memslot 1019 arch/mips/kvm/mips.c struct kvm_memory_slot *memslot;
memslot 1029 arch/mips/kvm/mips.c memslot = id_to_memslot(slots, log->slot);
memslot 1032 arch/mips/kvm/mips.c kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
memslot 473 arch/mips/kvm/mmu.c struct kvm_memory_slot *memslot,
memslot 478 arch/mips/kvm/mmu.c struct kvm_memory_slot *memslot;
memslot 484 arch/mips/kvm/mmu.c kvm_for_each_memslot(memslot, slots) {
memslot 488 arch/mips/kvm/mmu.c hva_start = max(start, memslot->userspace_addr);
memslot 489 arch/mips/kvm/mmu.c hva_end = min(end, memslot->userspace_addr +
memslot 490 arch/mips/kvm/mmu.c (memslot->npages << PAGE_SHIFT));
memslot 498 arch/mips/kvm/mmu.c gfn = hva_to_gfn_memslot(hva_start, memslot);
memslot 499 arch/mips/kvm/mmu.c gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
memslot 501 arch/mips/kvm/mmu.c ret |= handler(kvm, gfn, gfn_end, memslot, data);
memslot 509 arch/mips/kvm/mmu.c struct kvm_memory_slot *memslot, void *data)
memslot 524 arch/mips/kvm/mmu.c struct kvm_memory_slot *memslot, void *data)
memslot 536 arch/mips/kvm/mmu.c if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
memslot 538 arch/mips/kvm/mmu.c else if (memslot->flags & KVM_MEM_READONLY)
memslot 566 arch/mips/kvm/mmu.c struct kvm_memory_slot *memslot, void *data)
memslot 572 arch/mips/kvm/mmu.c struct kvm_memory_slot *memslot, void *data)
memslot 199 arch/powerpc/include/asm/kvm_book3s.h const struct kvm_memory_slot *memslot,
memslot 206 arch/powerpc/include/asm/kvm_book3s.h struct kvm_memory_slot *memslot,
memslot 215 arch/powerpc/include/asm/kvm_book3s.h extern int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
memslot 217 arch/powerpc/include/asm/kvm_book3s.h extern int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
memslot 219 arch/powerpc/include/asm/kvm_book3s.h extern int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
memslot 222 arch/powerpc/include/asm/kvm_book3s.h struct kvm_memory_slot *memslot, unsigned long *map);
memslot 224 arch/powerpc/include/asm/kvm_book3s.h const struct kvm_memory_slot *memslot);
memslot 242 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,
memslot 259 arch/powerpc/include/asm/kvm_book3s.h struct kvm_memory_slot *memslot, unsigned long *map);
memslot 261 arch/powerpc/include/asm/kvm_book3s.h struct kvm_memory_slot *memslot,
memslot 486 arch/powerpc/include/asm/kvm_book3s_64.h static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
memslot 493 arch/powerpc/include/asm/kvm_book3s_64.h return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
memslot 633 arch/powerpc/include/asm/kvm_book3s_64.h const struct kvm_memory_slot *memslot,
memslot 171 arch/powerpc/include/asm/kvm_ppc.h struct kvm_memory_slot *memslot, unsigned long porder);
memslot 210 arch/powerpc/include/asm/kvm_ppc.h struct kvm_memory_slot *memslot,
memslot 220 arch/powerpc/include/asm/kvm_ppc.h struct kvm_memory_slot *memslot);
memslot 280 arch/powerpc/include/asm/kvm_ppc.h void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
memslot 282 arch/powerpc/include/asm/kvm_ppc.h struct kvm_memory_slot *memslot,
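The arch/mips/kvm/mmu.c hits above (478-501) spell out the hva-range walker that recurs throughout this listing, in book3s_64_mmu_hv.c, book3s_pr.c, arch/x86/kvm/mmu.c and virt/kvm/arm/mmu.c: clamp the requested hva span against each slot, convert the surviving overlap to a gfn range, and hand it to a per-range handler. Below is a minimal user-space sketch of that pattern; the struct, the handler, and the helper names here are simplified stand-ins, not the kernel's real definitions.

/* Stand-alone model of the hva-range walker pattern (simplified types). */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct memslot {
	uint64_t base_gfn;        /* first guest frame number in the slot */
	uint64_t npages;          /* slot length in pages */
	uint64_t userspace_addr;  /* hva where the slot is mapped */
};

/* models hva_to_gfn_memslot(): hva -> gfn via the slot's linear offset */
static uint64_t hva_to_gfn(uint64_t hva, const struct memslot *slot)
{
	return ((hva - slot->userspace_addr) >> PAGE_SHIFT) + slot->base_gfn;
}

/* stand-in for the per-range callback the kernel passes in */
static int handler(uint64_t gfn, uint64_t gfn_end)
{
	printf("handle gfn range [%llu, %llu)\n",
	       (unsigned long long)gfn, (unsigned long long)gfn_end);
	return 0;
}

/* Clamp [start, end) against each slot, then hand the gfn range on. */
static int handle_hva_range(const struct memslot *slots, int nslots,
			    uint64_t start, uint64_t end)
{
	int ret = 0;

	for (int i = 0; i < nslots; i++) {
		const struct memslot *m = &slots[i];
		uint64_t slot_end = m->userspace_addr + (m->npages << PAGE_SHIFT);
		uint64_t hva_start = start > m->userspace_addr ? start : m->userspace_addr;
		uint64_t hva_end = end < slot_end ? end : slot_end;

		if (hva_start >= hva_end)
			continue;	/* no overlap with this slot */

		/* round the end up so a partial page still gets handled */
		ret |= handler(hva_to_gfn(hva_start, m),
			       hva_to_gfn(hva_end + PAGE_SIZE - 1, m));
	}
	return ret;
}

int main(void)
{
	struct memslot slots[] = {
		{ .base_gfn = 0,    .npages = 256, .userspace_addr = 0x100000 },
		{ .base_gfn = 1024, .npages = 128, .userspace_addr = 0x400000 },
	};
	return handle_hva_range(slots, 2, 0x110000, 0x410000);
}

The `hva_end + PAGE_SIZE - 1` mirrors the rounding visible at mmu.c:499 above, so a range ending mid-page still covers that final page.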
memslot 849 arch/powerpc/kvm/book3s.c void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
memslot 851 arch/powerpc/kvm/book3s.c kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
memslot 855 arch/powerpc/kvm/book3s.c struct kvm_memory_slot *memslot,
memslot 858 arch/powerpc/kvm/book3s.c return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
memslot 11 arch/powerpc/kvm/book3s.h struct kvm_memory_slot *memslot);
memslot 204 arch/powerpc/kvm/book3s_64_mmu_hv.c void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
memslot 218 arch/powerpc/kvm/book3s_64_mmu_hv.c npages = memslot->npages >> (porder - PAGE_SHIFT);
memslot 501 arch/powerpc/kvm/book3s_64_mmu_hv.c struct kvm_memory_slot *memslot;
memslot 563 arch/powerpc/kvm/book3s_64_mmu_hv.c memslot = gfn_to_memslot(kvm, gfn);
memslot 565 arch/powerpc/kvm/book3s_64_mmu_hv.c trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr);
memslot 568 arch/powerpc/kvm/book3s_64_mmu_hv.c if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
memslot 576 arch/powerpc/kvm/book3s_64_mmu_hv.c if (gfn_base < memslot->base_gfn)
memslot 591 arch/powerpc/kvm/book3s_64_mmu_hv.c hva = gfn_to_hva_memslot(memslot, gfn);
memslot 687 arch/powerpc/kvm/book3s_64_mmu_hv.c rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn];
memslot 747 arch/powerpc/kvm/book3s_64_mmu_hv.c struct kvm_memory_slot *memslot;
memslot 752 arch/powerpc/kvm/book3s_64_mmu_hv.c kvm_for_each_memslot(memslot, slots) {
memslot 759 arch/powerpc/kvm/book3s_64_mmu_hv.c memset(memslot->arch.rmap, 0,
memslot 760 arch/powerpc/kvm/book3s_64_mmu_hv.c memslot->npages * sizeof(*memslot->arch.rmap));
memslot 766 arch/powerpc/kvm/book3s_64_mmu_hv.c typedef int (*hva_handler_fn)(struct kvm *kvm, struct kvm_memory_slot *memslot,
memslot 777 arch/powerpc/kvm/book3s_64_mmu_hv.c struct kvm_memory_slot *memslot;
memslot 780 arch/powerpc/kvm/book3s_64_mmu_hv.c kvm_for_each_memslot(memslot, slots) {
memslot 784 arch/powerpc/kvm/book3s_64_mmu_hv.c hva_start = max(start, memslot->userspace_addr);
memslot 785 arch/powerpc/kvm/book3s_64_mmu_hv.c hva_end = min(end, memslot->userspace_addr +
memslot 786 arch/powerpc/kvm/book3s_64_mmu_hv.c (memslot->npages << PAGE_SHIFT));
memslot 793 arch/powerpc/kvm/book3s_64_mmu_hv.c gfn = hva_to_gfn_memslot(hva_start, memslot);
memslot 794 arch/powerpc/kvm/book3s_64_mmu_hv.c gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
memslot 797 arch/powerpc/kvm/book3s_64_mmu_hv.c ret = handler(kvm, memslot, gfn);
memslot 813 arch/powerpc/kvm/book3s_64_mmu_hv.c struct kvm_memory_slot *memslot,
memslot 845 arch/powerpc/kvm/book3s_64_mmu_hv.c if ((rcbits & HPTE_R_C) && memslot->dirty_bitmap)
memslot 846 arch/powerpc/kvm/book3s_64_mmu_hv.c kvmppc_update_dirty_map(memslot, gfn, psize);
memslot 854 arch/powerpc/kvm/book3s_64_mmu_hv.c static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
memslot 861 arch/powerpc/kvm/book3s_64_mmu_hv.c rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
memslot 884 arch/powerpc/kvm/book3s_64_mmu_hv.c kvmppc_unmap_hpte(kvm, i, memslot, rmapp, gfn);
memslot 901 arch/powerpc/kvm/book3s_64_mmu_hv.c struct kvm_memory_slot *memslot)
memslot 907 arch/powerpc/kvm/book3s_64_mmu_hv.c gfn = memslot->base_gfn;
memslot 908 arch/powerpc/kvm/book3s_64_mmu_hv.c rmapp = memslot->arch.rmap;
memslot 910 arch/powerpc/kvm/book3s_64_mmu_hv.c kvmppc_radix_flush_memslot(kvm, memslot);
memslot 914 arch/powerpc/kvm/book3s_64_mmu_hv.c for (n = memslot->npages; n; --n, ++gfn) {
memslot 922 arch/powerpc/kvm/book3s_64_mmu_hv.c kvm_unmap_rmapp(kvm, memslot, gfn);
memslot 927 arch/powerpc/kvm/book3s_64_mmu_hv.c static int kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
memslot 936 arch/powerpc/kvm/book3s_64_mmu_hv.c rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
memslot 990 arch/powerpc/kvm/book3s_64_mmu_hv.c static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
memslot 999 arch/powerpc/kvm/book3s_64_mmu_hv.c rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
memslot 1130 arch/powerpc/kvm/book3s_64_mmu_hv.c struct kvm_memory_slot *memslot,
memslot 1138 arch/powerpc/kvm/book3s_64_mmu_hv.c if (gfn < memslot->base_gfn ||
memslot 1139 arch/powerpc/kvm/book3s_64_mmu_hv.c gfn >= memslot->base_gfn + memslot->npages)
memslot 1144 arch/powerpc/kvm/book3s_64_mmu_hv.c __set_bit_le(gfn - memslot->base_gfn, map);
memslot 1148 arch/powerpc/kvm/book3s_64_mmu_hv.c struct kvm_memory_slot *memslot, unsigned long *map)
memslot 1154 arch/powerpc/kvm/book3s_64_mmu_hv.c rmapp = memslot->arch.rmap;
memslot 1155 arch/powerpc/kvm/book3s_64_mmu_hv.c for (i = 0; i < memslot->npages; ++i) {
memslot 1173 arch/powerpc/kvm/book3s_64_mmu_hv.c struct kvm_memory_slot *memslot;
memslot 1181 arch/powerpc/kvm/book3s_64_mmu_hv.c memslot = gfn_to_memslot(kvm, gfn);
memslot 1182 arch/powerpc/kvm/book3s_64_mmu_hv.c if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
memslot 1184 arch/powerpc/kvm/book3s_64_mmu_hv.c hva = gfn_to_hva_memslot(memslot, gfn);
memslot 1205 arch/powerpc/kvm/book3s_64_mmu_hv.c struct kvm_memory_slot *memslot;
memslot 1217 arch/powerpc/kvm/book3s_64_mmu_hv.c memslot = gfn_to_memslot(kvm, gfn);
memslot 1218 arch/powerpc/kvm/book3s_64_mmu_hv.c if (memslot && memslot->dirty_bitmap)
memslot 1219 arch/powerpc/kvm/book3s_64_mmu_hv.c set_bit_le(gfn - memslot->base_gfn, memslot->dirty_bitmap);
memslot 1292 arch/powerpc/kvm/book3s_64_mmu_hv.c struct kvm_memory_slot *memslot =
memslot 1295 arch/powerpc/kvm/book3s_64_mmu_hv.c if (memslot) {
memslot 1297 arch/powerpc/kvm/book3s_64_mmu_hv.c rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
memslot 1300 arch/powerpc/kvm/book3s_64_mmu_hv.c kvmppc_unmap_hpte(kvm, idx, memslot, rmapp, gfn);
memslot 377 arch/powerpc/kvm/book3s_64_mmu_radix.c const struct kvm_memory_slot *memslot,
memslot 393 arch/powerpc/kvm/book3s_64_mmu_radix.c if (!memslot) {
memslot 394 arch/powerpc/kvm/book3s_64_mmu_radix.c memslot = gfn_to_memslot(kvm, gfn);
memslot 395 arch/powerpc/kvm/book3s_64_mmu_radix.c if (!memslot)
memslot 408 arch/powerpc/kvm/book3s_64_mmu_radix.c kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size);
memslot 410 arch/powerpc/kvm/book3s_64_mmu_radix.c if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap)
memslot 411 arch/powerpc/kvm/book3s_64_mmu_radix.c kvmppc_update_dirty_map(memslot, gfn, page_size);
memslot 768 arch/powerpc/kvm/book3s_64_mmu_radix.c struct kvm_memory_slot *memslot,
memslot 793 arch/powerpc/kvm/book3s_64_mmu_radix.c hva = gfn_to_hva_memslot(memslot, gfn);
memslot 800 arch/powerpc/kvm/book3s_64_mmu_radix.c pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
memslot 832 arch/powerpc/kvm/book3s_64_mmu_radix.c large_enable = !(memslot->flags & KVM_MEM_LOG_DIRTY_PAGES);
memslot 894 arch/powerpc/kvm/book3s_64_mmu_radix.c struct kvm_memory_slot *memslot;
memslot 919 arch/powerpc/kvm/book3s_64_mmu_radix.c memslot = gfn_to_memslot(kvm, gfn);
memslot 922 arch/powerpc/kvm/book3s_64_mmu_radix.c if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
memslot 935 arch/powerpc/kvm/book3s_64_mmu_radix.c if (memslot->flags & KVM_MEM_READONLY) {
memslot 959 arch/powerpc/kvm/book3s_64_mmu_radix.c ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, writing,
memslot 968 arch/powerpc/kvm/book3s_64_mmu_radix.c int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
memslot 977 arch/powerpc/kvm/book3s_64_mmu_radix.c kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
memslot 983 arch/powerpc/kvm/book3s_64_mmu_radix.c int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
memslot 998 arch/powerpc/kvm/book3s_64_mmu_radix.c rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
memslot 1008 arch/powerpc/kvm/book3s_64_mmu_radix.c int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
memslot 1024 arch/powerpc/kvm/book3s_64_mmu_radix.c struct kvm_memory_slot *memslot, int pagenum)
memslot 1026 arch/powerpc/kvm/book3s_64_mmu_radix.c unsigned long gfn = memslot->base_gfn + pagenum;
memslot 1043 arch/powerpc/kvm/book3s_64_mmu_radix.c rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
memslot 1053 arch/powerpc/kvm/book3s_64_mmu_radix.c struct kvm_memory_slot *memslot, unsigned long *map)
memslot 1058 arch/powerpc/kvm/book3s_64_mmu_radix.c for (i = 0; i < memslot->npages; i = j) {
memslot 1059 arch/powerpc/kvm/book3s_64_mmu_radix.c npages = kvm_radix_test_clear_dirty(kvm, memslot, i);
memslot 1078 arch/powerpc/kvm/book3s_64_mmu_radix.c const struct kvm_memory_slot *memslot)
memslot 1085 arch/powerpc/kvm/book3s_64_mmu_radix.c gpa = memslot->base_gfn << PAGE_SHIFT;
memslot 1087 arch/powerpc/kvm/book3s_64_mmu_radix.c for (n = memslot->npages; n; --n) {
memslot 1090 arch/powerpc/kvm/book3s_64_mmu_radix.c kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
memslot 337 arch/powerpc/kvm/book3s_64_vio.c struct kvm_memory_slot *memslot;
memslot 339 arch/powerpc/kvm/book3s_64_vio.c memslot = search_memslots(kvm_memslots(kvm), gfn);
memslot 340 arch/powerpc/kvm/book3s_64_vio.c if (!memslot)
memslot 343 arch/powerpc/kvm/book3s_64_vio.c *ua = __gfn_to_hva_memslot(memslot, gfn) |
memslot 82 arch/powerpc/kvm/book3s_64_vio_hv.c struct kvm_memory_slot *memslot;
memslot 84 arch/powerpc/kvm/book3s_64_vio_hv.c memslot = search_memslots(kvm_memslots_raw(kvm), gfn);
memslot 85 arch/powerpc/kvm/book3s_64_vio_hv.c if (!memslot)
memslot 88 arch/powerpc/kvm/book3s_64_vio_hv.c *ua = __gfn_to_hva_memslot(memslot, gfn) |
memslot 92 arch/powerpc/kvm/book3s_64_vio_hv.c *prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
memslot 4396 arch/powerpc/kvm/book3s_hv.c struct kvm_memory_slot *memslot;
memslot 4409 arch/powerpc/kvm/book3s_hv.c memslot = id_to_memslot(slots, log->slot);
memslot 4411 arch/powerpc/kvm/book3s_hv.c if (!memslot->dirty_bitmap)
memslot 4418 arch/powerpc/kvm/book3s_hv.c n = kvm_dirty_bitmap_bytes(memslot);
memslot 4419 arch/powerpc/kvm/book3s_hv.c buf = memslot->dirty_bitmap + n / sizeof(long);
memslot 4423 arch/powerpc/kvm/book3s_hv.c r = kvmppc_hv_get_dirty_log_radix(kvm, memslot, buf);
memslot 4425 arch/powerpc/kvm/book3s_hv.c r = kvmppc_hv_get_dirty_log_hpt(kvm, memslot, buf);
memslot 4435 arch/powerpc/kvm/book3s_hv.c p = memslot->dirty_bitmap;
memslot 4443 arch/powerpc/kvm/book3s_hv.c kvmppc_harvest_vpa_dirty(&vcpu->arch.vpa, memslot, buf);
memslot 4444 arch/powerpc/kvm/book3s_hv.c kvmppc_harvest_vpa_dirty(&vcpu->arch.dtl, memslot, buf);
memslot 4478 arch/powerpc/kvm/book3s_hv.c struct kvm_memory_slot *memslot,
memslot 4581 arch/powerpc/kvm/book3s_hv.c struct kvm_memory_slot *memslot;
memslot 4609 arch/powerpc/kvm/book3s_hv.c memslot = gfn_to_memslot(kvm, 0);
memslot 4613 arch/powerpc/kvm/book3s_hv.c if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
memslot 4617 arch/powerpc/kvm/book3s_hv.c hva = memslot->userspace_addr;
memslot 4640 arch/powerpc/kvm/book3s_hv.c kvmppc_map_vrma(vcpu, memslot, porder);
memslot 651 arch/powerpc/kvm/book3s_hv_nested.c struct kvm_memory_slot *memslot;
memslot 673 arch/powerpc/kvm/book3s_hv_nested.c kvm_for_each_memslot(memslot, kvm_memslots(kvm))
memslot 674 arch/powerpc/kvm/book3s_hv_nested.c kvmhv_free_memslot_nest_rmap(memslot);
memslot 878 arch/powerpc/kvm/book3s_hv_nested.c const struct kvm_memory_slot *memslot,
memslot 885 arch/powerpc/kvm/book3s_hv_nested.c if (!memslot)
memslot 887 arch/powerpc/kvm/book3s_hv_nested.c gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn;
memslot 894 arch/powerpc/kvm/book3s_hv_nested.c unsigned long *rmap = &memslot->arch.rmap[gfn];
memslot 1265 arch/powerpc/kvm/book3s_hv_nested.c struct kvm_memory_slot *memslot;
memslot 1335 arch/powerpc/kvm/book3s_hv_nested.c memslot = gfn_to_memslot(kvm, gfn);
memslot 1336 arch/powerpc/kvm/book3s_hv_nested.c if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
memslot 1346 arch/powerpc/kvm/book3s_hv_nested.c if (memslot->flags & KVM_MEM_READONLY) {
memslot 1374 arch/powerpc/kvm/book3s_hv_nested.c ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot,
memslot 1416 arch/powerpc/kvm/book3s_hv_nested.c rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
memslot 109 arch/powerpc/kvm/book3s_hv_rm_mmu.c void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,
memslot 114 arch/powerpc/kvm/book3s_hv_rm_mmu.c if (!psize || !memslot->dirty_bitmap)
memslot 117 arch/powerpc/kvm/book3s_hv_rm_mmu.c gfn -= memslot->base_gfn;
memslot 118 arch/powerpc/kvm/book3s_hv_rm_mmu.c set_dirty_bits_atomic(memslot->dirty_bitmap, gfn, npages);
memslot 125 arch/powerpc/kvm/book3s_hv_rm_mmu.c struct kvm_memory_slot *memslot;
memslot 131 arch/powerpc/kvm/book3s_hv_rm_mmu.c memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
memslot 132 arch/powerpc/kvm/book3s_hv_rm_mmu.c if (memslot && memslot->dirty_bitmap)
memslot 133 arch/powerpc/kvm/book3s_hv_rm_mmu.c kvmppc_update_dirty_map(memslot, gfn, psize);
memslot 142 arch/powerpc/kvm/book3s_hv_rm_mmu.c struct kvm_memory_slot *memslot;
memslot 147 arch/powerpc/kvm/book3s_hv_rm_mmu.c memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
memslot 149 arch/powerpc/kvm/book3s_hv_rm_mmu.c *memslotp = memslot;
memslot 152 arch/powerpc/kvm/book3s_hv_rm_mmu.c if (!memslot)
memslot 155 arch/powerpc/kvm/book3s_hv_rm_mmu.c rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
memslot 168 arch/powerpc/kvm/book3s_hv_rm_mmu.c struct kvm_memory_slot *memslot;
memslot 173 arch/powerpc/kvm/book3s_hv_rm_mmu.c rmap = revmap_for_hpte(kvm, hpte_v, ptel, &memslot, &gfn);
memslot 192 arch/powerpc/kvm/book3s_hv_rm_mmu.c kvmppc_update_dirty_map(memslot, gfn,
memslot 206 arch/powerpc/kvm/book3s_hv_rm_mmu.c struct kvm_memory_slot *memslot;
memslot 232 arch/powerpc/kvm/book3s_hv_rm_mmu.c memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
memslot 236 arch/powerpc/kvm/book3s_hv_rm_mmu.c if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
memslot 244 arch/powerpc/kvm/book3s_hv_rm_mmu.c if (!slot_is_aligned(memslot, psize))
memslot 246 arch/powerpc/kvm/book3s_hv_rm_mmu.c slot_fn = gfn - memslot->base_gfn;
memslot 247 arch/powerpc/kvm/book3s_hv_rm_mmu.c rmap = &memslot->arch.rmap[slot_fn];
memslot 250 arch/powerpc/kvm/book3s_hv_rm_mmu.c hva = __gfn_to_hva_memslot(memslot, gfn);
memslot 896 arch/powerpc/kvm/book3s_hv_rm_mmu.c struct kvm_memory_slot *memslot;
memslot 903 arch/powerpc/kvm/book3s_hv_rm_mmu.c memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
memslot 904 arch/powerpc/kvm/book3s_hv_rm_mmu.c if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
memslot 908 arch/powerpc/kvm/book3s_hv_rm_mmu.c hva = __gfn_to_hva_memslot(memslot, gfn);
memslot 928 arch/powerpc/kvm/book3s_hv_rm_mmu.c *memslot_p = memslot;
memslot 936 arch/powerpc/kvm/book3s_hv_rm_mmu.c struct kvm_memory_slot *memslot;
memslot 946 arch/powerpc/kvm/book3s_hv_rm_mmu.c ret = kvmppc_get_hpa(vcpu, dest, 1, &pa, &memslot);
memslot 960 arch/powerpc/kvm/book3s_hv_rm_mmu.c kvmppc_update_dirty_map(memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
memslot 398 arch/powerpc/kvm/book3s_pr.c struct kvm_memory_slot *memslot;
memslot 401 arch/powerpc/kvm/book3s_pr.c kvm_for_each_memslot(memslot, slots) {
memslot 405 arch/powerpc/kvm/book3s_pr.c hva_start = max(start, memslot->userspace_addr);
memslot 406 arch/powerpc/kvm/book3s_pr.c hva_end = min(end, memslot->userspace_addr +
memslot 407 arch/powerpc/kvm/book3s_pr.c (memslot->npages << PAGE_SHIFT));
memslot 414 arch/powerpc/kvm/book3s_pr.c gfn = hva_to_gfn_memslot(hva_start, memslot);
memslot 415 arch/powerpc/kvm/book3s_pr.c gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
memslot 1866 arch/powerpc/kvm/book3s_pr.c struct kvm_memory_slot *memslot;
memslot 1882 arch/powerpc/kvm/book3s_pr.c memslot = id_to_memslot(slots, log->slot);
memslot 1884 arch/powerpc/kvm/book3s_pr.c ga = memslot->base_gfn << PAGE_SHIFT;
memslot 1885 arch/powerpc/kvm/book3s_pr.c ga_end = ga + (memslot->npages << PAGE_SHIFT);
memslot 1890 arch/powerpc/kvm/book3s_pr.c n = kvm_dirty_bitmap_bytes(memslot);
memslot 1891 arch/powerpc/kvm/book3s_pr.c memset(memslot->dirty_bitmap, 0, n);
memslot 1901 arch/powerpc/kvm/book3s_pr.c struct kvm_memory_slot *memslot)
memslot 1907 arch/powerpc/kvm/book3s_pr.c struct kvm_memory_slot *memslot,
memslot 1816 arch/powerpc/kvm/booke.c struct kvm_memory_slot *memslot,
memslot 1830 arch/powerpc/kvm/booke.c void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
memslot 697 arch/powerpc/kvm/powerpc.c struct kvm_memory_slot *memslot,
memslot 701 arch/powerpc/kvm/powerpc.c return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
memslot 274 arch/powerpc/kvm/trace_hv.h struct kvm_memory_slot *memslot, unsigned long ea,
memslot 277 arch/powerpc/kvm/trace_hv.h TP_ARGS(vcpu, hptep, memslot, ea, dsisr),
memslot 297 arch/powerpc/kvm/trace_hv.h __entry->base_gfn = memslot ? memslot->base_gfn : -1UL;
memslot 298 arch/powerpc/kvm/trace_hv.h __entry->slot_flags = memslot ? memslot->flags : 0;
memslot 576 arch/s390/kvm/kvm-s390.c struct kvm_memory_slot *memslot)
memslot 585 arch/s390/kvm/kvm-s390.c cur_gfn = memslot->base_gfn;
memslot 586 arch/s390/kvm/kvm-s390.c last_gfn = memslot->base_gfn + memslot->npages;
memslot 589 arch/s390/kvm/kvm-s390.c vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
memslot 618 arch/s390/kvm/kvm-s390.c struct kvm_memory_slot *memslot;
memslot 631 arch/s390/kvm/kvm-s390.c memslot = id_to_memslot(slots, log->slot);
memslot 633 arch/s390/kvm/kvm-s390.c if (!memslot->dirty_bitmap)
memslot 636 arch/s390/kvm/kvm-s390.c kvm_s390_sync_dirty_log(kvm, memslot);
memslot 643 arch/s390/kvm/kvm-s390.c n = kvm_dirty_bitmap_bytes(memslot);
memslot 644 arch/s390/kvm/kvm-s390.c memset(memslot->dirty_bitmap, 0, n);
memslot 4506 arch/s390/kvm/kvm-s390.c struct kvm_memory_slot *memslot,
memslot 1267 arch/x86/include/asm/kvm_host.h struct kvm_memory_slot *memslot);
memslot 1269 arch/x86/include/asm/kvm_host.h const struct kvm_memory_slot *memslot);
memslot 1271 arch/x86/include/asm/kvm_host.h struct kvm_memory_slot *memslot);
memslot 1273 arch/x86/include/asm/kvm_host.h struct kvm_memory_slot *memslot);
memslot 1275 arch/x86/include/asm/kvm_host.h struct kvm_memory_slot *memslot);
memslot 2002 arch/x86/kvm/mmu.c struct kvm_memory_slot *memslot;
memslot 2009 arch/x86/kvm/mmu.c kvm_for_each_memslot(memslot, slots) {
memslot 2013 arch/x86/kvm/mmu.c hva_start = max(start, memslot->userspace_addr);
memslot 2014 arch/x86/kvm/mmu.c hva_end = min(end, memslot->userspace_addr +
memslot 2015 arch/x86/kvm/mmu.c (memslot->npages << PAGE_SHIFT));
memslot 2022 arch/x86/kvm/mmu.c gfn_start = hva_to_gfn_memslot(hva_start, memslot);
memslot 2023 arch/x86/kvm/mmu.c gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
memslot 2025 arch/x86/kvm/mmu.c for_each_slot_rmap_range(memslot, PT_PAGE_TABLE_LEVEL,
memslot 2029 arch/x86/kvm/mmu.c ret |= handler(kvm, iterator.rmap, memslot,
memslot 5691 arch/x86/kvm/mmu.c slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
memslot 5698 arch/x86/kvm/mmu.c for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
memslot 5724 arch/x86/kvm/mmu.c slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
memslot 5728 arch/x86/kvm/mmu.c return slot_handle_level_range(kvm, memslot, fn, start_level,
memslot 5729 arch/x86/kvm/mmu.c end_level, memslot->base_gfn,
memslot 5730 arch/x86/kvm/mmu.c memslot->base_gfn + memslot->npages - 1,
memslot 5735 arch/x86/kvm/mmu.c slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
memslot 5738 arch/x86/kvm/mmu.c return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
memslot 5743 arch/x86/kvm/mmu.c slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
memslot 5746 arch/x86/kvm/mmu.c return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL + 1,
memslot 5751 arch/x86/kvm/mmu.c slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
memslot 5754 arch/x86/kvm/mmu.c return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
memslot 5950 arch/x86/kvm/mmu.c struct kvm_memory_slot *memslot;
memslot 5956 arch/x86/kvm/mmu.c kvm_for_each_memslot(memslot, slots) {
memslot 5959 arch/x86/kvm/mmu.c start = max(gfn_start, memslot->base_gfn);
memslot 5960 arch/x86/kvm/mmu.c end = min(gfn_end, memslot->base_gfn + memslot->npages);
memslot 5964 arch/x86/kvm/mmu.c slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
memslot 5980 arch/x86/kvm/mmu.c struct kvm_memory_slot *memslot)
memslot 5985 arch/x86/kvm/mmu.c flush = slot_handle_all_level(kvm, memslot, slot_rmap_write_protect,
memslot 6008 arch/x86/kvm/mmu.c kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
memslot 6009 arch/x86/kvm/mmu.c memslot->npages);
memslot 6052 arch/x86/kvm/mmu.c const struct kvm_memory_slot *memslot)
memslot 6056 arch/x86/kvm/mmu.c slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot,
memslot 6062 arch/x86/kvm/mmu.c struct kvm_memory_slot *memslot)
memslot 6067 arch/x86/kvm/mmu.c flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
memslot 6079 arch/x86/kvm/mmu.c kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
memslot 6080 arch/x86/kvm/mmu.c memslot->npages);
memslot 6085 arch/x86/kvm/mmu.c struct kvm_memory_slot *memslot)
memslot 6090 arch/x86/kvm/mmu.c flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect,
memslot 6098 arch/x86/kvm/mmu.c kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
memslot 6099 arch/x86/kvm/mmu.c memslot->npages);
memslot 6104 arch/x86/kvm/mmu.c struct kvm_memory_slot *memslot)
memslot 6109 arch/x86/kvm/mmu.c flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false);
memslot 6116 arch/x86/kvm/mmu.c kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
memslot 6117 arch/x86/kvm/mmu.c memslot->npages);
memslot 6366 arch/x86/kvm/mmu.c struct kvm_memory_slot *memslot;
memslot 6372 arch/x86/kvm/mmu.c kvm_for_each_memslot(memslot, slots)
memslot 6373 arch/x86/kvm/mmu.c nr_pages += memslot->npages;
memslot 7312 arch/x86/kvm/vmx/vmx.c struct kvm_memory_slot *memslot,
memslot 7315 arch/x86/kvm/vmx/vmx.c kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
memslot 9827 arch/x86/kvm/x86.c struct kvm_memory_slot *memslot,
memslot 9832 arch/x86/kvm/x86.c return kvm_arch_create_memslot(kvm, memslot,
memslot 353 include/linux/kvm_host.h static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
memslot 355 include/linux/kvm_host.h return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
memslot 358 include/linux/kvm_host.h static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
memslot 360 include/linux/kvm_host.h unsigned long len = kvm_dirty_bitmap_bytes(memslot);
memslot 362 include/linux/kvm_host.h return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
memslot 583 include/linux/kvm_host.h #define kvm_for_each_memslot(memslot, slots) \
memslot 584 include/linux/kvm_host.h for (memslot = &slots->memslots[0]; \
memslot 585 include/linux/kvm_host.h memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
memslot 586 include/linux/kvm_host.h memslot++)
memslot 686 include/linux/kvm_host.h struct kvm_memory_slot *memslot,
memslot 49 include/linux/kvm_types.h struct kvm_memory_slot *memslot;
memslot 147 tools/testing/selftests/kvm/include/kvm_util.h uint32_t memslot);
memslot 149 tools/testing/selftests/kvm/include/kvm_util.h vm_paddr_t paddr_min, uint32_t memslot);
memslot 591 tools/testing/selftests/kvm/include/x86_64/vmx.h uint32_t memslot, uint32_t eptp_memslot);
memslot 709 tools/testing/selftests/kvm/lib/kvm_util.c memslot2region(struct kvm_vm *vm, uint32_t memslot)
memslot 715 tools/testing/selftests/kvm/lib/kvm_util.c if (region->region.slot == memslot)
memslot 720 tools/testing/selftests/kvm/lib/kvm_util.c " requested slot: %u\n", memslot);
memslot 1558 tools/testing/selftests/kvm/lib/kvm_util.c vm_paddr_t paddr_min, uint32_t memslot)
memslot 1570 tools/testing/selftests/kvm/lib/kvm_util.c region = memslot2region(vm, memslot);
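The include/linux/kvm_host.h hits above (353-362) give the dirty-bitmap layout away: kvm_dirty_bitmap_bytes() rounds the per-page bitmap up to whole longs, and kvm_second_dirty_bitmap() points just past the first bitmap, because (per the kvm_main.c:864 and 1266/1335 hits below) a single allocation holds two equal bitmaps back to back, the second serving as the transfer buffer for the dirty-log ioctls. A minimal sketch of that arithmetic, with a simplified stand-in struct rather than the kernel's:

/* Model of the double dirty-bitmap layout (simplified types). */
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
#define ALIGN(x, a)   (((x) + (a) - 1) & ~((a) - 1))

struct memslot {
	unsigned long npages;
	unsigned long *dirty_bitmap;
};

/* models kvm_dirty_bitmap_bytes(): bitmap size rounded to whole longs */
static unsigned long dirty_bitmap_bytes(const struct memslot *m)
{
	return ALIGN(m->npages, BITS_PER_LONG) / 8;
}

/* models kvm_second_dirty_bitmap(): the buffer right after the first */
static unsigned long *second_dirty_bitmap(const struct memslot *m)
{
	return m->dirty_bitmap + dirty_bitmap_bytes(m) / sizeof(unsigned long);
}

int main(void)
{
	struct memslot m = { .npages = 1000 };

	/* kvm_create_dirty_bitmap() allocates twice the bitmap size */
	m.dirty_bitmap = calloc(2, dirty_bitmap_bytes(&m));
	if (!m.dirty_bitmap)
		return 1;

	printf("%lu pages -> %lu bitmap bytes, second bitmap at +%ld longs\n",
	       m.npages, dirty_bitmap_bytes(&m),
	       (long)(second_dirty_bitmap(&m) - m.dirty_bitmap));
	free(m.dirty_bitmap);
	return 0;
}

For 1000 pages this yields 128 bytes per bitmap (1024 bits), with the second bitmap starting 16 longs into the allocation, which matches the `buf = memslot->dirty_bitmap + n / sizeof(long)` idiom at book3s_hv.c:4419 above.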
memslot 1585 tools/testing/selftests/kvm/lib/kvm_util.c paddr_min, vm->page_size, memslot);
memslot 1598 tools/testing/selftests/kvm/lib/kvm_util.c uint32_t memslot)
memslot 1600 tools/testing/selftests/kvm/lib/kvm_util.c return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
memslot 72 tools/testing/selftests/kvm/lib/kvm_util_internal.h memslot2region(struct kvm_vm *vm, uint32_t memslot);
memslot 18 tools/testing/selftests/kvm/lib/s390x/processor.c void virt_pgd_alloc(struct kvm_vm *vm, uint32_t memslot)
memslot 29 tools/testing/selftests/kvm/lib/s390x/processor.c KVM_GUEST_PAGE_TABLE_MIN_PADDR, memslot);
memslot 41 tools/testing/selftests/kvm/lib/s390x/processor.c static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri, uint32_t memslot)
memslot 46 tools/testing/selftests/kvm/lib/s390x/processor.c KVM_GUEST_PAGE_TABLE_MIN_PADDR, memslot);
memslot 71 tools/testing/selftests/kvm/lib/s390x/processor.c uint32_t memslot)
memslot 98 tools/testing/selftests/kvm/lib/s390x/processor.c entry[idx] = virt_alloc_region(vm, ri, memslot);
memslot 512 tools/testing/selftests/kvm/lib/x86_64/vmx.c uint32_t memslot, uint32_t eptp_memslot)
memslot 516 tools/testing/selftests/kvm/lib/x86_64/vmx.c memslot2region(vm, memslot);
memslot 46 virt/kvm/arm/mmu.c static bool memslot_is_logging(struct kvm_memory_slot *memslot)
memslot 48 virt/kvm/arm/mmu.c return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
memslot 414 virt/kvm/arm/mmu.c struct kvm_memory_slot *memslot)
memslot 416 virt/kvm/arm/mmu.c phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
memslot 417 virt/kvm/arm/mmu.c phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
memslot 439 virt/kvm/arm/mmu.c struct kvm_memory_slot *memslot;
memslot 446 virt/kvm/arm/mmu.c kvm_for_each_memslot(memslot, slots)
memslot 447 virt/kvm/arm/mmu.c stage2_flush_memslot(kvm, memslot);
memslot 924 virt/kvm/arm/mmu.c struct kvm_memory_slot *memslot)
memslot 926 virt/kvm/arm/mmu.c hva_t hva = memslot->userspace_addr;
memslot 927 virt/kvm/arm/mmu.c phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
memslot 928 virt/kvm/arm/mmu.c phys_addr_t size = PAGE_SIZE * memslot->npages;
memslot 957 virt/kvm/arm/mmu.c gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
memslot 974 virt/kvm/arm/mmu.c struct kvm_memory_slot *memslot;
memslot 982 virt/kvm/arm/mmu.c kvm_for_each_memslot(memslot, slots)
memslot 983 virt/kvm/arm/mmu.c stage2_unmap_memslot(kvm, memslot);
memslot 1543 virt/kvm/arm/mmu.c struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
memslot 1544 virt/kvm/arm/mmu.c phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
memslot 1545 virt/kvm/arm/mmu.c phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
memslot 1612 virt/kvm/arm/mmu.c static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
memslot 1620 virt/kvm/arm/mmu.c size = memslot->npages * PAGE_SIZE;
memslot 1622 virt/kvm/arm/mmu.c gpa_start = memslot->base_gfn << PAGE_SHIFT;
memslot 1624 virt/kvm/arm/mmu.c uaddr_start = memslot->userspace_addr;
memslot 1670 virt/kvm/arm/mmu.c struct kvm_memory_slot *memslot, unsigned long hva,
memslot 1683 virt/kvm/arm/mmu.c bool logging_active = memslot_is_logging(memslot);
memslot 1707 virt/kvm/arm/mmu.c !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
memslot 1785 virt/kvm/arm/mmu.c if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) &&
memslot 1912 virt/kvm/arm/mmu.c struct kvm_memory_slot *memslot;
memslot 1954 virt/kvm/arm/mmu.c memslot = gfn_to_memslot(vcpu->kvm, gfn);
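Across the virt/kvm/arm/mmu.c and selftests entries above, one invariant does all the work: a slot maps guest frames [base_gfn, base_gfn + npages) linearly onto host virtual addresses starting at userspace_addr. A hedged sketch of the two conversions, modeled on __gfn_to_hva_memslot()/hva_to_gfn_memslot(), plus the memslot_is_logging() check from mmu.c:46-48; the struct is a simplified stand-in and the KVM_MEM_READONLY value here is illustrative:

/* Model of the linear gfn <-> hva mapping inside a slot. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT 12

#define KVM_MEM_READONLY 0x2u   /* illustrative flag value */

struct memslot {
	uint64_t base_gfn;
	uint64_t npages;
	uint64_t userspace_addr;
	uint32_t flags;
	unsigned long *dirty_bitmap;
};

/* models __gfn_to_hva_memslot(): gfn -> hva, linear within the slot */
static uint64_t gfn_to_hva(const struct memslot *m, uint64_t gfn)
{
	assert(gfn >= m->base_gfn && gfn < m->base_gfn + m->npages);
	return m->userspace_addr + ((gfn - m->base_gfn) << PAGE_SHIFT);
}

/* models hva_to_gfn_memslot(): the inverse mapping */
static uint64_t hva_to_gfn(const struct memslot *m, uint64_t hva)
{
	return m->base_gfn + ((hva - m->userspace_addr) >> PAGE_SHIFT);
}

/* A slot is being dirty-logged iff it has a bitmap and is writable. */
static bool memslot_is_logging(const struct memslot *m)
{
	return m->dirty_bitmap && !(m->flags & KVM_MEM_READONLY);
}

int main(void)
{
	struct memslot m = {
		.base_gfn = 0x100, .npages = 16, .userspace_addr = 0x7f0000000000,
	};
	uint64_t hva = gfn_to_hva(&m, 0x105);

	assert(hva_to_gfn(&m, hva) == 0x105);    /* round trip holds */
	assert(!memslot_is_logging(&m));         /* no bitmap allocated */
	return 0;
}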
memslot 1955 virt/kvm/arm/mmu.c hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
memslot 2000 virt/kvm/arm/mmu.c ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
memslot 2022 virt/kvm/arm/mmu.c struct kvm_memory_slot *memslot;
memslot 2028 virt/kvm/arm/mmu.c kvm_for_each_memslot(memslot, slots) {
memslot 2032 virt/kvm/arm/mmu.c hva_start = max(start, memslot->userspace_addr);
memslot 2033 virt/kvm/arm/mmu.c hva_end = min(end, memslot->userspace_addr +
memslot 2034 virt/kvm/arm/mmu.c (memslot->npages << PAGE_SHIFT));
memslot 2038 virt/kvm/arm/mmu.c gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
memslot 2277 virt/kvm/arm/mmu.c struct kvm_memory_slot *memslot,
memslot 2294 virt/kvm/arm/mmu.c if (memslot->base_gfn + memslot->npages >=
memslot 2342 virt/kvm/arm/mmu.c if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
memslot 2363 virt/kvm/arm/mmu.c stage2_flush_memslot(kvm, memslot);
memslot 147 virt/kvm/kvm_main.c static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn);
memslot 583 virt/kvm/kvm_main.c static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
memslot 585 virt/kvm/kvm_main.c if (!memslot->dirty_bitmap)
memslot 588 virt/kvm/kvm_main.c kvfree(memslot->dirty_bitmap);
memslot 589 virt/kvm/kvm_main.c memslot->dirty_bitmap = NULL;
memslot 608 virt/kvm/kvm_main.c struct kvm_memory_slot *memslot;
memslot 613 virt/kvm/kvm_main.c kvm_for_each_memslot(memslot, slots)
memslot 614 virt/kvm/kvm_main.c kvm_free_memslot(kvm, memslot, NULL);
memslot 862 virt/kvm/kvm_main.c static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
memslot 864 virt/kvm/kvm_main.c unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
memslot 866 virt/kvm/kvm_main.c memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL_ACCOUNT);
memslot 867 virt/kvm/kvm_main.c if (!memslot->dirty_bitmap)
memslot 1179 virt/kvm/kvm_main.c struct kvm_memory_slot *memslot;
memslot 1190 virt/kvm/kvm_main.c memslot = id_to_memslot(slots, id);
memslot 1191 virt/kvm/kvm_main.c if (!memslot->dirty_bitmap)
memslot 1194 virt/kvm/kvm_main.c n = kvm_dirty_bitmap_bytes(memslot);
memslot 1197 virt/kvm/kvm_main.c any = memslot->dirty_bitmap[i];
memslot 1199 virt/kvm/kvm_main.c if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
memslot 1235 virt/kvm/kvm_main.c struct kvm_memory_slot *memslot;
memslot 1247 virt/kvm/kvm_main.c memslot = id_to_memslot(slots, id);
memslot 1249 virt/kvm/kvm_main.c dirty_bitmap = memslot->dirty_bitmap;
memslot 1253 virt/kvm/kvm_main.c n = kvm_dirty_bitmap_bytes(memslot);
memslot 1266 virt/kvm/kvm_main.c dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
memslot 1282 virt/kvm/kvm_main.c kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
memslot 1305 virt/kvm/kvm_main.c struct kvm_memory_slot *memslot;
memslot 1321 virt/kvm/kvm_main.c memslot = id_to_memslot(slots, id);
memslot 1323 virt/kvm/kvm_main.c dirty_bitmap = memslot->dirty_bitmap;
memslot 1329 virt/kvm/kvm_main.c if (log->first_page > memslot->npages ||
memslot 1330 virt/kvm/kvm_main.c log->num_pages > memslot->npages - log->first_page ||
memslot 1331 virt/kvm/kvm_main.c (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
memslot 1335 virt/kvm/kvm_main.c dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
memslot 1358 virt/kvm/kvm_main.c kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
memslot 1393 virt/kvm/kvm_main.c struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
memslot 1395 virt/kvm/kvm_main.c if (!memslot || memslot->id >= KVM_USER_MEM_SLOTS ||
memslot 1396 virt/kvm/kvm_main.c memslot->flags & KVM_MEMSLOT_INVALID)
memslot 1913 virt/kvm/kvm_main.c static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
memslot 1938 virt/kvm/kvm_main.c mark_page_dirty_in_slot(memslot, map->gfn);
memslot 2149 virt/kvm/kvm_main.c static int __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn,
memslot 2155 virt/kvm/kvm_main.c addr = gfn_to_hva_memslot(memslot, gfn);
memslot 2161 virt/kvm/kvm_main.c mark_page_dirty_in_slot(memslot, gfn);
memslot 2246 virt/kvm/kvm_main.c ghc->memslot = __gfn_to_memslot(slots, start_gfn);
memslot 2247 virt/kvm/kvm_main.c ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
memslot 2258 virt/kvm/kvm_main.c ghc->memslot = NULL;
memslot 2287 virt/kvm/kvm_main.c if (unlikely(!ghc->memslot))
memslot 2293 virt/kvm/kvm_main.c mark_page_dirty_in_slot(ghc->memslot, gpa >> PAGE_SHIFT);
memslot 2320 virt/kvm/kvm_main.c if (unlikely(!ghc->memslot))
memslot 2358 virt/kvm/kvm_main.c static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot,
memslot 2361 virt/kvm/kvm_main.c if (memslot && memslot->dirty_bitmap) {
memslot 2362 virt/kvm/kvm_main.c unsigned long rel_gfn = gfn - memslot->base_gfn;
memslot 2364 virt/kvm/kvm_main.c set_bit_le(rel_gfn, memslot->dirty_bitmap);
memslot 2370 virt/kvm/kvm_main.c struct kvm_memory_slot *memslot;
memslot 2372 virt/kvm/kvm_main.c memslot = gfn_to_memslot(kvm, gfn);
memslot 2373 virt/kvm/kvm_main.c mark_page_dirty_in_slot(memslot, gfn);
memslot 2379 virt/kvm/kvm_main.c struct kvm_memory_slot *memslot;
memslot 2381 virt/kvm/kvm_main.c memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
memslot 2382 virt/kvm/kvm_main.c mark_page_dirty_in_slot(memslot, gfn);
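The listing closes with mark_page_dirty_in_slot() (virt/kvm/kvm_main.c:2358-2364), the sink for every dirty-tracking path above: the dirty bit for a gfn is bit (gfn - base_gfn) of the slot's bitmap, set with a little-endian bit op so the bitmap layout handed to userspace is the same on any host endianness. A minimal model; the set_bit_le() here is a plain byte-wise stand-in, not the kernel helper:

/* Model of mark_page_dirty_in_slot() (simplified types). */
#include <stdio.h>
#include <stdint.h>

struct memslot {
	uint64_t base_gfn;
	uint64_t npages;
	uint8_t *dirty_bitmap;
};

/* byte-addressed model of set_bit_le(): bit n lives in byte n/8 */
static void set_bit_le(unsigned long nr, uint8_t *addr)
{
	addr[nr / 8] |= 1u << (nr % 8);
}

/* slot-relative gfn indexes the bitmap; a missing bitmap means no logging */
static void mark_page_dirty_in_slot(struct memslot *m, uint64_t gfn)
{
	if (m && m->dirty_bitmap) {
		unsigned long rel_gfn = gfn - m->base_gfn;
		set_bit_le(rel_gfn, m->dirty_bitmap);
	}
}

int main(void)
{
	uint8_t bitmap[16] = { 0 };
	struct memslot m = {
		.base_gfn = 0x40, .npages = 128, .dirty_bitmap = bitmap,
	};

	mark_page_dirty_in_slot(&m, 0x4a);      /* rel_gfn = 10 */
	printf("byte 1 = 0x%02x\n", bitmap[1]); /* bit 2 of byte 1 -> 0x04 */
	return 0;
}

The null-tolerant check mirrors kvm_main.c:2361 above, which lets callers such as mark_page_dirty() pass the result of gfn_to_memslot() straight through without testing it first.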