
Searched refs:memslot (Results 1 – 25 of 25) sorted by relevance

/linux-4.1.27/arch/powerpc/kvm/
book3s_64_mmu_hv.c
174 void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, in kvmppc_map_vrma() argument
188 npages = memslot->npages >> (porder - PAGE_SHIFT); in kvmppc_map_vrma()
444 struct kvm_memory_slot *memslot; in kvmppc_book3s_hv_page_fault() local
484 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_book3s_hv_page_fault()
486 trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr); in kvmppc_book3s_hv_page_fault()
489 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) in kvmppc_book3s_hv_page_fault()
497 if (gfn_base < memslot->base_gfn) in kvmppc_book3s_hv_page_fault()
512 hva = gfn_to_hva_memslot(memslot, gfn); in kvmppc_book3s_hv_page_fault()
593 rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn]; in kvmppc_book3s_hv_page_fault()
649 struct kvm_memory_slot *memslot; in kvmppc_rmap_reset() local
[all …]
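The page-fault path above resolves a guest frame number to a slot with gfn_to_memslot(), rejects invalid slots, and then indexes the per-slot reverse-map array by the slot-relative frame number (gfn - base_gfn). A minimal standalone sketch of that indexing, using simplified stand-in types rather than the kernel's structures:

/* Standalone sketch of the slot-relative indexing used above in
 * kvmppc_book3s_hv_page_fault(): per-slot arrays such as arch.rmap are
 * indexed by (gfn - base_gfn). Types are simplified stand-ins. */
#include <stdio.h>
#include <stdlib.h>

struct memslot_model {
	unsigned long base_gfn;   /* first guest frame number in the slot */
	unsigned long npages;     /* slot length in pages */
	unsigned long *rmap;      /* one reverse-map word per page */
};

static unsigned long *slot_rmap(struct memslot_model *s, unsigned long gfn)
{
	if (gfn < s->base_gfn || gfn >= s->base_gfn + s->npages)
		return NULL;              /* gfn not covered by this slot */
	return &s->rmap[gfn - s->base_gfn];
}

int main(void)
{
	struct memslot_model s = { .base_gfn = 0x100, .npages = 8 };
	s.rmap = calloc(s.npages, sizeof(*s.rmap));

	unsigned long *rmap = slot_rmap(&s, 0x105);
	printf("rmap index for gfn 0x105: %td\n", rmap - s.rmap);  /* prints 5 */
	free(s.rmap);
	return 0;
}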
book3s_hv_rm_mmu.c
107 struct kvm_memory_slot *memslot; in remove_revmap_chain() local
114 memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); in remove_revmap_chain()
115 if (!memslot) in remove_revmap_chain()
118 rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]); in remove_revmap_chain()
146 struct kvm_memory_slot *memslot; in kvmppc_do_h_enter() local
170 memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); in kvmppc_do_h_enter()
174 if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) { in kvmppc_do_h_enter()
182 if (!slot_is_aligned(memslot, psize)) in kvmppc_do_h_enter()
184 slot_fn = gfn - memslot->base_gfn; in kvmppc_do_h_enter()
185 rmap = &memslot->arch.rmap[slot_fn]; in kvmppc_do_h_enter()
[all …]
trace_hv.h
275 struct kvm_memory_slot *memslot, unsigned long ea,
278 TP_ARGS(vcpu, hptep, memslot, ea, dsisr),
298 __entry->base_gfn = memslot ? memslot->base_gfn : -1UL;
299 __entry->slot_flags = memslot ? memslot->flags : 0;
book3s_pr.c
257 struct kvm_memory_slot *memslot; in do_kvm_unmap_hva() local
260 kvm_for_each_memslot(memslot, slots) { in do_kvm_unmap_hva()
264 hva_start = max(start, memslot->userspace_addr); in do_kvm_unmap_hva()
265 hva_end = min(end, memslot->userspace_addr + in do_kvm_unmap_hva()
266 (memslot->npages << PAGE_SHIFT)); in do_kvm_unmap_hva()
273 gfn = hva_to_gfn_memslot(hva_start, memslot); in do_kvm_unmap_hva()
274 gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); in do_kvm_unmap_hva()
1533 struct kvm_memory_slot *memslot; in kvm_vm_ioctl_get_dirty_log_pr() local
1548 memslot = id_to_memslot(kvm->memslots, log->slot); in kvm_vm_ioctl_get_dirty_log_pr()
1550 ga = memslot->base_gfn << PAGE_SHIFT; in kvm_vm_ioctl_get_dirty_log_pr()
[all …]
book3s.h
16 struct kvm_memory_slot *memslot);
book3s_hv.c
2330 struct kvm_memory_slot *memslot; in kvm_vm_ioctl_get_dirty_log_hv() local
2340 memslot = id_to_memslot(kvm->memslots, log->slot); in kvm_vm_ioctl_get_dirty_log_hv()
2342 if (!memslot->dirty_bitmap) in kvm_vm_ioctl_get_dirty_log_hv()
2345 n = kvm_dirty_bitmap_bytes(memslot); in kvm_vm_ioctl_get_dirty_log_hv()
2346 memset(memslot->dirty_bitmap, 0, n); in kvm_vm_ioctl_get_dirty_log_hv()
2348 r = kvmppc_hv_get_dirty_log(kvm, memslot, memslot->dirty_bitmap); in kvm_vm_ioctl_get_dirty_log_hv()
2353 if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n)) in kvm_vm_ioctl_get_dirty_log_hv()
2382 struct kvm_memory_slot *memslot, in kvmppc_core_prepare_memory_region_hv() argument
2393 struct kvm_memory_slot *memslot; in kvmppc_core_commit_memory_region_hv() local
2402 memslot = id_to_memslot(kvm->memslots, mem->slot); in kvmppc_core_commit_memory_region_hv()
[all …]
book3s.c
753 void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) in kvmppc_core_flush_memslot() argument
755 kvm->arch.kvm_ops->flush_memslot(kvm, memslot); in kvmppc_core_flush_memslot()
759 struct kvm_memory_slot *memslot, in kvmppc_core_prepare_memory_region() argument
762 return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem); in kvmppc_core_prepare_memory_region()
powerpc.c
597 struct kvm_memory_slot *memslot, in kvm_arch_prepare_memory_region() argument
601 return kvmppc_core_prepare_memory_region(kvm, memslot, mem); in kvm_arch_prepare_memory_region()
booke.c
1786 struct kvm_memory_slot *memslot, in kvmppc_core_prepare_memory_region() argument
1798 void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) in kvmppc_core_flush_memslot() argument
/linux-4.1.27/arch/arm/kvm/
mmu.c
53 static bool memslot_is_logging(struct kvm_memory_slot *memslot) in memslot_is_logging() argument
55 return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY); in memslot_is_logging()
355 struct kvm_memory_slot *memslot) in stage2_flush_memslot() argument
357 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; in stage2_flush_memslot()
358 phys_addr_t end = addr + PAGE_SIZE * memslot->npages; in stage2_flush_memslot()
379 struct kvm_memory_slot *memslot; in stage2_flush_vm() local
386 kvm_for_each_memslot(memslot, slots) in stage2_flush_vm()
387 stage2_flush_memslot(kvm, memslot); in stage2_flush_vm()
745 struct kvm_memory_slot *memslot) in stage2_unmap_memslot() argument
747 hva_t hva = memslot->userspace_addr; in stage2_unmap_memslot()
[all …]
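stage2_flush_memslot() above derives the guest-physical (IPA) range of a slot directly from base_gfn and npages, and memslot_is_logging() enables dirty tracking only for writable slots that have a bitmap. A standalone model of both, assuming a placeholder value for KVM_MEM_READONLY and simplified types:

/* Standalone model of the arithmetic in stage2_flush_memslot() and the
 * test in memslot_is_logging() above. The flag value and struct layout
 * are simplified stand-ins for illustration. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define KVM_MEM_READONLY (1u << 1)   /* placeholder flag bit */

struct memslot_model {
	unsigned long base_gfn;
	unsigned long npages;
	unsigned long *dirty_bitmap;     /* non-NULL while logging is enabled */
	unsigned int flags;
};

static bool slot_is_logging(const struct memslot_model *s)
{
	/* Write-protect for dirty tracking only if the slot is writable. */
	return s->dirty_bitmap && !(s->flags & KVM_MEM_READONLY);
}

int main(void)
{
	struct memslot_model s = { .base_gfn = 0x80000, .npages = 256 };

	unsigned long addr = s.base_gfn << PAGE_SHIFT;     /* start of IPA range */
	unsigned long end  = addr + PAGE_SIZE * s.npages;  /* exclusive end */

	printf("IPA range: 0x%lx .. 0x%lx, logging=%d\n",
	       addr, end, slot_is_logging(&s));
	return 0;
}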
/linux-4.1.27/arch/x86/kvm/
iommu.c
155 struct kvm_memory_slot *memslot; in kvm_iommu_map_memslots() local
163 kvm_for_each_memslot(memslot, slots) { in kvm_iommu_map_memslots()
164 r = kvm_iommu_map_pages(kvm, memslot); in kvm_iommu_map_memslots()
323 struct kvm_memory_slot *memslot; in kvm_iommu_unmap_memslots() local
328 kvm_for_each_memslot(memslot, slots) in kvm_iommu_unmap_memslots()
329 kvm_iommu_unmap_pages(kvm, memslot); in kvm_iommu_unmap_memslots()
mmu.c
1418 struct kvm_memory_slot *memslot; in kvm_handle_hva_range() local
1422 kvm_for_each_memslot(memslot, slots) { in kvm_handle_hva_range()
1426 hva_start = max(start, memslot->userspace_addr); in kvm_handle_hva_range()
1427 hva_end = min(end, memslot->userspace_addr + in kvm_handle_hva_range()
1428 (memslot->npages << PAGE_SHIFT)); in kvm_handle_hva_range()
1435 gfn_start = hva_to_gfn_memslot(hva_start, memslot); in kvm_handle_hva_range()
1436 gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); in kvm_handle_hva_range()
1448 idx = gfn_to_index(gfn_start, memslot->base_gfn, j); in kvm_handle_hva_range()
1449 idx_end = gfn_to_index(gfn_end - 1, memslot->base_gfn, j); in kvm_handle_hva_range()
1451 rmapp = __gfn_to_rmap(gfn_start, j, memslot); in kvm_handle_hva_range()
[all …]
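kvm_handle_hva_range() above clamps the incoming host-virtual-address range to each slot's mapping and converts the overlap to a gfn range via hva_to_gfn_memslot(); the same clamping pattern appears in book3s_pr.c's do_kvm_unmap_hva() earlier in this listing. A standalone model of that arithmetic, with simplified types:

/* Standalone sketch of the clamping logic in kvm_handle_hva_range() above:
 * intersect an HVA range with one memslot's HVA range, then translate the
 * overlap to guest frame numbers. Types are simplified models. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)

struct memslot_model {
	unsigned long base_gfn;
	unsigned long npages;
	unsigned long userspace_addr;  /* HVA where the slot is mapped */
};

/* Mirrors hva_to_gfn_memslot(): offset within the slot, in pages,
 * added to the slot's first gfn. */
static unsigned long hva_to_gfn(unsigned long hva, const struct memslot_model *s)
{
	return s->base_gfn + ((hva - s->userspace_addr) >> PAGE_SHIFT);
}

int main(void)
{
	struct memslot_model s = {
		.base_gfn = 0x100, .npages = 16, .userspace_addr = 0x7f0000000000UL,
	};
	unsigned long start = 0x7f0000002000UL, end = 0x7f0000005000UL;

	unsigned long hva_start = start > s.userspace_addr ? start : s.userspace_addr;
	unsigned long slot_end = s.userspace_addr + (s.npages << PAGE_SHIFT);
	unsigned long hva_end = end < slot_end ? end : slot_end;

	if (hva_start >= hva_end) {
		puts("no overlap with this slot");
		return 0;
	}
	/* +PAGE_SIZE-1 rounds the exclusive end up to cover a partial last page. */
	printf("gfn range: 0x%lx .. 0x%lx\n",
	       hva_to_gfn(hva_start, &s),
	       hva_to_gfn(hva_end + PAGE_SIZE - 1, &s));
	return 0;
}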
x86.c
7602 struct kvm_memory_slot *memslot, in kvm_arch_prepare_memory_region() argument
7610 if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) { in kvm_arch_prepare_memory_region()
7617 userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE, in kvm_arch_prepare_memory_region()
7624 memslot->userspace_addr = userspace_addr; in kvm_arch_prepare_memory_region()
vmx.c
10162 struct kvm_memory_slot *memslot, in vmx_enable_log_dirty_pt_masked() argument
10165 kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask); in vmx_enable_log_dirty_pt_masked()
/linux-4.1.27/virt/kvm/
kvm_main.c
107 struct kvm_memory_slot *memslot, gfn_t gfn);
544 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) in kvm_destroy_dirty_bitmap() argument
546 if (!memslot->dirty_bitmap) in kvm_destroy_dirty_bitmap()
549 kvfree(memslot->dirty_bitmap); in kvm_destroy_dirty_bitmap()
550 memslot->dirty_bitmap = NULL; in kvm_destroy_dirty_bitmap()
570 struct kvm_memory_slot *memslot; in kvm_free_physmem() local
572 kvm_for_each_memslot(memslot, slots) in kvm_free_physmem()
573 kvm_free_physmem_slot(kvm, memslot, NULL); in kvm_free_physmem()
647 static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot) in kvm_create_dirty_bitmap() argument
649 unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot); in kvm_create_dirty_bitmap()
[all …]
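kvm_create_dirty_bitmap() above allocates twice the size reported by kvm_dirty_bitmap_bytes() (defined in kvm_host.h later in this listing); the kernel's own comment points at x86's kvm_vm_ioctl_get_dirty_log(), which uses the second half as a scratch buffer while harvesting the log. A standalone model of the sizing, assuming 4 KiB pages:

/* Standalone model of kvm_create_dirty_bitmap()/kvm_dirty_bitmap_bytes()
 * above: one bit per guest page, padded to whole longs, allocated at
 * twice that size so the second half can serve as scratch space. */
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

static unsigned long dirty_bitmap_bytes(unsigned long npages)
{
	/* Matches ALIGN(npages, BITS_PER_LONG) / 8 from kvm_host.h. */
	return ((npages + BITS_PER_LONG - 1) / BITS_PER_LONG) * sizeof(unsigned long);
}

int main(void)
{
	unsigned long npages = 262144;   /* a 1 GiB slot of 4 KiB pages */
	unsigned long n = dirty_bitmap_bytes(npages);

	void *bitmap = calloc(2, n);     /* 2 * n, as in the kernel snippet */
	printf("%lu pages -> %lu bitmap bytes (%lu allocated)\n",
	       npages, n, 2 * n);        /* 262144 -> 32768 (65536 allocated) */
	free(bitmap);
	return 0;
}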
/linux-4.1.27/arch/powerpc/include/asm/
kvm_book3s_64.h
381 static inline bool slot_is_aligned(struct kvm_memory_slot *memslot, in slot_is_aligned() argument
388 return !(memslot->base_gfn & mask) && !(memslot->npages & mask); in slot_is_aligned()
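slot_is_aligned() above requires that a slot mapped with large pages of order porder both start and end on a large-page boundary, i.e. base_gfn and npages must be multiples of 1 << (porder - PAGE_SHIFT). A standalone model with simplified stand-in types:

/* Standalone model of the slot_is_aligned() check above: a memslot can be
 * backed by pages of size (1 << porder) only if both its first guest frame
 * number and its length are multiples of the large-page size in base pages. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12  /* 4 KiB base pages */

struct memslot_model {
	unsigned long base_gfn;  /* first guest frame number of the slot */
	unsigned long npages;    /* slot length in base pages */
};

static bool slot_aligned(const struct memslot_model *slot, int porder)
{
	unsigned long mask = (1UL << (porder - PAGE_SHIFT)) - 1;

	/* Both the start and the size must be large-page aligned. */
	return !(slot->base_gfn & mask) && !(slot->npages & mask);
}

int main(void)
{
	struct memslot_model s = { .base_gfn = 0x1000, .npages = 0x4000 };

	/* 16 MiB pages => porder 24 => alignment to 4096 gfns. Prints 1. */
	printf("aligned for 16M pages: %d\n", slot_aligned(&s, 24));
	return 0;
}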
kvm_ppc.h
164 struct kvm_memory_slot *memslot, unsigned long porder);
184 struct kvm_memory_slot *memslot,
192 struct kvm_memory_slot *memslot);
243 void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
245 struct kvm_memory_slot *memslot,
kvm_book3s.h
176 struct kvm_memory_slot *memslot, unsigned long *map);
/linux-4.1.27/include/linux/
kvm_host.h
294 static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot) in kvm_dirty_bitmap_bytes() argument
296 return ALIGN(memslot->npages, BITS_PER_LONG) / 8; in kvm_dirty_bitmap_bytes()
437 #define kvm_for_each_memslot(memslot, slots) \ argument
438 for (memslot = &slots->memslots[0]; \
439 memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
440 memslot++)
523 struct kvm_memory_slot *memslot,
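The kvm_for_each_memslot() macro above walks the slot array in order and stops either at KVM_MEM_SLOTS_NUM or at the first slot whose npages is zero, which acts as a terminator, so only populated slots are visited. A standalone model of that loop shape (array size and contents are illustrative only):

/* Standalone model of the kvm_for_each_memslot() loop above: iterate the
 * slot array until the end or the first empty slot (npages == 0). */
#include <stdio.h>

#define MEM_SLOTS_NUM 4   /* stand-in for KVM_MEM_SLOTS_NUM */

struct memslot_model {
	unsigned long base_gfn;
	unsigned long npages;
};

int main(void)
{
	struct memslot_model slots[MEM_SLOTS_NUM] = {
		{ .base_gfn = 0x000, .npages = 16 },
		{ .base_gfn = 0x100, .npages = 32 },
		{ 0 },                               /* empty slot stops the walk */
		{ .base_gfn = 0x999, .npages = 8 },  /* never visited */
	};

	for (struct memslot_model *m = &slots[0];
	     m < slots + MEM_SLOTS_NUM && m->npages; m++)
		printf("slot at gfn 0x%lx, %lu pages\n", m->base_gfn, m->npages);

	return 0;
}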
kvm_types.h
62 struct kvm_memory_slot *memslot; member
/linux-4.1.27/arch/mips/kvm/
mips.c
200 struct kvm_memory_slot *memslot, in kvm_arch_prepare_memory_region() argument
974 struct kvm_memory_slot *memslot; in kvm_vm_ioctl_get_dirty_log() local
988 memslot = id_to_memslot(kvm->memslots, log->slot); in kvm_vm_ioctl_get_dirty_log()
990 ga = memslot->base_gfn << PAGE_SHIFT; in kvm_vm_ioctl_get_dirty_log()
991 ga_end = ga + (memslot->npages << PAGE_SHIFT); in kvm_vm_ioctl_get_dirty_log()
996 n = kvm_dirty_bitmap_bytes(memslot); in kvm_vm_ioctl_get_dirty_log()
997 memset(memslot->dirty_bitmap, 0, n); in kvm_vm_ioctl_get_dirty_log()
/linux-4.1.27/arch/ia64/include/asm/sn/
geo.h
62 char memslot; /* The memory slot on the bus */ member
/linux-4.1.27/arch/s390/kvm/
kvm-s390.c
212 struct kvm_memory_slot *memslot) in kvm_s390_sync_dirty_log() argument
220 last_gfn = memslot->base_gfn + memslot->npages; in kvm_s390_sync_dirty_log()
221 for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) { in kvm_s390_sync_dirty_log()
222 address = gfn_to_hva_memslot(memslot, cur_gfn); in kvm_s390_sync_dirty_log()
239 struct kvm_memory_slot *memslot; in kvm_vm_ioctl_get_dirty_log() local
248 memslot = id_to_memslot(kvm->memslots, log->slot); in kvm_vm_ioctl_get_dirty_log()
250 if (!memslot->dirty_bitmap) in kvm_vm_ioctl_get_dirty_log()
253 kvm_s390_sync_dirty_log(kvm, memslot); in kvm_vm_ioctl_get_dirty_log()
260 n = kvm_dirty_bitmap_bytes(memslot); in kvm_vm_ioctl_get_dirty_log()
261 memset(memslot->dirty_bitmap, 0, n); in kvm_vm_ioctl_get_dirty_log()
[all …]
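kvm_s390_sync_dirty_log() above visits each gfn in the slot and translates it to a host virtual address with gfn_to_hva_memslot(), which is simply the slot's HVA base plus the page offset within the slot. A standalone model of that walk, using an exclusive end bound and simplified types:

/* Standalone model of the per-page walk in kvm_s390_sync_dirty_log()
 * above. gfn_to_hva_memslot() is modeled as the slot's HVA base plus
 * the slot-relative page offset. */
#include <stdio.h>

#define PAGE_SHIFT 12

struct memslot_model {
	unsigned long base_gfn;
	unsigned long npages;
	unsigned long userspace_addr;   /* HVA backing the slot */
};

static unsigned long gfn_to_hva(const struct memslot_model *s, unsigned long gfn)
{
	return s->userspace_addr + ((gfn - s->base_gfn) << PAGE_SHIFT);
}

int main(void)
{
	struct memslot_model s = {
		.base_gfn = 0x200, .npages = 4, .userspace_addr = 0x7f0000000000UL,
	};

	/* Walk every page in the slot, as the sync loop above does. */
	for (unsigned long gfn = s.base_gfn; gfn < s.base_gfn + s.npages; gfn++)
		printf("gfn 0x%lx -> hva 0x%lx\n", gfn, gfn_to_hva(&s, gfn));
	return 0;
}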
/linux-4.1.27/arch/x86/include/asm/
kvm_host.h
872 struct kvm_memory_slot *memslot);
874 struct kvm_memory_slot *memslot);
876 struct kvm_memory_slot *memslot);
878 struct kvm_memory_slot *memslot);
880 struct kvm_memory_slot *memslot);
/linux-4.1.27/Documentation/virtual/kvm/
mmu.txt
420 information in leaf sptes. When a new memslot is added or an existing
421 memslot is changed, this information may become stale and needs to be
446 memslot update, while some SRCU readers might be using the old copy. We do not