Lines matching refs: kvm  (identifier cross-reference; the lines below are from KVM's common code, virt/kvm/kvm_main.c)

106 static void mark_page_dirty_in_slot(struct kvm *kvm,
151 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req) in kvm_make_all_cpus_request() argument
161 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_make_all_cpus_request()
184 void kvm_flush_remote_tlbs(struct kvm *kvm) in kvm_flush_remote_tlbs() argument
186 long dirty_count = kvm->tlbs_dirty; in kvm_flush_remote_tlbs()
189 if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) in kvm_flush_remote_tlbs()
190 ++kvm->stat.remote_tlb_flush; in kvm_flush_remote_tlbs()
191 cmpxchg(&kvm->tlbs_dirty, dirty_count, 0); in kvm_flush_remote_tlbs()
196 void kvm_reload_remote_mmus(struct kvm *kvm) in kvm_reload_remote_mmus() argument
198 kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD); in kvm_reload_remote_mmus()
201 void kvm_make_mclock_inprogress_request(struct kvm *kvm) in kvm_make_mclock_inprogress_request() argument
203 kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS); in kvm_make_mclock_inprogress_request()
206 void kvm_make_scan_ioapic_request(struct kvm *kvm) in kvm_make_scan_ioapic_request() argument
208 kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC); in kvm_make_scan_ioapic_request()
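
kvm_make_all_cpus_request() sets a request bit on every vCPU and kicks it; the wrappers above (TLB flush, MMU reload, mclock, ioapic scan) differ only in the bit they raise. A minimal sketch of the consumer side, assuming the usual kvm_check_request() pattern in arch vcpu-run loops; the two handlers are hypothetical stand-ins:

    #include <linux/kvm_host.h>

    /* Hypothetical arch hooks, stubbed so the sketch is self-contained. */
    static void example_flush_tlb(struct kvm_vcpu *vcpu) { }
    static void example_reload_mmu(struct kvm_vcpu *vcpu) { }

    /* Sketch: how an arch vcpu loop typically consumes request bits
     * raised by kvm_make_all_cpus_request(). */
    static void example_service_requests(struct kvm_vcpu *vcpu)
    {
        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
            example_flush_tlb(vcpu);
        if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
            example_reload_mmu(vcpu);
    }
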
211 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) in kvm_vcpu_init() argument
218 vcpu->kvm = kvm; in kvm_vcpu_init()
256 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) in mmu_notifier_to_kvm()
258 return container_of(mn, struct kvm, mmu_notifier); in mmu_notifier_to_kvm()
265 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_invalidate_page() local
286 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_invalidate_page()
287 spin_lock(&kvm->mmu_lock); in kvm_mmu_notifier_invalidate_page()
289 kvm->mmu_notifier_seq++; in kvm_mmu_notifier_invalidate_page()
290 need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty; in kvm_mmu_notifier_invalidate_page()
293 kvm_flush_remote_tlbs(kvm); in kvm_mmu_notifier_invalidate_page()
295 spin_unlock(&kvm->mmu_lock); in kvm_mmu_notifier_invalidate_page()
297 kvm_arch_mmu_notifier_invalidate_page(kvm, address); in kvm_mmu_notifier_invalidate_page()
299 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_invalidate_page()
307 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_change_pte() local
310 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_change_pte()
311 spin_lock(&kvm->mmu_lock); in kvm_mmu_notifier_change_pte()
312 kvm->mmu_notifier_seq++; in kvm_mmu_notifier_change_pte()
313 kvm_set_spte_hva(kvm, address, pte); in kvm_mmu_notifier_change_pte()
314 spin_unlock(&kvm->mmu_lock); in kvm_mmu_notifier_change_pte()
315 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_change_pte()
323 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_invalidate_range_start() local
326 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_invalidate_range_start()
327 spin_lock(&kvm->mmu_lock); in kvm_mmu_notifier_invalidate_range_start()
333 kvm->mmu_notifier_count++; in kvm_mmu_notifier_invalidate_range_start()
334 need_tlb_flush = kvm_unmap_hva_range(kvm, start, end); in kvm_mmu_notifier_invalidate_range_start()
335 need_tlb_flush |= kvm->tlbs_dirty; in kvm_mmu_notifier_invalidate_range_start()
338 kvm_flush_remote_tlbs(kvm); in kvm_mmu_notifier_invalidate_range_start()
340 spin_unlock(&kvm->mmu_lock); in kvm_mmu_notifier_invalidate_range_start()
341 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_invalidate_range_start()
349 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_invalidate_range_end() local
351 spin_lock(&kvm->mmu_lock); in kvm_mmu_notifier_invalidate_range_end()
357 kvm->mmu_notifier_seq++; in kvm_mmu_notifier_invalidate_range_end()
364 kvm->mmu_notifier_count--; in kvm_mmu_notifier_invalidate_range_end()
365 spin_unlock(&kvm->mmu_lock); in kvm_mmu_notifier_invalidate_range_end()
367 BUG_ON(kvm->mmu_notifier_count < 0); in kvm_mmu_notifier_invalidate_range_end()
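
invalidate_range_start/end bracket an invalidation by raising mmu_notifier_count and bumping mmu_notifier_seq, so page-fault code can detect that a translation it resolved may already be stale. A hedged sketch of that consumer pattern, using mmu_notifier_retry() from <linux/kvm_host.h>; the actual mapping step is omitted as hypothetical:

    #include <linux/kvm_host.h>

    /* Sketch: resolve a pfn outside mmu_lock, then retry if an
     * invalidate_range_start/end raced with us. */
    static int example_map_gfn(struct kvm *kvm, gfn_t gfn)
    {
        unsigned long mmu_seq = kvm->mmu_notifier_seq;
        pfn_t pfn;

        smp_rmb();                       /* read seq before the pfn lookup */
        pfn = gfn_to_pfn(kvm, gfn);      /* may sleep / fault in the page */
        if (is_error_pfn(pfn))
            return -EFAULT;

        spin_lock(&kvm->mmu_lock);
        if (mmu_notifier_retry(kvm, mmu_seq)) {
            /* an invalidation ran meanwhile; drop the pfn and retry */
            spin_unlock(&kvm->mmu_lock);
            kvm_release_pfn_clean(pfn);
            return -EAGAIN;
        }
        /* ... install the mapping here (hypothetical arch step) ... */
        spin_unlock(&kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
        return 0;
    }
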
375 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_clear_flush_young() local
378 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_clear_flush_young()
379 spin_lock(&kvm->mmu_lock); in kvm_mmu_notifier_clear_flush_young()
381 young = kvm_age_hva(kvm, start, end); in kvm_mmu_notifier_clear_flush_young()
383 kvm_flush_remote_tlbs(kvm); in kvm_mmu_notifier_clear_flush_young()
385 spin_unlock(&kvm->mmu_lock); in kvm_mmu_notifier_clear_flush_young()
386 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_clear_flush_young()
395 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_test_young() local
398 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_test_young()
399 spin_lock(&kvm->mmu_lock); in kvm_mmu_notifier_test_young()
400 young = kvm_test_age_hva(kvm, address); in kvm_mmu_notifier_test_young()
401 spin_unlock(&kvm->mmu_lock); in kvm_mmu_notifier_test_young()
402 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_test_young()
410 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_release() local
413 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_release()
414 kvm_arch_flush_shadow_all(kvm); in kvm_mmu_notifier_release()
415 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_release()
428 static int kvm_init_mmu_notifier(struct kvm *kvm) in kvm_init_mmu_notifier() argument
430 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; in kvm_init_mmu_notifier()
431 return mmu_notifier_register(&kvm->mmu_notifier, current->mm); in kvm_init_mmu_notifier()
436 static int kvm_init_mmu_notifier(struct kvm *kvm) in kvm_init_mmu_notifier() argument
443 static void kvm_init_memslots_id(struct kvm *kvm) in kvm_init_memslots_id() argument
446 struct kvm_memslots *slots = kvm->memslots; in kvm_init_memslots_id()
452 static struct kvm *kvm_create_vm(unsigned long type) in kvm_create_vm()
455 struct kvm *kvm = kvm_arch_alloc_vm(); in kvm_create_vm() local
457 if (!kvm) in kvm_create_vm()
460 spin_lock_init(&kvm->mmu_lock); in kvm_create_vm()
462 kvm->mm = current->mm; in kvm_create_vm()
463 kvm_eventfd_init(kvm); in kvm_create_vm()
464 mutex_init(&kvm->lock); in kvm_create_vm()
465 mutex_init(&kvm->irq_lock); in kvm_create_vm()
466 mutex_init(&kvm->slots_lock); in kvm_create_vm()
467 atomic_set(&kvm->users_count, 1); in kvm_create_vm()
468 INIT_LIST_HEAD(&kvm->devices); in kvm_create_vm()
470 r = kvm_arch_init_vm(kvm, type); in kvm_create_vm()
479 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); in kvm_create_vm()
485 kvm->memslots = kvm_kvzalloc(sizeof(struct kvm_memslots)); in kvm_create_vm()
486 if (!kvm->memslots) in kvm_create_vm()
493 kvm->memslots->generation = -150; in kvm_create_vm()
495 kvm_init_memslots_id(kvm); in kvm_create_vm()
496 if (init_srcu_struct(&kvm->srcu)) in kvm_create_vm()
498 if (init_srcu_struct(&kvm->irq_srcu)) in kvm_create_vm()
501 kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus), in kvm_create_vm()
503 if (!kvm->buses[i]) in kvm_create_vm()
507 r = kvm_init_mmu_notifier(kvm); in kvm_create_vm()
512 list_add(&kvm->vm_list, &vm_list); in kvm_create_vm()
515 return kvm; in kvm_create_vm()
518 cleanup_srcu_struct(&kvm->irq_srcu); in kvm_create_vm()
520 cleanup_srcu_struct(&kvm->srcu); in kvm_create_vm()
525 kfree(kvm->buses[i]); in kvm_create_vm()
526 kvfree(kvm->memslots); in kvm_create_vm()
527 kvm_arch_free_vm(kvm); in kvm_create_vm()
556 static void kvm_free_physmem_slot(struct kvm *kvm, struct kvm_memory_slot *free, in kvm_free_physmem_slot() argument
562 kvm_arch_free_memslot(kvm, free, dont); in kvm_free_physmem_slot()
567 static void kvm_free_physmem(struct kvm *kvm) in kvm_free_physmem() argument
569 struct kvm_memslots *slots = kvm->memslots; in kvm_free_physmem()
573 kvm_free_physmem_slot(kvm, memslot, NULL); in kvm_free_physmem()
575 kvfree(kvm->memslots); in kvm_free_physmem()
578 static void kvm_destroy_devices(struct kvm *kvm) in kvm_destroy_devices() argument
582 list_for_each_safe(node, tmp, &kvm->devices) { in kvm_destroy_devices()
591 static void kvm_destroy_vm(struct kvm *kvm) in kvm_destroy_vm() argument
594 struct mm_struct *mm = kvm->mm; in kvm_destroy_vm()
596 kvm_arch_sync_events(kvm); in kvm_destroy_vm()
598 list_del(&kvm->vm_list); in kvm_destroy_vm()
600 kvm_free_irq_routing(kvm); in kvm_destroy_vm()
602 kvm_io_bus_destroy(kvm->buses[i]); in kvm_destroy_vm()
603 kvm_coalesced_mmio_free(kvm); in kvm_destroy_vm()
605 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); in kvm_destroy_vm()
607 kvm_arch_flush_shadow_all(kvm); in kvm_destroy_vm()
609 kvm_arch_destroy_vm(kvm); in kvm_destroy_vm()
610 kvm_destroy_devices(kvm); in kvm_destroy_vm()
611 kvm_free_physmem(kvm); in kvm_destroy_vm()
612 cleanup_srcu_struct(&kvm->irq_srcu); in kvm_destroy_vm()
613 cleanup_srcu_struct(&kvm->srcu); in kvm_destroy_vm()
614 kvm_arch_free_vm(kvm); in kvm_destroy_vm()
619 void kvm_get_kvm(struct kvm *kvm) in kvm_get_kvm() argument
621 atomic_inc(&kvm->users_count); in kvm_get_kvm()
625 void kvm_put_kvm(struct kvm *kvm) in kvm_put_kvm() argument
627 if (atomic_dec_and_test(&kvm->users_count)) in kvm_put_kvm()
628 kvm_destroy_vm(kvm); in kvm_put_kvm()
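
kvm_get_kvm()/kvm_put_kvm() reference-count the VM through users_count; anything that can outlive the VM file descriptor (vcpu fds, device fds, as seen later in this file) pins the VM this way. A small sketch of the pattern, using a hypothetical object:

    #include <linux/kvm_host.h>
    #include <linux/slab.h>

    struct example_obj {                 /* hypothetical VM-scoped object */
        struct kvm *kvm;
    };

    static struct example_obj *example_obj_create(struct kvm *kvm)
    {
        struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
            return NULL;
        obj->kvm = kvm;
        kvm_get_kvm(kvm);                /* VM cannot be torn down under us */
        return obj;
    }

    static void example_obj_destroy(struct example_obj *obj)
    {
        struct kvm *kvm = obj->kvm;

        kfree(obj);
        kvm_put_kvm(kvm);                /* may trigger kvm_destroy_vm() */
    }
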
635 struct kvm *kvm = filp->private_data; in kvm_vm_release() local
637 kvm_irqfd_release(kvm); in kvm_vm_release()
639 kvm_put_kvm(kvm); in kvm_vm_release()
729 static struct kvm_memslots *install_new_memslots(struct kvm *kvm, in install_new_memslots() argument
732 struct kvm_memslots *old_memslots = kvm->memslots; in install_new_memslots()
741 rcu_assign_pointer(kvm->memslots, slots); in install_new_memslots()
742 synchronize_srcu_expedited(&kvm->srcu); in install_new_memslots()
751 kvm_arch_memslots_updated(kvm); in install_new_memslots()
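
install_new_memslots() publishes the new array with rcu_assign_pointer() and then waits out readers with synchronize_srcu_expedited(); the reader side simply holds kvm->srcu across any memslot-derived lookup. A sketch of such a reader; the single-byte read is illustrative:

    #include <linux/kvm_host.h>
    #include <linux/uaccess.h>

    static int example_read_byte(struct kvm *kvm, gfn_t gfn, u8 *val)
    {
        unsigned long hva;
        int idx, r = -EFAULT;

        idx = srcu_read_lock(&kvm->srcu);      /* pin the current memslots */
        hva = gfn_to_hva(kvm, gfn);
        if (!kvm_is_error_hva(hva) &&
            !__copy_from_user(val, (void __user *)hva, 1))
            r = 0;
        srcu_read_unlock(&kvm->srcu, idx);     /* let a pending update finish */
        return r;
    }
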
764 int __kvm_set_memory_region(struct kvm *kvm, in __kvm_set_memory_region() argument
797 slot = id_to_memslot(kvm->memslots, mem->slot); in __kvm_set_memory_region()
840 kvm_for_each_memslot(slot, kvm->memslots) { in __kvm_set_memory_region()
858 if (kvm_arch_create_memslot(kvm, &new, npages)) in __kvm_set_memory_region()
871 memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots)); in __kvm_set_memory_region()
877 old_memslots = install_new_memslots(kvm, slots); in __kvm_set_memory_region()
880 kvm_iommu_unmap_pages(kvm, &old); in __kvm_set_memory_region()
888 kvm_arch_flush_shadow_memslot(kvm, slot); in __kvm_set_memory_region()
898 r = kvm_arch_prepare_memory_region(kvm, &new, mem, change); in __kvm_set_memory_region()
909 old_memslots = install_new_memslots(kvm, slots); in __kvm_set_memory_region()
911 kvm_arch_commit_memory_region(kvm, mem, &old, change); in __kvm_set_memory_region()
913 kvm_free_physmem_slot(kvm, &old, &new); in __kvm_set_memory_region()
926 r = kvm_iommu_map_pages(kvm, &new); in __kvm_set_memory_region()
935 kvm_free_physmem_slot(kvm, &new, &old); in __kvm_set_memory_region()
941 int kvm_set_memory_region(struct kvm *kvm, in kvm_set_memory_region() argument
946 mutex_lock(&kvm->slots_lock); in kvm_set_memory_region()
947 r = __kvm_set_memory_region(kvm, mem); in kvm_set_memory_region()
948 mutex_unlock(&kvm->slots_lock); in kvm_set_memory_region()
953 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, in kvm_vm_ioctl_set_memory_region() argument
958 return kvm_set_memory_region(kvm, mem); in kvm_vm_ioctl_set_memory_region()
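
kvm_set_memory_region() is reached from the KVM_SET_USER_MEMORY_REGION vm ioctl. A hedged userspace sketch of that call, backing the first megabyte of guest physical memory with an anonymous mapping (slot number and size are arbitrary):

    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/kvm.h>

    static int example_add_slot(int vm_fd)
    {
        size_t size = 1 << 20;
        void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        struct kvm_userspace_memory_region region = {
            .slot            = 0,
            .flags           = 0,     /* e.g. KVM_MEM_LOG_DIRTY_PAGES */
            .guest_phys_addr = 0,
            .memory_size     = size,
            .userspace_addr  = (unsigned long)mem,
        };

        if (mem == MAP_FAILED)
            return -1;
        return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
    }
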
961 int kvm_get_dirty_log(struct kvm *kvm, in kvm_get_dirty_log() argument
973 memslot = id_to_memslot(kvm->memslots, log->slot); in kvm_get_dirty_log()
1019 int kvm_get_dirty_log_protect(struct kvm *kvm, in kvm_get_dirty_log_protect() argument
1032 memslot = id_to_memslot(kvm->memslots, log->slot); in kvm_get_dirty_log_protect()
1044 spin_lock(&kvm->mmu_lock); in kvm_get_dirty_log_protect()
1060 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, in kvm_get_dirty_log_protect()
1065 spin_unlock(&kvm->mmu_lock); in kvm_get_dirty_log_protect()
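
Both dirty-log helpers service the KVM_GET_DIRTY_LOG vm ioctl (the _protect variant on architectures that write-protect pages as the log is harvested). A userspace sketch, assuming the caller knows the slot's size in pages:

    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Returns a caller-freed bitmap with one bit per guest page, or NULL. */
    static void *example_get_dirty_log(int vm_fd, unsigned long slot_pages)
    {
        struct kvm_dirty_log log = { .slot = 0 };
        size_t bytes = ((slot_pages + 63) / 64) * 8;   /* 64-bit aligned */

        log.dirty_bitmap = calloc(1, bytes);
        if (!log.dirty_bitmap)
            return NULL;
        if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
            free(log.dirty_bitmap);
            return NULL;
        }
        return log.dirty_bitmap;
    }
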
1089 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) in gfn_to_memslot() argument
1091 return __gfn_to_memslot(kvm_memslots(kvm), gfn); in gfn_to_memslot()
1095 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) in kvm_is_visible_gfn() argument
1097 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); in kvm_is_visible_gfn()
1107 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn) in kvm_host_page_size() argument
1114 addr = gfn_to_hva(kvm, gfn); in kvm_host_page_size()
1164 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) in gfn_to_hva() argument
1166 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); in gfn_to_hva()
1185 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) in gfn_to_hva_prot() argument
1187 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); in gfn_to_hva_prot()
1381 static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async, in __gfn_to_pfn() argument
1389 slot = gfn_to_memslot(kvm, gfn); in __gfn_to_pfn()
1395 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn) in gfn_to_pfn_atomic() argument
1397 return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL); in gfn_to_pfn_atomic()
1401 pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async, in gfn_to_pfn_async() argument
1404 return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable); in gfn_to_pfn_async()
1408 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) in gfn_to_pfn() argument
1410 return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL); in gfn_to_pfn()
1414 pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, in gfn_to_pfn_prot() argument
1417 return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable); in gfn_to_pfn_prot()
1432 int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages, in gfn_to_page_many_atomic() argument
1438 addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry); in gfn_to_page_many_atomic()
1462 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) in gfn_to_page() argument
1466 pfn = gfn_to_pfn(kvm, gfn); in gfn_to_page()
1534 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, in kvm_read_guest_page() argument
1540 addr = gfn_to_hva_prot(kvm, gfn, NULL); in kvm_read_guest_page()
1550 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) in kvm_read_guest() argument
1558 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); in kvm_read_guest()
1570 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, in kvm_read_guest_atomic() argument
1578 addr = gfn_to_hva_prot(kvm, gfn, NULL); in kvm_read_guest_atomic()
1590 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, in kvm_write_guest_page() argument
1596 addr = gfn_to_hva(kvm, gfn); in kvm_write_guest_page()
1602 mark_page_dirty(kvm, gfn); in kvm_write_guest_page()
1607 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, in kvm_write_guest() argument
1616 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); in kvm_write_guest()
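
kvm_read_guest()/kvm_write_guest() copy arbitrary spans by gpa, splitting at page boundaries; the write path also marks the touched pages dirty via mark_page_dirty(). A sketch of in-kernel use; the shared structure is hypothetical, and the caller is assumed to hold kvm->srcu as usual:

    #include <linux/kvm_host.h>

    struct example_shared {              /* hypothetical guest/host structure */
        u32 flags;
        u32 token;
    };

    static int example_bump_token(struct kvm *kvm, gpa_t gpa)
    {
        struct example_shared s;
        int r;

        r = kvm_read_guest(kvm, gpa, &s, sizeof(s));
        if (r)
            return r;
        s.token++;
        return kvm_write_guest(kvm, gpa, &s, sizeof(s));
    }
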
1628 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_gfn_to_hva_cache_init() argument
1631 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_gfn_to_hva_cache_init()
1641 ghc->memslot = gfn_to_memslot(kvm, start_gfn); in kvm_gfn_to_hva_cache_init()
1651 ghc->memslot = gfn_to_memslot(kvm, start_gfn); in kvm_gfn_to_hva_cache_init()
1665 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_write_guest_cached() argument
1668 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_write_guest_cached()
1674 kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); in kvm_write_guest_cached()
1677 return kvm_write_guest(kvm, ghc->gpa, data, len); in kvm_write_guest_cached()
1685 mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT); in kvm_write_guest_cached()
1691 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_read_guest_cached() argument
1694 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_read_guest_cached()
1700 kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); in kvm_read_guest_cached()
1703 return kvm_read_guest(kvm, ghc->gpa, data, len); in kvm_read_guest_cached()
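
The *_cached variants avoid a memslot walk per access: kvm_gfn_to_hva_cache_init() resolves the gpa once into a gfn_to_hva_cache, and subsequent reads/writes go through the cached hva until the memslot generation changes. A sketch, assuming the cache lives in some longer-lived per-VM or per-vCPU state:

    #include <linux/kvm_host.h>

    static int example_cached_write(struct kvm *kvm,
                                    struct gfn_to_hva_cache *ghc,
                                    gpa_t gpa, u64 value)
    {
        int r = kvm_gfn_to_hva_cache_init(kvm, ghc, gpa, sizeof(value));

        if (r)
            return r;
        return kvm_write_guest_cached(kvm, ghc, &value, sizeof(value));
    }
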
1716 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len) in kvm_clear_guest_page() argument
1720 return kvm_write_guest_page(kvm, gfn, zero_page, offset, len); in kvm_clear_guest_page()
1724 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) in kvm_clear_guest() argument
1732 ret = kvm_clear_guest_page(kvm, gfn, offset, seg); in kvm_clear_guest()
1743 static void mark_page_dirty_in_slot(struct kvm *kvm, in mark_page_dirty_in_slot() argument
1754 void mark_page_dirty(struct kvm *kvm, gfn_t gfn) in mark_page_dirty() argument
1758 memslot = gfn_to_memslot(kvm, gfn); in mark_page_dirty()
1759 mark_page_dirty_in_slot(kvm, memslot, gfn); in mark_page_dirty()
1907 struct kvm *kvm = me->kvm; in kvm_vcpu_on_spin() local
1909 int last_boosted_vcpu = me->kvm->last_boosted_vcpu; in kvm_vcpu_on_spin()
1924 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_vcpu_on_spin()
1941 kvm->last_boosted_vcpu = i; in kvm_vcpu_on_spin()
1970 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); in kvm_vcpu_fault()
1993 kvm_put_kvm(vcpu->kvm); in kvm_vcpu_release()
2018 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) in kvm_vm_ioctl_create_vcpu() argument
2026 vcpu = kvm_arch_vcpu_create(kvm, id); in kvm_vm_ioctl_create_vcpu()
2036 mutex_lock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
2041 if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) { in kvm_vm_ioctl_create_vcpu()
2046 kvm_for_each_vcpu(r, v, kvm) in kvm_vm_ioctl_create_vcpu()
2052 BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]); in kvm_vm_ioctl_create_vcpu()
2055 kvm_get_kvm(kvm); in kvm_vm_ioctl_create_vcpu()
2058 kvm_put_kvm(kvm); in kvm_vm_ioctl_create_vcpu()
2062 kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu; in kvm_vm_ioctl_create_vcpu()
2064 atomic_inc(&kvm->online_vcpus); in kvm_vm_ioctl_create_vcpu()
2066 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
2071 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
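
kvm_vm_ioctl_create_vcpu() backs the KVM_CREATE_VCPU vm ioctl; the returned fd is then mmap()ed to reach the shared kvm_run area. A userspace sketch with minimal error handling:

    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/kvm.h>

    static struct kvm_run *example_create_vcpu(int kvm_fd, int vm_fd,
                                               int *vcpu_fd)
    {
        struct kvm_run *run;
        long mmap_size;

        *vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);   /* vcpu id 0 */
        if (*vcpu_fd < 0)
            return NULL;
        mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
        if (mmap_size <= 0)
            return NULL;
        run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                   MAP_SHARED, *vcpu_fd, 0);
        return run == MAP_FAILED ? NULL : run;
    }
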
2097 if (vcpu->kvm->mm != current->mm) in kvm_vcpu_ioctl()
2299 if (vcpu->kvm->mm != current->mm) in kvm_vcpu_compat_ioctl()
2375 struct kvm *kvm = dev->kvm; in kvm_device_release() local
2377 kvm_put_kvm(kvm); in kvm_device_release()
2426 static int kvm_ioctl_create_device(struct kvm *kvm, in kvm_ioctl_create_device() argument
2449 dev->kvm = kvm; in kvm_ioctl_create_device()
2463 list_add(&dev->vm_node, &kvm->devices); in kvm_ioctl_create_device()
2464 kvm_get_kvm(kvm); in kvm_ioctl_create_device()
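
kvm_ioctl_create_device() handles KVM_CREATE_DEVICE: on success it links the device into kvm->devices, takes a VM reference, and hands back a new fd. A userspace sketch requesting a VFIO device; the device type is only an example:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int example_create_device(int vm_fd)
    {
        struct kvm_create_device cd = {
            .type  = KVM_DEV_TYPE_VFIO,
            .flags = 0,        /* or KVM_CREATE_DEVICE_TEST to probe only */
        };

        if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
            return -1;
        return cd.fd;          /* new device fd filled in by the kernel */
    }
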
2469 static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) in kvm_vm_ioctl_check_extension_generic() argument
2495 return kvm_vm_ioctl_check_extension(kvm, arg); in kvm_vm_ioctl_check_extension_generic()
2501 struct kvm *kvm = filp->private_data; in kvm_vm_ioctl() local
2505 if (kvm->mm != current->mm) in kvm_vm_ioctl()
2509 r = kvm_vm_ioctl_create_vcpu(kvm, arg); in kvm_vm_ioctl()
2519 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); in kvm_vm_ioctl()
2528 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); in kvm_vm_ioctl()
2538 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); in kvm_vm_ioctl()
2547 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); in kvm_vm_ioctl()
2557 r = kvm_irqfd(kvm, &data); in kvm_vm_ioctl()
2566 r = kvm_ioeventfd(kvm, &data); in kvm_vm_ioctl()
2572 mutex_lock(&kvm->lock); in kvm_vm_ioctl()
2573 if (atomic_read(&kvm->online_vcpus) != 0) in kvm_vm_ioctl()
2576 kvm->bsp_vcpu_id = arg; in kvm_vm_ioctl()
2577 mutex_unlock(&kvm->lock); in kvm_vm_ioctl()
2587 r = kvm_send_userspace_msi(kvm, &msi); in kvm_vm_ioctl()
2600 r = kvm_vm_ioctl_irq_line(kvm, &irq_event, in kvm_vm_ioctl()
2638 r = kvm_set_irq_routing(kvm, entries, routing.nr, in kvm_vm_ioctl()
2652 r = kvm_ioctl_create_device(kvm, &cd); in kvm_vm_ioctl()
2664 r = kvm_vm_ioctl_check_extension_generic(kvm, arg); in kvm_vm_ioctl()
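
Among the cases above, KVM_IRQFD wires an eventfd to a guest GSI so a write to the eventfd injects the interrupt without a separate ioctl per injection. A userspace sketch, assuming an in-kernel irqchip has already been created:

    #include <string.h>
    #include <sys/eventfd.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int example_irqfd(int vm_fd, unsigned int gsi)
    {
        struct kvm_irqfd irqfd;
        int efd = eventfd(0, EFD_CLOEXEC);

        if (efd < 0)
            return -1;
        memset(&irqfd, 0, sizeof(irqfd));
        irqfd.fd  = efd;
        irqfd.gsi = gsi;
        if (ioctl(vm_fd, KVM_IRQFD, &irqfd) < 0)
            return -1;
        return efd;            /* write 1 to this fd to raise the GSI */
    }
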
2686 struct kvm *kvm = filp->private_data; in kvm_vm_compat_ioctl() local
2689 if (kvm->mm != current->mm) in kvm_vm_compat_ioctl()
2705 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); in kvm_vm_compat_ioctl()
2729 struct kvm *kvm; in kvm_dev_ioctl_create_vm() local
2731 kvm = kvm_create_vm(type); in kvm_dev_ioctl_create_vm()
2732 if (IS_ERR(kvm)) in kvm_dev_ioctl_create_vm()
2733 return PTR_ERR(kvm); in kvm_dev_ioctl_create_vm()
2735 r = kvm_coalesced_mmio_init(kvm); in kvm_dev_ioctl_create_vm()
2737 kvm_put_kvm(kvm); in kvm_dev_ioctl_create_vm()
2741 r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR | O_CLOEXEC); in kvm_dev_ioctl_create_vm()
2743 kvm_put_kvm(kvm); in kvm_dev_ioctl_create_vm()
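
kvm_dev_ioctl_create_vm() is the kernel end of KVM_CREATE_VM on /dev/kvm. A userspace sketch of the whole handshake, including the customary API-version check:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int example_create_vm(void)
    {
        int kvm_fd, vm_fd;

        kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
        if (kvm_fd < 0)
            return -1;
        if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION) {
            close(kvm_fd);
            return -1;
        }
        vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);   /* type 0: default VM */
        close(kvm_fd);                             /* vm_fd keeps the VM alive */
        return vm_fd;
    }
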
3038 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_write()
3055 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_write_cookie()
3105 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_read()
3112 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, in kvm_io_bus_register_dev() argument
3117 bus = kvm->buses[bus_idx]; in kvm_io_bus_register_dev()
3129 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); in kvm_io_bus_register_dev()
3130 synchronize_srcu_expedited(&kvm->srcu); in kvm_io_bus_register_dev()
3137 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, in kvm_io_bus_unregister_dev() argument
3143 bus = kvm->buses[bus_idx]; in kvm_io_bus_unregister_dev()
3164 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); in kvm_io_bus_unregister_dev()
3165 synchronize_srcu_expedited(&kvm->srcu); in kvm_io_bus_unregister_dev()
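
kvm_io_bus_register_dev()/kvm_io_bus_unregister_dev() update kvm->buses with the same publish-then-synchronize_srcu_expedited() scheme as the memslots, so writers serialise on kvm->slots_lock. A hedged in-kernel sketch of registration; the kvm_io_device is assumed to have been set up elsewhere with kvm_iodevice_init(), and its callback signatures differ across kernel versions, so they are not shown:

    #include <linux/kvm_host.h>

    static int example_register_mmio(struct kvm *kvm,
                                     struct kvm_io_device *dev,
                                     gpa_t addr, int len)
    {
        int r;

        mutex_lock(&kvm->slots_lock);     /* serialise bus updates */
        r = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, len, dev);
        mutex_unlock(&kvm->slots_lock);
        return r;
    }
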
3177 struct kvm *kvm; in vm_stat_get() local
3181 list_for_each_entry(kvm, &vm_list, vm_list) in vm_stat_get()
3182 *val += *(u32 *)((void *)kvm + offset); in vm_stat_get()
3192 struct kvm *kvm; in vcpu_stat_get() local
3198 list_for_each_entry(kvm, &vm_list, vm_list) in vcpu_stat_get()
3199 kvm_for_each_vcpu(i, vcpu, kvm) in vcpu_stat_get()