Lines matching refs: kvm — cross-reference hits for struct kvm in virt/kvm/kvm_main.c. Each hit shows the source line number, the matching line, and the enclosing function; a trailing "argument" or "local" marks whether kvm is a function parameter or a local variable on that line.

159 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)  in kvm_make_all_cpus_request()  argument
169 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_make_all_cpus_request()
192 void kvm_flush_remote_tlbs(struct kvm *kvm) in kvm_flush_remote_tlbs() argument
194 long dirty_count = kvm->tlbs_dirty; in kvm_flush_remote_tlbs()
197 if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) in kvm_flush_remote_tlbs()
198 ++kvm->stat.remote_tlb_flush; in kvm_flush_remote_tlbs()
199 cmpxchg(&kvm->tlbs_dirty, dirty_count, 0); in kvm_flush_remote_tlbs()
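
The hits at 192-199 show the remote-flush pattern: snapshot kvm->tlbs_dirty, kick every vCPU with KVM_REQ_TLB_FLUSH, and clear the dirty count only if no further dirtying happened in the meantime. A simplified sketch reassembled from those fragments (it assumes the declarations from include/linux/kvm_host.h and is not a verbatim copy of the kernel function):

    void kvm_flush_remote_tlbs(struct kvm *kvm)
    {
        /* snapshot the dirty count before kicking the vCPUs */
        long dirty_count = kvm->tlbs_dirty;

        smp_mb();   /* order the snapshot against the requests below */

        /* true if at least one vCPU actually had to be kicked */
        if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
            ++kvm->stat.remote_tlb_flush;

        /*
         * Clear tlbs_dirty only if it still equals the snapshot; new
         * dirtying that raced with the flush stays pending for the
         * next flush.
         */
        cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
    }
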
204 void kvm_reload_remote_mmus(struct kvm *kvm) in kvm_reload_remote_mmus() argument
206 kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD); in kvm_reload_remote_mmus()
209 void kvm_make_mclock_inprogress_request(struct kvm *kvm) in kvm_make_mclock_inprogress_request() argument
211 kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS); in kvm_make_mclock_inprogress_request()
214 void kvm_make_scan_ioapic_request(struct kvm *kvm) in kvm_make_scan_ioapic_request() argument
216 kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC); in kvm_make_scan_ioapic_request()
219 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) in kvm_vcpu_init() argument
226 vcpu->kvm = kvm; in kvm_vcpu_init()
268 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) in mmu_notifier_to_kvm()
270 return container_of(mn, struct kvm, mmu_notifier); in mmu_notifier_to_kvm()
277 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_invalidate_page() local
298 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_invalidate_page()
299 spin_lock(&kvm->mmu_lock); in kvm_mmu_notifier_invalidate_page()
301 kvm->mmu_notifier_seq++; in kvm_mmu_notifier_invalidate_page()
302 need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty; in kvm_mmu_notifier_invalidate_page()
305 kvm_flush_remote_tlbs(kvm); in kvm_mmu_notifier_invalidate_page()
307 spin_unlock(&kvm->mmu_lock); in kvm_mmu_notifier_invalidate_page()
309 kvm_arch_mmu_notifier_invalidate_page(kvm, address); in kvm_mmu_notifier_invalidate_page()
311 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_invalidate_page()
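
The invalidate_page hits (277-311) illustrate the locking pattern every MMU notifier callback in this file follows: enter kvm->srcu to pin the memslots, take kvm->mmu_lock, bump kvm->mmu_notifier_seq so racing page faults can detect the invalidation and retry, unmap the host virtual address, and flush remote TLBs if anything was zapped. Reassembled from the fragments above (a sketch, not guaranteed to match the kernel line for line):

    static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
                                                 struct mm_struct *mm,
                                                 unsigned long address)
    {
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int need_tlb_flush, idx;

        idx = srcu_read_lock(&kvm->srcu);       /* pins the memslots */
        spin_lock(&kvm->mmu_lock);

        kvm->mmu_notifier_seq++;                /* invalidates in-flight faults */
        need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
        if (need_tlb_flush)
            kvm_flush_remote_tlbs(kvm);

        spin_unlock(&kvm->mmu_lock);

        kvm_arch_mmu_notifier_invalidate_page(kvm, address);

        srcu_read_unlock(&kvm->srcu, idx);
    }
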
319 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_change_pte() local
322 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_change_pte()
323 spin_lock(&kvm->mmu_lock); in kvm_mmu_notifier_change_pte()
324 kvm->mmu_notifier_seq++; in kvm_mmu_notifier_change_pte()
325 kvm_set_spte_hva(kvm, address, pte); in kvm_mmu_notifier_change_pte()
326 spin_unlock(&kvm->mmu_lock); in kvm_mmu_notifier_change_pte()
327 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_change_pte()
335 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_invalidate_range_start() local
338 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_invalidate_range_start()
339 spin_lock(&kvm->mmu_lock); in kvm_mmu_notifier_invalidate_range_start()
345 kvm->mmu_notifier_count++; in kvm_mmu_notifier_invalidate_range_start()
346 need_tlb_flush = kvm_unmap_hva_range(kvm, start, end); in kvm_mmu_notifier_invalidate_range_start()
347 need_tlb_flush |= kvm->tlbs_dirty; in kvm_mmu_notifier_invalidate_range_start()
350 kvm_flush_remote_tlbs(kvm); in kvm_mmu_notifier_invalidate_range_start()
352 spin_unlock(&kvm->mmu_lock); in kvm_mmu_notifier_invalidate_range_start()
353 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_invalidate_range_start()
361 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_invalidate_range_end() local
363 spin_lock(&kvm->mmu_lock); in kvm_mmu_notifier_invalidate_range_end()
369 kvm->mmu_notifier_seq++; in kvm_mmu_notifier_invalidate_range_end()
376 kvm->mmu_notifier_count--; in kvm_mmu_notifier_invalidate_range_end()
377 spin_unlock(&kvm->mmu_lock); in kvm_mmu_notifier_invalidate_range_end()
379 BUG_ON(kvm->mmu_notifier_count < 0); in kvm_mmu_notifier_invalidate_range_end()
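
Lines 335-379 are the two halves of a range invalidation: _start raises kvm->mmu_notifier_count so page faults back off while the host range is in flux, and _end bumps kvm->mmu_notifier_seq before dropping the count again; the BUG_ON catches unbalanced start/end calls. A sketch of just that pairing (the helper names range_start/range_end are illustrative, and the srcu/mmu_lock handling shown in the fragments is elided here):

    static void range_start(struct kvm *kvm, unsigned long start, unsigned long end)
    {
        /*
         * While mmu_notifier_count is elevated, page faults must not
         * install new mappings for this range; they retry instead.
         */
        kvm->mmu_notifier_count++;
        if (kvm_unmap_hva_range(kvm, start, end) | kvm->tlbs_dirty)
            kvm_flush_remote_tlbs(kvm);
    }

    static void range_end(struct kvm *kvm)
    {
        /*
         * Bump the sequence before dropping the count: a fault that
         * observes the lowered count must also observe the new
         * sequence number and retry its gfn-to-pfn translation.
         */
        kvm->mmu_notifier_seq++;
        kvm->mmu_notifier_count--;
        BUG_ON(kvm->mmu_notifier_count < 0);
    }
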
387 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_clear_flush_young() local
390 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_clear_flush_young()
391 spin_lock(&kvm->mmu_lock); in kvm_mmu_notifier_clear_flush_young()
393 young = kvm_age_hva(kvm, start, end); in kvm_mmu_notifier_clear_flush_young()
395 kvm_flush_remote_tlbs(kvm); in kvm_mmu_notifier_clear_flush_young()
397 spin_unlock(&kvm->mmu_lock); in kvm_mmu_notifier_clear_flush_young()
398 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_clear_flush_young()
408 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_clear_young() local
411 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_clear_young()
412 spin_lock(&kvm->mmu_lock); in kvm_mmu_notifier_clear_young()
426 young = kvm_age_hva(kvm, start, end); in kvm_mmu_notifier_clear_young()
427 spin_unlock(&kvm->mmu_lock); in kvm_mmu_notifier_clear_young()
428 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_clear_young()
437 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_test_young() local
440 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_test_young()
441 spin_lock(&kvm->mmu_lock); in kvm_mmu_notifier_test_young()
442 young = kvm_test_age_hva(kvm, address); in kvm_mmu_notifier_test_young()
443 spin_unlock(&kvm->mmu_lock); in kvm_mmu_notifier_test_young()
444 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_test_young()
452 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_release() local
455 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_release()
456 kvm_arch_flush_shadow_all(kvm); in kvm_mmu_notifier_release()
457 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_release()
471 static int kvm_init_mmu_notifier(struct kvm *kvm) in kvm_init_mmu_notifier() argument
473 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; in kvm_init_mmu_notifier()
474 return mmu_notifier_register(&kvm->mmu_notifier, current->mm); in kvm_init_mmu_notifier()
479 static int kvm_init_mmu_notifier(struct kvm *kvm) in kvm_init_mmu_notifier() argument
518 static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, in kvm_free_memslot() argument
524 kvm_arch_free_memslot(kvm, free, dont); in kvm_free_memslot()
529 static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots) in kvm_free_memslots() argument
537 kvm_free_memslot(kvm, memslot, NULL); in kvm_free_memslots()
542 static struct kvm *kvm_create_vm(unsigned long type) in kvm_create_vm()
545 struct kvm *kvm = kvm_arch_alloc_vm(); in kvm_create_vm() local
547 if (!kvm) in kvm_create_vm()
550 spin_lock_init(&kvm->mmu_lock); in kvm_create_vm()
552 kvm->mm = current->mm; in kvm_create_vm()
553 kvm_eventfd_init(kvm); in kvm_create_vm()
554 mutex_init(&kvm->lock); in kvm_create_vm()
555 mutex_init(&kvm->irq_lock); in kvm_create_vm()
556 mutex_init(&kvm->slots_lock); in kvm_create_vm()
557 atomic_set(&kvm->users_count, 1); in kvm_create_vm()
558 INIT_LIST_HEAD(&kvm->devices); in kvm_create_vm()
560 r = kvm_arch_init_vm(kvm, type); in kvm_create_vm()
569 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); in kvm_create_vm()
576 kvm->memslots[i] = kvm_alloc_memslots(); in kvm_create_vm()
577 if (!kvm->memslots[i]) in kvm_create_vm()
581 if (init_srcu_struct(&kvm->srcu)) in kvm_create_vm()
583 if (init_srcu_struct(&kvm->irq_srcu)) in kvm_create_vm()
586 kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus), in kvm_create_vm()
588 if (!kvm->buses[i]) in kvm_create_vm()
592 r = kvm_init_mmu_notifier(kvm); in kvm_create_vm()
597 list_add(&kvm->vm_list, &vm_list); in kvm_create_vm()
602 return kvm; in kvm_create_vm()
605 cleanup_srcu_struct(&kvm->irq_srcu); in kvm_create_vm()
607 cleanup_srcu_struct(&kvm->srcu); in kvm_create_vm()
612 kfree(kvm->buses[i]); in kvm_create_vm()
614 kvm_free_memslots(kvm, kvm->memslots[i]); in kvm_create_vm()
615 kvm_arch_free_vm(kvm); in kvm_create_vm()
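
The kvm_create_vm() hits (542-615) outline the construction order: allocate the arch-sized struct kvm, initialize its locks and the user refcount, run kvm_arch_init_vm(), allocate one memslot array per address space and one kvm_io_bus per bus, set up both SRCU structs, register the MMU notifier, and finally link the VM onto the global vm_list; the hits at 605-615 are the error-unwinding path in reverse order. A condensed sketch of that skeleton (per-step error codes and unwinding are collapsed into one label here, unlike the real function):

    static struct kvm *kvm_create_vm(unsigned long type)
    {
        struct kvm *kvm = kvm_arch_alloc_vm();
        int i;

        if (!kvm)
            return ERR_PTR(-ENOMEM);

        spin_lock_init(&kvm->mmu_lock);
        kvm->mm = current->mm;
        kvm_eventfd_init(kvm);
        mutex_init(&kvm->lock);
        mutex_init(&kvm->irq_lock);
        mutex_init(&kvm->slots_lock);
        atomic_set(&kvm->users_count, 1);       /* dropped by kvm_put_kvm() */
        INIT_LIST_HEAD(&kvm->devices);

        if (kvm_arch_init_vm(kvm, type))
            goto out_err;

        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
            kvm->memslots[i] = kvm_alloc_memslots();
            if (!kvm->memslots[i])
                goto out_err;
        }

        if (init_srcu_struct(&kvm->srcu) || init_srcu_struct(&kvm->irq_srcu))
            goto out_err;

        for (i = 0; i < KVM_NR_BUSES; i++) {
            kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
            if (!kvm->buses[i])
                goto out_err;
        }

        if (kvm_init_mmu_notifier(kvm))
            goto out_err;

        list_add(&kvm->vm_list, &vm_list);      /* under kvm_lock in the real code */
        return kvm;

    out_err:
        /* the real function unwinds each successful step individually */
        kvm_arch_free_vm(kvm);
        return ERR_PTR(-ENOMEM);
    }
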
632 static void kvm_destroy_devices(struct kvm *kvm) in kvm_destroy_devices() argument
636 list_for_each_safe(node, tmp, &kvm->devices) { in kvm_destroy_devices()
645 static void kvm_destroy_vm(struct kvm *kvm) in kvm_destroy_vm() argument
648 struct mm_struct *mm = kvm->mm; in kvm_destroy_vm()
650 kvm_arch_sync_events(kvm); in kvm_destroy_vm()
652 list_del(&kvm->vm_list); in kvm_destroy_vm()
654 kvm_free_irq_routing(kvm); in kvm_destroy_vm()
656 kvm_io_bus_destroy(kvm->buses[i]); in kvm_destroy_vm()
657 kvm_coalesced_mmio_free(kvm); in kvm_destroy_vm()
659 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); in kvm_destroy_vm()
661 kvm_arch_flush_shadow_all(kvm); in kvm_destroy_vm()
663 kvm_arch_destroy_vm(kvm); in kvm_destroy_vm()
664 kvm_destroy_devices(kvm); in kvm_destroy_vm()
666 kvm_free_memslots(kvm, kvm->memslots[i]); in kvm_destroy_vm()
667 cleanup_srcu_struct(&kvm->irq_srcu); in kvm_destroy_vm()
668 cleanup_srcu_struct(&kvm->srcu); in kvm_destroy_vm()
669 kvm_arch_free_vm(kvm); in kvm_destroy_vm()
675 void kvm_get_kvm(struct kvm *kvm) in kvm_get_kvm() argument
677 atomic_inc(&kvm->users_count); in kvm_get_kvm()
681 void kvm_put_kvm(struct kvm *kvm) in kvm_put_kvm() argument
683 if (atomic_dec_and_test(&kvm->users_count)) in kvm_put_kvm()
684 kvm_destroy_vm(kvm); in kvm_put_kvm()
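
Lines 675-684 are the VM reference-counting primitives: the VM file descriptor, every vCPU fd and every created device fd each hold one reference on users_count, and the last kvm_put_kvm() tears the whole VM down via kvm_destroy_vm(). Reassembled directly from the fragments:

    void kvm_get_kvm(struct kvm *kvm)
    {
        atomic_inc(&kvm->users_count);
    }

    void kvm_put_kvm(struct kvm *kvm)
    {
        /* dropping the final reference triggers full teardown */
        if (atomic_dec_and_test(&kvm->users_count))
            kvm_destroy_vm(kvm);
    }
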
691 struct kvm *kvm = filp->private_data; in kvm_vm_release() local
693 kvm_irqfd_release(kvm); in kvm_vm_release()
695 kvm_put_kvm(kvm); in kvm_vm_release()
783 static struct kvm_memslots *install_new_memslots(struct kvm *kvm, in install_new_memslots() argument
786 struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id); in install_new_memslots()
795 rcu_assign_pointer(kvm->memslots[as_id], slots); in install_new_memslots()
796 synchronize_srcu_expedited(&kvm->srcu); in install_new_memslots()
805 kvm_arch_memslots_updated(kvm, slots); in install_new_memslots()
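
install_new_memslots() (783-805) is the publish side of the SRCU scheme protecting the memslots: the new array becomes visible via rcu_assign_pointer(), then synchronize_srcu_expedited(&kvm->srcu) waits until no reader that entered srcu_read_lock(&kvm->srcu) before the switch can still see the old array, after which the caller may free or recycle it. A sketch of the pattern (the generation-number bookkeeping of the real function is omitted):

    static struct kvm_memslots *install_new_memslots(struct kvm *kvm, int as_id,
                                                     struct kvm_memslots *slots)
    {
        struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);

        /* publish the new array to SRCU readers */
        rcu_assign_pointer(kvm->memslots[as_id], slots);

        /* wait until no reader can still hold a pointer to the old array */
        synchronize_srcu_expedited(&kvm->srcu);

        /* let the architecture react to the new layout */
        kvm_arch_memslots_updated(kvm, slots);

        return old_memslots;    /* now safe for the caller to free or reuse */
    }
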
818 int __kvm_set_memory_region(struct kvm *kvm, in __kvm_set_memory_region() argument
855 slot = id_to_memslot(__kvm_memslots(kvm, as_id), id); in __kvm_set_memory_region()
899 kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) { in __kvm_set_memory_region()
917 if (kvm_arch_create_memslot(kvm, &new, npages)) in __kvm_set_memory_region()
930 memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots)); in __kvm_set_memory_region()
936 old_memslots = install_new_memslots(kvm, as_id, slots); in __kvm_set_memory_region()
939 kvm_iommu_unmap_pages(kvm, &old); in __kvm_set_memory_region()
947 kvm_arch_flush_shadow_memslot(kvm, slot); in __kvm_set_memory_region()
957 r = kvm_arch_prepare_memory_region(kvm, &new, mem, change); in __kvm_set_memory_region()
968 old_memslots = install_new_memslots(kvm, as_id, slots); in __kvm_set_memory_region()
970 kvm_arch_commit_memory_region(kvm, mem, &old, &new, change); in __kvm_set_memory_region()
972 kvm_free_memslot(kvm, &old, &new); in __kvm_set_memory_region()
985 r = kvm_iommu_map_pages(kvm, &new); in __kvm_set_memory_region()
994 kvm_free_memslot(kvm, &new, &old); in __kvm_set_memory_region()
1000 int kvm_set_memory_region(struct kvm *kvm, in kvm_set_memory_region() argument
1005 mutex_lock(&kvm->slots_lock); in kvm_set_memory_region()
1006 r = __kvm_set_memory_region(kvm, mem); in kvm_set_memory_region()
1007 mutex_unlock(&kvm->slots_lock); in kvm_set_memory_region()
1012 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, in kvm_vm_ioctl_set_memory_region() argument
1018 return kvm_set_memory_region(kvm, mem); in kvm_vm_ioctl_set_memory_region()
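
kvm_vm_ioctl_set_memory_region() (1012-1018) is the kernel entry point for the KVM_SET_USER_MEMORY_REGION ioctl, which funnels into __kvm_set_memory_region() under kvm->slots_lock. From userspace the same path is driven roughly as follows (a minimal, hedged example against the public <linux/kvm.h> API; error handling trimmed):

    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/kvm.h>

    /* Back 2 MiB of guest physical memory at GPA 0 with anonymous host memory. */
    int add_guest_ram(int vmfd)
    {
        size_t size = 2 << 20;
        void *host_mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
                              MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        struct kvm_userspace_memory_region region = {
            .slot            = 0,
            .flags           = 0,           /* or KVM_MEM_LOG_DIRTY_PAGES */
            .guest_phys_addr = 0,
            .memory_size     = size,
            .userspace_addr  = (unsigned long)host_mem,
        };

        if (host_mem == MAP_FAILED)
            return -1;

        /* lands in __kvm_set_memory_region() via the ioctl handler above */
        return ioctl(vmfd, KVM_SET_USER_MEMORY_REGION, &region);
    }
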
1021 int kvm_get_dirty_log(struct kvm *kvm, in kvm_get_dirty_log() argument
1036 slots = __kvm_memslots(kvm, as_id); in kvm_get_dirty_log()
1083 int kvm_get_dirty_log_protect(struct kvm *kvm, in kvm_get_dirty_log_protect() argument
1099 slots = __kvm_memslots(kvm, as_id); in kvm_get_dirty_log_protect()
1112 spin_lock(&kvm->mmu_lock); in kvm_get_dirty_log_protect()
1128 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, in kvm_get_dirty_log_protect()
1133 spin_unlock(&kvm->mmu_lock); in kvm_get_dirty_log_protect()
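
kvm_get_dirty_log() and kvm_get_dirty_log_protect() (1021-1133) back the KVM_GET_DIRTY_LOG ioctl; under kvm->mmu_lock the protect variant also write-protects the dirtied pages (via kvm_arch_mmu_enable_log_dirty_pt_masked) so later writes are caught again. Userspace supplies a bitmap with one bit per page of the slot; a hedged example, assuming the slot was registered with KVM_MEM_LOG_DIRTY_PAGES and 4 KiB guest pages:

    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Fetch the dirty bitmap for memory slot 0; one bit per guest page. */
    int fetch_dirty_bitmap(int vmfd, size_t slot_bytes, unsigned long **bitmap_out)
    {
        size_t bits  = slot_bytes / 4096;       /* assumes 4 KiB pages */
        size_t bytes = (bits + 7) / 8;
        unsigned long *bitmap = calloc(1, bytes);
        struct kvm_dirty_log log = {
            .slot         = 0,
            .dirty_bitmap = bitmap,
        };

        if (!bitmap)
            return -1;
        if (ioctl(vmfd, KVM_GET_DIRTY_LOG, &log) < 0) {
            free(bitmap);
            return -1;
        }
        *bitmap_out = bitmap;   /* caller frees; set bits mark dirtied pages */
        return 0;
    }
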
1157 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) in gfn_to_memslot() argument
1159 return __gfn_to_memslot(kvm_memslots(kvm), gfn); in gfn_to_memslot()
1168 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) in kvm_is_visible_gfn() argument
1170 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); in kvm_is_visible_gfn()
1180 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn) in kvm_host_page_size() argument
1187 addr = gfn_to_hva(kvm, gfn); in kvm_host_page_size()
1237 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) in gfn_to_hva() argument
1239 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); in gfn_to_hva()
1264 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) in gfn_to_hva_prot() argument
1266 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); in gfn_to_hva_prot()
1467 pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, in gfn_to_pfn_prot() argument
1470 return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL, in gfn_to_pfn_prot()
1487 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn) in gfn_to_pfn_atomic() argument
1489 return gfn_to_pfn_memslot_atomic(gfn_to_memslot(kvm, gfn), gfn); in gfn_to_pfn_atomic()
1499 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) in gfn_to_pfn() argument
1501 return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn); in gfn_to_pfn()
1541 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) in gfn_to_page() argument
1545 pfn = gfn_to_pfn(kvm, gfn); in gfn_to_page()
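
The hits from 1157 to 1545 trace the address-translation helpers layered on top of the memslots: a gfn is first resolved to its memslot (gfn_to_memslot), then to a host virtual address (gfn_to_hva and variants), then pinned to a host pfn (gfn_to_pfn; the _atomic/_prot variants differ only in fault behaviour and writability reporting), and finally, if needed, converted to a struct page (gfn_to_page). A sketch of how the layers call each other, taken from the fragments (the real gfn_to_page additionally rejects error pfns before converting):

    struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
    {
        return __gfn_to_memslot(kvm_memslots(kvm), gfn);
    }

    unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
    {
        return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
    }

    pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
    {
        return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
    }

    struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
    {
        /* simplified: the kernel validates the pfn first */
        return pfn_to_page(gfn_to_pfn(kvm, gfn));
    }
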
1638 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, in kvm_read_guest_page() argument
1641 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); in kvm_read_guest_page()
1656 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) in kvm_read_guest() argument
1664 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); in kvm_read_guest()
1713 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, in kvm_read_guest_atomic() argument
1717 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); in kvm_read_guest_atomic()
1751 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, in kvm_write_guest_page() argument
1754 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); in kvm_write_guest_page()
1769 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, in kvm_write_guest() argument
1778 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); in kvm_write_guest()
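
kvm_read_guest() and kvm_write_guest() (1656-1778) split a guest-physical range into per-page segments and hand each segment to the corresponding _page helper; offset is the byte offset within the first page and each segment stops at a page boundary. A sketch of the write loop reassembled from the fragments (next_segment is the small in-file helper that caps a segment at the end of the current page):

    /* bytes that fit between offset and the end of the current page */
    static int next_segment(unsigned long len, int offset)
    {
        if (len > PAGE_SIZE - offset)
            return PAGE_SIZE - offset;
        return len;
    }

    int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                        unsigned long len)
    {
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int offset = offset_in_page(gpa);
        int seg, ret;

        while ((seg = next_segment(len, offset)) != 0) {
            ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
            if (ret < 0)
                return ret;
            offset = 0;         /* later pages are written from their start */
            len  -= seg;
            data += seg;        /* void-pointer arithmetic, a GCC/kernel idiom */
            ++gfn;
        }
        return 0;
    }
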
1811 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_gfn_to_hva_cache_init() argument
1814 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_gfn_to_hva_cache_init()
1824 ghc->memslot = gfn_to_memslot(kvm, start_gfn); in kvm_gfn_to_hva_cache_init()
1834 ghc->memslot = gfn_to_memslot(kvm, start_gfn); in kvm_gfn_to_hva_cache_init()
1848 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_write_guest_cached() argument
1851 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_write_guest_cached()
1857 kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); in kvm_write_guest_cached()
1860 return kvm_write_guest(kvm, ghc->gpa, data, len); in kvm_write_guest_cached()
1874 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_read_guest_cached() argument
1877 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_read_guest_cached()
1883 kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); in kvm_read_guest_cached()
1886 return kvm_read_guest(kvm, ghc->gpa, data, len); in kvm_read_guest_cached()
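
The gfn_to_hva_cache helpers (1811-1886) cache a gfn-to-hva translation so frequently accessed guest structures avoid a memslot lookup on every read or write. As the fragments at 1857-1860 and 1883-1886 show, a cache that no longer covers a valid slot falls back to the plain kvm_write_guest()/kvm_read_guest() path. A sketch of the cached-write fast path; the field names and the generation check reflect my reading of struct gfn_to_hva_cache rather than the listing itself:

    int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                               void *data, unsigned long len)
    {
        struct kvm_memslots *slots = kvm_memslots(kvm);

        /* memslots changed since the cache was set up: redo the lookup */
        if (slots->generation != ghc->generation)
            kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);

        /* cache could not cover the area: use the generic per-page path */
        if (!ghc->memslot)
            return kvm_write_guest(kvm, ghc->gpa, data, len);

        /* fast path: the host virtual address is already known */
        if (__copy_to_user((void __user *)ghc->hva, data, len))
            return -EFAULT;
        mark_page_dirty_in_slot(ghc->memslot, ghc->gpa >> PAGE_SHIFT);

        return 0;
    }
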
1899 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len) in kvm_clear_guest_page() argument
1903 return kvm_write_guest_page(kvm, gfn, zero_page, offset, len); in kvm_clear_guest_page()
1907 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) in kvm_clear_guest() argument
1915 ret = kvm_clear_guest_page(kvm, gfn, offset, seg); in kvm_clear_guest()
1936 void mark_page_dirty(struct kvm *kvm, gfn_t gfn) in mark_page_dirty() argument
1940 memslot = gfn_to_memslot(kvm, gfn); in mark_page_dirty()
2150 struct kvm *kvm = me->kvm; in kvm_vcpu_on_spin() local
2152 int last_boosted_vcpu = me->kvm->last_boosted_vcpu; in kvm_vcpu_on_spin()
2167 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_vcpu_on_spin()
2184 kvm->last_boosted_vcpu = i; in kvm_vcpu_on_spin()
2213 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); in kvm_vcpu_fault()
2236 kvm_put_kvm(vcpu->kvm); in kvm_vcpu_release()
2261 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) in kvm_vm_ioctl_create_vcpu() argument
2269 vcpu = kvm_arch_vcpu_create(kvm, id); in kvm_vm_ioctl_create_vcpu()
2279 mutex_lock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
2284 if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) { in kvm_vm_ioctl_create_vcpu()
2289 kvm_for_each_vcpu(r, v, kvm) in kvm_vm_ioctl_create_vcpu()
2295 BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]); in kvm_vm_ioctl_create_vcpu()
2298 kvm_get_kvm(kvm); in kvm_vm_ioctl_create_vcpu()
2301 kvm_put_kvm(kvm); in kvm_vm_ioctl_create_vcpu()
2305 kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu; in kvm_vm_ioctl_create_vcpu()
2312 atomic_inc(&kvm->online_vcpus); in kvm_vm_ioctl_create_vcpu()
2314 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
2319 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
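
kvm_vm_ioctl_create_vcpu() (2261-2319) is what runs when userspace issues KVM_CREATE_VCPU on a VM fd: it creates the arch vcpu, then under kvm->lock rejects duplicate ids and more than KVM_MAX_VCPUS vcpus, takes an extra VM reference (kvm_get_kvm) on behalf of the new vcpu fd, and only then publishes the vcpu in kvm->vcpus[] and increments online_vcpus. The matching userspace call looks roughly like this (hedged example; error handling trimmed):

    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/kvm.h>

    /* Create vCPU 0 on an existing VM fd and map its shared kvm_run area. */
    int create_vcpu(int kvmfd, int vmfd, struct kvm_run **run_out)
    {
        int vcpufd, mmap_size;

        vcpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0UL);     /* vcpu id 0 */
        if (vcpufd < 0)
            return -1;

        /* size of the per-vcpu kvm_run mapping, queried on /dev/kvm */
        mmap_size = ioctl(kvmfd, KVM_GET_VCPU_MMAP_SIZE, NULL);
        *run_out = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                        MAP_SHARED, vcpufd, 0);
        if (*run_out == MAP_FAILED)
            return -1;

        return vcpufd;          /* later: ioctl(vcpufd, KVM_RUN, NULL) */
    }
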
2345 if (vcpu->kvm->mm != current->mm) in kvm_vcpu_ioctl()
2547 if (vcpu->kvm->mm != current->mm) in kvm_vcpu_compat_ioctl()
2623 struct kvm *kvm = dev->kvm; in kvm_device_release() local
2625 kvm_put_kvm(kvm); in kvm_device_release()
2674 static int kvm_ioctl_create_device(struct kvm *kvm, in kvm_ioctl_create_device() argument
2697 dev->kvm = kvm; in kvm_ioctl_create_device()
2711 list_add(&dev->vm_node, &kvm->devices); in kvm_ioctl_create_device()
2712 kvm_get_kvm(kvm); in kvm_ioctl_create_device()
2717 static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) in kvm_vm_ioctl_check_extension_generic() argument
2745 return kvm_vm_ioctl_check_extension(kvm, arg); in kvm_vm_ioctl_check_extension_generic()
2751 struct kvm *kvm = filp->private_data; in kvm_vm_ioctl() local
2755 if (kvm->mm != current->mm) in kvm_vm_ioctl()
2759 r = kvm_vm_ioctl_create_vcpu(kvm, arg); in kvm_vm_ioctl()
2769 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); in kvm_vm_ioctl()
2778 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); in kvm_vm_ioctl()
2788 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); in kvm_vm_ioctl()
2797 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); in kvm_vm_ioctl()
2807 r = kvm_irqfd(kvm, &data); in kvm_vm_ioctl()
2816 r = kvm_ioeventfd(kvm, &data); in kvm_vm_ioctl()
2826 r = kvm_send_userspace_msi(kvm, &msi); in kvm_vm_ioctl()
2839 r = kvm_vm_ioctl_irq_line(kvm, &irq_event, in kvm_vm_ioctl()
2877 r = kvm_set_irq_routing(kvm, entries, routing.nr, in kvm_vm_ioctl()
2891 r = kvm_ioctl_create_device(kvm, &cd); in kvm_vm_ioctl()
2903 r = kvm_vm_ioctl_check_extension_generic(kvm, arg); in kvm_vm_ioctl()
2925 struct kvm *kvm = filp->private_data; in kvm_vm_compat_ioctl() local
2928 if (kvm->mm != current->mm) in kvm_vm_compat_ioctl()
2944 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); in kvm_vm_compat_ioctl()
2968 struct kvm *kvm; in kvm_dev_ioctl_create_vm() local
2970 kvm = kvm_create_vm(type); in kvm_dev_ioctl_create_vm()
2971 if (IS_ERR(kvm)) in kvm_dev_ioctl_create_vm()
2972 return PTR_ERR(kvm); in kvm_dev_ioctl_create_vm()
2974 r = kvm_coalesced_mmio_init(kvm); in kvm_dev_ioctl_create_vm()
2976 kvm_put_kvm(kvm); in kvm_dev_ioctl_create_vm()
2980 r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR | O_CLOEXEC); in kvm_dev_ioctl_create_vm()
2982 kvm_put_kvm(kvm); in kvm_dev_ioctl_create_vm()
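
kvm_dev_ioctl_create_vm() (2968-2982) backs the KVM_CREATE_VM ioctl on /dev/kvm: it builds the VM with kvm_create_vm(), initializes coalesced MMIO, and hands userspace an anonymous-inode fd named "kvm-vm" whose file operations are kvm_vm_fops, so every subsequent VM ioctl is dispatched by kvm_vm_ioctl() above. A minimal userspace counterpart (hedged example):

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Open /dev/kvm and create a VM; returns the VM fd or -1. */
    int open_vm(int *kvmfd_out)
    {
        int kvmfd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
        int vmfd;

        if (kvmfd < 0)
            return -1;
        if (ioctl(kvmfd, KVM_GET_API_VERSION, NULL) != KVM_API_VERSION) {
            close(kvmfd);
            return -1;          /* the stable API version is 12 */
        }

        vmfd = ioctl(kvmfd, KVM_CREATE_VM, 0UL);        /* type 0: default VM */
        if (vmfd < 0) {
            close(kvmfd);
            return -1;
        }
        *kvmfd_out = kvmfd;
        return vmfd;
    }
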
3271 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_write()
3288 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_write_cookie()
3338 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_read()
3345 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, in kvm_io_bus_register_dev() argument
3350 bus = kvm->buses[bus_idx]; in kvm_io_bus_register_dev()
3362 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); in kvm_io_bus_register_dev()
3363 synchronize_srcu_expedited(&kvm->srcu); in kvm_io_bus_register_dev()
3370 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, in kvm_io_bus_unregister_dev() argument
3376 bus = kvm->buses[bus_idx]; in kvm_io_bus_unregister_dev()
3397 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); in kvm_io_bus_unregister_dev()
3398 synchronize_srcu_expedited(&kvm->srcu); in kvm_io_bus_unregister_dev()
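
kvm_io_bus_register_dev() and kvm_io_bus_unregister_dev() (3345-3398) reuse the memslot publish-and-wait scheme for the MMIO/PIO device buses: build a new kvm_io_bus array (a copy of the old one with the device added or removed), publish it with rcu_assign_pointer(), and call synchronize_srcu_expedited(&kvm->srcu) so that in-flight kvm_io_bus_write()/read() calls, which dereference the bus under SRCU (3271, 3288, 3338), drain before the old array is freed. A sketch of the register path; the sorted insertion and the bus-capacity check of the real function are elided:

    int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                                int len, struct kvm_io_device *dev)
    {
        struct kvm_io_bus *new_bus, *bus = kvm->buses[bus_idx];

        /* copy-on-write: concurrent readers keep using the old array */
        new_bus = kmalloc(sizeof(*bus) +
                          (bus->dev_count + 1) * sizeof(struct kvm_io_range),
                          GFP_KERNEL);
        if (!new_bus)
            return -ENOMEM;
        memcpy(new_bus, bus,
               sizeof(*bus) + bus->dev_count * sizeof(struct kvm_io_range));
        /* ... insert {addr, len, dev} into new_bus->range[], keeping it sorted ... */

        rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
        synchronize_srcu_expedited(&kvm->srcu);         /* drain SRCU readers */
        kfree(bus);                                     /* old array is now unused */

        return 0;
    }
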
3410 struct kvm *kvm; in vm_stat_get() local
3414 list_for_each_entry(kvm, &vm_list, vm_list) in vm_stat_get()
3415 *val += *(u32 *)((void *)kvm + offset); in vm_stat_get()
3425 struct kvm *kvm; in vcpu_stat_get() local
3431 list_for_each_entry(kvm, &vm_list, vm_list) in vcpu_stat_get()
3432 kvm_for_each_vcpu(i, vcpu, kvm) in vcpu_stat_get()
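
The debugfs stat getters (3410-3432) aggregate a counter across every VM on the global vm_list: each stats-table entry stores the byte offset of a u32 counter inside struct kvm (VM stats) or struct kvm_vcpu (vCPU stats), and the getter sums the field at that offset over all VMs, and for vCPU stats over each VM's vcpus as well. Reassembled from the fragments (the kvm_lock taken around the list walk in the real code is omitted):

    static int vm_stat_get(void *_offset, u64 *val)
    {
        unsigned offset = (long)_offset;        /* byte offset into struct kvm */
        struct kvm *kvm;

        *val = 0;
        list_for_each_entry(kvm, &vm_list, vm_list)
            *val += *(u32 *)((void *)kvm + offset);
        return 0;
    }

    static int vcpu_stat_get(void *_offset, u64 *val)
    {
        unsigned offset = (long)_offset;        /* byte offset into struct kvm_vcpu */
        struct kvm_vcpu *vcpu;
        struct kvm *kvm;
        int i;

        *val = 0;
        list_for_each_entry(kvm, &vm_list, vm_list)
            kvm_for_each_vcpu(i, vcpu, kvm)
                *val += *(u32 *)((void *)vcpu + offset);
        return 0;
    }
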