Lines matching refs:kvm — cross-reference hits for struct kvm in include/linux/kvm_host.h. The number leading each entry is that line's position in the source file; trailing tags ("member", "argument", "in foo()") identify the kind of reference.
183 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
185 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
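These two back KVM's in-kernel MMIO/PIO dispatch. A minimal registration sketch, assuming the v4.x-era five-argument signature; my_mmio_ops, my_mmio_read/my_mmio_write and the dev container are hypothetical, and the kvm_io_device_ops callback signatures have changed across kernel versions:

    static const struct kvm_io_device_ops my_mmio_ops = {
            .read  = my_mmio_read,          /* hypothetical handlers */
            .write = my_mmio_write,
    };

    /* dev->iodev is a struct kvm_io_device embedded in the device */
    kvm_iodevice_init(&dev->iodev, &my_mmio_ops);
    mutex_lock(&kvm->slots_lock);           /* bus registration requires slots_lock */
    ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, len, &dev->iodev);
    mutex_unlock(&kvm->slots_lock);
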
226 struct kvm *kvm; member
325 struct kvm *kvm, int irq_source_id, int level,
379 struct kvm { struct
447 static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) in kvm_get_vcpu() argument
454 return kvm->vcpus[i]; in kvm_get_vcpu()
457 #define kvm_for_each_vcpu(idx, vcpup, kvm) \ argument
459 idx < atomic_read(&kvm->online_vcpus) && \
460 (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
463 static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) in kvm_get_vcpu_by_id() argument
468 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_get_vcpu_by_id()
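kvm_for_each_vcpu() is the standard way to walk a VM's vCPUs; bounding the loop by online_vcpus keeps it safe while vCPUs are still being created. A sketch that kicks every vCPU out of guest mode:

    struct kvm_vcpu *vcpu;
    int i;

    kvm_for_each_vcpu(i, vcpu, kvm)
            kvm_vcpu_kick(vcpu);    /* force each vCPU to exit guest mode */
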
479 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
486 void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
487 void kvm_arch_irq_routing_update(struct kvm *kvm);
489 static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm) in kvm_vcpu_request_scan_ioapic() argument
492 static inline void kvm_arch_irq_routing_update(struct kvm *kvm) in kvm_arch_irq_routing_update() argument
514 void kvm_get_kvm(struct kvm *kvm);
515 void kvm_put_kvm(struct kvm *kvm);
517 static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id) in __kvm_memslots() argument
519 return rcu_dereference_check(kvm->memslots[as_id], in __kvm_memslots()
520 srcu_read_lock_held(&kvm->srcu) in __kvm_memslots()
521 || lockdep_is_held(&kvm->slots_lock)); in __kvm_memslots()
524 static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm) in kvm_memslots() argument
526 return __kvm_memslots(kvm, 0); in kvm_memslots()
533 return __kvm_memslots(vcpu->kvm, as_id); in kvm_vcpu_memslots()
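As the rcu_dereference_check() annotations above show, memslots are RCU-protected: readers must hold kvm->srcu (or slots_lock). The canonical read-side pattern:

    int idx;
    struct kvm_memslots *slots;

    idx = srcu_read_lock(&kvm->srcu);
    slots = kvm_memslots(kvm);      /* address space 0 */
    /* ... look up gfns in slots ... */
    srcu_read_unlock(&kvm->srcu, idx);
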
566 int kvm_set_memory_region(struct kvm *kvm,
568 int __kvm_set_memory_region(struct kvm *kvm,
570 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
572 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
574 void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots);
575 int kvm_arch_prepare_memory_region(struct kvm *kvm,
579 void kvm_arch_commit_memory_region(struct kvm *kvm,
587 void kvm_arch_flush_shadow_all(struct kvm *kvm);
589 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
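kvm_set_memory_region() (which takes slots_lock around __kvm_set_memory_region()) is the machinery behind the KVM_SET_USER_MEMORY_REGION ioctl, with the kvm_arch_* hooks above as the per-architecture prepare/commit/flush callouts around it. From userspace it is driven roughly like this; vm_fd and the mmap'ed, page-aligned backing buffer are assumed:

    struct kvm_userspace_memory_region region = {
            .slot            = 0,
            .flags           = 0,                   /* or KVM_MEM_LOG_DIRTY_PAGES */
            .guest_phys_addr = 0x100000,
            .memory_size     = 0x200000,            /* page-aligned */
            .userspace_addr  = (__u64)(uintptr_t)backing,
    };

    if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) < 0)
            err(1, "KVM_SET_USER_MEMORY_REGION");
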
595 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
596 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
597 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
605 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
606 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
607 pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
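The gfn_to_* family translates guest frame numbers to host pages, addresses, or pfns via the memslots, and callers must check the error encodings. A typical check, assuming the caller already holds the SRCU read lock:

    unsigned long hva = gfn_to_hva(kvm, gfn);

    if (kvm_is_error_hva(hva))
            return -EFAULT;         /* gfn not backed by any memslot */
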
619 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
621 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
623 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
624 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
626 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
628 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
630 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
632 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
634 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
635 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
636 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
637 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
638 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
639 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
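kvm_read_guest()/kvm_write_guest() copy across memslot boundaries and handle dirty tracking on the write side; the *_cached variants plus kvm_gfn_to_hva_cache_init() avoid repeating the gfn-to-hva lookup on hot paths. A sketch for a frequently written guest structure at a fixed gpa (ghc, gpa and val are the caller's):

    struct gfn_to_hva_cache ghc;
    u64 val = 0;

    if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(val)))
            return -EFAULT;         /* gpa not backed by a memslot */

    /* later, on the hot path: */
    if (kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val)))
            return -EFAULT;
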
669 void kvm_flush_remote_tlbs(struct kvm *kvm);
670 void kvm_reload_remote_mmus(struct kvm *kvm);
671 void kvm_make_mclock_inprogress_request(struct kvm *kvm);
672 void kvm_make_scan_ioapic_request(struct kvm *kvm);
673 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
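kvm_make_all_cpus_request() raises a request bit on every vCPU and IPIs them; kvm_flush_remote_tlbs() is the common wrapper that raises KVM_REQ_TLB_FLUSH. The MMU uses it after zapping translations, roughly as below; my_zap_stale_sptes() is a hypothetical stand-in for the zap step:

    spin_lock(&kvm->mmu_lock);
    flush = my_zap_stale_sptes(kvm);        /* hypothetical zap helper */
    if (flush)
            kvm_flush_remote_tlbs(kvm);     /* all vCPUs flush before re-entering the guest */
    spin_unlock(&kvm->mmu_lock);
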
681 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
683 int kvm_get_dirty_log(struct kvm *kvm,
686 int kvm_get_dirty_log_protect(struct kvm *kvm,
689 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
694 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
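kvm_vm_ioctl_get_dirty_log() services KVM_GET_DIRTY_LOG, returning (and, via the _protect variant, write-protecting) a slot's dirty bitmap. Userspace polls it per slot, roughly as follows; vm_fd and the caller-allocated bitmap are assumed:

    struct kvm_dirty_log log = {
            .slot         = 0,
            .dirty_bitmap = bitmap,         /* one bit per page in the slot */
    };

    if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0)
            err(1, "KVM_GET_DIRTY_LOG");
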
697 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
733 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
749 static inline struct kvm *kvm_arch_alloc_vm(void) in kvm_arch_alloc_vm()
751 return kzalloc(sizeof(struct kvm), GFP_KERNEL); in kvm_arch_alloc_vm()
754 static inline void kvm_arch_free_vm(struct kvm *kvm) in kvm_arch_free_vm() argument
756 kfree(kvm); in kvm_arch_free_vm()
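These are the defaults, compiled only when the architecture has not defined __KVM_HAVE_ARCH_VM_ALLOC. An architecture whose struct kvm outgrows kmalloc could supply a vmalloc-backed pair instead; a sketch:

    #define __KVM_HAVE_ARCH_VM_ALLOC
    static inline struct kvm *kvm_arch_alloc_vm(void)
    {
            return vzalloc(sizeof(struct kvm));     /* zeroed, vmalloc-backed */
    }

    static inline void kvm_arch_free_vm(struct kvm *kvm)
    {
            vfree(kvm);
    }
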
761 void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
762 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
763 bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
765 static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm) in kvm_arch_register_noncoherent_dma() argument
769 static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm) in kvm_arch_unregister_noncoherent_dma() argument
773 static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm) in kvm_arch_has_noncoherent_dma() argument
779 void kvm_arch_start_assignment(struct kvm *kvm);
780 void kvm_arch_end_assignment(struct kvm *kvm);
781 bool kvm_arch_has_assigned_device(struct kvm *kvm);
783 static inline void kvm_arch_start_assignment(struct kvm *kvm) in kvm_arch_start_assignment() argument
787 static inline void kvm_arch_end_assignment(struct kvm *kvm) in kvm_arch_end_assignment() argument
791 static inline bool kvm_arch_has_assigned_device(struct kvm *kvm) in kvm_arch_has_assigned_device() argument
812 bool kvm_arch_intc_initialized(struct kvm *kvm);
814 static inline bool kvm_arch_intc_initialized(struct kvm *kvm) in kvm_arch_intc_initialized() argument
820 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
821 void kvm_arch_destroy_vm(struct kvm *kvm);
822 void kvm_arch_sync_events(struct kvm *kvm);
835 int kvm_irq_map_gsi(struct kvm *kvm,
837 int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);
839 int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
841 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
844 struct kvm *kvm, int irq_source_id,
846 bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
847 void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
848 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
849 void kvm_register_irq_ack_notifier(struct kvm *kvm,
851 void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
853 int kvm_request_irq_source_id(struct kvm *kvm);
854 void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
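The ack-notifier half of this API lets an in-kernel device learn when the guest EOIs its interrupt (the i8254 PIT uses it to re-arm). A registration sketch; my_dev, its embedded ack_notifier field, and my_irq_acked are hypothetical:

    static void my_irq_acked(struct kvm_irq_ack_notifier *kian)
    {
            struct my_dev *dev = container_of(kian, struct my_dev, ack_notifier);
            /* guest acked our gsi: safe to inject the next edge */
    }

    dev->ack_notifier.gsi = gsi;
    dev->ack_notifier.irq_acked = my_irq_acked;
    kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
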
857 int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
858 void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
860 static inline int kvm_iommu_map_pages(struct kvm *kvm, in kvm_iommu_map_pages() argument
866 static inline void kvm_iommu_unmap_pages(struct kvm *kvm, in kvm_iommu_unmap_pages() argument
958 static inline int memslot_id(struct kvm *kvm, gfn_t gfn) in memslot_id() argument
960 return gfn_to_memslot(kvm, gfn)->id; in memslot_id()
986 static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa) in kvm_is_error_gpa() argument
988 unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa)); in kvm_is_error_gpa()
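This helper just folds gpa-to-gfn-to-hva and tests the hva error encoding, so ioctl handlers can validate a guest address up front:

    if (kvm_is_error_gpa(kvm, gpa))
            return -EFAULT;         /* gpa not covered by any memslot */
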
1013 static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq) in mmu_notifier_retry() argument
1015 if (unlikely(kvm->mmu_notifier_count)) in mmu_notifier_retry()
1028 if (kvm->mmu_notifier_seq != mmu_seq) in mmu_notifier_retry()
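mmu_notifier_retry() closes the race between a page fault and an mmu-notifier invalidation: the fault path samples mmu_notifier_seq before pinning the page, then rechecks under mmu_lock before installing the translation. The canonical pattern, as in the x86 fault handlers:

    mmu_seq = kvm->mmu_notifier_seq;
    smp_rmb();
    pfn = gfn_to_pfn(kvm, gfn);             /* may sleep; no locks held */

    spin_lock(&kvm->mmu_lock);
    if (mmu_notifier_retry(kvm, mmu_seq))
            goto out_unlock;                /* invalidation raced: retry the fault */
    /* ... install the translation ... */
    out_unlock:
    spin_unlock(&kvm->mmu_lock);
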
1042 int kvm_setup_default_irq_routing(struct kvm *kvm);
1043 int kvm_setup_empty_irq_routing(struct kvm *kvm);
1044 int kvm_set_irq_routing(struct kvm *kvm,
1050 void kvm_free_irq_routing(struct kvm *kvm);
1054 static inline void kvm_free_irq_routing(struct kvm *kvm) {} in kvm_free_irq_routing() argument
1058 int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
1062 void kvm_eventfd_init(struct kvm *kvm);
1063 int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
1066 int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
1067 void kvm_irqfd_release(struct kvm *kvm);
1068 void kvm_irq_routing_update(struct kvm *);
1070 static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args) in kvm_irqfd() argument
1075 static inline void kvm_irqfd_release(struct kvm *kvm) {} in kvm_irqfd_release() argument
1080 static inline void kvm_eventfd_init(struct kvm *kvm) {} in kvm_eventfd_init() argument
1082 static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args) in kvm_irqfd() argument
1087 static inline void kvm_irqfd_release(struct kvm *kvm) {} in kvm_irqfd_release() argument
1090 static inline void kvm_irq_routing_update(struct kvm *kvm) in kvm_irq_routing_update() argument
1095 static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) in kvm_ioeventfd() argument
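kvm_irqfd() and kvm_ioeventfd() service the KVM_IRQFD and KVM_IOEVENTFD ioctls, binding eventfds to interrupt injection and to MMIO/PIO doorbells respectively (this is how vhost and VFIO bypass userspace on the data path). Userspace wiring, sketched; vm_fd, irq_efd and db_efd are assumed eventfd(2) descriptors:

    struct kvm_irqfd irqfd = {
            .fd  = irq_efd,         /* signaling this eventfd injects gsi */
            .gsi = 5,
    };
    ioctl(vm_fd, KVM_IRQFD, &irqfd);

    struct kvm_ioeventfd ioev = {
            .addr  = 0xc050,        /* doorbell PIO port */
            .len   = 2,
            .fd    = db_efd,        /* signaled on guest write, handled in-kernel */
            .flags = KVM_IOEVENTFD_FLAG_PIO,
    };
    ioctl(vm_fd, KVM_IOEVENTFD, &ioev);
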
1127 struct kvm *kvm; member
1194 int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,