Lines Matching refs:kvm

44 	struct kvm *kvm;  member
109 kvm_set_irq(assigned_dev->kvm, in kvm_assigned_dev_raise_guest_irq()
114 kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id, in kvm_assigned_dev_raise_guest_irq()
142 static int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, in kvm_set_irq_inatomic() argument
160 idx = srcu_read_lock(&kvm->irq_srcu); in kvm_set_irq_inatomic()
161 if (kvm_irq_map_gsi(kvm, entries, irq) > 0) { in kvm_set_irq_inatomic()
163 ret = kvm_arch_set_irq_inatomic(e, kvm, irq_source_id, in kvm_set_irq_inatomic()
166 srcu_read_unlock(&kvm->irq_srcu, idx); in kvm_set_irq_inatomic()
174 int ret = kvm_set_irq_inatomic(assigned_dev->kvm, in kvm_assigned_dev_msi()
199 ret = kvm_set_irq_inatomic(assigned_dev->kvm, in kvm_assigned_dev_msix()
228 kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0, false); in kvm_assigned_dev_ack_irq()
250 kvm_set_irq(dev->kvm, dev->irq_source_id, in kvm_assigned_dev_ack_irq()
257 static void deassign_guest_irq(struct kvm *kvm, in deassign_guest_irq() argument
261 kvm_unregister_irq_ack_notifier(kvm, in deassign_guest_irq()
264 kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id, in deassign_guest_irq()
268 kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id); in deassign_guest_irq()
274 static void deassign_host_irq(struct kvm *kvm, in deassign_host_irq() argument
321 static int kvm_deassign_irq(struct kvm *kvm, in kvm_deassign_irq() argument
327 if (!irqchip_in_kernel(kvm)) in kvm_deassign_irq()
337 deassign_host_irq(kvm, assigned_dev); in kvm_deassign_irq()
339 deassign_guest_irq(kvm, assigned_dev); in kvm_deassign_irq()
344 static void kvm_free_assigned_irq(struct kvm *kvm, in kvm_free_assigned_irq() argument
347 kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type); in kvm_free_assigned_irq()
350 static void kvm_free_assigned_device(struct kvm *kvm, in kvm_free_assigned_device() argument
354 kvm_free_assigned_irq(kvm, assigned_dev); in kvm_free_assigned_device()
374 void kvm_free_all_assigned_devices(struct kvm *kvm) in kvm_free_all_assigned_devices() argument
379 list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) { in kvm_free_all_assigned_devices()
384 kvm_free_assigned_device(kvm, assigned_dev); in kvm_free_all_assigned_devices()
388 static int assigned_device_enable_host_intx(struct kvm *kvm, in assigned_device_enable_host_intx() argument
422 static int assigned_device_enable_host_msi(struct kvm *kvm, in assigned_device_enable_host_msi() argument
444 static int assigned_device_enable_host_msix(struct kvm *kvm, in assigned_device_enable_host_msix() argument
476 static int assigned_device_enable_guest_intx(struct kvm *kvm, in assigned_device_enable_guest_intx() argument
485 static int assigned_device_enable_guest_msi(struct kvm *kvm, in assigned_device_enable_guest_msi() argument
494 static int assigned_device_enable_guest_msix(struct kvm *kvm, in assigned_device_enable_guest_msix() argument
503 static int assign_host_irq(struct kvm *kvm, in assign_host_irq() argument
517 r = assigned_device_enable_host_intx(kvm, dev); in assign_host_irq()
520 r = assigned_device_enable_host_msi(kvm, dev); in assign_host_irq()
523 r = assigned_device_enable_host_msix(kvm, dev); in assign_host_irq()
536 static int assign_guest_irq(struct kvm *kvm, in assign_guest_irq() argument
547 id = kvm_request_irq_source_id(kvm); in assign_guest_irq()
555 r = assigned_device_enable_guest_intx(kvm, dev, irq); in assign_guest_irq()
558 r = assigned_device_enable_guest_msi(kvm, dev, irq); in assign_guest_irq()
561 r = assigned_device_enable_guest_msix(kvm, dev, irq); in assign_guest_irq()
570 kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier); in assign_guest_irq()
572 kvm_free_irq_source_id(kvm, dev->irq_source_id); in assign_guest_irq()
580 static int kvm_vm_ioctl_assign_irq(struct kvm *kvm, in kvm_vm_ioctl_assign_irq() argument
587 if (!irqchip_in_kernel(kvm)) in kvm_vm_ioctl_assign_irq()
590 mutex_lock(&kvm->lock); in kvm_vm_ioctl_assign_irq()
592 match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, in kvm_vm_ioctl_assign_irq()
611 r = assign_host_irq(kvm, match, host_irq_type); in kvm_vm_ioctl_assign_irq()
616 r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type); in kvm_vm_ioctl_assign_irq()
618 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_assign_irq()
622 static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm, in kvm_vm_ioctl_deassign_dev_irq() argument
630 mutex_lock(&kvm->lock); in kvm_vm_ioctl_deassign_dev_irq()
632 match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, in kvm_vm_ioctl_deassign_dev_irq()
639 r = kvm_deassign_irq(kvm, match, irq_type); in kvm_vm_ioctl_deassign_dev_irq()
641 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_deassign_dev_irq()
705 static int kvm_vm_ioctl_assign_device(struct kvm *kvm, in kvm_vm_ioctl_assign_device() argument
715 mutex_lock(&kvm->lock); in kvm_vm_ioctl_assign_device()
716 idx = srcu_read_lock(&kvm->srcu); in kvm_vm_ioctl_assign_device()
718 match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, in kvm_vm_ioctl_assign_device()
783 match->kvm = kvm; in kvm_vm_ioctl_assign_device()
786 list_add(&match->list, &kvm->arch.assigned_dev_head); in kvm_vm_ioctl_assign_device()
788 if (!kvm->arch.iommu_domain) { in kvm_vm_ioctl_assign_device()
789 r = kvm_iommu_map_guest(kvm); in kvm_vm_ioctl_assign_device()
793 r = kvm_assign_device(kvm, match->dev); in kvm_vm_ioctl_assign_device()
798 srcu_read_unlock(&kvm->srcu, idx); in kvm_vm_ioctl_assign_device()
799 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_assign_device()
813 srcu_read_unlock(&kvm->srcu, idx); in kvm_vm_ioctl_assign_device()
814 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_assign_device()
818 static int kvm_vm_ioctl_deassign_device(struct kvm *kvm, in kvm_vm_ioctl_deassign_device() argument
824 mutex_lock(&kvm->lock); in kvm_vm_ioctl_deassign_device()
826 match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, in kvm_vm_ioctl_deassign_device()
835 kvm_deassign_device(kvm, match->dev); in kvm_vm_ioctl_deassign_device()
837 kvm_free_assigned_device(kvm, match); in kvm_vm_ioctl_deassign_device()
840 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_deassign_device()
845 static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm, in kvm_vm_ioctl_set_msix_nr() argument
851 mutex_lock(&kvm->lock); in kvm_vm_ioctl_set_msix_nr()
853 adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, in kvm_vm_ioctl_set_msix_nr()
886 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_set_msix_nr()
890 static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm, in kvm_vm_ioctl_set_msix_entry() argument
896 mutex_lock(&kvm->lock); in kvm_vm_ioctl_set_msix_entry()
898 adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, in kvm_vm_ioctl_set_msix_entry()
920 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_set_msix_entry()
925 static int kvm_vm_ioctl_set_pci_irq_mask(struct kvm *kvm, in kvm_vm_ioctl_set_pci_irq_mask() argument
931 mutex_lock(&kvm->lock); in kvm_vm_ioctl_set_pci_irq_mask()
933 match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, in kvm_vm_ioctl_set_pci_irq_mask()
947 kvm_set_irq(match->kvm, match->irq_source_id, in kvm_vm_ioctl_set_pci_irq_mask()
970 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_set_pci_irq_mask()
974 long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl, in kvm_vm_ioctl_assigned_device() argument
987 r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev); in kvm_vm_ioctl_assigned_device()
1002 r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq); in kvm_vm_ioctl_assigned_device()
1013 r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq); in kvm_vm_ioctl_assigned_device()
1024 r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev); in kvm_vm_ioctl_assigned_device()
1034 r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr); in kvm_vm_ioctl_assigned_device()
1044 r = kvm_vm_ioctl_set_msix_entry(kvm, &entry); in kvm_vm_ioctl_assigned_device()
1055 r = kvm_vm_ioctl_set_pci_irq_mask(kvm, &assigned_dev); in kvm_vm_ioctl_assigned_device()
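
Taken together, the ioctl handlers referenced above share one pattern: take `kvm->lock`, look the device up on `kvm->arch.assigned_dev_head` with `kvm_find_assigned_dev()`, act on it (often by raising or lowering its guest IRQ with `kvm_set_irq()`), then drop the lock. The sketch below is a hypothetical, simplified handler illustrating that pattern, not a copy of the file the listing indexes; `example_lower_guest_irq()` is invented for illustration, the second argument to `kvm_find_assigned_dev()` is assumed, and error handling is reduced to the minimum.

```c
#include <linux/kvm_host.h>

/*
 * Illustrative sketch only: a hypothetical handler showing the
 * lock -> lookup -> act -> unlock pattern used by the ioctl handlers
 * referenced above.  Not the actual kernel source.
 */
static int example_lower_guest_irq(struct kvm *kvm, u32 assigned_dev_id)
{
	struct kvm_assigned_dev_kernel *match;
	int r = 0;

	mutex_lock(&kvm->lock);			/* serialize against other VM ioctls */

	/* Second argument (the device id to match) is an assumption here. */
	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev_id);
	if (!match) {
		r = -ENODEV;			/* no such assigned device */
		goto out;
	}

	/*
	 * Lower the guest interrupt line, mirroring the call pattern seen
	 * at lines 228 and 947 of the listing (level = 0, line_status = false).
	 */
	kvm_set_irq(match->kvm, match->irq_source_id,
		    match->guest_irq, 0, false);

out:
	mutex_unlock(&kvm->lock);
	return r;
}
```

The same lock/lookup/unlock shape is visible in `kvm_vm_ioctl_assign_irq()`, `kvm_vm_ioctl_deassign_dev_irq()`, `kvm_vm_ioctl_set_msix_nr()`, `kvm_vm_ioctl_set_msix_entry()`, and `kvm_vm_ioctl_set_pci_irq_mask()` above; `kvm_vm_ioctl_assign_device()` additionally holds `kvm->srcu` read-side around the IOMMU mapping calls.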