assigned-dev.c: cross-reference of the identifier assigned_dev.
Line numbers refer to lines within assigned-dev.c. Entries are grouped by the enclosing function; each entry shows the code fragment in which assigned_dev appears.

find_index_from_host_irq()  (argument)
    66  *assigned_dev, int irq)
    71  host_msix_entries = assigned_dev->host_msix_entries;
    74  for (i = 0; i < assigned_dev->entries_nr; i++)

kvm_assigned_dev_intx()  (local)
    87  struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
    90  spin_lock(&assigned_dev->intx_lock);
    91  if (pci_check_and_mask_intx(assigned_dev->dev)) {
    92  assigned_dev->host_irq_disabled = true;
    96  spin_unlock(&assigned_dev->intx_lock);

kvm_assigned_dev_raise_guest_irq()  (argument)
   102  kvm_assigned_dev_raise_guest_irq(struct kvm_assigned_dev_kernel *assigned_dev,
   105  if (unlikely(assigned_dev->irq_requested_type &
   107  spin_lock(&assigned_dev->intx_mask_lock);
   108  if (!(assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX))
   109  kvm_set_irq(assigned_dev->kvm,
   110  assigned_dev->irq_source_id, vector, 1,
   112  spin_unlock(&assigned_dev->intx_mask_lock);
   114  kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,

kvm_assigned_dev_thread_intx()  (local)
   120  struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
   122  if (!(assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) {
   123  spin_lock_irq(&assigned_dev->intx_lock);
   125  assigned_dev->host_irq_disabled = true;
   126  spin_unlock_irq(&assigned_dev->intx_lock);
   129  kvm_assigned_dev_raise_guest_irq(assigned_dev,
   130  assigned_dev->guest_irq);

kvm_assigned_dev_msi()  (local)
   173  struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
   174  int ret = kvm_set_irq_inatomic(assigned_dev->kvm,
   175  assigned_dev->irq_source_id,
   176  assigned_dev->guest_irq, 1);

kvm_assigned_dev_thread_msi()  (local)
   182  struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
   184  kvm_assigned_dev_raise_guest_irq(assigned_dev,
   185  assigned_dev->guest_irq);

kvm_assigned_dev_msix()  (local)
   192  struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
   193  int index = find_index_from_host_irq(assigned_dev, irq);
   198  vector = assigned_dev->guest_msix_entries[index].vector;
   199  ret = kvm_set_irq_inatomic(assigned_dev->kvm,
   200  assigned_dev->irq_source_id,

kvm_assigned_dev_thread_msix()  (local)
   209  struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
   210  int index = find_index_from_host_irq(assigned_dev, irq);
   214  vector = assigned_dev->guest_msix_entries[index].vector;
   215  kvm_assigned_dev_raise_guest_irq(assigned_dev, vector);

deassign_guest_irq()  (argument)
   258  struct kvm_assigned_dev_kernel *assigned_dev)
   260  if (assigned_dev->ack_notifier.gsi != -1)
   262  &assigned_dev->ack_notifier);
   264  kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
   265  assigned_dev->guest_irq, 0, false);
   267  if (assigned_dev->irq_source_id != -1)
   268  kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
   269  assigned_dev->irq_source_id = -1;
   270  assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);

deassign_host_irq()  (argument)
   275  struct kvm_assigned_dev_kernel *assigned_dev)
   287  if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
   289  for (i = 0; i < assigned_dev->entries_nr; i++)
   290  disable_irq(assigned_dev->host_msix_entries[i].vector);
   292  for (i = 0; i < assigned_dev->entries_nr; i++)
   293  free_irq(assigned_dev->host_msix_entries[i].vector,
   294  assigned_dev);
   296  assigned_dev->entries_nr = 0;
   297  kfree(assigned_dev->host_msix_entries);
   298  kfree(assigned_dev->guest_msix_entries);
   299  pci_disable_msix(assigned_dev->dev);
   302  if ((assigned_dev->irq_requested_type &
   304  (assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) {
   305  spin_lock_irq(&assigned_dev->intx_lock);
   306  pci_intx(assigned_dev->dev, false);
   307  spin_unlock_irq(&assigned_dev->intx_lock);
   308  synchronize_irq(assigned_dev->host_irq);
   310  disable_irq(assigned_dev->host_irq);
   312  free_irq(assigned_dev->host_irq, assigned_dev);
   314  if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
   315  pci_disable_msi(assigned_dev->dev);
   318  assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);

kvm_deassign_irq()  (argument)
   322  struct kvm_assigned_dev_kernel *assigned_dev,
   330  if (!assigned_dev->irq_requested_type)
   337  deassign_host_irq(kvm, assigned_dev);
   339  deassign_guest_irq(kvm, assigned_dev);

kvm_free_assigned_irq()  (argument)
   345  struct kvm_assigned_dev_kernel *assigned_dev)
   347  kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type);

kvm_free_assigned_device()  (argument)
   352  *assigned_dev)
   354  kvm_free_assigned_irq(kvm, assigned_dev);
   356  pci_reset_function(assigned_dev->dev);
   357  if (pci_load_and_free_saved_state(assigned_dev->dev,
   358  &assigned_dev->pci_saved_state))
   360  __func__, dev_name(&assigned_dev->dev->dev));
   362  pci_restore_state(assigned_dev->dev);
   364  pci_clear_dev_assigned(assigned_dev->dev);
   366  pci_release_regions(assigned_dev->dev);
   367  pci_disable_device(assigned_dev->dev);
   368  pci_dev_put(assigned_dev->dev);
   370  list_del(&assigned_dev->list);
   371  kfree(assigned_dev);

kvm_free_all_assigned_devices()  (local)
   377  struct kvm_assigned_dev_kernel *assigned_dev;
   380  assigned_dev = list_entry(ptr,
   384  kvm_free_assigned_device(kvm, assigned_dev);

kvm_vm_ioctl_assign_device()  (argument)
   706  struct kvm_assigned_pci_dev *assigned_dev)
   712  if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU))
   719  assigned_dev->assigned_dev_id);
   733  dev = pci_get_domain_bus_and_slot(assigned_dev->segnr,
   734  assigned_dev->busnr,
   735  assigned_dev->devfn);
   772  assigned_dev->flags &= ~KVM_DEV_ASSIGN_PCI_2_3;
   774  match->assigned_dev_id = assigned_dev->assigned_dev_id;
   775  match->host_segnr = assigned_dev->segnr;
   776  match->host_busnr = assigned_dev->busnr;
   777  match->host_devfn = assigned_dev->devfn;
   778  match->flags = assigned_dev->flags;

kvm_vm_ioctl_deassign_device()  (argument)
   819  struct kvm_assigned_pci_dev *assigned_dev)
   827  assigned_dev->assigned_dev_id);

kvm_vm_ioctl_set_pci_irq_mask()  (argument)
   926  struct kvm_assigned_pci_dev *assigned_dev)
   934  assigned_dev->assigned_dev_id);
   943  match->flags |= assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX;
   946  if (assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX) {
   953  } else if (!(assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) {

kvm_vm_ioctl_assigned_device()  (local)
   982  struct kvm_assigned_pci_dev assigned_dev;
   985  if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
   987  r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
  1019  struct kvm_assigned_pci_dev assigned_dev;
  1022  if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
  1024  r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
  1050  struct kvm_assigned_pci_dev assigned_dev;
  1053  if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
  1055  r = kvm_vm_ioctl_set_pci_irq_mask(kvm, &assigned_dev);

Prototypes in which assigned_dev is a function argument:
    65  find_index_from_host_irq(struct kvm_assigned_dev_kernel *assigned_dev, int irq)
   257  deassign_guest_irq(struct kvm *kvm, struct kvm_assigned_dev_kernel *assigned_dev)
   274  deassign_host_irq(struct kvm *kvm, struct kvm_assigned_dev_kernel *assigned_dev)
   321  kvm_deassign_irq(struct kvm *kvm, struct kvm_assigned_dev_kernel *assigned_dev, unsigned long irq_requested_type)
   344  kvm_free_assigned_irq(struct kvm *kvm, struct kvm_assigned_dev_kernel *assigned_dev)
   350  kvm_free_assigned_device(struct kvm *kvm, struct kvm_assigned_dev_kernel *assigned_dev)
   705  kvm_vm_ioctl_assign_device(struct kvm *kvm, struct kvm_assigned_pci_dev *assigned_dev)
   818  kvm_vm_ioctl_deassign_device(struct kvm *kvm, struct kvm_assigned_pci_dev *assigned_dev)
   925  kvm_vm_ioctl_set_pci_irq_mask(struct kvm *kvm, struct kvm_assigned_pci_dev *assigned_dev)
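The listing only shows fragments, so as a rough illustration of the lookup pattern that kvm_assigned_dev_msix() and kvm_assigned_dev_thread_msix() rely on (lines 71, 74, 193, 198 above), here is a minimal, self-contained userspace sketch. The struct layout, field names, and the assigned_dev_model type are simplified stand-ins for the kernel's kvm_assigned_dev_kernel and MSI-X entry types, not the real definitions; only the loop-and-compare idea comes from the listed code.

/*
 * Sketch: map the host IRQ that fired to the index of its MSI-X entry,
 * then use that index to find the corresponding guest vector.
 * Types and fields are hypothetical stand-ins for the kernel structures.
 */
#include <stdio.h>

struct host_msix_entry  { unsigned int vector; };  /* host IRQ number for one MSI-X entry */
struct guest_msix_entry { unsigned int vector; };  /* guest vector for the same entry     */

struct assigned_dev_model {
	int entries_nr;
	struct host_msix_entry  *host_msix_entries;
	struct guest_msix_entry *guest_msix_entries;
};

/* Walk the host entries until one matches the host IRQ that fired. */
static int find_index_from_host_irq(const struct assigned_dev_model *dev, int irq)
{
	for (int i = 0; i < dev->entries_nr; i++)
		if (dev->host_msix_entries[i].vector == (unsigned int)irq)
			return i;
	return -1;                              /* unknown host IRQ */
}

int main(void)
{
	struct host_msix_entry  host[]  = { { 40 }, { 41 }, { 42 } };
	struct guest_msix_entry guest[] = { { 0 }, { 1 }, { 2 } };
	struct assigned_dev_model dev = { 3, host, guest };

	int irq = 41;                           /* pretend host IRQ 41 fired */
	int index = find_index_from_host_irq(&dev, irq);
	if (index >= 0)
		printf("host irq %d -> entry %d -> guest vector %u\n",
		       irq, index, guest[index].vector);
	return 0;
}

In the real handlers, once the index is known, the guest vector from guest_msix_entries[index] is what gets passed to kvm_set_irq_inatomic() or kvm_assigned_dev_raise_guest_irq(), as lines 198-200 and 214-215 of the listing show.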