eventfd.c: lines that reference irqfd, listed by enclosing function.

In irqfd_inject():
    50  struct kvm_kernel_irqfd *irqfd =
    52  struct kvm *kvm = irqfd->kvm;
    54  if (!irqfd->resampler) {
    55  kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
    57  kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
    61  irqfd->gsi, 1, false);

In irqfd_resampler_ack():
    74  struct kvm_kernel_irqfd *irqfd;
    86  list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
    87  eventfd_signal(irqfd->resamplefd, 1);

In irqfd_resampler_shutdown():
    93  irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
    95  struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler;
   100  list_del_rcu(&irqfd->resampler_link);

In irqfd_shutdown():
   120  struct kvm_kernel_irqfd *irqfd =
   128  eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);
   134  flush_work(&irqfd->inject);
   136  if (irqfd->resampler) {
   137  irqfd_resampler_shutdown(irqfd);
   138  eventfd_ctx_put(irqfd->resamplefd);
   145  irq_bypass_unregister_consumer(&irqfd->consumer);
   147  eventfd_ctx_put(irqfd->eventfd);
   148  kfree(irqfd);

In irqfd_is_active():
   154  irqfd_is_active(struct kvm_kernel_irqfd *irqfd)
   156  return list_empty(&irqfd->list) ? false : true;

Comment above irqfd_deactivate():
   160  * Mark the irqfd as inactive and schedule it for removal

In irqfd_deactivate():
   165  irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
   167  BUG_ON(!irqfd_is_active(irqfd));
   169  list_del_init(&irqfd->list);
   171  queue_work(irqfd_cleanup_wq, &irqfd->shutdown);

In irqfd_wakeup():
   189  struct kvm_kernel_irqfd *irqfd =
   193  struct kvm *kvm = irqfd->kvm;
   200  seq = read_seqcount_begin(&irqfd->irq_entry_sc);
   201  irq = irqfd->irq_entry;
   202  } while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
   207  schedule_work(&irqfd->inject);
   218  * We must check if someone deactivated the irqfd before
   223  * We cannot race against the irqfd going away since the
   226  if (irqfd_is_active(irqfd))
   227  irqfd_deactivate(irqfd);

In irqfd_ptable_queue_proc():
   239  struct kvm_kernel_irqfd *irqfd =
   241  add_wait_queue(wqh, &irqfd->wait);

In irqfd_update():
   245  static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
   251  n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);
   253  write_seqcount_begin(&irqfd->irq_entry_sc);
   257  irqfd->irq_entry = *e;
   259  irqfd->irq_entry.type = 0;
   261  write_seqcount_end(&irqfd->irq_entry_sc);
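The helpers listed above are arranged around a pair of eventfds: irqfd_wakeup() runs as a wait-queue callback when the eventfd backing the irqfd is signalled and queues irqfd_inject(), while irqfd_resampler_ack() signals a second eventfd back to userspace for level-triggered lines. As a rough orientation only, the userspace side of that traffic might look like the sketch below; the descriptor names and the standalone main() are illustrative, not taken from eventfd.c, and error handling is omitted.

/* Sketch of the userspace side of an irqfd's two eventfds.
 * Writing to trigger_fd is what eventually reaches irqfd_wakeup() and
 * irqfd_inject(); reading resample_fd consumes the signal sent by
 * irqfd_resampler_ack() on guest EOI.  Without KVM actually attached to
 * these descriptors the read below would simply block. */
#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
	int trigger_fd  = eventfd(0, EFD_CLOEXEC);  /* would be passed as the irqfd's fd */
	int resample_fd = eventfd(0, EFD_CLOEXEC);  /* would be passed as the resamplefd */
	uint64_t one = 1, ack;

	/* A device backend raises the interrupt by bumping the counter;
	 * KVM's poll callback sees the eventfd become readable. */
	write(trigger_fd, &one, sizeof(one));

	/* For resampled (level-triggered) irqfds, the kernel signals
	 * resample_fd on EOI; the backend reads it and re-checks whether
	 * the level is still asserted before triggering again. */
	read(resample_fd, &ack, sizeof(ack));

	close(trigger_fd);
	close(resample_fd);
	return 0;
}

Note that once the trigger descriptor is handed to KVM it is consumed inside the kernel through the wait-queue callback installed by init_waitqueue_func_entry()/add_wait_queue() (lines 373 and 241), so userspace never read()s it back; only the resample descriptor is read on this side.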
In kvm_irqfd_assign():
   286  struct kvm_kernel_irqfd *irqfd, *tmp;
   296  irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
   297  if (!irqfd)
   300  irqfd->kvm = kvm;
   301  irqfd->gsi = args->gsi;
   302  INIT_LIST_HEAD(&irqfd->list);
   303  INIT_WORK(&irqfd->inject, irqfd_inject);
   304  INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
   305  seqcount_init(&irqfd->irq_entry_sc);
   319  irqfd->eventfd = eventfd;
   330  irqfd->resamplefd = resamplefd;
   331  INIT_LIST_HEAD(&irqfd->resampler_link);
   337  if (resampler->notifier.gsi == irqfd->gsi) {
   338  irqfd->resampler = resampler;
   343  if (!irqfd->resampler) {
   353  resampler->notifier.gsi = irqfd->gsi;
   360  irqfd->resampler = resampler;
   363  list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
   373  init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
   374  init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);
   380  if (irqfd->eventfd != tmp->eventfd)
   389  irqfd_update(kvm, irqfd);
   392  list_add_tail(&irqfd->list, &kvm->irqfds.items);
   400  events = f.file->f_op->poll(f.file, &irqfd->pt);
   403  schedule_work(&irqfd->inject);
   406  * do not drop the file until the irqfd is fully initialized, otherwise
   411  irqfd->consumer.token = (void *)irqfd->eventfd;
   412  irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
   413  irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
   414  irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
   415  irqfd->consumer.start = kvm_arch_irq_bypass_start;
   416  ret = irq_bypass_register_consumer(&irqfd->consumer);
   419  irqfd->consumer.token, ret);
   425  if (irqfd->resampler)
   426  irqfd_resampler_shutdown(irqfd);
   437  kfree(irqfd);

Comment above kvm_irqfd_deassign():
   519  * shutdown any irqfd's that match fd+gsi

In kvm_irqfd_deassign():
   524  struct kvm_kernel_irqfd *irqfd, *tmp;
   533  list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
   534  if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
   541  write_seqcount_begin(&irqfd->irq_entry_sc);
   542  irqfd->irq_entry.type = 0;
   543  write_seqcount_end(&irqfd->irq_entry_sc);
   544  irqfd_deactivate(irqfd);

In kvm_irqfd_release():
   580  struct kvm_kernel_irqfd *irqfd, *tmp;
   584  list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
   585  irqfd_deactivate(irqfd);

In kvm_irq_routing_update():
   603  struct kvm_kernel_irqfd *irqfd;
   607  list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
   608  irqfd_update(kvm, irqfd);
   611  if (irqfd->producer) {
   613  irqfd->kvm, irqfd->producer->irq,
   614  irqfd->gsi, 1);

In kvm_irqfd_init():
   630  irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
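kvm_irqfd_assign() and kvm_irqfd_deassign() above sit behind the KVM_IRQFD ioctl on a VM file descriptor, and the deassign path matches on fd+gsi exactly as the comment at line 519 says. A minimal userspace sketch of that ioctl usage follows; it assumes vm_fd refers to a VM created through /dev/kvm with an in-kernel irqchip, the helper names and the GSI value are illustrative, and error handling is omitted.

/* Sketch: wiring an eventfd to a guest GSI via KVM_IRQFD, then tearing it
 * down again with KVM_IRQFD_FLAG_DEASSIGN.  vm_fd is assumed to be a VM
 * file descriptor with an in-kernel irqchip already created. */
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int wire_irqfd(int vm_fd, unsigned int gsi)
{
	int efd = eventfd(0, EFD_CLOEXEC);
	struct kvm_irqfd irqfd;

	memset(&irqfd, 0, sizeof(irqfd));
	irqfd.fd  = efd;
	irqfd.gsi = gsi;                   /* stored as irqfd->gsi in eventfd.c */
	ioctl(vm_fd, KVM_IRQFD, &irqfd);   /* reaches kvm_irqfd_assign() */
	return efd;
}

static void unwire_irqfd(int vm_fd, int efd, unsigned int gsi)
{
	struct kvm_irqfd irqfd;

	memset(&irqfd, 0, sizeof(irqfd));
	irqfd.fd    = efd;
	irqfd.gsi   = gsi;
	irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;  /* matched on fd+gsi in kvm_irqfd_deassign() */
	ioctl(vm_fd, KVM_IRQFD, &irqfd);
}

Tearing down with the same fd+gsi pair rather than an opaque handle mirrors how kvm_irqfd_deassign() walks kvm->irqfds.items looking for a matching eventfd and gsi (lines 533-534) before calling irqfd_deactivate().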