Lines Matching refs:irqfd
103 struct _irqfd *irqfd = container_of(work, struct _irqfd, inject); in irqfd_inject() local
104 struct kvm *kvm = irqfd->kvm; in irqfd_inject()
106 if (!irqfd->resampler) { in irqfd_inject()
107 kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1, in irqfd_inject()
109 kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0, in irqfd_inject()
113 irqfd->gsi, 1, false); in irqfd_inject()
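Only the lines that mention irqfd are listed, so irqfd_inject() appears truncated above. A reconstructed sketch follows; the filled-in lines (braces, the trailing level arguments, and the source-ID constant on the resampler branch) are assumptions from context, not part of the listing.

static void
irqfd_inject(struct work_struct *work)
{
	struct _irqfd *irqfd = container_of(work, struct _irqfd, inject);
	struct kvm *kvm = irqfd->kvm;

	if (!irqfd->resampler) {
		/* No resampler: pulse the GSI, i.e. assert then deassert. */
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
			    false);
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
			    false);
	} else
		/* Resampler attached: assert only; the line is deasserted on
		 * guest EOI via irqfd_resampler_ack(). The source-ID constant
		 * is not visible in the listing and is assumed here. */
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    irqfd->gsi, 1, false);
}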
126 struct _irqfd *irqfd; in irqfd_resampler_ack() local
137 list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link) in irqfd_resampler_ack()
138 eventfd_signal(irqfd->resamplefd, 1); in irqfd_resampler_ack()
144 irqfd_resampler_shutdown(struct _irqfd *irqfd) in irqfd_resampler_shutdown() argument
146 struct _irqfd_resampler *resampler = irqfd->resampler; in irqfd_resampler_shutdown()
151 list_del_rcu(&irqfd->resampler_link); in irqfd_resampler_shutdown()
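irqfd_resampler_ack() above is the EOI notifier for resampled (level-triggered) irqfds: when the guest acknowledges the interrupt, every irqfd sharing the GSI gets its resamplefd signalled so userspace can re-check the device. A minimal sketch, with everything not shown in the listing (locking and the preceding deassert of the line) treated as an assumption:

static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
	struct _irqfd_resampler *resampler =
		container_of(kian, struct _irqfd_resampler, notifier);
	struct _irqfd *irqfd;

	/* The real handler first deasserts the GSI (not shown in the
	 * listing), then wakes every irqfd registered on this resampler. */
	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
		eventfd_signal(irqfd->resamplefd, 1);
}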
171 struct _irqfd *irqfd = container_of(work, struct _irqfd, shutdown); in irqfd_shutdown() local
178 eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt); in irqfd_shutdown()
184 flush_work(&irqfd->inject); in irqfd_shutdown()
186 if (irqfd->resampler) { in irqfd_shutdown()
187 irqfd_resampler_shutdown(irqfd); in irqfd_shutdown()
188 eventfd_ctx_put(irqfd->resamplefd); in irqfd_shutdown()
194 eventfd_ctx_put(irqfd->eventfd); in irqfd_shutdown()
195 kfree(irqfd); in irqfd_shutdown()
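The irqfd_shutdown() lines above encode a teardown ordering: unhook from the eventfd wait queue first, then drain pending injections, then drop the resampler and eventfd references before freeing. Sketched out with the omitted lines filled in as assumptions:

static void
irqfd_shutdown(struct work_struct *work)
{
	struct _irqfd *irqfd = container_of(work, struct _irqfd, shutdown);
	u64 cnt;

	/* 1. Detach from the eventfd wait queue so no new events arrive. */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	/* 2. Drain any injection already scheduled by irqfd_wakeup(). */
	flush_work(&irqfd->inject);

	/* 3. Leave the shared resampler, if one is attached. */
	if (irqfd->resampler) {
		irqfd_resampler_shutdown(irqfd);
		eventfd_ctx_put(irqfd->resamplefd);
	}

	/* 4. Only now is it safe to drop the eventfd and free the object. */
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}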
201 irqfd_is_active(struct _irqfd *irqfd) in irqfd_is_active() argument
203 return list_empty(&irqfd->list) ? false : true; in irqfd_is_active()
212 irqfd_deactivate(struct _irqfd *irqfd) in irqfd_deactivate() argument
214 BUG_ON(!irqfd_is_active(irqfd)); in irqfd_deactivate()
216 list_del_init(&irqfd->list); in irqfd_deactivate()
218 queue_work(irqfd_cleanup_wq, &irqfd->shutdown); in irqfd_deactivate()
227 struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait); in irqfd_wakeup() local
230 struct kvm *kvm = irqfd->kvm; in irqfd_wakeup()
237 seq = read_seqcount_begin(&irqfd->irq_entry_sc); in irqfd_wakeup()
238 irq = irqfd->irq_entry; in irqfd_wakeup()
239 } while (read_seqcount_retry(&irqfd->irq_entry_sc, seq)); in irqfd_wakeup()
245 schedule_work(&irqfd->inject); in irqfd_wakeup()
264 if (irqfd_is_active(irqfd)) in irqfd_wakeup()
265 irqfd_deactivate(irqfd); in irqfd_wakeup()
277 struct _irqfd *irqfd = container_of(pt, struct _irqfd, pt); in irqfd_ptable_queue_proc() local
278 add_wait_queue(wqh, &irqfd->wait); in irqfd_ptable_queue_proc()
282 static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd) in irqfd_update() argument
288 n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi); in irqfd_update()
290 write_seqcount_begin(&irqfd->irq_entry_sc); in irqfd_update()
292 irqfd->irq_entry.type = 0; in irqfd_update()
298 irqfd->irq_entry = *e; in irqfd_update()
301 write_seqcount_end(&irqfd->irq_entry_sc); in irqfd_update()
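irqfd_update() (write side, above) and irqfd_wakeup() (read side, further up) pair a seqcount around the cached routing entry so the wakeup path can read it locklessly. A condensed sketch of the pattern; the declarations and the selection of the routing entry e are assumed, not shown in the listing.

/* Writer, as in irqfd_update(): publish a consistent snapshot. */
write_seqcount_begin(&irqfd->irq_entry_sc);
irqfd->irq_entry.type = 0;		/* invalidate first ...        */
/* ... pick the matching routing entry e (not shown) ... */
irqfd->irq_entry = *e;			/* ... then store the new one. */
write_seqcount_end(&irqfd->irq_entry_sc);

/* Reader, as in irqfd_wakeup(): retry until an unmodified snapshot is
 * observed, then act only on the local copy. */
do {
	seq = read_seqcount_begin(&irqfd->irq_entry_sc);
	irq = irqfd->irq_entry;
} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));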
307 struct _irqfd *irqfd, *tmp; in kvm_irqfd_assign() local
317 irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL); in kvm_irqfd_assign()
318 if (!irqfd) in kvm_irqfd_assign()
321 irqfd->kvm = kvm; in kvm_irqfd_assign()
322 irqfd->gsi = args->gsi; in kvm_irqfd_assign()
323 INIT_LIST_HEAD(&irqfd->list); in kvm_irqfd_assign()
324 INIT_WORK(&irqfd->inject, irqfd_inject); in kvm_irqfd_assign()
325 INIT_WORK(&irqfd->shutdown, irqfd_shutdown); in kvm_irqfd_assign()
326 seqcount_init(&irqfd->irq_entry_sc); in kvm_irqfd_assign()
340 irqfd->eventfd = eventfd; in kvm_irqfd_assign()
351 irqfd->resamplefd = resamplefd; in kvm_irqfd_assign()
352 INIT_LIST_HEAD(&irqfd->resampler_link); in kvm_irqfd_assign()
358 if (resampler->notifier.gsi == irqfd->gsi) { in kvm_irqfd_assign()
359 irqfd->resampler = resampler; in kvm_irqfd_assign()
364 if (!irqfd->resampler) { in kvm_irqfd_assign()
374 resampler->notifier.gsi = irqfd->gsi; in kvm_irqfd_assign()
381 irqfd->resampler = resampler; in kvm_irqfd_assign()
384 list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list); in kvm_irqfd_assign()
394 init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup); in kvm_irqfd_assign()
395 init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc); in kvm_irqfd_assign()
401 if (irqfd->eventfd != tmp->eventfd) in kvm_irqfd_assign()
410 irqfd_update(kvm, irqfd); in kvm_irqfd_assign()
413 list_add_tail(&irqfd->list, &kvm->irqfds.items); in kvm_irqfd_assign()
421 events = f.file->f_op->poll(f.file, &irqfd->pt); in kvm_irqfd_assign()
424 schedule_work(&irqfd->inject); in kvm_irqfd_assign()
435 if (irqfd->resampler) in kvm_irqfd_assign()
436 irqfd_resampler_shutdown(irqfd); in kvm_irqfd_assign()
447 kfree(irqfd); in kvm_irqfd_assign()
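kvm_irqfd_assign() above is the kernel half of the KVM_IRQFD ioctl: it wires an eventfd to a GSI so that signalling the eventfd injects the interrupt into the guest without a separate exit to userspace. For orientation, a minimal userspace sketch; attach_irqfd() is a hypothetical helper, and vmfd/gsi are assumed to come from an already-configured VM.

#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Returns the eventfd wired to gsi on success, -1 on error. */
static int attach_irqfd(int vmfd, unsigned int gsi)
{
	struct kvm_irqfd irqfd;
	int efd = eventfd(0, EFD_CLOEXEC);

	if (efd < 0)
		return -1;

	memset(&irqfd, 0, sizeof(irqfd));
	irqfd.fd  = efd;	/* eventfd that will trigger the GSI */
	irqfd.gsi = gsi;	/* interrupt to inject on each signal */
	/* For level-triggered use, also set KVM_IRQFD_FLAG_RESAMPLE and
	 * irqfd.resamplefd to a second eventfd that is signalled on EOI. */

	if (ioctl(vmfd, KVM_IRQFD, &irqfd) < 0)
		return -1;

	/* From here on, eventfd_write(efd, 1) injects the interrupt. */
	return efd;
}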
528 struct _irqfd *irqfd, *tmp; in kvm_irqfd_deassign() local
537 list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) { in kvm_irqfd_deassign()
538 if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) { in kvm_irqfd_deassign()
545 write_seqcount_begin(&irqfd->irq_entry_sc); in kvm_irqfd_deassign()
546 irqfd->irq_entry.type = 0; in kvm_irqfd_deassign()
547 write_seqcount_end(&irqfd->irq_entry_sc); in kvm_irqfd_deassign()
548 irqfd_deactivate(irqfd); in kvm_irqfd_deassign()
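kvm_irqfd_deassign() is reached through the same KVM_IRQFD ioctl with KVM_IRQFD_FLAG_DEASSIGN set; as the lines above show, the eventfd and GSI must both match the original assignment. A hypothetical counterpart to the helper sketched earlier:

static int detach_irqfd(int vmfd, int efd, unsigned int gsi)
{
	struct kvm_irqfd irqfd = {
		.fd    = efd,	/* same eventfd as on assignment */
		.gsi   = gsi,	/* same GSI as on assignment     */
		.flags = KVM_IRQFD_FLAG_DEASSIGN,
	};

	return ioctl(vmfd, KVM_IRQFD, &irqfd);
}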
584 struct _irqfd *irqfd, *tmp; in kvm_irqfd_release() local
588 list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) in kvm_irqfd_release()
589 irqfd_deactivate(irqfd); in kvm_irqfd_release()
607 struct _irqfd *irqfd; in kvm_irq_routing_update() local
611 list_for_each_entry(irqfd, &kvm->irqfds.items, list) in kvm_irq_routing_update()
612 irqfd_update(kvm, irqfd); in kvm_irq_routing_update()