Lines matching refs: kvm (all hits below are in the kernel's virt/kvm/eventfd.c)
60 struct kvm *kvm; member
77 struct kvm *kvm; member
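
The two "member" hits are the back-pointers from the irqfd objects to their owning VM. A reduced sketch of the two containers, keeping only fields the rest of this listing implies; every name other than kvm is an assumption, and both structs carry more state (wait queue entries, work items, a cached routing entry) in the real file:

/* Sketch only: fields inferred from the hits below, not verbatim source. */
struct _irqfd_resampler {
        struct kvm *kvm;                      /* hit at 77 */
        struct list_head list;                /* irqfds sharing one level GSI */
        struct kvm_irq_ack_notifier notifier; /* fires on guest EOI */
        struct list_head link;                /* on kvm->irqfds.resampler_list */
};

struct _irqfd {
        struct kvm *kvm;                      /* hit at 60 */
        int gsi;                              /* route to inject on */
        struct eventfd_ctx *eventfd;          /* userspace trigger side */
        struct _irqfd_resampler *resampler;   /* non-NULL for level-triggered */
        struct eventfd_ctx *resamplefd;       /* signalled on EOI */
        struct list_head list;                /* on kvm->irqfds.items */
};
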
104 struct kvm *kvm = irqfd->kvm; in irqfd_inject() local
107 kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1, in irqfd_inject()
109 kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0, in irqfd_inject()
112 kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, in irqfd_inject()
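
The hits at 104-112 are irqfd_inject(), the deferred-injection work handler. The two source IDs encode the trigger mode: the userspace ID is pulsed (asserted, then immediately deasserted) for edge interrupts, while the resample ID is only asserted and stays up until the guest acks. A sketch consistent with these fragments; the resampler test and the container_of() plumbing are filled in from context:

static void irqfd_inject(struct work_struct *work)
{
        struct _irqfd *irqfd = container_of(work, struct _irqfd, inject);
        struct kvm *kvm = irqfd->kvm;

        if (!irqfd->resampler) {
                /* Edge-triggered: pulse the line, assert then deassert. */
                kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
                            false);
                kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
                            false);
        } else {
                /* Level-triggered: assert and leave the line up; the
                 * resampler's ack notifier deasserts it on guest EOI. */
                kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
                            irqfd->gsi, 1, false);
        }
}
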
125 struct kvm *kvm; in irqfd_resampler_ack() local
130 kvm = resampler->kvm; in irqfd_resampler_ack()
132 kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, in irqfd_resampler_ack()
135 idx = srcu_read_lock(&kvm->irq_srcu); in irqfd_resampler_ack()
140 srcu_read_unlock(&kvm->irq_srcu, idx); in irqfd_resampler_ack()
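
irqfd_resampler_ack() runs when the guest EOIs the shared level interrupt: it drops the line, then signals every attached resample eventfd so device models can re-check whether their interrupt condition still holds. A sketch; the resampler->list walk and the eventfd_signal() call go beyond the listed fragments:

static void irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
        struct _irqfd_resampler *resampler;
        struct _irqfd *irqfd;
        struct kvm *kvm;
        int idx;

        resampler = container_of(kian, struct _irqfd_resampler, notifier);
        kvm = resampler->kvm;

        /* Deassert the shared line... */
        kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
                    resampler->notifier.gsi, 0, false);

        /* ...then notify every listener, walking the list under SRCU so
         * shutdown can use synchronize_srcu() to wait us out. */
        idx = srcu_read_lock(&kvm->irq_srcu);
        list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
                eventfd_signal(irqfd->resamplefd, 1);
        srcu_read_unlock(&kvm->irq_srcu, idx);
}
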
147 struct kvm *kvm = resampler->kvm; in irqfd_resampler_shutdown() local
149 mutex_lock(&kvm->irqfds.resampler_lock); in irqfd_resampler_shutdown()
152 synchronize_srcu(&kvm->irq_srcu); in irqfd_resampler_shutdown()
156 kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier); in irqfd_resampler_shutdown()
157 kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, in irqfd_resampler_shutdown()
162 mutex_unlock(&kvm->irqfds.resampler_lock); in irqfd_resampler_shutdown()
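
irqfd_resampler_shutdown() shows the teardown ordering: unlink under resampler_lock, synchronize_srcu() to drain any irqfd_resampler_ack() walker still on the list, and only the last irqfd on a GSI unregisters the ack notifier and drops the line for good. A sketch; the list bookkeeping around the listed calls is reconstructed:

static void irqfd_resampler_shutdown(struct _irqfd *irqfd)
{
        struct _irqfd_resampler *resampler = irqfd->resampler;
        struct kvm *kvm = resampler->kvm;

        mutex_lock(&kvm->irqfds.resampler_lock);

        list_del_rcu(&irqfd->resampler_link);
        synchronize_srcu(&kvm->irq_srcu);

        if (list_empty(&resampler->list)) {
                /* Last user: tear down the shared GSI state. */
                list_del(&resampler->link);
                kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
                kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
                            resampler->notifier.gsi, 0, false);
                kfree(resampler);
        }

        mutex_unlock(&kvm->irqfds.resampler_lock);
}
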
230 struct kvm *kvm = irqfd->kvm; in irqfd_wakeup() local
235 idx = srcu_read_lock(&kvm->irq_srcu); in irqfd_wakeup()
242 kvm_set_msi(&irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, in irqfd_wakeup()
246 srcu_read_unlock(&kvm->irq_srcu, idx); in irqfd_wakeup()
253 spin_lock_irqsave(&kvm->irqfds.lock, flags); in irqfd_wakeup()
267 spin_unlock_irqrestore(&kvm->irqfds.lock, flags); in irqfd_wakeup()
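
irqfd_wakeup() is the eventfd waitqueue callback, and the hits split it in two: a POLLIN fast path that injects a cached MSI route directly under SRCU (235-246), and a POLLHUP teardown that takes irqfds.lock with irqsave because it runs in atomic wakeup context (253-267). A simplified sketch: the real function snapshots irq_entry under a seqcount, elided here, and irqfd_is_active()/irqfd_deactivate() are helpers assumed from context:

static int irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait);
        struct kvm_kernel_irq_routing_entry irq;
        struct kvm *kvm = irqfd->kvm;
        unsigned long poll_flags = (unsigned long)key;
        int idx;

        if (poll_flags & POLLIN) {
                /* Inject without leaving the wakeup path when the cached
                 * route is MSI; otherwise punt to the work item above. */
                idx = srcu_read_lock(&kvm->irq_srcu);
                irq = irqfd->irq_entry;
                if (irq.type == KVM_IRQ_ROUTING_MSI)
                        kvm_set_msi(&irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
                                    false);
                else
                        schedule_work(&irqfd->inject);
                srcu_read_unlock(&kvm->irq_srcu, idx);
        }

        if (poll_flags & POLLHUP) {
                /* The eventfd is closing: detach, guarding against a
                 * racing deassign that already removed us. */
                unsigned long flags;

                spin_lock_irqsave(&kvm->irqfds.lock, flags);
                if (irqfd_is_active(irqfd))
                        irqfd_deactivate(irqfd);
                spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
        }

        return 0;
}
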
282 static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd) in irqfd_update() argument
288 n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi); in irqfd_update()
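
irqfd_update() refills that cached entry. kvm_irq_map_gsi() can return several routing entries for one GSI; only an MSI route is cached, anything else forces the slow path through the work item. A sketch; the seqcount protecting readers in irqfd_wakeup() is an assumption:

static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd)
{
        struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
        int i, n_entries;

        n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);

        write_seqcount_begin(&irqfd->irq_entry_sc);

        irqfd->irq_entry.type = 0;      /* invalidate: slow path by default */
        for (i = 0; i < n_entries; i++)
                if (entries[i].type == KVM_IRQ_ROUTING_MSI)
                        irqfd->irq_entry = entries[i];

        write_seqcount_end(&irqfd->irq_entry_sc);
}
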
305 kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args) in kvm_irqfd_assign() argument
314 if (!kvm_arch_intc_initialized(kvm)) in kvm_irqfd_assign()
321 irqfd->kvm = kvm; in kvm_irqfd_assign()
354 mutex_lock(&kvm->irqfds.resampler_lock); in kvm_irqfd_assign()
357 &kvm->irqfds.resampler_list, link) { in kvm_irqfd_assign()
368 mutex_unlock(&kvm->irqfds.resampler_lock); in kvm_irqfd_assign()
372 resampler->kvm = kvm; in kvm_irqfd_assign()
378 list_add(&resampler->link, &kvm->irqfds.resampler_list); in kvm_irqfd_assign()
379 kvm_register_irq_ack_notifier(kvm, in kvm_irqfd_assign()
385 synchronize_srcu(&kvm->irq_srcu); in kvm_irqfd_assign()
387 mutex_unlock(&kvm->irqfds.resampler_lock); in kvm_irqfd_assign()
397 spin_lock_irq(&kvm->irqfds.lock); in kvm_irqfd_assign()
400 list_for_each_entry(tmp, &kvm->irqfds.items, list) { in kvm_irqfd_assign()
405 spin_unlock_irq(&kvm->irqfds.lock); in kvm_irqfd_assign()
409 idx = srcu_read_lock(&kvm->irq_srcu); in kvm_irqfd_assign()
410 irqfd_update(kvm, irqfd); in kvm_irqfd_assign()
411 srcu_read_unlock(&kvm->irq_srcu, idx); in kvm_irqfd_assign()
413 list_add_tail(&irqfd->list, &kvm->irqfds.items); in kvm_irqfd_assign()
415 spin_unlock_irq(&kvm->irqfds.lock); in kvm_irqfd_assign()
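
kvm_irqfd_assign() ties the pieces together: resampler lookup or creation under resampler_lock (354-387), rejection of a duplicate eventfd and insertion into irqfds.items under irqfds.lock (397-415), and the routing cache primed under SRCU (409-411). Rather than reconstructing all of it, here is the other side of the ioctl; a minimal userspace sketch, assuming vm_fd came from KVM_CREATE_VM and an in-kernel irqchip exists (which is what the kvm_arch_intc_initialized() check at 314 enforces):

#include <linux/kvm.h>
#include <stdio.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>

/* Returns an eventfd whose writes inject `gsi` into the guest, -1 on error. */
static int attach_irqfd(int vm_fd, unsigned int gsi)
{
        struct kvm_irqfd irqfd;
        int efd = eventfd(0, EFD_CLOEXEC);

        if (efd < 0)
                return -1;

        memset(&irqfd, 0, sizeof(irqfd));
        irqfd.fd = efd;         /* trigger side */
        irqfd.gsi = gsi;        /* route to inject */

        if (ioctl(vm_fd, KVM_IRQFD, &irqfd) < 0) {
                perror("KVM_IRQFD");
                return -1;
        }
        /* write(efd, &(uint64_t){1}, 8) now injects without exiting to
         * this process; KVM_IRQFD_FLAG_RESAMPLE plus a resamplefd would
         * select the level-triggered path seen above. */
        return efd;
}
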
451 bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin) in kvm_irq_has_notifier() argument
456 idx = srcu_read_lock(&kvm->irq_srcu); in kvm_irq_has_notifier()
457 gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin); in kvm_irq_has_notifier()
459 hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list, in kvm_irq_has_notifier()
462 srcu_read_unlock(&kvm->irq_srcu, idx); in kvm_irq_has_notifier()
466 srcu_read_unlock(&kvm->irq_srcu, idx); in kvm_irq_has_notifier()
472 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin) in kvm_notify_acked_irq() argument
479 idx = srcu_read_lock(&kvm->irq_srcu); in kvm_notify_acked_irq()
480 gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin); in kvm_notify_acked_irq()
482 hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list, in kvm_notify_acked_irq()
486 srcu_read_unlock(&kvm->irq_srcu, idx); in kvm_notify_acked_irq()
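
kvm_irq_has_notifier() and kvm_notify_acked_irq() are the same SRCU-protected walk: map (irqchip, pin) to a GSI, scan irq_ack_notifier_list, with one returning true on a match and the other invoking the callback; this is the hook the resampler registered above. A sketch of the notify side; the gsi != -1 guard and the kian->irq_acked field are filled in from context:

void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
        struct kvm_irq_ack_notifier *kian;
        int gsi, idx;

        idx = srcu_read_lock(&kvm->irq_srcu);
        gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
        if (gsi != -1)
                hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
                                         link)
                        if (kian->gsi == gsi)
                                kian->irq_acked(kian);
        srcu_read_unlock(&kvm->irq_srcu, idx);
}
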
489 void kvm_register_irq_ack_notifier(struct kvm *kvm, in kvm_register_irq_ack_notifier() argument
492 mutex_lock(&kvm->irq_lock); in kvm_register_irq_ack_notifier()
493 hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list); in kvm_register_irq_ack_notifier()
494 mutex_unlock(&kvm->irq_lock); in kvm_register_irq_ack_notifier()
495 kvm_vcpu_request_scan_ioapic(kvm); in kvm_register_irq_ack_notifier()
498 void kvm_unregister_irq_ack_notifier(struct kvm *kvm, in kvm_unregister_irq_ack_notifier() argument
501 mutex_lock(&kvm->irq_lock); in kvm_unregister_irq_ack_notifier()
503 mutex_unlock(&kvm->irq_lock); in kvm_unregister_irq_ack_notifier()
504 synchronize_srcu(&kvm->irq_srcu); in kvm_unregister_irq_ack_notifier()
505 kvm_vcpu_request_scan_ioapic(kvm); in kvm_unregister_irq_ack_notifier()
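
The registration pair shows the writer side of that list: mutations serialize on irq_lock and use RCU list ops, and only unregistration pays for synchronize_srcu(), so the walkers above drain before a notifier's memory can be reused. The unlink itself (presumably an hlist_del_init_rcu() at 502) contains no "kvm" and so is absent from this listing. A sketch:

void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian)
{
        mutex_lock(&kvm->irq_lock);
        hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
        mutex_unlock(&kvm->irq_lock);
        kvm_vcpu_request_scan_ioapic(kvm);
}

void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                     struct kvm_irq_ack_notifier *kian)
{
        mutex_lock(&kvm->irq_lock);
        hlist_del_init_rcu(&kian->link);        /* the elided line 502 */
        mutex_unlock(&kvm->irq_lock);
        /* Readers hold kvm->irq_srcu, not irq_lock: wait them out before
         * the caller may free kian. */
        synchronize_srcu(&kvm->irq_srcu);
        kvm_vcpu_request_scan_ioapic(kvm);
}
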
510 kvm_eventfd_init(struct kvm *kvm) in kvm_eventfd_init() argument
513 spin_lock_init(&kvm->irqfds.lock); in kvm_eventfd_init()
514 INIT_LIST_HEAD(&kvm->irqfds.items); in kvm_eventfd_init()
515 INIT_LIST_HEAD(&kvm->irqfds.resampler_list); in kvm_eventfd_init()
516 mutex_init(&kvm->irqfds.resampler_lock); in kvm_eventfd_init()
518 INIT_LIST_HEAD(&kvm->ioeventfds); in kvm_eventfd_init()
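
kvm_eventfd_init() is plain field initialization. The jump from 516 to 518 suggests the irqfd members sit inside a config guard while the ioeventfds list is unconditional; a sketch under that assumption (the guard name is a guess):

void kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQFD    /* assumed guard on the elided line 517 */
        spin_lock_init(&kvm->irqfds.lock);
        INIT_LIST_HEAD(&kvm->irqfds.items);
        INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
        mutex_init(&kvm->irqfds.resampler_lock);
#endif
        INIT_LIST_HEAD(&kvm->ioeventfds);
}
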
526 kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args) in kvm_irqfd_deassign() argument
535 spin_lock_irq(&kvm->irqfds.lock); in kvm_irqfd_deassign()
537 list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) { in kvm_irqfd_deassign()
552 spin_unlock_irq(&kvm->irqfds.lock); in kvm_irqfd_deassign()
566 kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args) in kvm_irqfd() argument
572 return kvm_irqfd_deassign(kvm, args); in kvm_irqfd()
574 return kvm_irqfd_assign(kvm, args); in kvm_irqfd()
582 kvm_irqfd_release(struct kvm *kvm) in kvm_irqfd_release() argument
586 spin_lock_irq(&kvm->irqfds.lock); in kvm_irqfd_release()
588 list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) in kvm_irqfd_release()
591 spin_unlock_irq(&kvm->irqfds.lock); in kvm_irqfd_release()
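
kvm_irqfd_deassign() walks irqfds.items under irqfds.lock matching eventfd plus GSI; kvm_irqfd() is a thin flag dispatcher; kvm_irqfd_release() runs at VM destruction and deactivates whatever is left. A sketch of the first two, with irqfd_deactivate() and the cleanup workqueue flush assumed from context:

static int kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
        struct _irqfd *irqfd, *tmp;
        struct eventfd_ctx *eventfd;

        eventfd = eventfd_ctx_fdget(args->fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);

        spin_lock_irq(&kvm->irqfds.lock);
        list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
                if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi)
                        irqfd_deactivate(irqfd);
        spin_unlock_irq(&kvm->irqfds.lock);

        eventfd_ctx_put(eventfd);

        /* Wait for queued shutdowns so the caller can reuse the GSI. */
        flush_workqueue(irqfd_cleanup_wq);
        return 0;
}

int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
                return -EINVAL;

        if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
                return kvm_irqfd_deassign(kvm, args);

        return kvm_irqfd_assign(kvm, args);
}
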
605 void kvm_irq_routing_update(struct kvm *kvm) in kvm_irq_routing_update() argument
609 spin_lock_irq(&kvm->irqfds.lock); in kvm_irq_routing_update()
611 list_for_each_entry(irqfd, &kvm->irqfds.items, list) in kvm_irq_routing_update()
612 irqfd_update(kvm, irqfd); in kvm_irq_routing_update()
614 spin_unlock_irq(&kvm->irqfds.lock); in kvm_irq_routing_update()
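
kvm_irq_routing_update() closes the loop with irqfd_update(): whenever userspace replaces the routing table, every cached fast-path entry is re-derived under irqfds.lock so irqfd_wakeup() never injects through a stale route:

void kvm_irq_routing_update(struct kvm *kvm)
{
        struct _irqfd *irqfd;

        spin_lock_irq(&kvm->irqfds.lock);
        list_for_each_entry(irqfd, &kvm->irqfds.items, list)
                irqfd_update(kvm, irqfd);
        spin_unlock_irq(&kvm->irqfds.lock);
}
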
749 ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p) in ioeventfd_check_collision() argument
753 list_for_each_entry(_p, &kvm->ioeventfds, list) in ioeventfd_check_collision()
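
ioeventfd_check_collision() is a linear scan of kvm->ioeventfds rejecting an overlapping registration. A reconstruction of the predicate; the zero-length and wildcard/datamatch cases are from memory of this code, so treat the details as assumptions:

static bool ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
        struct _ioeventfd *_p;

        list_for_each_entry(_p, &kvm->ioeventfds, list)
                if (_p->bus_idx == p->bus_idx &&
                    _p->addr == p->addr &&
                    (!_p->length || !p->length ||      /* any-length match */
                     (_p->length == p->length &&
                      (_p->wildcard || p->wildcard ||
                       _p->datamatch == p->datamatch))))
                        return true;

        return false;
}
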
774 static int kvm_assign_ioeventfd_idx(struct kvm *kvm, in kvm_assign_ioeventfd_idx() argument
805 mutex_lock(&kvm->slots_lock); in kvm_assign_ioeventfd_idx()
808 if (ioeventfd_check_collision(kvm, p)) { in kvm_assign_ioeventfd_idx()
815 ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length, in kvm_assign_ioeventfd_idx()
820 kvm->buses[bus_idx]->ioeventfd_count++; in kvm_assign_ioeventfd_idx()
821 list_add_tail(&p->list, &kvm->ioeventfds); in kvm_assign_ioeventfd_idx()
823 mutex_unlock(&kvm->slots_lock); in kvm_assign_ioeventfd_idx()
828 mutex_unlock(&kvm->slots_lock); in kvm_assign_ioeventfd_idx()
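
In kvm_assign_ioeventfd_idx(), everything from the collision check to the list insertion sits under slots_lock, so device registration on the bus and the ioeventfd_count bookkeeping stay atomic with respect to deassign. A reconstruction sketch; the _ioeventfd field names and ioeventfd_ops are assumptions:

static int kvm_assign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
                                    struct kvm_ioeventfd *args)
{
        struct eventfd_ctx *eventfd;
        struct _ioeventfd *p;
        int ret;

        eventfd = eventfd_ctx_fdget(args->fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);

        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (!p) {
                ret = -ENOMEM;
                goto fail;
        }

        p->addr = args->addr;
        p->bus_idx = bus_idx;
        p->length = args->len;
        p->eventfd = eventfd;
        if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
                p->datamatch = args->datamatch;
        else
                p->wildcard = true;     /* any written value matches */

        mutex_lock(&kvm->slots_lock);

        if (ioeventfd_check_collision(kvm, p)) {
                ret = -EEXIST;
                goto unlock_fail;
        }

        kvm_iodevice_init(&p->dev, &ioeventfd_ops);

        ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
                                      &p->dev);
        if (ret < 0)
                goto unlock_fail;

        kvm->buses[bus_idx]->ioeventfd_count++;
        list_add_tail(&p->list, &kvm->ioeventfds);

        mutex_unlock(&kvm->slots_lock);
        return 0;

unlock_fail:
        mutex_unlock(&kvm->slots_lock);
fail:
        kfree(p);
        eventfd_ctx_put(eventfd);
        return ret;
}
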
838 kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx, in kvm_deassign_ioeventfd_idx() argument
849 mutex_lock(&kvm->slots_lock); in kvm_deassign_ioeventfd_idx()
851 list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) { in kvm_deassign_ioeventfd_idx()
864 kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev); in kvm_deassign_ioeventfd_idx()
865 kvm->buses[bus_idx]->ioeventfd_count--; in kvm_deassign_ioeventfd_idx()
871 mutex_unlock(&kvm->slots_lock); in kvm_deassign_ioeventfd_idx()
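
The deassign side runs the mirror-image loop under the same slots_lock: every registration parameter must match before the device is unregistered and the count dropped. A reconstruction; ioeventfd_release() is an assumed free helper:

static int kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
                                      struct kvm_ioeventfd *args)
{
        struct _ioeventfd *p, *tmp;
        struct eventfd_ctx *eventfd;
        int ret = -ENOENT;

        eventfd = eventfd_ctx_fdget(args->fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);

        mutex_lock(&kvm->slots_lock);

        list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
                bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

                /* All registration parameters must match to deassign. */
                if (p->bus_idx != bus_idx ||
                    p->eventfd != eventfd ||
                    p->addr != args->addr ||
                    p->length != args->len ||
                    p->wildcard != wildcard)
                        continue;

                if (!p->wildcard && p->datamatch != args->datamatch)
                        continue;

                kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
                kvm->buses[bus_idx]->ioeventfd_count--;
                ioeventfd_release(p);
                ret = 0;
                break;
        }

        mutex_unlock(&kvm->slots_lock);

        eventfd_ctx_put(eventfd);

        return ret;
}
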
878 static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) in kvm_deassign_ioeventfd() argument
881 int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args); in kvm_deassign_ioeventfd()
884 kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args); in kvm_deassign_ioeventfd()
890 kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) in kvm_assign_ioeventfd() argument
922 ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args); in kvm_assign_ioeventfd()
930 ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args); in kvm_assign_ioeventfd()
938 kvm_deassign_ioeventfd_idx(kvm, bus_idx, args); in kvm_assign_ioeventfd()
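
The _idx suffix exists because one userspace request can land on two buses: a zero-length MMIO ioeventfd is additionally registered on KVM_FAST_MMIO_BUS so write exits carrying no data can be matched cheaply, the deassign wrapper (884) removes that twin, and the error path (938) unwinds the first registration when the second fails. A sketch of the assign wrapper; the length validation the real function performs first is elided:

static int kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
        int ret;

        ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
        if (ret)
                return ret;

        /* Length-agnostic MMIO also goes on the fast bus. */
        if (!args->len && bus_idx == KVM_MMIO_BUS) {
                ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
                if (ret < 0)
                        kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
        }

        return ret;
}
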
944 kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) in kvm_ioeventfd() argument
947 return kvm_deassign_ioeventfd(kvm, args); in kvm_ioeventfd()
949 return kvm_assign_ioeventfd(kvm, args); in kvm_ioeventfd()
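
And the userspace view of kvm_ioeventfd(), mirroring the irqfd example above; a minimal sketch assuming vm_fd came from KVM_CREATE_VM and a guest that writes a 4-byte doorbell at `addr`:

#include <linux/kvm.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>

/* Guest stores to `addr` stop exiting to userspace; they bump the
 * returned eventfd's counter instead.  Returns -1 on error. */
static int attach_ioeventfd(int vm_fd, uint64_t addr)
{
        struct kvm_ioeventfd ioe;
        int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);

        if (efd < 0)
                return -1;

        memset(&ioe, 0, sizeof(ioe));
        ioe.addr = addr;
        ioe.len = 4;    /* match 4-byte writes; 0 would match any length */
        ioe.fd = efd;   /* no flags: MMIO bus, wildcard (no datamatch) */

        if (ioctl(vm_fd, KVM_IOEVENTFD, &ioe) < 0) {
                perror("KVM_IOEVENTFD");
                return -1;
        }
        return efd;     /* read()/poll() here to service doorbells */
}
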