Lines Matching refs:vgic
117 static const struct vgic_params *vgic; variable
121 vcpu->kvm->arch.vgic.vm_ops.add_sgi_source(vcpu, irq, source); in add_sgi_source()
126 return vcpu->kvm->arch.vgic.vm_ops.queue_sgi(vcpu, irq); in queue_sgi()
131 return kvm->arch.vgic.vm_ops.map_resources(kvm, vgic); in kvm_vgic_map_resources()
276 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_irq_is_edge()
285 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_irq_is_enabled()
292 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_irq_is_queued()
299 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_irq_is_active()
306 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_irq_set_queued()
313 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_irq_clear_queued()
320 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_irq_set_active()
327 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_irq_clear_active()
334 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_dist_irq_get_level()
341 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_dist_irq_set_level()
348 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_dist_irq_clear_level()
355 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_dist_irq_soft_pend()
362 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_dist_irq_clear_soft_pend()
374 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_dist_irq_is_pending()
381 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_dist_irq_set_pending()
388 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_dist_irq_clear_pending()
492 reg = vgic_bitmap_get_reg(&kvm->arch.vgic.irq_enabled, vcpu_id, offset); in vgic_handle_enable_reg()
514 struct vgic_dist *dist = &kvm->arch.vgic; in vgic_handle_set_pending_reg()
551 struct vgic_dist *dist = &kvm->arch.vgic; in vgic_handle_clear_pending_reg()
585 struct vgic_dist *dist = &kvm->arch.vgic; in vgic_handle_set_active_reg()
604 struct vgic_dist *dist = &kvm->arch.vgic; in vgic_handle_clear_active_reg()
821 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_handle_mmio_access()
940 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in compute_active_for_cpu()
970 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in compute_pending_for_cpu()
1009 struct vgic_dist *dist = &kvm->arch.vgic; in vgic_update_state()
1101 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in dist_active_irq()
1135 for_each_clear_bit(lr, elrsr_ptr, vgic->nr_lr) { in vgic_retire_disabled_irqs()
1188 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_queue_irq()
1202 for_each_clear_bit(lr, elrsr_ptr, vgic->nr_lr) { in vgic_queue_irq()
1212 lr = find_first_bit(elrsr_ptr, vgic->nr_lr); in vgic_queue_irq()
1213 if (lr >= vgic->nr_lr) in vgic_queue_irq()
1252 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in __kvm_vgic_flush_hwstate()
1362 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_process_maintenance()
1377 for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) { in vgic_process_maintenance()
1419 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_sync_hwirq()
1437 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in __kvm_vgic_sync_hwstate()
1446 for (lr = 0; lr < vgic->nr_lr; lr++) { in __kvm_vgic_sync_hwstate()
1456 pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr); in __kvm_vgic_sync_hwstate()
1457 if (level_pending || pending < vgic->nr_lr) in __kvm_vgic_sync_hwstate()
1463 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in kvm_vgic_flush_hwstate()
1483 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in kvm_vgic_vcpu_pending_irq()
1528 struct vgic_dist *dist = &kvm->arch.vgic; in vgic_update_irq_pending()
1536 if (irq_num >= min(kvm->arch.vgic.nr_irqs, 1020)) in vgic_update_irq_pending()
1624 if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2) in vgic_lazy_init()
1710 return &vcpu->kvm->arch.vgic.irq_phys_map_list; in vgic_get_irq_phys_map_list()
1729 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in kvm_vgic_map_phys_irq()
1823 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in kvm_vgic_unmap_phys_irq()
1849 struct vgic_dist *dist = &kvm->arch.vgic; in vgic_destroy_irq_phys_map()
1896 vgic_cpu->nr_lr = vgic->nr_lr; in vgic_vcpu_init_maps()
1920 return vgic->max_gic_vcpus; in kvm_vgic_get_max_vcpus()
1925 struct vgic_dist *dist = &kvm->arch.vgic; in kvm_vgic_destroy()
1964 struct vgic_dist *dist = &kvm->arch.vgic; in vgic_init()
2021 ret = kvm->arch.vgic.vm_ops.init_model(kvm); in vgic_init()
2090 spin_lock_init(&kvm->arch.vgic.lock); in kvm_vgic_early_init()
2091 spin_lock_init(&kvm->arch.vgic.irq_phys_map_lock); in kvm_vgic_early_init()
2092 INIT_LIST_HEAD(&kvm->arch.vgic.irq_phys_map_list); in kvm_vgic_early_init()
2113 if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2) { in kvm_vgic_create()
2140 kvm->arch.vgic.in_kernel = true; in kvm_vgic_create()
2141 kvm->arch.vgic.vgic_model = type; in kvm_vgic_create()
2142 kvm->arch.vgic.vctrl_base = vgic->vctrl_base; in kvm_vgic_create()
2143 kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF; in kvm_vgic_create()
2144 kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF; in kvm_vgic_create()
2145 kvm->arch.vgic.vgic_redist_base = VGIC_ADDR_UNDEF; in kvm_vgic_create()
2160 phys_addr_t dist = kvm->arch.vgic.vgic_dist_base; in vgic_ioaddr_overlap()
2161 phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base; in vgic_ioaddr_overlap()
2211 struct vgic_dist *vgic = &kvm->arch.vgic; in kvm_vgic_addr() local
2220 addr_ptr = &vgic->vgic_dist_base; in kvm_vgic_addr()
2226 addr_ptr = &vgic->vgic_cpu_base; in kvm_vgic_addr()
2233 addr_ptr = &vgic->vgic_dist_base; in kvm_vgic_addr()
2239 addr_ptr = &vgic->vgic_redist_base; in kvm_vgic_addr()
2249 if (vgic->vgic_model != type_needed) { in kvm_vgic_addr()
2306 if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_irqs) in vgic_set_common_attr()
2309 dev->kvm->arch.vgic.nr_irqs = val; in vgic_set_common_attr()
2349 r = put_user(dev->kvm->arch.vgic.nr_irqs, uaddr); in vgic_get_common_attr()
2368 enable_percpu_irq(vgic->maint_irq, 0); in vgic_init_maintenance_interrupt()
2381 disable_percpu_irq(vgic->maint_irq); in vgic_cpu_notify()
2416 ret = vgic_probe(vgic_node, &vgic_ops, &vgic); in kvm_vgic_hyp_init()
2420 ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler, in kvm_vgic_hyp_init()
2423 kvm_err("Cannot register interrupt %d\n", vgic->maint_irq); in kvm_vgic_hyp_init()
2438 free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus()); in kvm_vgic_hyp_init()
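The matches at source lines 121, 126, 131 and 2021 above never call a GICv2 or GICv3 routine directly; they dispatch through a per-VM operations table, kvm->arch.vgic.vm_ops, which is populated according to the vgic_model recorded when the device is created (source line 2141). What follows is a minimal, self-contained sketch of that dispatch pattern, not the kernel's actual definitions (those live in include/kvm/arm_vgic.h): every "sketch_" name is invented for illustration, and only the hook names (add_sgi_source, queue_sgi, init_model) are taken from the listing.

/*
 * Illustrative sketch of the vm_ops indirection visible in the listing.
 * All sketch_* identifiers are stand-ins, not kernel definitions.
 */
#include <stdbool.h>
#include <stdio.h>

struct sketch_kvm;
struct sketch_vcpu { struct sketch_kvm *kvm; };

/* Per-VM hook table: one implementation is installed for GICv2 emulation,
 * another for GICv3, when the in-kernel GIC device is created. */
struct sketch_vm_ops {
	void (*add_sgi_source)(struct sketch_vcpu *vcpu, int irq, int source);
	bool (*queue_sgi)(struct sketch_vcpu *vcpu, int irq);
	int  (*init_model)(struct sketch_kvm *kvm);
};

struct sketch_kvm {
	struct sketch_vm_ops vm_ops;
};

/* A GICv2-flavoured implementation of one hook. */
static bool sketch_v2_queue_sgi(struct sketch_vcpu *vcpu, int irq)
{
	(void)vcpu;
	printf("SGI %d queued through the v2 backend\n", irq);
	return true;
}

/* Common code never checks which GIC model is being emulated; it simply
 * calls through the table, as at source lines 121, 126 and 131 above. */
static bool sketch_queue_sgi(struct sketch_vcpu *vcpu, int irq)
{
	return vcpu->kvm->vm_ops.queue_sgi(vcpu, irq);
}

int main(void)
{
	struct sketch_kvm kvm = {
		.vm_ops = { .queue_sgi = sketch_v2_queue_sgi },
	};
	struct sketch_vcpu vcpu = { .kvm = &kvm };

	sketch_queue_sgi(&vcpu, 5);	/* dispatches to sketch_v2_queue_sgi() */
	return 0;
}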
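Several other matches (source lines 1135, 1202, 1212-1213, 1446 and 1456-1457) scan the hypervisor interface's "empty List Register status" bitmap: a set bit indicates the corresponding List Register is free, a clear bit indicates it still holds an interrupt, which is why vgic_queue_irq() walks the clear bits to see whether an interrupt is already resident and then uses the first set bit as a free slot. The sketch below reproduces that allocation logic with plain C bit operations; elrsr, nr_lr, first_empty_lr and NR_LR are stand-ins for illustration, not the kernel's bitops API.

/*
 * Sketch of the List Register allocation visible in vgic_queue_irq(): find
 * the first set bit in the empty-LR status word (a free slot) and treat an
 * index >= nr_lr as "no free LR".  Plain C stand-in for the kernel's
 * find_first_bit()/for_each_clear_bit() over elrsr_ptr.
 */
#include <stdint.h>
#include <stdio.h>

#define NR_LR 4	/* number of List Registers; stand-in value for the sketch */

/* Return the index of the first set bit below nr_lr, or nr_lr if none. */
static unsigned int first_empty_lr(uint64_t elrsr, unsigned int nr_lr)
{
	for (unsigned int lr = 0; lr < nr_lr; lr++)
		if (elrsr & (1ULL << lr))
			return lr;
	return nr_lr;
}

int main(void)
{
	/* Bits 2 and 3 set: LR0 and LR1 are in use, LR2 and LR3 are empty. */
	uint64_t elrsr = 0xcULL & ((1ULL << NR_LR) - 1);

	/* Walk the occupied LRs first (clear bits), as vgic_queue_irq() does
	 * when checking whether the interrupt already sits in an LR. */
	for (unsigned int lr = 0; lr < NR_LR; lr++)
		if (!(elrsr & (1ULL << lr)))
			printf("LR%u is in use\n", lr);

	unsigned int lr = first_empty_lr(elrsr, NR_LR);
	if (lr >= NR_LR)
		printf("no free List Register; leave the interrupt pending in the distributor\n");
	else
		printf("queue the interrupt in LR%u\n", lr);
	return 0;
}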