Lines matching refs:vcpu (KVM ARM vGIC)
107 static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
108 static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu);
109 static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
110 static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
111 static u64 vgic_get_elrsr(struct kvm_vcpu *vcpu);
112 static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
114 static int compute_pending_for_cpu(struct kvm_vcpu *vcpu);
119 static void add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source) in add_sgi_source() argument
121 vcpu->kvm->arch.vgic.vm_ops.add_sgi_source(vcpu, irq, source); in add_sgi_source()
124 static bool queue_sgi(struct kvm_vcpu *vcpu, int irq) in queue_sgi() argument
126 return vcpu->kvm->arch.vgic.vm_ops.queue_sgi(vcpu, irq); in queue_sgi()
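
The two wrappers above do not implement SGI handling themselves; they dispatch through a per-VM ops table so the GICv2 and GICv3 emulation models can each supply their own behaviour. A minimal sketch of that indirection; the member layout and the vgic_v2_* backend names are assumptions:

/* Sketch only: per-VM dispatch table implied by the wrappers above. */
struct vgic_vm_ops {
	bool	(*queue_sgi)(struct kvm_vcpu *vcpu, int irq);
	void	(*add_sgi_source)(struct kvm_vcpu *vcpu, int irq, int source);
};

/* A GICv2 model would install its callbacks once per VM (names assumed): */
static void vgic_v2_init_emulation(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	dist->vm_ops.queue_sgi = vgic_v2_queue_sgi;
	dist->vm_ops.add_sgi_source = vgic_v2_add_sgi_source;
}
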
274 static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq) in vgic_irq_is_edge() argument
276 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_irq_is_edge()
279 irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq); in vgic_irq_is_edge()
283 static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq) in vgic_irq_is_enabled() argument
285 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_irq_is_enabled()
287 return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq); in vgic_irq_is_enabled()
290 static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq) in vgic_irq_is_queued() argument
292 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_irq_is_queued()
294 return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq); in vgic_irq_is_queued()
297 static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq) in vgic_irq_is_active() argument
299 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_irq_is_active()
301 return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq); in vgic_irq_is_active()
304 static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq) in vgic_irq_set_queued() argument
306 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_irq_set_queued()
308 vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 1); in vgic_irq_set_queued()
311 static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq) in vgic_irq_clear_queued() argument
313 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_irq_clear_queued()
315 vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0); in vgic_irq_clear_queued()
318 static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq) in vgic_irq_set_active() argument
320 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_irq_set_active()
322 vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1); in vgic_irq_set_active()
325 static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq) in vgic_irq_clear_active() argument
327 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_irq_clear_active()
329 vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0); in vgic_irq_clear_active()
332 static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq) in vgic_dist_irq_get_level() argument
334 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_dist_irq_get_level()
336 return vgic_bitmap_get_irq_val(&dist->irq_level, vcpu->vcpu_id, irq); in vgic_dist_irq_get_level()
339 static void vgic_dist_irq_set_level(struct kvm_vcpu *vcpu, int irq) in vgic_dist_irq_set_level() argument
341 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_dist_irq_set_level()
343 vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 1); in vgic_dist_irq_set_level()
346 static void vgic_dist_irq_clear_level(struct kvm_vcpu *vcpu, int irq) in vgic_dist_irq_clear_level() argument
348 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_dist_irq_clear_level()
350 vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 0); in vgic_dist_irq_clear_level()
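
Every accessor above funnels through vgic_bitmap_get_irq_val()/vgic_bitmap_set_irq_val() with a (vcpu_id, irq) pair. A plausible sketch of the getter, assuming the vgic_bitmap type keeps private (per-vcpu banked) IRQs below VGIC_NR_PRIVATE_IRQS apart from the shared ones; the field names are assumptions:

/* Sketch: private IRQs are banked per vcpu, the rest are shared. */
static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x, int cpuid, u32 irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		return test_bit(irq, x->private + cpuid);

	return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared);
}

This is why each predicate takes a vcpu even for shared interrupts: the vcpu_id selects the banked copy for SGIs and PPIs.
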
353 static int vgic_dist_irq_soft_pend(struct kvm_vcpu *vcpu, int irq) in vgic_dist_irq_soft_pend() argument
355 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_dist_irq_soft_pend()
357 return vgic_bitmap_get_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq); in vgic_dist_irq_soft_pend()
360 static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq) in vgic_dist_irq_clear_soft_pend() argument
362 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_dist_irq_clear_soft_pend()
364 vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0); in vgic_dist_irq_clear_soft_pend()
365 if (!vgic_dist_irq_get_level(vcpu, irq)) { in vgic_dist_irq_clear_soft_pend()
366 vgic_dist_irq_clear_pending(vcpu, irq); in vgic_dist_irq_clear_soft_pend()
367 if (!compute_pending_for_cpu(vcpu)) in vgic_dist_irq_clear_soft_pend()
368 clear_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu); in vgic_dist_irq_clear_soft_pend()
372 static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq) in vgic_dist_irq_is_pending() argument
374 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_dist_irq_is_pending()
376 return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq); in vgic_dist_irq_is_pending()
379 void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq) in vgic_dist_irq_set_pending() argument
381 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_dist_irq_set_pending()
383 vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1); in vgic_dist_irq_set_pending()
386 void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq) in vgic_dist_irq_clear_pending() argument
388 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_dist_irq_clear_pending()
390 vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 0); in vgic_dist_irq_clear_pending()
393 static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq) in vgic_cpu_irq_set() argument
396 set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu); in vgic_cpu_irq_set()
399 vcpu->arch.vgic_cpu.pending_shared); in vgic_cpu_irq_set()
402 void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq) in vgic_cpu_irq_clear() argument
405 clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu); in vgic_cpu_irq_clear()
408 vcpu->arch.vgic_cpu.pending_shared); in vgic_cpu_irq_clear()
411 static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq) in vgic_can_sample_irq() argument
413 return !vgic_irq_is_queued(vcpu, irq); in vgic_can_sample_irq()
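
The matched fragments of vgic_cpu_irq_set()/vgic_cpu_irq_clear() elide the branch between their two bitmaps. A plausible reconstruction of the setter, assuming the split happens at VGIC_NR_PRIVATE_IRQS as in the distributor bitmaps:

/* Reconstruction around the matched lines; the branch is an assumption. */
static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		set_bit(irq - VGIC_NR_PRIVATE_IRQS,
			vcpu->arch.vgic_cpu.pending_shared);
}
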
477 bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio, in handle_mmio_raz_wi() argument
692 void vgic_unqueue_irqs(struct kvm_vcpu *vcpu) in vgic_unqueue_irqs() argument
694 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; in vgic_unqueue_irqs()
695 u64 elrsr = vgic_get_elrsr(vcpu); in vgic_unqueue_irqs()
700 struct vgic_lr lr = vgic_get_lr(vcpu, i); in vgic_unqueue_irqs()
713 add_sgi_source(vcpu, lr.irq, lr.source); in vgic_unqueue_irqs()
721 vgic_irq_set_active(vcpu, lr.irq); in vgic_unqueue_irqs()
727 vgic_retire_lr(i, vcpu); in vgic_unqueue_irqs()
730 vgic_update_state(vcpu->kvm); in vgic_unqueue_irqs()
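
vgic_unqueue_irqs() walks the occupied list registers: ELRSR has one bit set per empty LR, so the occupied slots are the clear bits. A sketch of the loop skeleton around the matched lines; u64_to_bitmask() and the per-LR handling are assumptions:

u64 elrsr = vgic_get_elrsr(vcpu);
unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);	/* helper assumed */
int i;

for_each_clear_bit(i, elrsr_ptr, vgic_cpu->nr_lr) {
	struct vgic_lr lr = vgic_get_lr(vcpu, i);

	/* push SGI source / active state back to the distributor,
	 * then free the slot */
	vgic_retire_lr(i, vcpu);
}
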
774 static bool call_range_handler(struct kvm_vcpu *vcpu, in call_range_handler() argument
783 return range->handle_mmio(vcpu, mmio, offset); in call_range_handler()
796 ret = range->handle_mmio(vcpu, &mmio32, offset + 4); in call_range_handler()
800 ret |= range->handle_mmio(vcpu, &mmio32, offset); in call_range_handler()
817 static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu, in vgic_handle_mmio_access() argument
821 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_handle_mmio_access()
824 struct kvm_run *run = vcpu->run; in vgic_handle_mmio_access()
846 updated_state = call_range_handler(vcpu, &mmio, offset, range); in vgic_handle_mmio_access()
858 kvm_handle_mmio_return(vcpu, run); in vgic_handle_mmio_access()
861 vgic_kick_vcpus(vcpu->kvm); in vgic_handle_mmio_access()
866 static int vgic_handle_mmio_read(struct kvm_vcpu *vcpu, in vgic_handle_mmio_read() argument
870 return vgic_handle_mmio_access(vcpu, this, addr, len, val, false); in vgic_handle_mmio_read()
873 static int vgic_handle_mmio_write(struct kvm_vcpu *vcpu, in vgic_handle_mmio_write() argument
877 return vgic_handle_mmio_access(vcpu, this, addr, len, (void *)val, in vgic_handle_mmio_write()
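
The read/write wrappers have the shape of kvm_io_device callbacks, which is presumably how the distributor region gets registered on the VM's MMIO bus; the ops-struct name is an assumption:

/* Sketch: wiring the two wrappers above into KVM's MMIO bus API. */
static const struct kvm_io_device_ops vgic_io_ops = {
	.read	= vgic_handle_mmio_read,
	.write	= vgic_handle_mmio_write,
};
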
907 struct kvm_vcpu *vcpu = NULL; in vgic_register_kvm_io_dev() local
911 vcpu = kvm_get_vcpu(kvm, redist_vcpu_id); in vgic_register_kvm_io_dev()
916 iodev->redist_vcpu = vcpu; in vgic_register_kvm_io_dev()
938 static int compute_active_for_cpu(struct kvm_vcpu *vcpu) in compute_active_for_cpu() argument
940 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in compute_active_for_cpu()
946 vcpu_id = vcpu->vcpu_id; in compute_active_for_cpu()
947 act_percpu = vcpu->arch.vgic_cpu.active_percpu; in compute_active_for_cpu()
948 act_shared = vcpu->arch.vgic_cpu.active_shared; in compute_active_for_cpu()
968 static int compute_pending_for_cpu(struct kvm_vcpu *vcpu) in compute_pending_for_cpu() argument
970 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in compute_pending_for_cpu()
976 vcpu_id = vcpu->vcpu_id; in compute_pending_for_cpu()
977 pend_percpu = vcpu->arch.vgic_cpu.pending_percpu; in compute_pending_for_cpu()
978 pend_shared = vcpu->arch.vgic_cpu.pending_shared; in compute_pending_for_cpu()
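
compute_pending_for_cpu() most plausibly ANDs the pending bits against the enabled bits, separately for the banked and shared halves, and reports whether anything survives. A sketch under those assumptions; the vgic_bitmap_get_*_map() helpers and nr_shared are assumed:

unsigned long *pending, *enabled;

/* banked SGIs/PPIs for this vcpu */
pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);

/* shared SPIs */
pending = vgic_bitmap_get_shared_map(&dist->irq_pending);
enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
bitmap_and(pend_shared, pending, enabled, nr_shared);

return !bitmap_empty(pend_percpu, VGIC_NR_PRIVATE_IRQS) ||
       !bitmap_empty(pend_shared, nr_shared);
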
1010 struct kvm_vcpu *vcpu; in vgic_update_state() local
1013 kvm_for_each_vcpu(c, vcpu, kvm) { in vgic_update_state()
1014 if (compute_pending_for_cpu(vcpu)) in vgic_update_state()
1017 if (compute_active_for_cpu(vcpu)) in vgic_update_state()
1024 static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr) in vgic_get_lr() argument
1026 return vgic_ops->get_lr(vcpu, lr); in vgic_get_lr()
1029 static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, in vgic_set_lr() argument
1032 vgic_ops->set_lr(vcpu, lr, vlr); in vgic_set_lr()
1035 static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu) in vgic_get_elrsr() argument
1037 return vgic_ops->get_elrsr(vcpu); in vgic_get_elrsr()
1040 static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu) in vgic_get_eisr() argument
1042 return vgic_ops->get_eisr(vcpu); in vgic_get_eisr()
1045 static inline void vgic_clear_eisr(struct kvm_vcpu *vcpu) in vgic_clear_eisr() argument
1047 vgic_ops->clear_eisr(vcpu); in vgic_clear_eisr()
1050 static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu) in vgic_get_interrupt_status() argument
1052 return vgic_ops->get_interrupt_status(vcpu); in vgic_get_interrupt_status()
1055 static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu) in vgic_enable_underflow() argument
1057 vgic_ops->enable_underflow(vcpu); in vgic_enable_underflow()
1060 static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu) in vgic_disable_underflow() argument
1062 vgic_ops->disable_underflow(vcpu); in vgic_disable_underflow()
1065 void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr) in vgic_get_vmcr() argument
1067 vgic_ops->get_vmcr(vcpu, vmcr); in vgic_get_vmcr()
1070 void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr) in vgic_set_vmcr() argument
1072 vgic_ops->set_vmcr(vcpu, vmcr); in vgic_set_vmcr()
1075 static inline void vgic_enable(struct kvm_vcpu *vcpu) in vgic_enable() argument
1077 vgic_ops->enable(vcpu); in vgic_enable()
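
All of the one-line trampolines above go through a global vgic_ops table, which is what lets the same distributor code drive either GICv2 or GICv3 list-register hardware. A sketch of the table they imply; the exact member list is an assumption:

/* Sketch: backend ops table behind the vgic_ops-> trampolines above. */
struct vgic_ops {
	struct vgic_lr	(*get_lr)(const struct kvm_vcpu *vcpu, int lr);
	void		(*set_lr)(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr);
	u64		(*get_elrsr)(const struct kvm_vcpu *vcpu);
	u64		(*get_eisr)(const struct kvm_vcpu *vcpu);
	void		(*clear_eisr)(struct kvm_vcpu *vcpu);
	u32		(*get_interrupt_status)(const struct kvm_vcpu *vcpu);
	void		(*enable_underflow)(struct kvm_vcpu *vcpu);
	void		(*disable_underflow)(struct kvm_vcpu *vcpu);
	void		(*get_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
	void		(*set_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
	void		(*enable)(struct kvm_vcpu *vcpu);
};
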
1080 static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu) in vgic_retire_lr() argument
1082 struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr); in vgic_retire_lr()
1084 vgic_irq_clear_queued(vcpu, vlr.irq); in vgic_retire_lr()
1091 vgic_dist_irq_set_pending(vcpu, vlr.irq); in vgic_retire_lr()
1096 vgic_set_lr(vcpu, lr_nr, vlr); in vgic_retire_lr()
1099 static bool dist_active_irq(struct kvm_vcpu *vcpu) in dist_active_irq() argument
1101 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in dist_active_irq()
1103 return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu); in dist_active_irq()
1106 bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map) in kvm_vgic_map_is_active() argument
1110 for (i = 0; i < vcpu->arch.vgic_cpu.nr_lr; i++) { in kvm_vgic_map_is_active()
1111 struct vgic_lr vlr = vgic_get_lr(vcpu, i); in kvm_vgic_map_is_active()
1117 return vgic_irq_is_active(vcpu, map->virt_irq); in kvm_vgic_map_is_active()
1129 static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu) in vgic_retire_disabled_irqs() argument
1131 u64 elrsr = vgic_get_elrsr(vcpu); in vgic_retire_disabled_irqs()
1136 struct vgic_lr vlr = vgic_get_lr(vcpu, lr); in vgic_retire_disabled_irqs()
1138 if (!vgic_irq_is_enabled(vcpu, vlr.irq)) in vgic_retire_disabled_irqs()
1139 vgic_retire_lr(lr, vcpu); in vgic_retire_disabled_irqs()
1143 static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq, in vgic_queue_irq_to_lr() argument
1146 if (vgic_irq_is_active(vcpu, irq)) { in vgic_queue_irq_to_lr()
1149 vgic_irq_clear_active(vcpu, irq); in vgic_queue_irq_to_lr()
1150 vgic_update_state(vcpu->kvm); in vgic_queue_irq_to_lr()
1152 WARN_ON(!vgic_dist_irq_is_pending(vcpu, irq)); in vgic_queue_irq_to_lr()
1157 if (!vgic_irq_is_edge(vcpu, irq)) in vgic_queue_irq_to_lr()
1162 map = vgic_irq_map_search(vcpu, irq); in vgic_queue_irq_to_lr()
1174 vgic_irq_set_queued(vcpu, irq); in vgic_queue_irq_to_lr()
1178 vgic_set_lr(vcpu, lr_nr, vlr); in vgic_queue_irq_to_lr()
1186 bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) in vgic_queue_irq() argument
1188 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_queue_irq()
1189 u64 elrsr = vgic_get_elrsr(vcpu); in vgic_queue_irq()
1203 vlr = vgic_get_lr(vcpu, lr); in vgic_queue_irq()
1206 vgic_queue_irq_to_lr(vcpu, irq, lr, vlr); in vgic_queue_irq()
1221 vgic_queue_irq_to_lr(vcpu, irq, lr, vlr); in vgic_queue_irq()
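
Between the two vgic_queue_irq_to_lr() calls, vgic_queue_irq() presumably first tries to reuse an LR already holding this IRQ, then falls back to allocating a free slot from the ELRSR bitmap. A sketch of the fallback path; the surrounding variable setup is assumed:

/* Sketch: allocate a free list register for a newly queued IRQ. */
lr = find_first_bit(elrsr_ptr, vgic->nr_lr);
if (lr >= vgic->nr_lr)
	return false;	/* no free LR; the caller must keep it pending */

vlr.irq = irq;
vlr.source = sgi_source_id;
vlr.state = 0;
vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);

return true;
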
1226 static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq) in vgic_queue_hwirq() argument
1228 if (!vgic_can_sample_irq(vcpu, irq)) in vgic_queue_hwirq()
1231 if (vgic_queue_irq(vcpu, 0, irq)) { in vgic_queue_hwirq()
1232 if (vgic_irq_is_edge(vcpu, irq)) { in vgic_queue_hwirq()
1233 vgic_dist_irq_clear_pending(vcpu, irq); in vgic_queue_hwirq()
1234 vgic_cpu_irq_clear(vcpu, irq); in vgic_queue_hwirq()
1236 vgic_irq_set_queued(vcpu, irq); in vgic_queue_hwirq()
1249 static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) in __kvm_vgic_flush_hwstate() argument
1251 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; in __kvm_vgic_flush_hwstate()
1252 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in __kvm_vgic_flush_hwstate()
1258 vcpu_id = vcpu->vcpu_id; in __kvm_vgic_flush_hwstate()
1260 pa_percpu = vcpu->arch.vgic_cpu.pend_act_percpu; in __kvm_vgic_flush_hwstate()
1261 pa_shared = vcpu->arch.vgic_cpu.pend_act_shared; in __kvm_vgic_flush_hwstate()
1272 if (!kvm_vgic_vcpu_pending_irq(vcpu) && !dist_active_irq(vcpu)) in __kvm_vgic_flush_hwstate()
1277 if (!queue_sgi(vcpu, i)) in __kvm_vgic_flush_hwstate()
1283 if (!vgic_queue_hwirq(vcpu, i)) in __kvm_vgic_flush_hwstate()
1289 if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS)) in __kvm_vgic_flush_hwstate()
1298 vgic_enable_underflow(vcpu); in __kvm_vgic_flush_hwstate()
1300 vgic_disable_underflow(vcpu); in __kvm_vgic_flush_hwstate()
1311 static int process_queued_irq(struct kvm_vcpu *vcpu, in process_queued_irq() argument
1327 vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq); in process_queued_irq()
1332 vgic_irq_clear_queued(vcpu, vlr.irq); in process_queued_irq()
1335 if (vgic_irq_is_edge(vcpu, vlr.irq)) { in process_queued_irq()
1337 pending = vgic_dist_irq_is_pending(vcpu, vlr.irq); in process_queued_irq()
1339 if (vgic_dist_irq_get_level(vcpu, vlr.irq)) { in process_queued_irq()
1340 vgic_cpu_irq_set(vcpu, vlr.irq); in process_queued_irq()
1343 vgic_dist_irq_clear_pending(vcpu, vlr.irq); in process_queued_irq()
1344 vgic_cpu_irq_clear(vcpu, vlr.irq); in process_queued_irq()
1354 vgic_set_lr(vcpu, lr, vlr); in process_queued_irq()
1359 static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) in vgic_process_maintenance() argument
1361 u32 status = vgic_get_interrupt_status(vcpu); in vgic_process_maintenance()
1362 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_process_maintenance()
1363 struct kvm *kvm = vcpu->kvm; in vgic_process_maintenance()
1373 u64 eisr = vgic_get_eisr(vcpu); in vgic_process_maintenance()
1378 struct vgic_lr vlr = vgic_get_lr(vcpu, lr); in vgic_process_maintenance()
1380 WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq)); in vgic_process_maintenance()
1393 level_pending |= process_queued_irq(vcpu, lr, vlr); in vgic_process_maintenance()
1399 vgic_disable_underflow(vcpu); in vgic_process_maintenance()
1407 vgic_clear_eisr(vcpu); in vgic_process_maintenance()
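
The maintenance handler visits exactly the LRs whose interrupts the guest has EOIed, i.e. the set bits of EISR. A sketch of the loop the matched lines belong to; u64_to_bitmask() is an assumption:

u64 eisr = vgic_get_eisr(vcpu);
unsigned long *eisr_ptr = u64_to_bitmask(&eisr);	/* helper assumed */
int lr;

for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
	struct vgic_lr vlr = vgic_get_lr(vcpu, lr);

	/* only level-triggered IRQs should need EOI maintenance */
	WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
	level_pending |= process_queued_irq(vcpu, lr, vlr);
}
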
1417 static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr) in vgic_sync_hwirq() argument
1419 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_sync_hwirq()
1429 level_pending = process_queued_irq(vcpu, lr, vlr); in vgic_sync_hwirq()
1435 static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) in __kvm_vgic_sync_hwstate() argument
1437 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in __kvm_vgic_sync_hwstate()
1443 level_pending = vgic_process_maintenance(vcpu); in __kvm_vgic_sync_hwstate()
1447 struct vgic_lr vlr = vgic_get_lr(vcpu, lr); in __kvm_vgic_sync_hwstate()
1449 level_pending |= vgic_sync_hwirq(vcpu, lr, vlr); in __kvm_vgic_sync_hwstate()
1454 elrsr = vgic_get_elrsr(vcpu); in __kvm_vgic_sync_hwstate()
1458 set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu); in __kvm_vgic_sync_hwstate()
1461 void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) in kvm_vgic_flush_hwstate() argument
1463 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in kvm_vgic_flush_hwstate()
1465 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_vgic_flush_hwstate()
1469 __kvm_vgic_flush_hwstate(vcpu); in kvm_vgic_flush_hwstate()
1473 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) in kvm_vgic_sync_hwstate() argument
1475 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_vgic_sync_hwstate()
1478 __kvm_vgic_sync_hwstate(vcpu); in kvm_vgic_sync_hwstate()
1481 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) in kvm_vgic_vcpu_pending_irq() argument
1483 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in kvm_vgic_vcpu_pending_irq()
1485 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_vgic_vcpu_pending_irq()
1488 return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu); in kvm_vgic_vcpu_pending_irq()
1493 struct kvm_vcpu *vcpu; in vgic_kick_vcpus() local
1500 kvm_for_each_vcpu(c, vcpu, kvm) { in vgic_kick_vcpus()
1501 if (kvm_vgic_vcpu_pending_irq(vcpu)) in vgic_kick_vcpus()
1502 kvm_vcpu_kick(vcpu); in vgic_kick_vcpus()
1506 static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level) in vgic_validate_injection() argument
1508 int edge_triggered = vgic_irq_is_edge(vcpu, irq); in vgic_validate_injection()
1516 int state = vgic_dist_irq_is_pending(vcpu, irq); in vgic_validate_injection()
1519 int state = vgic_dist_irq_get_level(vcpu, irq); in vgic_validate_injection()
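
A plausible reconstruction of vgic_validate_injection() around the two matched fragments: an edge-triggered IRQ is only injectable on a rising edge, a level-triggered one only when the line level actually changes:

/* Reconstruction around the matched lines; structure is an assumption. */
static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
{
	int edge_triggered = vgic_irq_is_edge(vcpu, irq);

	if (edge_triggered) {
		int state = vgic_dist_irq_is_pending(vcpu, irq);
		return level > state;	/* rising edge only */
	} else {
		int state = vgic_dist_irq_get_level(vcpu, irq);
		return level != state;	/* level change only */
	}
}
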
1529 struct kvm_vcpu *vcpu; in vgic_update_irq_pending() local
1541 vcpu = kvm_get_vcpu(kvm, cpuid); in vgic_update_irq_pending()
1542 edge_triggered = vgic_irq_is_edge(vcpu, irq_num); in vgic_update_irq_pending()
1545 if (!vgic_validate_injection(vcpu, irq_num, level)) { in vgic_update_irq_pending()
1557 vcpu = kvm_get_vcpu(kvm, cpuid); in vgic_update_irq_pending()
1564 vgic_dist_irq_set_level(vcpu, irq_num); in vgic_update_irq_pending()
1565 vgic_dist_irq_set_pending(vcpu, irq_num); in vgic_update_irq_pending()
1568 vgic_dist_irq_clear_level(vcpu, irq_num); in vgic_update_irq_pending()
1569 if (!vgic_dist_irq_soft_pend(vcpu, irq_num)) { in vgic_update_irq_pending()
1570 vgic_dist_irq_clear_pending(vcpu, irq_num); in vgic_update_irq_pending()
1571 vgic_cpu_irq_clear(vcpu, irq_num); in vgic_update_irq_pending()
1572 if (!compute_pending_for_cpu(vcpu)) in vgic_update_irq_pending()
1581 enabled = vgic_irq_is_enabled(vcpu, irq_num); in vgic_update_irq_pending()
1588 if (!vgic_can_sample_irq(vcpu, irq_num)) { in vgic_update_irq_pending()
1598 vgic_cpu_irq_set(vcpu, irq_num); in vgic_update_irq_pending()
1704 static struct list_head *vgic_get_irq_phys_map_list(struct kvm_vcpu *vcpu, in vgic_get_irq_phys_map_list() argument
1708 return &vcpu->arch.vgic_cpu.irq_phys_map_list; in vgic_get_irq_phys_map_list()
1710 return &vcpu->kvm->arch.vgic.irq_phys_map_list; in vgic_get_irq_phys_map_list()
1726 struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, in kvm_vgic_map_phys_irq() argument
1729 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in kvm_vgic_map_phys_irq()
1730 struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq); in kvm_vgic_map_phys_irq()
1757 map = vgic_irq_map_search(vcpu, virt_irq); in kvm_vgic_map_phys_irq()
1784 static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu, in vgic_irq_map_search() argument
1787 struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq); in vgic_irq_map_search()
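
vgic_irq_map_search() presumably walks whichever list vgic_get_irq_phys_map_list() selected (per-vcpu for private IRQs, per-VM for shared ones). A sketch assuming an irq_phys_map_entry wrapper with a list_head member named entry and RCU-protected traversal:

struct irq_phys_map_entry *entry;

rcu_read_lock();
list_for_each_entry_rcu(entry, root, entry) {
	if (entry->map.virt_irq == virt_irq) {
		rcu_read_unlock();
		return &entry->map;
	}
}
rcu_read_unlock();

return NULL;
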
1821 int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map) in kvm_vgic_unmap_phys_irq() argument
1823 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in kvm_vgic_unmap_phys_irq()
1830 root = vgic_get_irq_phys_map_list(vcpu, map->virt_irq); in kvm_vgic_unmap_phys_irq()
1862 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_vgic_vcpu_destroy() argument
1864 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; in kvm_vgic_vcpu_destroy()
1869 vgic_destroy_irq_phys_map(vcpu->kvm, &vgic_cpu->irq_phys_map_list); in kvm_vgic_vcpu_destroy()
1875 static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs) in vgic_vcpu_init_maps() argument
1877 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; in vgic_vcpu_init_maps()
1887 kvm_vgic_vcpu_destroy(vcpu); in vgic_vcpu_init_maps()
1906 void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu) in kvm_vgic_vcpu_early_init() argument
1908 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; in kvm_vgic_vcpu_early_init()
1926 struct kvm_vcpu *vcpu; in kvm_vgic_destroy() local
1929 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_vgic_destroy()
1930 kvm_vgic_vcpu_destroy(vcpu); in kvm_vgic_destroy()
1965 struct kvm_vcpu *vcpu; in vgic_init() local
2025 kvm_for_each_vcpu(vcpu_id, vcpu, kvm) { in vgic_init()
2026 ret = vgic_vcpu_init_maps(vcpu, nr_irqs); in vgic_init()
2040 vcpu->vcpu_id, i, 1); in vgic_init()
2042 vcpu->vcpu_id, i, in vgic_init()
2047 vcpu->vcpu_id, i, in vgic_init()
2052 vgic_enable(vcpu); in vgic_init()
2098 struct kvm_vcpu *vcpu; in kvm_vgic_create() local
2124 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_vgic_create()
2125 if (!mutex_trylock(&vcpu->mutex)) in kvm_vgic_create()
2130 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_vgic_create()
2131 if (vcpu->arch.has_run_once) in kvm_vgic_create()
2149 vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx); in kvm_vgic_create()
2150 mutex_unlock(&vcpu->mutex); in kvm_vgic_create()
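
The kvm_vgic_create() fragments show a trylock-all-vcpus pattern: every vcpu mutex must be held so no vcpu can enter the run loop while the vgic is instantiated (has_run_once is checked under those locks), and a partial failure unwinds only the mutexes already taken. A sketch of the whole pattern, with the surrounding error handling assumed:

int i, vcpu_lock_idx = -1;

kvm_for_each_vcpu(i, vcpu, kvm) {
	if (!mutex_trylock(&vcpu->mutex))
		goto out_unlock;
	vcpu_lock_idx = i;	/* remember how far we got */
}

/* ... safe to create the vgic here: no vcpu holds its own mutex ... */

out_unlock:
	/* release in reverse, but only the mutexes actually taken */
	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
		mutex_unlock(&vcpu->mutex);
	}
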