Lines Matching refs:vcpu
83 static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
84 static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
85 static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
86 static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
91 static void add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source) in add_sgi_source() argument
93 vcpu->kvm->arch.vgic.vm_ops.add_sgi_source(vcpu, irq, source); in add_sgi_source()
96 static bool queue_sgi(struct kvm_vcpu *vcpu, int irq) in queue_sgi() argument
98 return vcpu->kvm->arch.vgic.vm_ops.queue_sgi(vcpu, irq); in queue_sgi()
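
The add_sgi_source()/queue_sgi() wrappers at lines 91-98 dispatch through a per-VM ops table (vm_ops), so the same call site works for whichever GIC model the guest was created with. Below is a minimal standalone sketch of that function-pointer dispatch pattern; the struct layout and the gicv2_* backend names are illustrative assumptions, not the kernel's definitions.

/* Toy model of the per-VM ops dispatch seen in add_sgi_source()/queue_sgi().
 * All names are illustrative; only the function-pointer pattern mirrors the
 * listing above. */
#include <stdbool.h>
#include <stdio.h>

struct toy_vm_ops {
    void (*add_sgi_source)(int vcpu_id, int irq, int source);
    bool (*queue_sgi)(int vcpu_id, int irq);
};

struct toy_vm {
    struct toy_vm_ops vm_ops;   /* filled in when the VM's GIC model is chosen */
};

/* Hypothetical GICv2-style backend. */
static void gicv2_add_sgi_source(int vcpu_id, int irq, int source)
{
    printf("vcpu%d: record SGI%d from source CPU %d\n", vcpu_id, irq, source);
}

static bool gicv2_queue_sgi(int vcpu_id, int irq)
{
    printf("vcpu%d: queue SGI%d\n", vcpu_id, irq);
    return true;
}

/* The wrappers stay model-agnostic, like the ones in the listing. */
static void add_sgi_source(struct toy_vm *vm, int vcpu_id, int irq, int source)
{
    vm->vm_ops.add_sgi_source(vcpu_id, irq, source);
}

static bool queue_sgi(struct toy_vm *vm, int vcpu_id, int irq)
{
    return vm->vm_ops.queue_sgi(vcpu_id, irq);
}

int main(void)
{
    struct toy_vm vm = { .vm_ops = { gicv2_add_sgi_source, gicv2_queue_sgi } };

    add_sgi_source(&vm, 0, 1, 3);
    return queue_sgi(&vm, 0, 1) ? 0 : 1;
}
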
246 static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq) in vgic_irq_is_edge() argument
248 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_irq_is_edge()
251 irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq); in vgic_irq_is_edge()
255 static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq) in vgic_irq_is_enabled() argument
257 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_irq_is_enabled()
259 return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq); in vgic_irq_is_enabled()
262 static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq) in vgic_irq_is_queued() argument
264 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_irq_is_queued()
266 return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq); in vgic_irq_is_queued()
269 static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq) in vgic_irq_is_active() argument
271 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_irq_is_active()
273 return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq); in vgic_irq_is_active()
276 static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq) in vgic_irq_set_queued() argument
278 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_irq_set_queued()
280 vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 1); in vgic_irq_set_queued()
283 static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq) in vgic_irq_clear_queued() argument
285 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_irq_clear_queued()
287 vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0); in vgic_irq_clear_queued()
290 static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq) in vgic_irq_set_active() argument
292 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_irq_set_active()
294 vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1); in vgic_irq_set_active()
297 static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq) in vgic_irq_clear_active() argument
299 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_irq_clear_active()
301 vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0); in vgic_irq_clear_active()
304 static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq) in vgic_dist_irq_get_level() argument
306 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_dist_irq_get_level()
308 return vgic_bitmap_get_irq_val(&dist->irq_level, vcpu->vcpu_id, irq); in vgic_dist_irq_get_level()
311 static void vgic_dist_irq_set_level(struct kvm_vcpu *vcpu, int irq) in vgic_dist_irq_set_level() argument
313 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_dist_irq_set_level()
315 vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 1); in vgic_dist_irq_set_level()
318 static void vgic_dist_irq_clear_level(struct kvm_vcpu *vcpu, int irq) in vgic_dist_irq_clear_level() argument
320 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_dist_irq_clear_level()
322 vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 0); in vgic_dist_irq_clear_level()
325 static int vgic_dist_irq_soft_pend(struct kvm_vcpu *vcpu, int irq) in vgic_dist_irq_soft_pend() argument
327 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_dist_irq_soft_pend()
329 return vgic_bitmap_get_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq); in vgic_dist_irq_soft_pend()
332 static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq) in vgic_dist_irq_clear_soft_pend() argument
334 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_dist_irq_clear_soft_pend()
336 vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0); in vgic_dist_irq_clear_soft_pend()
339 static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq) in vgic_dist_irq_is_pending() argument
341 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_dist_irq_is_pending()
343 return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq); in vgic_dist_irq_is_pending()
346 void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq) in vgic_dist_irq_set_pending() argument
348 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_dist_irq_set_pending()
350 vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1); in vgic_dist_irq_set_pending()
353 void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq) in vgic_dist_irq_clear_pending() argument
355 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_dist_irq_clear_pending()
357 vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 0); in vgic_dist_irq_clear_pending()
360 static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq) in vgic_cpu_irq_set() argument
363 set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu); in vgic_cpu_irq_set()
366 vcpu->arch.vgic_cpu.pending_shared); in vgic_cpu_irq_set()
369 void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq) in vgic_cpu_irq_clear() argument
372 clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu); in vgic_cpu_irq_clear()
375 vcpu->arch.vgic_cpu.pending_shared); in vgic_cpu_irq_clear()
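
The accessor helpers at lines 246-375 all funnel through vgic_bitmap_get_irq_val()/vgic_bitmap_set_irq_val() with a vcpu_id, and vgic_cpu_irq_set()/vgic_cpu_irq_clear() pick between a pending_percpu and a pending_shared bitmap. The usual GIC split, and the one these names suggest, is that private interrupts (SGIs and PPIs, IRQ < 32) are tracked per vcpu while shared interrupts (SPIs) live in one distributor-wide bitmap. The standalone sketch below models only that split; the constants and struct layout are assumptions, not the kernel's vgic_bitmap.

/* Toy model of a "private below 32, shared above" IRQ bitmap, as suggested by
 * the per-vcpu vs. shared accessors in the listing. Layout and names are
 * illustrative only. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_PRIVATE_IRQS 32      /* SGIs + PPIs on a GIC */
#define NR_VCPUS        4
#define NR_SHARED_IRQS  96

struct toy_irq_bitmap {
    uint32_t percpu[NR_VCPUS];              /* one word of private IRQs per vcpu */
    uint32_t shared[NR_SHARED_IRQS / 32];   /* SPIs, common to all vcpus */
};

static bool toy_get_irq_val(const struct toy_irq_bitmap *b, int vcpu_id, int irq)
{
    if (irq < NR_PRIVATE_IRQS)
        return (b->percpu[vcpu_id] >> irq) & 1;
    irq -= NR_PRIVATE_IRQS;
    return (b->shared[irq / 32] >> (irq % 32)) & 1;
}

static void toy_set_irq_val(struct toy_irq_bitmap *b, int vcpu_id, int irq, int val)
{
    uint32_t *word;
    int bit;

    if (irq < NR_PRIVATE_IRQS) {
        word = &b->percpu[vcpu_id];
        bit = irq;
    } else {
        irq -= NR_PRIVATE_IRQS;
        word = &b->shared[irq / 32];
        bit = irq % 32;
    }
    if (val)
        *word |= 1u << bit;
    else
        *word &= ~(1u << bit);
}

int main(void)
{
    struct toy_irq_bitmap pending = { {0}, {0} };

    toy_set_irq_val(&pending, 1, 27, 1);    /* PPI: visible only on vcpu 1 */
    toy_set_irq_val(&pending, 0, 40, 1);    /* SPI: visible to every vcpu  */
    printf("vcpu0 irq27=%d irq40=%d\n",
           toy_get_irq_val(&pending, 0, 27), toy_get_irq_val(&pending, 0, 40));
    printf("vcpu1 irq27=%d irq40=%d\n",
           toy_get_irq_val(&pending, 1, 27), toy_get_irq_val(&pending, 1, 40));
    return 0;
}
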
378 static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq) in vgic_can_sample_irq() argument
380 return vgic_irq_is_edge(vcpu, irq) || !vgic_irq_is_queued(vcpu, irq); in vgic_can_sample_irq()
444 bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio, in handle_mmio_raz_wi() argument
660 void vgic_unqueue_irqs(struct kvm_vcpu *vcpu) in vgic_unqueue_irqs() argument
662 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; in vgic_unqueue_irqs()
666 struct vgic_lr lr = vgic_get_lr(vcpu, i); in vgic_unqueue_irqs()
679 add_sgi_source(vcpu, lr.irq, lr.source); in vgic_unqueue_irqs()
687 vgic_irq_set_active(vcpu, lr.irq); in vgic_unqueue_irqs()
698 vgic_dist_irq_set_pending(vcpu, lr.irq); in vgic_unqueue_irqs()
702 vgic_set_lr(vcpu, i, lr); in vgic_unqueue_irqs()
708 vgic_retire_lr(i, lr.irq, vcpu); in vgic_unqueue_irqs()
709 vgic_irq_clear_queued(vcpu, lr.irq); in vgic_unqueue_irqs()
712 vgic_update_state(vcpu->kvm); in vgic_unqueue_irqs()
756 static bool call_range_handler(struct kvm_vcpu *vcpu, in call_range_handler() argument
765 return range->handle_mmio(vcpu, mmio, offset); in call_range_handler()
778 ret = range->handle_mmio(vcpu, &mmio32, offset + 4); in call_range_handler()
782 ret |= range->handle_mmio(vcpu, &mmio32, offset); in call_range_handler()
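
call_range_handler() at lines 756-782 forwards an access straight to the range's handle_mmio() when the handler can take it as-is, and otherwise issues two 32-bit calls, one at offset + 4 and one at offset, OR-ing the returned "state changed" flags. The sketch below models that split of an 8-byte access into two 4-byte handler calls; the handler, types and register backing store are illustrative stand-ins, not KVM structures.

/* Minimal model of splitting a 64-bit MMIO access into two 32-bit handler
 * calls and OR-ing the results, as the two handle_mmio() calls at offset + 4
 * and offset suggest. Everything here is a simplified stand-in. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef bool (*mmio_handler_t)(uint32_t *word, unsigned long offset, bool is_write);

/* Hypothetical 32-bit register handler standing in for range->handle_mmio(). */
static bool demo_handler(uint32_t *word, unsigned long offset, bool is_write)
{
    static uint32_t regs[2];

    if (is_write)
        regs[offset / 4] = *word;
    else
        *word = regs[offset / 4];
    return is_write;    /* pretend only writes change distributor state */
}

/* Split an 8-byte access into two 4-byte handler calls, second word at
 * offset + 4 first, and OR the "state updated" results together. */
static bool call_split_handler(mmio_handler_t handler, uint32_t data[2],
                               unsigned long offset, bool is_write)
{
    bool updated;

    updated  = handler(&data[1], offset + 4, is_write);
    updated |= handler(&data[0], offset, is_write);
    return updated;
}

int main(void)
{
    uint32_t data[2] = { 0x11223344, 0x55667788 };

    call_split_handler(demo_handler, data, 0, true);
    data[0] = data[1] = 0;
    call_split_handler(demo_handler, data, 0, false);
    printf("read back %#x %#x\n", data[0], data[1]);
    return 0;
}
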
799 static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu, in vgic_handle_mmio_access() argument
803 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_handle_mmio_access()
806 struct kvm_run *run = vcpu->run; in vgic_handle_mmio_access()
828 updated_state = call_range_handler(vcpu, &mmio, offset, range); in vgic_handle_mmio_access()
840 kvm_handle_mmio_return(vcpu, run); in vgic_handle_mmio_access()
843 vgic_kick_vcpus(vcpu->kvm); in vgic_handle_mmio_access()
848 static int vgic_handle_mmio_read(struct kvm_vcpu *vcpu, in vgic_handle_mmio_read() argument
852 return vgic_handle_mmio_access(vcpu, this, addr, len, val, false); in vgic_handle_mmio_read()
855 static int vgic_handle_mmio_write(struct kvm_vcpu *vcpu, in vgic_handle_mmio_write() argument
859 return vgic_handle_mmio_access(vcpu, this, addr, len, (void *)val, in vgic_handle_mmio_write()
889 struct kvm_vcpu *vcpu = NULL; in vgic_register_kvm_io_dev() local
893 vcpu = kvm_get_vcpu(kvm, redist_vcpu_id); in vgic_register_kvm_io_dev()
898 iodev->redist_vcpu = vcpu; in vgic_register_kvm_io_dev()
920 static int compute_active_for_cpu(struct kvm_vcpu *vcpu) in compute_active_for_cpu() argument
922 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in compute_active_for_cpu()
928 vcpu_id = vcpu->vcpu_id; in compute_active_for_cpu()
929 act_percpu = vcpu->arch.vgic_cpu.active_percpu; in compute_active_for_cpu()
930 act_shared = vcpu->arch.vgic_cpu.active_shared; in compute_active_for_cpu()
950 static int compute_pending_for_cpu(struct kvm_vcpu *vcpu) in compute_pending_for_cpu() argument
952 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in compute_pending_for_cpu()
958 vcpu_id = vcpu->vcpu_id; in compute_pending_for_cpu()
959 pend_percpu = vcpu->arch.vgic_cpu.pending_percpu; in compute_pending_for_cpu()
960 pend_shared = vcpu->arch.vgic_cpu.pending_shared; in compute_pending_for_cpu()
986 struct kvm_vcpu *vcpu; in vgic_update_state() local
994 kvm_for_each_vcpu(c, vcpu, kvm) { in vgic_update_state()
995 if (compute_pending_for_cpu(vcpu)) in vgic_update_state()
998 if (compute_active_for_cpu(vcpu)) in vgic_update_state()
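
compute_pending_for_cpu() and compute_active_for_cpu() (lines 920-998) derive, per vcpu, whether any distributor interrupt is actually deliverable, and vgic_update_state() walks every vcpu with kvm_for_each_vcpu() to refresh those summary bits. A pending interrupt only counts where it is both pending and enabled; the toy sketch below computes exactly that intersection and nothing more (SPI target routing is omitted, and all types are simplified stand-ins).

/* Simplified model of compute_pending_for_cpu(): a vcpu has work to do only
 * where pending AND enabled overlap. The real code also honours SPI
 * targeting; that part is left out here. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_dist {
    uint32_t pending_percpu[4];     /* private IRQs, one word per vcpu */
    uint32_t enabled_percpu[4];
    uint32_t pending_shared;        /* SPIs (toy: a single word) */
    uint32_t enabled_shared;
};

static bool compute_pending_for_cpu(const struct toy_dist *d, int vcpu_id,
                                    uint32_t *out_percpu, uint32_t *out_shared)
{
    *out_percpu = d->pending_percpu[vcpu_id] & d->enabled_percpu[vcpu_id];
    *out_shared = d->pending_shared & d->enabled_shared;
    return *out_percpu || *out_shared;
}

int main(void)
{
    struct toy_dist d = {
        .pending_percpu = { 1u << 27, 0, 0, 0 },
        .enabled_percpu = { 1u << 27, 0, 0, 0 },
    };
    uint32_t pc, sh;
    int c;

    /* vgic_update_state()-style loop: note which vcpus have pending work. */
    for (c = 0; c < 4; c++)
        if (compute_pending_for_cpu(&d, c, &pc, &sh))
            printf("vcpu%d has a deliverable interrupt\n", c);
    return 0;
}
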
1005 static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr) in vgic_get_lr() argument
1007 return vgic_ops->get_lr(vcpu, lr); in vgic_get_lr()
1010 static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, in vgic_set_lr() argument
1013 vgic_ops->set_lr(vcpu, lr, vlr); in vgic_set_lr()
1016 static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr, in vgic_sync_lr_elrsr() argument
1019 vgic_ops->sync_lr_elrsr(vcpu, lr, vlr); in vgic_sync_lr_elrsr()
1022 static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu) in vgic_get_elrsr() argument
1024 return vgic_ops->get_elrsr(vcpu); in vgic_get_elrsr()
1027 static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu) in vgic_get_eisr() argument
1029 return vgic_ops->get_eisr(vcpu); in vgic_get_eisr()
1032 static inline void vgic_clear_eisr(struct kvm_vcpu *vcpu) in vgic_clear_eisr() argument
1034 vgic_ops->clear_eisr(vcpu); in vgic_clear_eisr()
1037 static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu) in vgic_get_interrupt_status() argument
1039 return vgic_ops->get_interrupt_status(vcpu); in vgic_get_interrupt_status()
1042 static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu) in vgic_enable_underflow() argument
1044 vgic_ops->enable_underflow(vcpu); in vgic_enable_underflow()
1047 static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu) in vgic_disable_underflow() argument
1049 vgic_ops->disable_underflow(vcpu); in vgic_disable_underflow()
1052 void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr) in vgic_get_vmcr() argument
1054 vgic_ops->get_vmcr(vcpu, vmcr); in vgic_get_vmcr()
1057 void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr) in vgic_set_vmcr() argument
1059 vgic_ops->set_vmcr(vcpu, vmcr); in vgic_set_vmcr()
1062 static inline void vgic_enable(struct kvm_vcpu *vcpu) in vgic_enable() argument
1064 vgic_ops->enable(vcpu); in vgic_enable()
1067 static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu) in vgic_retire_lr() argument
1069 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; in vgic_retire_lr()
1070 struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr); in vgic_retire_lr()
1073 vgic_set_lr(vcpu, lr_nr, vlr); in vgic_retire_lr()
1076 vgic_sync_lr_elrsr(vcpu, lr_nr, vlr); in vgic_retire_lr()
1088 static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu) in vgic_retire_disabled_irqs() argument
1090 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; in vgic_retire_disabled_irqs()
1094 struct vgic_lr vlr = vgic_get_lr(vcpu, lr); in vgic_retire_disabled_irqs()
1096 if (!vgic_irq_is_enabled(vcpu, vlr.irq)) { in vgic_retire_disabled_irqs()
1097 vgic_retire_lr(lr, vlr.irq, vcpu); in vgic_retire_disabled_irqs()
1098 if (vgic_irq_is_queued(vcpu, vlr.irq)) in vgic_retire_disabled_irqs()
1099 vgic_irq_clear_queued(vcpu, vlr.irq); in vgic_retire_disabled_irqs()
1104 static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq, in vgic_queue_irq_to_lr() argument
1107 if (vgic_irq_is_active(vcpu, irq)) { in vgic_queue_irq_to_lr()
1110 vgic_irq_clear_active(vcpu, irq); in vgic_queue_irq_to_lr()
1111 vgic_update_state(vcpu->kvm); in vgic_queue_irq_to_lr()
1112 } else if (vgic_dist_irq_is_pending(vcpu, irq)) { in vgic_queue_irq_to_lr()
1117 if (!vgic_irq_is_edge(vcpu, irq)) in vgic_queue_irq_to_lr()
1120 vgic_set_lr(vcpu, lr_nr, vlr); in vgic_queue_irq_to_lr()
1121 vgic_sync_lr_elrsr(vcpu, lr_nr, vlr); in vgic_queue_irq_to_lr()
1129 bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) in vgic_queue_irq() argument
1131 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; in vgic_queue_irq()
1132 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_queue_irq()
1147 vlr = vgic_get_lr(vcpu, lr); in vgic_queue_irq()
1151 vgic_queue_irq_to_lr(vcpu, irq, lr, vlr); in vgic_queue_irq()
1169 vgic_queue_irq_to_lr(vcpu, irq, lr, vlr); in vgic_queue_irq()
1174 static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq) in vgic_queue_hwirq() argument
1176 if (!vgic_can_sample_irq(vcpu, irq)) in vgic_queue_hwirq()
1179 if (vgic_queue_irq(vcpu, 0, irq)) { in vgic_queue_hwirq()
1180 if (vgic_irq_is_edge(vcpu, irq)) { in vgic_queue_hwirq()
1181 vgic_dist_irq_clear_pending(vcpu, irq); in vgic_queue_hwirq()
1182 vgic_cpu_irq_clear(vcpu, irq); in vgic_queue_hwirq()
1184 vgic_irq_set_queued(vcpu, irq); in vgic_queue_hwirq()
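
vgic_queue_hwirq() at lines 1174-1184 applies the sampling rule from vgic_can_sample_irq(): an interrupt is only (re)queued if it is edge-triggered or not already sitting in a list register, and once queued an edge interrupt consumes its pending state while a level interrupt is merely flagged as queued. The sketch below is a toy version of that state machine; the irq_state struct and helper names are mine, not the kernel's.

/* Toy version of the edge/level queueing rule visible in vgic_queue_hwirq():
 * edge IRQs consume their pending bit when placed in a list register, level
 * IRQs stay pending but are flagged "queued" until the guest EOIs them. */
#include <stdbool.h>
#include <stdio.h>

struct irq_state {
    bool edge;      /* edge- vs. level-triggered */
    bool pending;
    bool queued;    /* already sitting in a list register */
};

static bool can_sample(const struct irq_state *s)
{
    return s->edge || !s->queued;   /* same shape as vgic_can_sample_irq() */
}

static bool queue_hwirq(struct irq_state *s)
{
    if (!can_sample(s))
        return true;        /* level IRQ already queued, nothing to do */

    /* pretend a list register was available and the IRQ was queued */
    if (s->edge)
        s->pending = false; /* edge: consumed on queueing */
    else
        s->queued = true;   /* level: block re-sampling until EOI */
    return true;
}

int main(void)
{
    struct irq_state timer = { .edge = false, .pending = true, .queued = false };

    queue_hwirq(&timer);
    printf("level irq after queueing: pending=%d queued=%d\n",
           timer.pending, timer.queued);
    return 0;
}
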
1197 static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) in __kvm_vgic_flush_hwstate() argument
1199 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; in __kvm_vgic_flush_hwstate()
1200 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in __kvm_vgic_flush_hwstate()
1206 vcpu_id = vcpu->vcpu_id; in __kvm_vgic_flush_hwstate()
1208 pa_percpu = vcpu->arch.vgic_cpu.pend_act_percpu; in __kvm_vgic_flush_hwstate()
1209 pa_shared = vcpu->arch.vgic_cpu.pend_act_shared; in __kvm_vgic_flush_hwstate()
1220 if (!kvm_vgic_vcpu_pending_irq(vcpu) && !kvm_vgic_vcpu_active_irq(vcpu)) in __kvm_vgic_flush_hwstate()
1225 if (!queue_sgi(vcpu, i)) in __kvm_vgic_flush_hwstate()
1231 if (!vgic_queue_hwirq(vcpu, i)) in __kvm_vgic_flush_hwstate()
1237 if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS)) in __kvm_vgic_flush_hwstate()
1246 vgic_enable_underflow(vcpu); in __kvm_vgic_flush_hwstate()
1248 vgic_disable_underflow(vcpu); in __kvm_vgic_flush_hwstate()
1259 static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) in vgic_process_maintenance() argument
1261 u32 status = vgic_get_interrupt_status(vcpu); in vgic_process_maintenance()
1262 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_process_maintenance()
1264 struct kvm *kvm = vcpu->kvm; in vgic_process_maintenance()
1273 u64 eisr = vgic_get_eisr(vcpu); in vgic_process_maintenance()
1278 struct vgic_lr vlr = vgic_get_lr(vcpu, lr); in vgic_process_maintenance()
1279 WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq)); in vgic_process_maintenance()
1282 vgic_irq_clear_queued(vcpu, vlr.irq); in vgic_process_maintenance()
1285 vgic_set_lr(vcpu, lr, vlr); in vgic_process_maintenance()
1298 vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq); in vgic_process_maintenance()
1312 if (vgic_dist_irq_get_level(vcpu, vlr.irq)) { in vgic_process_maintenance()
1313 vgic_cpu_irq_set(vcpu, vlr.irq); in vgic_process_maintenance()
1316 vgic_dist_irq_clear_pending(vcpu, vlr.irq); in vgic_process_maintenance()
1317 vgic_cpu_irq_clear(vcpu, vlr.irq); in vgic_process_maintenance()
1326 vgic_sync_lr_elrsr(vcpu, lr, vlr); in vgic_process_maintenance()
1331 vgic_disable_underflow(vcpu); in vgic_process_maintenance()
1339 vgic_clear_eisr(vcpu); in vgic_process_maintenance()
1345 static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) in __kvm_vgic_sync_hwstate() argument
1347 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; in __kvm_vgic_sync_hwstate()
1348 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in __kvm_vgic_sync_hwstate()
1354 level_pending = vgic_process_maintenance(vcpu); in __kvm_vgic_sync_hwstate()
1355 elrsr = vgic_get_elrsr(vcpu); in __kvm_vgic_sync_hwstate()
1365 vlr = vgic_get_lr(vcpu, lr); in __kvm_vgic_sync_hwstate()
1374 set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu); in __kvm_vgic_sync_hwstate()
1377 void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) in kvm_vgic_flush_hwstate() argument
1379 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in kvm_vgic_flush_hwstate()
1381 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_vgic_flush_hwstate()
1385 __kvm_vgic_flush_hwstate(vcpu); in kvm_vgic_flush_hwstate()
1389 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) in kvm_vgic_sync_hwstate() argument
1391 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_vgic_sync_hwstate()
1394 __kvm_vgic_sync_hwstate(vcpu); in kvm_vgic_sync_hwstate()
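
kvm_vgic_flush_hwstate() and kvm_vgic_sync_hwstate() (lines 1377-1394) bracket guest execution: flush pushes distributor state into the list registers before entry, sync folds the hardware state back after exit, and both return early when no in-kernel irqchip is configured. The sketch below only illustrates that calling order; the toy vcpu struct and enter_guest() are stand-ins, not KVM APIs.

/* Illustrative ordering of the flush/sync pair around guest execution. */
#include <stdbool.h>
#include <stdio.h>

struct toy_vcpu { bool irqchip_in_kernel; };

static void vgic_flush(struct toy_vcpu *v)
{
    if (!v->irqchip_in_kernel)
        return;             /* same early-out as the listing */
    printf("flush: distributor state -> list registers\n");
}

static void vgic_sync(struct toy_vcpu *v)
{
    if (!v->irqchip_in_kernel)
        return;
    printf("sync: list registers -> distributor state\n");
}

static void enter_guest(struct toy_vcpu *v)
{
    (void)v;
    printf("run guest\n");
}

int main(void)
{
    struct toy_vcpu v = { .irqchip_in_kernel = true };

    vgic_flush(&v);     /* before entering the guest */
    enter_guest(&v);
    vgic_sync(&v);      /* after the guest exits */
    return 0;
}
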
1397 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) in kvm_vgic_vcpu_pending_irq() argument
1399 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in kvm_vgic_vcpu_pending_irq()
1401 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_vgic_vcpu_pending_irq()
1404 return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu); in kvm_vgic_vcpu_pending_irq()
1407 int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu) in kvm_vgic_vcpu_active_irq() argument
1409 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in kvm_vgic_vcpu_active_irq()
1411 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_vgic_vcpu_active_irq()
1414 return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu); in kvm_vgic_vcpu_active_irq()
1420 struct kvm_vcpu *vcpu; in vgic_kick_vcpus() local
1427 kvm_for_each_vcpu(c, vcpu, kvm) { in vgic_kick_vcpus()
1428 if (kvm_vgic_vcpu_pending_irq(vcpu)) in vgic_kick_vcpus()
1429 kvm_vcpu_kick(vcpu); in vgic_kick_vcpus()
1433 static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level) in vgic_validate_injection() argument
1435 int edge_triggered = vgic_irq_is_edge(vcpu, irq); in vgic_validate_injection()
1443 int state = vgic_dist_irq_is_pending(vcpu, irq); in vgic_validate_injection()
1446 int state = vgic_dist_irq_get_level(vcpu, irq); in vgic_validate_injection()
1455 struct kvm_vcpu *vcpu; in vgic_update_irq_pending() local
1462 vcpu = kvm_get_vcpu(kvm, cpuid); in vgic_update_irq_pending()
1463 edge_triggered = vgic_irq_is_edge(vcpu, irq_num); in vgic_update_irq_pending()
1466 if (!vgic_validate_injection(vcpu, irq_num, level)) { in vgic_update_irq_pending()
1478 vcpu = kvm_get_vcpu(kvm, cpuid); in vgic_update_irq_pending()
1485 vgic_dist_irq_set_level(vcpu, irq_num); in vgic_update_irq_pending()
1486 vgic_dist_irq_set_pending(vcpu, irq_num); in vgic_update_irq_pending()
1489 vgic_dist_irq_clear_level(vcpu, irq_num); in vgic_update_irq_pending()
1490 if (!vgic_dist_irq_soft_pend(vcpu, irq_num)) in vgic_update_irq_pending()
1491 vgic_dist_irq_clear_pending(vcpu, irq_num); in vgic_update_irq_pending()
1498 enabled = vgic_irq_is_enabled(vcpu, irq_num); in vgic_update_irq_pending()
1505 if (!vgic_can_sample_irq(vcpu, irq_num)) { in vgic_update_irq_pending()
1515 vgic_cpu_irq_set(vcpu, irq_num); in vgic_update_irq_pending()
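
vgic_validate_injection() and vgic_update_irq_pending() (lines 1433-1515) filter injections before they touch distributor state: judging by the pending check for edge-triggered interrupts and the level check for level-triggered ones, an edge injection is only accepted on a rising edge that is not already pending, and a level injection only when the line level actually changes. The sketch below models that filter with a toy per-interrupt state; it is an interpretation of the listing, not the kernel function.

/* Sketch of the injection filter suggested by vgic_validate_injection():
 * edge IRQs care about a rising edge that is not already pending, level IRQs
 * about an actual change of line level. State tracking is a toy stand-in. */
#include <stdbool.h>
#include <stdio.h>

struct toy_irq {
    bool edge;
    bool pending;   /* edge: latched pending state     */
    bool level;     /* level: last observed line level */
};

static bool validate_injection(const struct toy_irq *irq, bool new_level)
{
    if (irq->edge)
        return new_level && !irq->pending;  /* rising edge only */
    return new_level != irq->level;         /* level must change */
}

int main(void)
{
    struct toy_irq spi = { .edge = false, .pending = false, .level = false };

    printf("assert line:   %s\n", validate_injection(&spi, true) ? "inject" : "ignore");
    spi.level = true;
    printf("assert again:  %s\n", validate_injection(&spi, true) ? "inject" : "ignore");
    printf("deassert line: %s\n", validate_injection(&spi, false) ? "inject" : "ignore");
    return 0;
}
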
1588 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_vgic_vcpu_destroy() argument
1590 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; in kvm_vgic_vcpu_destroy()
1602 static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs) in vgic_vcpu_init_maps() argument
1604 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; in vgic_vcpu_init_maps()
1616 kvm_vgic_vcpu_destroy(vcpu); in vgic_vcpu_init_maps()
1646 struct kvm_vcpu *vcpu; in kvm_vgic_destroy() local
1649 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_vgic_destroy()
1650 kvm_vgic_vcpu_destroy(vcpu); in kvm_vgic_destroy()
1684 struct kvm_vcpu *vcpu; in vgic_init() local
1744 kvm_for_each_vcpu(vcpu_id, vcpu, kvm) { in vgic_init()
1745 ret = vgic_vcpu_init_maps(vcpu, nr_irqs); in vgic_init()
1754 vcpu->vcpu_id, i, 1); in vgic_init()
1757 vcpu->vcpu_id, i, in vgic_init()
1761 vgic_enable(vcpu); in vgic_init()
1795 struct kvm_vcpu *vcpu; in kvm_vgic_create() local
1821 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_vgic_create()
1822 if (!mutex_trylock(&vcpu->mutex)) in kvm_vgic_create()
1827 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_vgic_create()
1828 if (vcpu->arch.has_run_once) in kvm_vgic_create()
1847 vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx); in kvm_vgic_create()
1848 mutex_unlock(&vcpu->mutex); in kvm_vgic_create()