vgic.c: cross-reference listing of lines that use the identifier vlr

Line | Function                    | Code
-----|-----------------------------|---------------------------------------------------------------------
1029 | vgic_set_lr()               | vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)  [argument]
1030 | vgic_set_lr()               | struct vgic_lr vlr)
1032 | vgic_set_lr()               | vgic_ops->set_lr(vcpu, lr, vlr);
1082 | vgic_retire_lr()            | struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);  [local]
1084 | vgic_retire_lr()            | vgic_irq_clear_queued(vcpu, vlr.irq);
1090 | vgic_retire_lr()            | if (vlr.state & LR_STATE_PENDING) {
1091 | vgic_retire_lr()            | vgic_dist_irq_set_pending(vcpu, vlr.irq);
1092 | vgic_retire_lr()            | vlr.hwirq = 0;
1095 | vgic_retire_lr()            | vlr.state = 0;
1096 | vgic_retire_lr()            | vgic_set_lr(vcpu, lr_nr, vlr);
1111 | kvm_vgic_map_is_active()    | struct vgic_lr vlr = vgic_get_lr(vcpu, i);  [local]
1113 | kvm_vgic_map_is_active()    | if (vlr.irq == map->virt_irq && vlr.state & LR_STATE_ACTIVE)
1136 | vgic_retire_disabled_irqs() | struct vgic_lr vlr = vgic_get_lr(vcpu, lr);  [local]
1138 | vgic_retire_disabled_irqs() | if (!vgic_irq_is_enabled(vcpu, vlr.irq))
1143 | vgic_queue_irq_to_lr()      | vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq, int lr_nr, struct vgic_lr vlr)  [argument]
1144 | vgic_queue_irq_to_lr()      | int lr_nr, struct vgic_lr vlr)
1147 | vgic_queue_irq_to_lr()      | vlr.state |= LR_STATE_ACTIVE;
1148 | vgic_queue_irq_to_lr()      | kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state);
1153 | vgic_queue_irq_to_lr()      | vlr.state |= LR_STATE_PENDING;
1154 | vgic_queue_irq_to_lr()      | kvm_debug("Set pending: 0x%x\n", vlr.state);
1158 | vgic_queue_irq_to_lr()      | vlr.state |= LR_EOI_INT;
1160 | vgic_queue_irq_to_lr()      | if (vlr.irq >= VGIC_NR_SGIS) {
1165 | vgic_queue_irq_to_lr()      | vlr.hwirq = map->phys_irq;
1166 | vgic_queue_irq_to_lr()      | vlr.state |= LR_HW;
1167 | vgic_queue_irq_to_lr()      | vlr.state &= ~LR_EOI_INT;
1178 | vgic_queue_irq_to_lr()      | vgic_set_lr(vcpu, lr_nr, vlr);
1191 | vgic_queue_irq()            | struct vgic_lr vlr;  [local]
1203 | vgic_queue_irq()            | vlr = vgic_get_lr(vcpu, lr);
1204 | vgic_queue_irq()            | if (vlr.irq == irq && vlr.source == sgi_source_id) {
1205 | vgic_queue_irq()            | kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
1206 | vgic_queue_irq()            | vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
1218 | vgic_queue_irq()            | vlr.irq = irq;
1219 | vgic_queue_irq()            | vlr.source = sgi_source_id;
1220 | vgic_queue_irq()            | vlr.state = 0;
1221 | vgic_queue_irq()            | vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
1311 | process_queued_irq()        | process_queued_irq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)  [argument]
1312 | process_queued_irq()        | int lr, struct vgic_lr vlr)
1327 | process_queued_irq()        | vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);
1332 | process_queued_irq()        | vgic_irq_clear_queued(vcpu, vlr.irq);
1335 | process_queued_irq()        | if (vgic_irq_is_edge(vcpu, vlr.irq)) {
1336 | process_queued_irq()        | BUG_ON(!(vlr.state & LR_HW));
1337 | process_queued_irq()        | pending = vgic_dist_irq_is_pending(vcpu, vlr.irq);
1339 | process_queued_irq()        | if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
1340 | process_queued_irq()        | vgic_cpu_irq_set(vcpu, vlr.irq);
1343 | process_queued_irq()        | vgic_dist_irq_clear_pending(vcpu, vlr.irq);
1344 | process_queued_irq()        | vgic_cpu_irq_clear(vcpu, vlr.irq);
1352 | process_queued_irq()        | vlr.state = 0;
1353 | process_queued_irq()        | vlr.hwirq = 0;
1354 | process_queued_irq()        | vgic_set_lr(vcpu, lr, vlr);
1378 | vgic_process_maintenance()  | struct vgic_lr vlr = vgic_get_lr(vcpu, lr);  [local]
1380 | vgic_process_maintenance()  | WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
1381 | vgic_process_maintenance()  | WARN_ON(vlr.state & LR_STATE_MASK);
1390 | vgic_process_maintenance()  | vlr.irq - VGIC_NR_PRIVATE_IRQS);
1393 | vgic_process_maintenance()  | level_pending |= process_queued_irq(vcpu, lr, vlr);
1417 | vgic_sync_hwirq()           | static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)  [argument]
1422 | vgic_sync_hwirq()           | if (!(vlr.state & LR_HW))
1425 | vgic_sync_hwirq()           | if (vlr.state & LR_STATE_ACTIVE)
1429 | vgic_sync_hwirq()           | level_pending = process_queued_irq(vcpu, lr, vlr);
1447 | __kvm_vgic_sync_hwstate()   | struct vgic_lr vlr = vgic_get_lr(vcpu, lr);  [local]
1449 | __kvm_vgic_sync_hwstate()   | level_pending |= vgic_sync_hwirq(vcpu, lr, vlr);
1450 | __kvm_vgic_sync_hwstate()   | BUG_ON(vlr.irq >= dist->nr_irqs);
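The vgic_retire_lr() rows above (lines 1082-1096) are dense enough to reconstruct the retire path: the queued flag for the interrupt is cleared, any pending state still held in the list register is handed back to the distributor, and the LR is then written back empty. Below is a minimal sketch assembled from those fragments; the function signature and the lines omitted from the listing (1083, 1085-1089, 1093-1094) are assumptions rather than verbatim copies of the file, while the helpers (vgic_get_lr, vgic_set_lr, vgic_irq_clear_queued, vgic_dist_irq_set_pending) are taken from the listing itself.

	/*
	 * Sketch of vgic_retire_lr() pieced together from the listing above.
	 * Signature and elided lines are assumptions, not the exact source.
	 */
	static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu)
	{
		struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);	/* line 1082 */

		vgic_irq_clear_queued(vcpu, vlr.irq);		/* line 1084 */

		/*
		 * Hand any pending state back to the distributor before the
		 * list register is cleared, so the interrupt is not lost.
		 */
		if (vlr.state & LR_STATE_PENDING) {		/* lines 1090-1092 */
			vgic_dist_irq_set_pending(vcpu, vlr.irq);
			vlr.hwirq = 0;
		}

		vlr.state = 0;					/* line 1095 */
		vgic_set_lr(vcpu, lr_nr, vlr);			/* line 1096 */
	}

The same pattern shows up in vgic_retire_disabled_irqs() (lines 1136-1138), which scans the LRs and retires any whose interrupt has been disabled, again pushing state back to the distributor rather than dropping it.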