Lines Matching refs:vcpu
37 static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
43 static bool handle_mmio_misc(struct kvm_vcpu *vcpu, in handle_mmio_misc() argument
51 reg = vcpu->kvm->arch.vgic.enabled; in handle_mmio_misc()
55 vcpu->kvm->arch.vgic.enabled = reg & 1; in handle_mmio_misc()
56 vgic_update_state(vcpu->kvm); in handle_mmio_misc()
62 reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5; in handle_mmio_misc()
63 reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1; in handle_mmio_misc()
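
The GICD_TYPER read in handle_mmio_misc() above packs two fields: CPUNumber in bits [7:5] (number of CPU interfaces minus one) and ITLinesNumber in bits [4:0] ((nr_irqs / 32) minus one). A minimal stand-alone sketch of that encoding; the helper name is illustrative, not the kernel's:

#include <assert.h>
#include <stdint.h>

/*
 * Illustrative helper: compose a GICv2 GICD_TYPER value the same way
 * handle_mmio_misc() does.  Bits [4:0] hold ITLinesNumber
 * ((nr_irqs / 32) - 1) and bits [7:5] hold CPUNumber (CPUs - 1).
 */
static uint32_t gicd_typer(uint32_t online_vcpus, uint32_t nr_irqs)
{
	uint32_t reg;

	reg  = (online_vcpus - 1) << 5;   /* CPUNumber, bits [7:5] */
	reg |= (nr_irqs >> 5) - 1;        /* ITLinesNumber, bits [4:0] */
	return reg;
}

int main(void)
{
	/* 4 VCPUs and 256 interrupt lines -> CPUNumber=3, ITLinesNumber=7 */
	assert(gicd_typer(4, 256) == ((3u << 5) | 7u));
	return 0;
}
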
78 static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu, in handle_mmio_set_enable_reg() argument
82 return vgic_handle_enable_reg(vcpu->kvm, mmio, offset, in handle_mmio_set_enable_reg()
83 vcpu->vcpu_id, ACCESS_WRITE_SETBIT); in handle_mmio_set_enable_reg()
86 static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu, in handle_mmio_clear_enable_reg() argument
90 return vgic_handle_enable_reg(vcpu->kvm, mmio, offset, in handle_mmio_clear_enable_reg()
91 vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT); in handle_mmio_clear_enable_reg()
94 static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu, in handle_mmio_set_pending_reg() argument
98 return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset, in handle_mmio_set_pending_reg()
99 vcpu->vcpu_id); in handle_mmio_set_pending_reg()
102 static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu, in handle_mmio_clear_pending_reg() argument
106 return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset, in handle_mmio_clear_pending_reg()
107 vcpu->vcpu_id); in handle_mmio_clear_pending_reg()
110 static bool handle_mmio_set_active_reg(struct kvm_vcpu *vcpu, in handle_mmio_set_active_reg() argument
114 return vgic_handle_set_active_reg(vcpu->kvm, mmio, offset, in handle_mmio_set_active_reg()
115 vcpu->vcpu_id); in handle_mmio_set_active_reg()
118 static bool handle_mmio_clear_active_reg(struct kvm_vcpu *vcpu, in handle_mmio_clear_active_reg() argument
122 return vgic_handle_clear_active_reg(vcpu->kvm, mmio, offset, in handle_mmio_clear_active_reg()
123 vcpu->vcpu_id); in handle_mmio_clear_active_reg()
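
The enable, pending and active handlers above all funnel into the same access pattern: the ISENABLER/ISPENDR/ISACTIVER banks are write-1-to-set and the ICENABLER/ICPENDR/ICACTIVER banks are write-1-to-clear, which is what the ACCESS_WRITE_SETBIT and ACCESS_WRITE_CLEARBIT modes select. A minimal sketch of that semantics, with illustrative names rather than the kernel's vgic register-access machinery:

#include <assert.h>
#include <stdint.h>

/*
 * Minimal model of the SETBIT/CLEARBIT write modes: a write of 1
 * either sets or clears the corresponding state bit, writes of 0 are
 * ignored.  The enum and function names are illustrative only.
 */
enum write_mode { WRITE_SETBIT, WRITE_CLEARBIT };

static void reg_write(uint32_t *reg, uint32_t val, enum write_mode mode)
{
	if (mode == WRITE_SETBIT)
		*reg |= val;        /* GICD_ISENABLERn-style register */
	else
		*reg &= ~val;       /* GICD_ICENABLERn-style register */
}

int main(void)
{
	uint32_t enable = 0;

	reg_write(&enable, 0x5, WRITE_SETBIT);    /* enable IRQs 0 and 2 */
	assert(enable == 0x5);
	reg_write(&enable, 0x1, WRITE_CLEARBIT);  /* disable IRQ 0 only */
	assert(enable == 0x4);
	return 0;
}
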
126 static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu, in handle_mmio_priority_reg() argument
130 u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority, in handle_mmio_priority_reg()
131 vcpu->vcpu_id, offset); in handle_mmio_priority_reg()
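
handle_mmio_priority_reg() indexes a bytemap because GICD_IPRIORITYRn holds one 8-bit priority per interrupt, four per 32-bit word. A simplified model of that layout; the struct, helper and little-endian assumption are mine, not the kernel's vgic_bytemap:

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define NR_IRQS 64

struct prio_bytemap {
	uint32_t words[NR_IRQS / 4];        /* four 8-bit priorities per word */
};

static uint8_t *prio_byte(struct prio_bytemap *map, unsigned int irq)
{
	return (uint8_t *)map->words + irq; /* byte irq%4 of word irq/4 (little endian) */
}

int main(void)
{
	struct prio_bytemap map;

	memset(&map, 0, sizeof(map));
	*prio_byte(&map, 5) = 0xa0;                    /* priority of IRQ 5 */
	assert(((map.words[1] >> 8) & 0xff) == 0xa0);  /* word 1, byte 1 */
	return 0;
}
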
157 struct kvm_vcpu *vcpu; in vgic_set_target_reg() local
175 kvm_for_each_vcpu(c, vcpu, kvm) { in vgic_set_target_reg()
185 static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu, in handle_mmio_target_reg() argument
195 roreg = 1 << vcpu->vcpu_id; in handle_mmio_target_reg()
204 reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U); in handle_mmio_target_reg()
208 vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U); in handle_mmio_target_reg()
209 vgic_update_state(vcpu->kvm); in handle_mmio_target_reg()
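
The target-register handlers implement GICD_ITARGETSRn, where each byte is a CPU mask for one interrupt; for the first 32 interrupts (SGIs and PPIs) the bytes are read-only and read back as the accessing CPU's own bit, which is what the roreg value built from vcpu_id above provides. An illustrative helper showing that read-only value, assuming fewer than 8 VCPUs:

#include <assert.h>
#include <stdint.h>

/*
 * Illustrative only: the value a VCPU sees when reading one of the
 * read-only ITARGETSR words covering SGIs/PPIs, i.e. its own target
 * bit replicated into all four byte lanes.
 */
static uint32_t itargetsr_private(unsigned int vcpu_id)
{
	uint32_t byte = 1u << vcpu_id;    /* this CPU's target bit */

	return byte | byte << 8 | byte << 16 | byte << 24;
}

int main(void)
{
	/* VCPU 2 reading a private-interrupt ITARGETSR sees 0x04 in every byte. */
	assert(itargetsr_private(2) == 0x04040404u);
	return 0;
}
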
216 static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu, in handle_mmio_cfg_reg() argument
221 reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg, in handle_mmio_cfg_reg()
222 vcpu->vcpu_id, offset >> 1); in handle_mmio_cfg_reg()
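
handle_mmio_cfg_reg() halves the offset because GICD_ICFGRn spends two bits per interrupt (the odd bit of each pair selects edge triggering) while the emulation's irq_cfg bitmap keeps one bit per IRQ. A small sketch of expanding the packed form into the architectural layout; expand() is an illustrative helper, not the kernel's:

#include <assert.h>
#include <stdint.h>

/*
 * Expand a 1-bit-per-IRQ edge/level map into the 2-bits-per-IRQ
 * GICD_ICFGR layout, setting the odd bit of each pair for
 * edge-triggered interrupts.
 */
static uint32_t expand(uint16_t cfg_bits)
{
	uint32_t reg = 0;

	for (int i = 0; i < 16; i++)
		if (cfg_bits & (1u << i))
			reg |= 2u << (2 * i);   /* edge-triggered: odd bit of the pair */
	return reg;
}

int main(void)
{
	/* IRQ 0 edge, IRQ 1 level -> bits [1:0] = 0b10, bits [3:2] = 0b00 */
	assert(expand(0x1) == 0x2);
	return 0;
}
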
227 static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu, in handle_mmio_sgi_reg() argument
235 vgic_dispatch_sgi(vcpu, reg); in handle_mmio_sgi_reg()
236 vgic_update_state(vcpu->kvm); in handle_mmio_sgi_reg()
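
handle_mmio_sgi_reg() forwards a GICD_SGIR write to vgic_dispatch_sgi(). The fields it carries are the target list filter in bits [25:24], the CPU target list in bits [23:16] and the SGI number in bits [3:0]; a decoding sketch with illustrative struct and function names:

#include <assert.h>
#include <stdint.h>

struct sgir {
	uint8_t target_filter;   /* bits [25:24]: 0=list, 1=all-but-self, 2=self */
	uint8_t target_list;     /* bits [23:16]: one bit per target CPU */
	uint8_t sgi_id;          /* bits [3:0]:   SGI number 0..15 */
};

static struct sgir sgir_decode(uint32_t reg)
{
	struct sgir s = {
		.target_filter = (reg >> 24) & 0x3,
		.target_list   = (reg >> 16) & 0xff,
		.sgi_id        = reg & 0xf,
	};
	return s;
}

int main(void)
{
	/* SGI 5 to CPUs 0 and 2 via an explicit target list. */
	struct sgir s = sgir_decode((0u << 24) | (0x5u << 16) | 5u);

	assert(s.target_filter == 0 && s.target_list == 0x5 && s.sgi_id == 5);
	return 0;
}
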
244 static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu, in read_set_clear_sgi_pend_reg() argument
248 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in read_set_clear_sgi_pend_reg()
252 int vcpu_id = vcpu->vcpu_id; in read_set_clear_sgi_pend_reg()
266 static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu, in write_set_clear_sgi_pend_reg() argument
270 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in write_set_clear_sgi_pend_reg()
274 int vcpu_id = vcpu->vcpu_id; in write_set_clear_sgi_pend_reg()
297 vgic_update_state(vcpu->kvm); in write_set_clear_sgi_pend_reg()
302 static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu, in handle_mmio_sgi_set() argument
307 return read_set_clear_sgi_pend_reg(vcpu, mmio, offset); in handle_mmio_sgi_set()
309 return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true); in handle_mmio_sgi_set()
312 static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu, in handle_mmio_sgi_clear() argument
317 return read_set_clear_sgi_pend_reg(vcpu, mmio, offset); in handle_mmio_sgi_clear()
319 return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false); in handle_mmio_sgi_clear()
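
The GICD_SPENDSGIR/GICD_CPENDSGIR handlers above operate on per-source pending state: for each target VCPU there is one byte per SGI, and each bit in that byte marks a pending SGI from one source CPU. A simplified stand-in for the state behind vgic_get_sgi_sources():

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define NR_VCPUS 4
#define NR_SGIS  16

static uint8_t sgi_sources[NR_VCPUS][NR_SGIS];

static void sgi_pend_write(int vcpu_id, int sgi, uint8_t val, int set)
{
	if (set)
		sgi_sources[vcpu_id][sgi] |= val;    /* SPENDSGIR semantics */
	else
		sgi_sources[vcpu_id][sgi] &= ~val;   /* CPENDSGIR semantics */
}

int main(void)
{
	memset(sgi_sources, 0, sizeof(sgi_sources));
	sgi_pend_write(1, 3, 1 << 0, 1);     /* SGI 3 pending on VCPU 1 from CPU 0 */
	sgi_pend_write(1, 3, 1 << 2, 1);     /* ...and from CPU 2 */
	sgi_pend_write(1, 3, 1 << 0, 0);     /* clear the CPU 0 source */
	assert(sgi_sources[1][3] == (1 << 2));
	return 0;
}
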
407 static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg) in vgic_dispatch_sgi() argument
409 struct kvm *kvm = vcpu->kvm; in vgic_dispatch_sgi()
415 vcpu_id = vcpu->vcpu_id; in vgic_dispatch_sgi()
436 kvm_for_each_vcpu(c, vcpu, kvm) { in vgic_dispatch_sgi()
439 vgic_dist_irq_set_pending(vcpu, sgi); in vgic_dispatch_sgi()
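
vgic_dispatch_sgi() turns the SGIR target list filter into a set of destination VCPUs before marking the SGI pending on each of them. A sketch of that target selection under an assumed fixed CPU count; the kernel instead walks VCPUs with kvm_for_each_vcpu() and also records the sender in the per-SGI source byte:

#include <assert.h>
#include <stdint.h>

#define NR_CPUS 4

static uint8_t sgi_targets(int filter, uint8_t target_list, int self)
{
	uint8_t all = (1u << NR_CPUS) - 1;

	switch (filter) {
	case 0:  return target_list;            /* explicit CPU target list */
	case 1:  return all & ~(1u << self);    /* all CPUs except the sender */
	case 2:  return 1u << self;             /* only the sender itself */
	default: return 0;                      /* reserved: no targets */
	}
}

int main(void)
{
	assert(sgi_targets(0, 0x6, 0) == 0x6);
	assert(sgi_targets(1, 0x0, 1) == 0xd);   /* 0b1101: everyone but CPU 1 */
	assert(sgi_targets(2, 0x0, 3) == 0x8);
	return 0;
}
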
449 static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq) in vgic_v2_queue_sgi() argument
451 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_v2_queue_sgi()
453 int vcpu_id = vcpu->vcpu_id; in vgic_v2_queue_sgi()
459 if (vgic_queue_irq(vcpu, c, irq)) in vgic_v2_queue_sgi()
472 vgic_dist_irq_clear_pending(vcpu, irq); in vgic_v2_queue_sgi()
473 vgic_cpu_irq_clear(vcpu, irq); in vgic_v2_queue_sgi()
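
vgic_v2_queue_sgi() tries to queue the SGI once per pending source and only clears the distributor pending state once every source has been drained into a list register. A toy model of that loop; queue_one() stands in for vgic_queue_irq(), which may fail when no list register is free:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool queue_one(int source, int irq)
{
	(void)source; (void)irq;
	return true;                 /* assume a free list register here */
}

static bool queue_sgi(uint8_t *sources, int irq)
{
	for (int c = 0; c < 8; c++) {
		if (!(*sources & (1 << c)))
			continue;
		if (queue_one(c, irq))
			*sources &= ~(1 << c);   /* this source is now in an LR */
	}
	return *sources == 0;        /* true: clear distributor pending state */
}

int main(void)
{
	uint8_t sources = 0x5;       /* pending from source CPUs 0 and 2 */

	assert(queue_sgi(&sources, 1) && sources == 0);
	return 0;
}
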
544 static void vgic_v2_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source) in vgic_v2_add_sgi_source() argument
546 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_v2_add_sgi_source()
548 *vgic_get_sgi_sources(dist, vcpu->vcpu_id, irq) |= 1 << source; in vgic_v2_add_sgi_source()
573 static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu, in handle_cpu_mmio_misc() argument
581 vgic_get_vmcr(vcpu, &vmcr); in handle_cpu_mmio_misc()
607 vgic_set_vmcr(vcpu, &vmcr); in handle_cpu_mmio_misc()
614 static bool handle_mmio_abpr(struct kvm_vcpu *vcpu, in handle_mmio_abpr() argument
617 return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT); in handle_mmio_abpr()
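
handle_cpu_mmio_misc() and handle_mmio_abpr() expose a few banked CPU-interface registers (CTLR, PMR, BPR and the aliased ABPR) by reading and writing one shadow structure through vgic_get_vmcr()/vgic_set_vmcr(). A rough sketch of that multiplexing; the struct, offsets and helper here are illustrative rather than the kernel's vgic_vmcr handling:

#include <assert.h>
#include <stdint.h>

struct vmcr_shadow {
	uint32_t ctlr;    /* GICC_CTLR */
	uint32_t pmr;     /* GICC_PMR  */
	uint32_t bpr;     /* GICC_BPR  */
	uint32_t abpr;    /* GICC_ABPR, the aliased binary point */
};

static uint32_t *vmcr_field(struct vmcr_shadow *v, unsigned int offset)
{
	switch (offset) {
	case 0x00: return &v->ctlr;
	case 0x04: return &v->pmr;
	case 0x08: return &v->bpr;
	case 0x1c: return &v->abpr;
	default:   return 0;
	}
}

int main(void)
{
	struct vmcr_shadow v = { 0 };

	*vmcr_field(&v, 0x04) = 0xf8;    /* a GICC_PMR write */
	assert(v.pmr == 0xf8);
	return 0;
}
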
620 static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu, in handle_cpu_mmio_ident() argument
671 struct kvm_vcpu *vcpu, *tmp_vcpu; in vgic_attr_regs_access() local
691 vcpu = kvm_get_vcpu(dev->kvm, cpuid); in vgic_attr_regs_access()
744 r->handle_mmio(vcpu, &mmio, offset); in vgic_attr_regs_access()
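
vgic_attr_regs_access() lets userspace read and write these registers through the KVM device-attribute API by building a synthetic MMIO access and dispatching it to the same per-range handle_mmio callback used for guest traps. A stand-alone sketch of that dispatch; the range table, mmio struct and handler are stand-ins, not the kernel's types:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct fake_mmio { uint32_t phys_addr; uint32_t *data; bool is_write; };

struct mmio_range {
	uint32_t base, len;
	bool (*handle_mmio)(int vcpu_id, struct fake_mmio *mmio, uint32_t offset);
};

static bool handle_enable(int vcpu_id, struct fake_mmio *mmio, uint32_t offset)
{
	/* A real handler would read or update emulated register state here. */
	(void)vcpu_id; (void)mmio; (void)offset;
	return true;
}

static const struct mmio_range ranges[] = {
	{ .base = 0x100, .len = 0x80, .handle_mmio = handle_enable },
};

static bool access_reg(int vcpu_id, uint32_t offset, uint32_t *val, bool is_write)
{
	struct fake_mmio mmio = { .phys_addr = offset, .data = val, .is_write = is_write };

	for (size_t i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++) {
		const struct mmio_range *r = &ranges[i];

		if (offset >= r->base && offset < r->base + r->len)
			return r->handle_mmio(vcpu_id, &mmio, offset - r->base);
	}
	return false;
}

int main(void)
{
	uint32_t val = 0;

	assert(access_reg(0, 0x104, &val, false));
	return 0;
}
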