This source file includes the following definitions:
- vgic_get_lpi
- vgic_get_irq
- vgic_irq_release
- __vgic_put_lpi_locked
- vgic_put_irq
- vgic_flush_pending_lpis
- vgic_irq_set_phys_pending
- vgic_get_phys_line_level
- vgic_irq_set_phys_active
- vgic_target_oracle
- vgic_irq_cmp
- vgic_sort_ap_list
- vgic_validate_injection
- vgic_queue_irq_unlock
- kvm_vgic_inject_irq
- kvm_vgic_map_irq
- kvm_vgic_unmap_irq
- kvm_vgic_map_phys_irq
- kvm_vgic_reset_mapped_irq
- kvm_vgic_unmap_phys_irq
- kvm_vgic_set_owner
- vgic_prune_ap_list
- vgic_fold_lr_state
- vgic_populate_lr
- vgic_clear_lr
- vgic_set_underflow
- compute_ap_list_depth
- vgic_flush_lr_state
- can_access_vgic_from_kernel
- vgic_save_state
- kvm_vgic_sync_hwstate
- vgic_restore_state
- kvm_vgic_flush_hwstate
- kvm_vgic_load
- kvm_vgic_put
- kvm_vgic_vmcr_sync
- kvm_vgic_vcpu_pending_irq
- vgic_kick_vcpus
- kvm_vgic_map_is_active

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list_sort.h>
#include <linux/nospec.h>

#include <asm/kvm_hyp.h>

#include "vgic.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct vgic_global kvm_vgic_global_state __ro_after_init = {
	.gicv3_cpuif = STATIC_KEY_FALSE_INIT,
};

/*
 * Locking order, as used throughout this file:
 *
 *   vgic_cpu->ap_list_lock		must be taken with IRQs disabled
 *     kvm->arch.vgic.lpi_list_lock	must be taken with IRQs disabled
 *       vgic_irq->irq_lock		must be taken with IRQs disabled
 *
 * When the ap_list_locks of two VCPUs have to be taken at the same time
 * (see vgic_prune_ap_list()), the lock of the VCPU with the lower vcpu_id
 * is always taken first.
 */

/*
 * Iterate over the VM's list of mapped LPIs to find the one with a
 * matching interrupt ID and return a reference to the IRQ structure.
 */
static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		if (irq->intid != intid)
			continue;

		/*
		 * This increases the refcount, the caller is expected to
		 * call vgic_put_irq() later once it's finished with the IRQ.
		 */
		vgic_get_irq_kref(irq);
		goto out_unlock;
	}
	irq = NULL;

out_unlock:
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	return irq;
}

/*
 * This looks up the virtual interrupt ID to get the corresponding
 * struct vgic_irq. It also increases the refcount, so any caller is
 * expected to call vgic_put_irq() once it's finished with this IRQ.
 */
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
			      u32 intid)
{
	/* SGIs and PPIs */
	if (intid <= VGIC_MAX_PRIVATE) {
		intid = array_index_nospec(intid, VGIC_MAX_PRIVATE + 1);
		return &vcpu->arch.vgic_cpu.private_irqs[intid];
	}

	/* SPIs */
	if (intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {
		intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS);
		return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
	}

	/* LPIs */
	if (intid >= VGIC_MIN_LPI)
		return vgic_get_lpi(kvm, intid);

	WARN(1, "Looking up struct vgic_irq for reserved INTID");
	return NULL;
}
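
As a usage note: every successful vgic_get_irq() must be balanced by a vgic_put_irq() once the caller is done with the interrupt, since LPI descriptors are reference counted and freed on the last put. A minimal sketch of that pattern; the helper vgic_example_irq_enabled() is invented here for illustration and is not part of this file:

	static bool vgic_example_irq_enabled(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 intid)
	{
		struct vgic_irq *irq;
		unsigned long flags;
		bool enabled;

		irq = vgic_get_irq(kvm, vcpu, intid);	/* takes a reference, may return NULL */
		if (!irq)
			return false;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		enabled = irq->enabled;			/* per-IRQ state is read under irq_lock */
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(kvm, irq);			/* balances the reference taken above */
		return enabled;
	}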

/*
 * We can't do anything in here, since we lack the kvm pointer to
 * lock and remove the item from the lpi_list. So we keep this function
 * empty and use the return value of kref_put() to trigger the freeing.
 */
static void vgic_irq_release(struct kref *ref)
{
}

/*
 * Drop the refcount on the LPI. Must be called with lpi_list_lock held.
 */
void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	if (!kref_put(&irq->refcount, vgic_irq_release))
		return;

	list_del(&irq->lpi_list);
	dist->lpi_list_count--;

	kfree(irq);
}

void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	unsigned long flags;

	if (irq->intid < VGIC_MIN_LPI)
		return;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
	__vgic_put_lpi_locked(kvm, irq);
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
}

void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq, *tmp;
	unsigned long flags;

	raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
		if (irq->intid >= VGIC_MIN_LPI) {
			raw_spin_lock(&irq->irq_lock);
			list_del(&irq->ap_list);
			irq->vcpu = NULL;
			raw_spin_unlock(&irq->irq_lock);
			vgic_put_irq(vcpu->kvm, irq);
		}
	}

	raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
}

void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending)
{
	WARN_ON(irq_set_irqchip_state(irq->host_irq,
				      IRQCHIP_STATE_PENDING,
				      pending));
}

bool vgic_get_phys_line_level(struct vgic_irq *irq)
{
	bool line_level;

	BUG_ON(!irq->hw);

	if (irq->get_input_level)
		return irq->get_input_level(irq->intid);

	WARN_ON(irq_get_irqchip_state(irq->host_irq,
				      IRQCHIP_STATE_PENDING,
				      &line_level));
	return line_level;
}

/* Set/Clear the physical active state */
void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
{
	BUG_ON(!irq->hw);
	WARN_ON(irq_set_irqchip_state(irq->host_irq,
				      IRQCHIP_STATE_ACTIVE,
				      active));
}

/**
 * vgic_target_oracle - compute the target vcpu for an irq
 *
 * @irq:	The irq to route. Must be already locked.
 *
 * Based on the current state of the interrupt (enabled, pending,
 * active, vcpu and target_vcpu), compute the next vcpu this interrupt
 * should be given to. Return NULL if it shouldn't be injected at all.
 *
 * Requires the IRQ lock to be held.
 */
static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
	lockdep_assert_held(&irq->irq_lock);

	/* If the interrupt is active, it must stay on the current vcpu */
	if (irq->active)
		return irq->vcpu ? : irq->target_vcpu;

	/*
	 * If the IRQ is not active but enabled and pending, we should direct
	 * it to its configured target VCPU.
	 * If the distributor is disabled, pending interrupts shouldn't be
	 * forwarded.
	 */
	if (irq->enabled && irq_is_pending(irq)) {
		if (unlikely(irq->target_vcpu &&
			     !irq->target_vcpu->kvm->arch.vgic.enabled))
			return NULL;

		return irq->target_vcpu;
	}

	/*
	 * If neither active nor pending and enabled, then this IRQ should not
	 * be queued to any VCPU.
	 */
	return NULL;
}

/*
 * The order of items in the ap_lists defines how we'll pack things in LRs as
 * well, so here we compare the entries and return the result:
 *
 * Active interrupts always sort first, since they cannot be pushed out of
 * the LRs once the guest has started handling them. After that, pending
 * (and enabled) interrupts sort before non-pending ones, and ties are
 * broken by priority (a lower value means a higher priority and sorts first).
 *
 * Return negative if "a" sorts before "b", 0 to preserve the order, and
 * positive to sort "b" before "a".
 */
static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
	struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
	bool penda, pendb;
	int ret;

	/*
	 * list_sort may call this function with the same element when
	 * the list is fairly long.
	 */
	if (unlikely(irqa == irqb))
		return 0;

	raw_spin_lock(&irqa->irq_lock);
	raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);

	if (irqa->active || irqb->active) {
		ret = (int)irqb->active - (int)irqa->active;
		goto out;
	}

	penda = irqa->enabled && irq_is_pending(irqa);
	pendb = irqb->enabled && irq_is_pending(irqb);

	if (!penda || !pendb) {
		ret = (int)pendb - (int)penda;
		goto out;
	}

	/* Both pending and enabled, sort by priority */
	ret = irqa->priority - irqb->priority;
out:
	raw_spin_unlock(&irqb->irq_lock);
	raw_spin_unlock(&irqa->irq_lock);
	return ret;
}

/* Must be called with the ap_list_lock held */
static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	lockdep_assert_held(&vgic_cpu->ap_list_lock);

	list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
}

/*
 * Only valid injection if changing level for level-triggered IRQs or for a
 * rising edge, and in-kernel connected IRQ lines can only be controlled by
 * their owner.
 */
static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
{
	if (irq->owner != owner)
		return false;

	switch (irq->config) {
	case VGIC_CONFIG_LEVEL:
		return irq->line_level != level;
	case VGIC_CONFIG_EDGE:
		return level;
	}

	return false;
}

/*
 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
 * Do the queuing if necessary, taking the right locks in the right order.
 * Returns true when the IRQ was queued, false otherwise.
 *
 * Needs to be entered with the IRQ lock already held, but will return
 * with all locks dropped.
 */
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
			   unsigned long flags)
{
	struct kvm_vcpu *vcpu;

	lockdep_assert_held(&irq->irq_lock);

retry:
	vcpu = vgic_target_oracle(irq);
	if (irq->vcpu || !vcpu) {
		/*
		 * If this IRQ is already on a VCPU's ap_list, then it
		 * cannot be moved or modified and there is no more work for
		 * us to do.
		 *
		 * Otherwise, if the irq is not pending and enabled, it does
		 * not need to be inserted into an ap_list and there is also
		 * no more work for us to do.
		 */
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		/*
		 * We have to kick the VCPU here, because we could be
		 * queueing an edge-triggered interrupt for which we
		 * get no EOI maintenance interrupt. In that case,
		 * while the IRQ is already on the VCPU's AP list, the
		 * VCPU could have EOI'ed the original interrupt and
		 * won't see this one until it exits for some other
		 * reason.
		 */
		if (vcpu) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
			kvm_vcpu_kick(vcpu);
		}
		return false;
	}

	/*
	 * We must unlock the irq lock to take the ap_list_lock where
	 * we are going to insert this new pending interrupt.
	 */
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	/* someone can do stuff here, which we re-check below */

	raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
	raw_spin_lock(&irq->irq_lock);

	/*
	 * Did something change behind our backs?
	 *
	 * There are two cases:
	 * 1) The irq lost its pending state or was disabled behind our
	 *    backs and/or it was queued to another VCPU's ap_list.
	 * 2) Someone changed the affinity on this irq behind our
	 *    backs and we are now holding the wrong ap_list_lock.
	 *
	 * In both cases, drop the locks and retry.
	 */
	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
		raw_spin_unlock(&irq->irq_lock);
		raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
					   flags);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		goto retry;
	}

	/*
	 * Grab a reference to the irq to reflect the fact that it is
	 * now in the ap_list.
	 */
	vgic_get_irq_kref(irq);
	list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
	irq->vcpu = vcpu;

	raw_spin_unlock(&irq->irq_lock);
	raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);

	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return true;
}

/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @intid:   The INTID to inject a new state to.
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *			      false: to ignore the call
 *	     Level-sensitive  true:  raise the input signal
 *			      false: lower the input signal
 * @owner:   The opaque pointer to the owner of the IRQ being raised to verify
 *           that the caller is allowed to inject this IRQ.
 *
 * The VGIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts.  You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
			bool level, void *owner)
{
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;
	unsigned long flags;
	int ret;

	trace_vgic_update_irq_pending(cpuid, intid, level);

	ret = vgic_lazy_init(kvm);
	if (ret)
		return ret;

	vcpu = kvm_get_vcpu(kvm, cpuid);
	if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
		return -EINVAL;

	irq = vgic_get_irq(kvm, vcpu, intid);
	if (!irq)
		return -EINVAL;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (!vgic_validate_injection(irq, level, owner)) {
		/* Nothing to see here, move along... */
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(kvm, irq);
		return 0;
	}

	if (irq->config == VGIC_CONFIG_LEVEL)
		irq->line_level = level;
	else
		irq->pending_latch = true;

	vgic_queue_irq_unlock(kvm, irq, flags);
	vgic_put_irq(kvm, irq);

	return 0;
}
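
A typical caller raises and lowers a level-sensitive line through this entry point. The sketch below is illustrative only: example_assert_spi(), the choice of INTID 32 (the first SPI) and the dev cookie are assumptions, not taken from this file.

	static void example_assert_spi(struct kvm *kvm, void *dev, bool asserted)
	{
		/*
		 * For SPIs the cpuid argument is not used to pick the target;
		 * the owner cookie must match what was registered with
		 * kvm_vgic_set_owner() (or be NULL for an unowned line).
		 */
		WARN_ON(kvm_vgic_inject_irq(kvm, 0, 32, asserted, dev));
	}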

/* @irq->irq_lock must be held */
static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
			    unsigned int host_irq,
			    bool (*get_input_level)(int vintid))
{
	struct irq_desc *desc;
	struct irq_data *data;

	/*
	 * Find the physical IRQ number corresponding to @host_irq
	 */
	desc = irq_to_desc(host_irq);
	if (!desc) {
		kvm_err("%s: no interrupt descriptor\n", __func__);
		return -EINVAL;
	}
	data = irq_desc_get_irq_data(desc);
	while (data->parent_data)
		data = data->parent_data;

	irq->hw = true;
	irq->host_irq = host_irq;
	irq->hwintid = data->hwirq;
	irq->get_input_level = get_input_level;
	return 0;
}

/* @irq->irq_lock must be held */
static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
{
	irq->hw = false;
	irq->hwintid = 0;
	irq->get_input_level = NULL;
}

int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
			  u32 vintid, bool (*get_input_level)(int vintid))
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	unsigned long flags;
	int ret;

	BUG_ON(!irq);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level);
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return ret;
}
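
In-kernel users such as the virtual arch timer map a host interrupt to a guest private interrupt so that the active state follows the hardware. A hedged sketch, assuming hypothetical names: example_timer_get_input_level(), example_map_timer_ppi() and the IRQ numbers are not defined in this file.

	/* Hypothetical level callback: report whether the emulated line is high. */
	static bool example_timer_get_input_level(int vintid)
	{
		return false;	/* would query the device model's line state */
	}

	static int example_map_timer_ppi(struct kvm_vcpu *vcpu,
					 unsigned int host_irq, u32 guest_ppi)
	{
		/*
		 * After this, guest deactivation is propagated to the physical
		 * interrupt; kvm_vgic_unmap_phys_irq() undoes the mapping.
		 */
		return kvm_vgic_map_phys_irq(vcpu, host_irq, guest_ppi,
					     example_timer_get_input_level);
	}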

/**
 * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
 * @vcpu: The VCPU pointer
 * @vintid: The INTID of the interrupt
 *
 * Reset the active and pending states of a mapped interrupt.  Kernel
 * subsystems injecting mapped interrupts should reset their interrupt lines
 * when we are doing a reset of the VM.
 */
void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	unsigned long flags;

	if (!irq->hw)
		goto out;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->active = false;
	irq->pending_latch = false;
	irq->line_level = false;
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
out:
	vgic_put_irq(vcpu->kvm, irq);
}

int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
{
	struct vgic_irq *irq;
	unsigned long flags;

	if (!vgic_initialized(vcpu->kvm))
		return -EAGAIN;

	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	BUG_ON(!irq);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	kvm_vgic_unmap_irq(irq);
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return 0;
}

/**
 * kvm_vgic_set_owner - Set the owner of an interrupt for a VM
 *
 * @vcpu:   Pointer to the VCPU (used for PPIs)
 * @intid:  The virtual INTID identifying the interrupt (PPI or SPI)
 * @owner:  Opaque pointer to the owner
 *
 * Returns 0 if intid is not already used by another in-kernel device and the
 * owner is set, otherwise returns an error code.
 */
int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
{
	struct vgic_irq *irq;
	unsigned long flags;
	int ret = 0;

	if (!vgic_initialized(vcpu->kvm))
		return -EAGAIN;

	/* SGIs and LPIs cannot be wired up to any device */
	if (!irq_is_ppi(intid) && !vgic_valid_spi(vcpu->kvm, intid))
		return -EINVAL;

	irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->owner && irq->owner != owner)
		ret = -EEXIST;
	else
		irq->owner = owner;
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	return ret;
}
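
The owner cookie registered here is the same pointer that must later be passed as @owner to kvm_vgic_inject_irq(); injections with a different cookie are rejected by vgic_validate_injection(). A minimal sketch, with example_claim_ppi() and the chosen PPI number invented for illustration:

	static int example_claim_ppi(struct kvm_vcpu *vcpu, void *dev_cookie)
	{
		const unsigned int ppi = 27;	/* any PPI in the 16..31 range */
		int ret;

		ret = kvm_vgic_set_owner(vcpu, ppi, dev_cookie);
		if (ret)	/* -EEXIST if another in-kernel user already owns it */
			return ret;

		/* From now on, injections must use the same cookie. */
		return kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, ppi, true, dev_cookie);
	}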

/**
 * vgic_prune_ap_list - Remove non-relevant interrupts from the list
 *
 * @vcpu: The VCPU pointer
 *
 * Go over the list of "interesting" interrupts, and prune those that we
 * won't have to consider in the near future.
 */
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq, *tmp;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

retry:
	raw_spin_lock(&vgic_cpu->ap_list_lock);

	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
		bool target_vcpu_needs_kick = false;

		raw_spin_lock(&irq->irq_lock);

		BUG_ON(vcpu != irq->vcpu);

		target_vcpu = vgic_target_oracle(irq);

		if (!target_vcpu) {
			/*
			 * We don't need to process this interrupt any
			 * further, move it off the list.
			 */
			list_del(&irq->ap_list);
			irq->vcpu = NULL;
			raw_spin_unlock(&irq->irq_lock);

			/*
			 * This vgic_put_irq call matches the
			 * vgic_get_irq_kref in vgic_queue_irq_unlock,
			 * where we added the LPI to the ap_list. As
			 * we remove the irq from the list, we also
			 * drop the refcount.
			 */
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		if (target_vcpu == vcpu) {
			/* We're on the right CPU */
			raw_spin_unlock(&irq->irq_lock);
			continue;
		}

		/* This interrupt looks like it has to be migrated. */

		raw_spin_unlock(&irq->irq_lock);
		raw_spin_unlock(&vgic_cpu->ap_list_lock);

		/*
		 * Ensure locking order by always locking the smallest
		 * ID first.
		 */
		if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
			vcpuA = vcpu;
			vcpuB = target_vcpu;
		} else {
			vcpuA = target_vcpu;
			vcpuB = vcpu;
		}

		raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
		raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
				     SINGLE_DEPTH_NESTING);
		raw_spin_lock(&irq->irq_lock);

		/*
		 * If the affinity has been preserved, move the
		 * interrupt around. Otherwise, it means things have
		 * changed while the interrupt was unlocked, and we
		 * need to replay this.
		 *
		 * In all cases, we cannot trust the list not to have
		 * changed, so we restart from the beginning.
		 */
		if (target_vcpu == vgic_target_oracle(irq)) {
			struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

			list_del(&irq->ap_list);
			irq->vcpu = target_vcpu;
			list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
			target_vcpu_needs_kick = true;
		}

		raw_spin_unlock(&irq->irq_lock);
		raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
		raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);

		if (target_vcpu_needs_kick) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
			kvm_vcpu_kick(target_vcpu);
		}

		goto retry;
	}

	raw_spin_unlock(&vgic_cpu->ap_list_lock);
}

static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_fold_lr_state(vcpu);
	else
		vgic_v3_fold_lr_state(vcpu);
}

/* Requires the irq_lock to be held. */
static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
				    struct vgic_irq *irq, int lr)
{
	lockdep_assert_held(&irq->irq_lock);

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_populate_lr(vcpu, irq, lr);
	else
		vgic_v3_populate_lr(vcpu, irq, lr);
}

static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_clear_lr(vcpu, lr);
	else
		vgic_v3_clear_lr(vcpu, lr);
}

static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_underflow(vcpu);
	else
		vgic_v3_set_underflow(vcpu);
}

/* Requires the ap_list_lock to be held. */
static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
				 bool *multi_sgi)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count = 0;

	*multi_sgi = false;

	lockdep_assert_held(&vgic_cpu->ap_list_lock);

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		int w;

		raw_spin_lock(&irq->irq_lock);
		/* GICv2 SGIs can count for more than one LR */
		w = vgic_irq_get_lr_count(irq);
		raw_spin_unlock(&irq->irq_lock);

		count += w;
		*multi_sgi |= (w > 1);
	}
	return count;
}

/* Requires the VCPU's ap_list_lock to be held. */
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count;
	bool multi_sgi;
	u8 prio = 0xff;

	lockdep_assert_held(&vgic_cpu->ap_list_lock);

	count = compute_ap_list_depth(vcpu, &multi_sgi);
	if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
		vgic_sort_ap_list(vcpu);

	count = 0;

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		raw_spin_lock(&irq->irq_lock);

		/*
		 * If we have multi-SGIs in the pipeline, we need to
		 * guarantee that they are all seen before any IRQ of
		 * lower priority. In that case, we need to filter out
		 * these interrupts by exiting early. This is easy as
		 * the AP list has been sorted already.
		 */
		if (multi_sgi && irq->priority > prio) {
			_raw_spin_unlock(&irq->irq_lock);
			break;
		}

		if (likely(vgic_target_oracle(irq) == vcpu)) {
			vgic_populate_lr(vcpu, irq, count++);

			if (irq->source)
				prio = irq->priority;
		}

		raw_spin_unlock(&irq->irq_lock);

		if (count == kvm_vgic_global_state.nr_lr) {
			if (!list_is_last(&irq->ap_list,
					  &vgic_cpu->ap_list_head))
				vgic_set_underflow(vcpu);
			break;
		}
	}

	vcpu->arch.vgic_cpu.used_lrs = count;

	/* Nuke remaining LRs */
	for (; count < kvm_vgic_global_state.nr_lr; count++)
		vgic_clear_lr(vcpu, count);
}

static inline bool can_access_vgic_from_kernel(void)
{
	/*
	 * GICv2 can always be accessed from the kernel because it is
	 * memory-mapped, and VHE systems can access GICv3 EL2 system
	 * registers.
	 */
	return !static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) || has_vhe();
}

static inline void vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		vgic_v2_save_state(vcpu);
	else
		__vgic_v3_save_state(vcpu);
}

/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	WARN_ON(vgic_v4_sync_hwstate(vcpu));

	/* An empty ap_list_head implies used_lrs == 0 */
	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
		return;

	if (can_access_vgic_from_kernel())
		vgic_save_state(vcpu);

	if (vgic_cpu->used_lrs)
		vgic_fold_lr_state(vcpu);
	vgic_prune_ap_list(vcpu);
}

static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		vgic_v2_restore_state(vcpu);
	else
		__vgic_v3_restore_state(vcpu);
}

/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	WARN_ON(vgic_v4_flush_hwstate(vcpu));

	/*
	 * If there are no virtual interrupts active or pending for this
	 * VCPU, then there is no work to do and we can bail out without
	 * taking any lock.  There is a potential race with someone injecting
	 * interrupts to a VCPU, but it is a benign race as the VCPU will
	 * either observe the new interrupt before or after doing this check,
	 * and introducing additional synchronization mechanism doesn't change
	 * this.
	 *
	 * Note that we still need to go through the whole thing if anything
	 * can be directly injected (GICv4).
	 */
	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
	    !vgic_supports_direct_msis(vcpu->kvm))
		return;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) {
		raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
		vgic_flush_lr_state(vcpu);
		raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
	}

	if (can_access_vgic_from_kernel())
		vgic_restore_state(vcpu);
}
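
For orientation, these two hooks bracket every guest entry: the VCPU run loop flushes the ap_list into list registers before entering the guest and folds the hardware state back afterwards. A simplified, hedged sketch of that ordering; example_vcpu_run_once() is invented for illustration and the real arch run loop does considerably more work:

	static void example_vcpu_run_once(struct kvm_vcpu *vcpu)
	{
		local_irq_disable();

		kvm_vgic_flush_hwstate(vcpu);	/* ap_list -> list registers, before entry */

		/* ... enter the guest and run until the next exit ... */

		kvm_vgic_sync_hwstate(vcpu);	/* list registers -> ap_list, after exit */

		local_irq_enable();
	}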

void kvm_vgic_load(struct kvm_vcpu *vcpu)
{
	if (unlikely(!vgic_initialized(vcpu->kvm)))
		return;

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_load(vcpu);
	else
		vgic_v3_load(vcpu);
}

void kvm_vgic_put(struct kvm_vcpu *vcpu)
{
	if (unlikely(!vgic_initialized(vcpu->kvm)))
		return;

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_put(vcpu);
	else
		vgic_v3_put(vcpu);
}

void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
{
	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
		return;

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_vmcr_sync(vcpu);
	else
		vgic_v3_vmcr_sync(vcpu);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	bool pending = false;
	unsigned long flags;
	struct vgic_vmcr vmcr;

	if (!vcpu->kvm->arch.vgic.enabled)
		return false;

	if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last)
		return true;

	vgic_get_vmcr(vcpu, &vmcr);

	raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		raw_spin_lock(&irq->irq_lock);
		pending = irq_is_pending(irq) && irq->enabled &&
			  !irq->active &&
			  irq->priority < vmcr.pmr;
		raw_spin_unlock(&irq->irq_lock);

		if (pending)
			break;
	}

	raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);

	return pending;
}

void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * the good news.
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu)) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
			kvm_vcpu_kick(vcpu);
		}
	}
}

bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
{
	struct vgic_irq *irq;
	bool map_is_active;
	unsigned long flags;

	if (!vgic_initialized(vcpu->kvm))
		return false;

	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	map_is_active = irq->hw && irq->active;
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return map_is_active;
}
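
A mapped-interrupt user can poll this to decide whether the guest is still handling its interrupt before reprogramming the device. A hedged sketch; example_device_can_rearm() and its semantics are purely illustrative:

	static bool example_device_can_rearm(struct kvm_vcpu *vcpu, u32 guest_intid)
	{
		/*
		 * kvm_vgic_map_is_active() returns true while the mapped
		 * interrupt is active in the guest, i.e. it has been acked
		 * but not yet deactivated.
		 */
		return !kvm_vgic_map_is_active(vcpu, guest_intid);
	}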