This source file includes the following definitions:
- ics_deliver_irq
- ics_check_resend
- write_xive
- kvmppc_xics_set_xive
- kvmppc_xics_get_xive
- kvmppc_xics_int_on
- kvmppc_xics_int_off
- icp_try_update
- icp_check_resend
- icp_try_to_deliver
- icp_deliver_irq
- icp_down_cppr
- kvmppc_h_xirr
- kvmppc_h_ipi
- kvmppc_h_ipoll
- kvmppc_h_cppr
- ics_eoi
- kvmppc_h_eoi
- kvmppc_xics_rm_complete
- kvmppc_xics_hcall
- xics_debugfs_irqmap
- xics_debug_show
- xics_debugfs_init
- kvmppc_xics_create_ics
- kvmppc_xics_create_icp
- kvmppc_xics_get_icp
- kvmppc_xics_set_icp
- xics_get_source
- xics_set_source
- kvmppc_xics_set_irq
- xics_set_attr
- xics_get_attr
- xics_has_attr
- kvmppc_xics_free
- kvmppc_xics_create
- kvmppc_xics_init
- kvmppc_xics_connect_vcpu
- kvmppc_xics_free_icp
- kvmppc_xics_set_mapped
- kvmppc_xics_clr_mapped
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/anon_inodes.h>
#include <linux/spinlock.h>

#include <linux/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debugfs.h>
#include <asm/time.h>

#include <linux/seq_file.h>

#include "book3s_xics.h"

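/*
 * Debug printks are compiled out by default; flip the "#if 1" below to
 * route XICS_DBG through trace_printk().
 */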
#if 1
#define XICS_DBG(fmt...) do { } while (0)
#else
#define XICS_DBG(fmt...) trace_printk(fmt)
#endif

#define ENABLE_REALMODE	true
#define DEBUG_REALMODE	false

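/*
 * Locking: each ICS is protected by its own arch spinlock, taken with
 * hard interrupts disabled.  The ICP state fits in one 64-bit word
 * (union kvmppc_icp_state) and is updated lock-free via cmpxchg64()
 * in icp_try_update().
 */
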
static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq, bool check_resend);

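/*
 * Take an interrupt from a source controller (ICS): update the source's
 * P/Q state and, when it transitions to "presented", push the interrupt
 * up to the target ICP.
 */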
static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u16 src;
	u32 pq_old, pq_new;

	XICS_DBG("ics deliver %#x (level: %d)\n", irq, level);

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("ics_deliver_irq: IRQ 0x%06x not found !\n", irq);
		return -EINVAL;
	}
	state = &ics->irq_state[src];
	if (!state->exists)
		return -EINVAL;

	if (level == KVM_INTERRUPT_SET_LEVEL || level == KVM_INTERRUPT_SET)
		level = 1;
	else if (level == KVM_INTERRUPT_UNSET)
		level = 0;
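
	/* Any other value is passed through; nonzero is treated as assert */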
	if (!state->lsi && level == 0)
		return 0;

	do {
		pq_old = state->pq_state;
		if (state->lsi) {
			if (level) {
				if (pq_old & PQ_PRESENTED)
					return 0;

				pq_new = PQ_PRESENTED;
			} else
				pq_new = 0;
		} else
			pq_new = ((pq_old << 1) & 3) | PQ_PRESENTED;
	} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);
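
	/* P=1, Q=0: the only state in which the interrupt is presented */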
	if (pq_new == PQ_PRESENTED)
		icp_deliver_irq(xics, NULL, irq, false);

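	/* Record which real CPU a passed-through interrupt arrived on */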
	if (state->host_irq)
		state->intr_cpu = raw_smp_processor_id();

	return 0;
}

static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
			     struct kvmppc_icp *icp)
{
	int i;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];
		if (state->resend) {
			XICS_DBG("resend %#x prio %#x\n", state->number,
				 state->priority);
			icp_deliver_irq(xics, icp, state->number, true);
		}
	}
}

static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
		       struct ics_irq_state *state,
		       u32 server, u32 priority, u32 saved_priority)
{
	bool deliver;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	state->server = server;
	state->priority = priority;
	state->saved_priority = saved_priority;
	deliver = false;
	if ((state->masked_pending || state->resend) && priority != MASKED) {
		state->masked_pending = 0;
		state->resend = 0;
		deliver = true;
	}

	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return deliver;
}

int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, server);
	if (!icp)
		return -EINVAL;

	XICS_DBG("set_xive %#x server %#x prio %#x MP:%d RS:%d\n",
		 irq, server, priority,
		 state->masked_pending, state->resend);

	if (write_xive(xics, ics, state, server, priority, priority))
		icp_deliver_irq(xics, icp, irq, false);

	return 0;
}

int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;
	unsigned long flags;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	*server = state->server;
	*priority = state->priority;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return 0;
}

int kvmppc_xics_int_on(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, state->server);
	if (!icp)
		return -EINVAL;

	if (write_xive(xics, ics, state, state->server, state->saved_priority,
		       state->saved_priority))
		icp_deliver_irq(xics, icp, irq, false);

	return 0;
}

int kvmppc_xics_int_off(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	write_xive(xics, ics, state, state->server, MASKED, state->priority);

	return 0;
}

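/*
 * Atomically update the ICP state, recomputing the EE (external
 * interrupt exception) output.  On success, queue an external
 * interrupt on the target vcpu when the new state calls for it; the
 * EE line is only ever raised here, and only ever cleared by the
 * owning processor (accept and up_cppr paths).
 */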
static inline bool icp_try_update(struct kvmppc_icp *icp,
				  union kvmppc_icp_state old,
				  union kvmppc_icp_state new,
				  bool change_self)
{
	bool success;

	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	XICS_DBG("UPD [%04lx] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 icp->server_num,
		 old.cppr, old.mfrr, old.pending_pri, old.xisr,
		 old.need_resend, old.out_ee);
	XICS_DBG("UPD - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 new.cppr, new.mfrr, new.pending_pri, new.xisr,
		 new.need_resend, new.out_ee);
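
	/*
	 * Check for an output state update.  This is racy against other
	 * updaters, which is why the EE line is never cleared here, only
	 * set: clearing happens only in accept (H_XIRR) and up_cppr, on
	 * the owning processor itself.
	 */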
	if (new.out_ee) {
		kvmppc_book3s_queue_irqprio(icp->vcpu,
					    BOOK3S_INTERRUPT_EXTERNAL);
		if (!change_self)
			kvmppc_fast_vcpu_kick(icp->vcpu);
	}
bail:
	return success;
}

static void icp_check_resend(struct kvmppc_xics *xics,
			     struct kvmppc_icp *icp)
{
	u32 icsid;

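	/* Pairs with the smp_wmb() in icp_deliver_irq() */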
	smp_rmb();
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!test_and_clear_bit(icsid, icp->resend_map))
			continue;
		if (!ics)
			continue;
		ics_check_resend(xics, ics, icp);
	}
}

static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
			       u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	XICS_DBG("try deliver %#x(P:%#x) to server %#lx\n", irq, priority,
		 icp->server_num);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		*reject = 0;

		success = new_state.cppr > priority &&
			  new_state.mfrr > priority &&
			  new_state.pending_pri > priority;
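
		/*
		 * If delivery is possible, remember any interrupt we
		 * displace so the caller can reject (re-deliver) it.
		 */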
		if (success) {
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
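			/*
			 * Delivery failed: set need_resend so that a
			 * later CPPR change on this ICP retries it.
			 */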
			new_state.need_resend = true;
		}

	} while (!icp_try_update(icp, old_state, new_state, false));

	return success;
}

static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq, bool check_resend)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;
	unsigned long flags;

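	/*
	 * This is used both for initial delivery of an interrupt and
	 * for subsequent rejection.  Rejection races with resends: by
	 * the time we get here the ICP may already be able to accept
	 * the interrupt again, and the interrupt may even have been
	 * retargeted, so a reject is simply retried as a fresh
	 * delivery, including a new ICP lookup.
	 */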
again:
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		XICS_DBG("icp_deliver_irq: IRQ 0x%06x not found !\n", new_irq);
		return;
	}
	state = &ics->irq_state[src];

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			pr_warn("icp_deliver_irq: IRQ 0x%06x server 0x%x not found !\n",
				new_irq, state->server);
			goto out;
		}
	}

	if (check_resend)
		if (!state->resend)
			goto out;

	state->resend = 0;
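
	/*
	 * If the interrupt is masked, just record masked_pending; it
	 * will be delivered when write_xive() unmasks the source.
	 */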
	if (state->priority == MASKED) {
		XICS_DBG("irq %#x masked pending\n", new_irq);
		state->masked_pending = 1;
		goto out;
	}
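
	/*
	 * Try the delivery; on failure icp_try_to_deliver() atomically
	 * sets need_resend in the ICP.  On success we may have
	 * displaced an interrupt that was pending before we took the
	 * ICS lock; we cannot assume it no longer needs delivery (the
	 * target may already have consumed the new one), so it is
	 * rejected and the whole sequence redone for it.
	 */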
	if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		if (reject && reject != XICS_IPI) {
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			new_irq = reject;
			check_resend = 0;
			goto again;
		}
	} else {
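		/*
		 * Delivery failed: flag the source for resend and mark
		 * this ICS in the ICP's resend map.
		 */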
		state->resend = 1;

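		/*
		 * Make state->resend visible before the resend_map bit,
		 * so icp_check_resend() cannot clear the bit and then
		 * miss the resend flag.
		 */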
		smp_wmb();
		set_bit(ics->icsid, icp->resend_map);

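		/*
		 * If need_resend was cleared in the ICP between our
		 * failed delivery and setting the map bit, the resend
		 * may already have been missed; retry the delivery.
		 */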
		smp_mb();
		if (!icp->state.need_resend) {
			state->resend = 0;
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			check_resend = 0;
			goto again;
		}
	}
out:
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);
}

static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			  u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

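	/*
	 * ICP State: Down_CPPR
	 *
	 * Load CPPR with the new value and, if the MFRR is now more
	 * favored than both the CPPR and any pending interrupt, make an
	 * IPI pending (the pending interrupt at that point can only
	 * have been an IPI or nothing, as the WARN_ON below checks).
	 * Resend checks then run outside the atomic update.
	 */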
	do {
		old_state = new_state = READ_ONCE(icp->state);

		new_state.cppr = new_cppr;

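		/*
		 * Cut-down Resend / Check_IPI / IPI: a pending
		 * interrupt cannot be trumped by an IPI here, so it is
		 * either already an IPI, more favored, or absent.
		 */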
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			WARN_ON(new_state.xisr != XICS_IPI &&
				new_state.xisr != 0);
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		resend = new_state.need_resend;
		new_state.need_resend = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	if (resend)
		icp_check_resend(xics, icp);
}

static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

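	/* First clear the EE line; the update below re-raises it if needed */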
	kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);

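	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the current
	 * CPPR, load the CPPR with the pending priority and clear the
	 * XISR.
	 */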
	do {
		old_state = new_state = READ_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr);

	return xirr;
}

static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
				 unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp;
	u32 reject;
	bool resend;
	bool local;

	XICS_DBG("h_ipi vcpu %d to server %lu mfrr %#lx\n",
		 vcpu->vcpu_id, server, mfrr);

	icp = vcpu->arch.icp;
	local = icp->server_num == server;
	if (!local) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}
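
	/*
	 * ICP state: Set_MFRR
	 *
	 * If the CPPR is more favored than the new MFRR, there can be
	 * no XISR to reject.  Otherwise (Check_IPI), a pending
	 * interrupt that is not more favored than the new MFRR is
	 * rejected and an IPI made pending instead.  If the MFRR is
	 * being made less favored, a previously rejected interrupt may
	 * need to be resent, so trigger a resend check.
	 */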
	do {
		old_state = new_state = READ_ONCE(icp->state);

		new_state.mfrr = mfrr;

		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			if (mfrr <= new_state.pending_pri) {
				reject = new_state.xisr;
				new_state.pending_pri = mfrr;
				new_state.xisr = XICS_IPI;
			}
		}

		if (mfrr > old_state.mfrr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, local));

	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject, false);

	if (resend)
		icp_check_resend(xics, icp);

	return H_SUCCESS;
}

static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	union kvmppc_icp_state state;
	struct kvmppc_icp *icp;

	icp = vcpu->arch.icp;
	if (icp->server_num != server) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}
	state = READ_ONCE(icp->state);
	kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
	kvmppc_set_gpr(vcpu, 5, state.mfrr);
	return H_SUCCESS;
}

static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr);

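	/*
	 * ICP State: Set_CPPR
	 *
	 * The CPPR is only ever changed by the owning processor, so it
	 * is safe to compare against the current value outside of the
	 * atomic update.  A numerically larger (less favored) CPPR is
	 * handled by the shared Down_CPPR path; an unchanged CPPR is a
	 * no-op.
	 */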
	if (cppr > icp->state.cppr)
		icp_down_cppr(xics, icp, cppr);
	else if (cppr == icp->state.cppr)
		return;
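
	/*
	 * ICP State: Up_CPPR
	 *
	 * Raising the priority may reject the currently pending
	 * interrupt (Reject_Current).  Clear the EE line first;
	 * icp_try_update() sets it again if still needed.
	 */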
	kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}

	} while (!icp_try_update(icp, old_state, new_state, true));

	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject, false);
}

static int ics_eoi(struct kvm_vcpu *vcpu, u32 irq)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;
	u32 pq_old, pq_new;

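	/*
	 * ICS EOI handling: for an LSI, re-present the interrupt if the
	 * P bit is still set.  For an MSI, shift Q into P (clearing Q)
	 * and re-present if P ends up set.
	 */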
	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("ics_eoi: IRQ 0x%06x not found !\n", irq);
		return H_PARAMETER;
	}
	state = &ics->irq_state[src];

	if (state->lsi)
		pq_new = state->pq_state;
	else
		do {
			pq_old = state->pq_state;
			pq_new = pq_old >> 1;
		} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);

	if (pq_new & PQ_PRESENTED)
		icp_deliver_irq(xics, icp, irq, false);

	kvm_notify_acked_irq(vcpu->kvm, 0, irq);

	return H_SUCCESS;
}

static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 irq = xirr & 0x00ffffff;

	XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr);

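	/*
	 * ICP State: EOI
	 *
	 * The top byte of the XIRR restores the CPPR (Down_CPPR), then
	 * the source is EOI'd.  If software misuses EOI to make the
	 * CPPR more favored, PAPR leaves the behaviour undefined and no
	 * rejection check is done here.
	 */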
	icp_down_cppr(xics, icp, xirr >> 24);

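	/* IPIs have no EOI at the source (ICS) level */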
	if (irq == XICS_IPI)
		return H_SUCCESS;

	return ics_eoi(vcpu, irq);
}
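
/*
 * Complete the deferred actions that a real-mode hcall handler could
 * not perform itself (vcpu kicks, resend checks, EOI notifications).
 */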
int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;

	XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n",
		 hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt);

	if (icp->rm_action & XICS_RM_KICK_VCPU) {
		icp->n_rm_kick_vcpu++;
		kvmppc_fast_vcpu_kick(icp->rm_kick_target);
	}
	if (icp->rm_action & XICS_RM_CHECK_RESEND) {
		icp->n_rm_check_resend++;
		icp_check_resend(xics, icp->rm_resend_icp);
	}
	if (icp->rm_action & XICS_RM_NOTIFY_EOI) {
		icp->n_rm_notify_eoi++;
		kvm_notify_acked_irq(vcpu->kvm, 0, icp->rm_eoied_irq);
	}

	icp->rm_action = 0;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_rm_complete);

int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	unsigned long res;
	int rc = H_SUCCESS;

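	/* Check that we have an ICP */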
	if (!xics || !vcpu->arch.icp)
		return H_HARDWARE;

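	/* These hcalls have no real-mode implementation at present */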
	switch (req) {
	case H_XIRR_X:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		kvmppc_set_gpr(vcpu, 5, get_tb());
		return rc;
	case H_IPOLL:
		rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
		return rc;
	}

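	/* With real-mode handling enabled, only deferred completion work is left */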
	if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
		return kvmppc_xics_rm_complete(vcpu, req);

	switch (req) {
	case H_XIRR:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		break;
	case H_CPPR:
		kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_EOI:
		rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_IPI:
		rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
				  kvmppc_get_gpr(vcpu, 5));
		break;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_hcall);
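
/* -- Debugfs, device setup and state save/restore -- */
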
static void xics_debugfs_irqmap(struct seq_file *m,
				struct kvmppc_passthru_irqmap *pimap)
{
	int i;

	if (!pimap)
		return;
	seq_printf(m, "========\nPIRQ mappings: %d maps\n===========\n",
		   pimap->n_mapped);
	for (i = 0; i < pimap->n_mapped; i++) {
		seq_printf(m, "r_hwirq=%x, v_hwirq=%x\n",
			   pimap->mapped[i].r_hwirq, pimap->mapped[i].v_hwirq);
	}
}

static int xics_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xics *xics = m->private;
	struct kvm *kvm = xics->kvm;
	struct kvm_vcpu *vcpu;
	int icsid, i;
	unsigned long flags;
	unsigned long t_rm_kick_vcpu, t_rm_check_resend;
	unsigned long t_rm_notify_eoi;
	unsigned long t_reject, t_check_resend;

	if (!kvm)
		return 0;

	t_rm_kick_vcpu = 0;
	t_rm_notify_eoi = 0;
	t_rm_check_resend = 0;
	t_check_resend = 0;
	t_reject = 0;

	xics_debugfs_irqmap(m, kvm->arch.pimap);

	seq_printf(m, "=========\nICP state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_icp *icp = vcpu->arch.icp;
		union kvmppc_icp_state state;

		if (!icp)
			continue;

		state.raw = READ_ONCE(icp->state.raw);
		seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
			   icp->server_num, state.xisr,
			   state.pending_pri, state.cppr, state.mfrr,
			   state.out_ee, state.need_resend);
		t_rm_kick_vcpu += icp->n_rm_kick_vcpu;
		t_rm_notify_eoi += icp->n_rm_notify_eoi;
		t_rm_check_resend += icp->n_rm_check_resend;
		t_check_resend += icp->n_check_resend;
		t_reject += icp->n_reject;
	}

	seq_printf(m, "ICP Guest->Host totals: kick_vcpu=%lu check_resend=%lu notify_eoi=%lu\n",
		   t_rm_kick_vcpu, t_rm_check_resend,
		   t_rm_notify_eoi);
986 seq_printf(m, "ICP Real Mode totals: check_resend=%lu resend=%lu\n",
987 t_check_resend, t_reject);
	for (icsid = 0; icsid <= KVMPPC_XICS_MAX_ICS_ID; icsid++) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!ics)
			continue;

		seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n",
			   icsid);

		local_irq_save(flags);
		arch_spin_lock(&ics->lock);

		for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
			struct ics_irq_state *irq = &ics->irq_state[i];

			seq_printf(m, "irq 0x%06x: server %#x prio %#x save prio %#x pq_state %d resend %d masked pending %d\n",
				   irq->number, irq->server, irq->priority,
				   irq->saved_priority, irq->pq_state,
				   irq->resend, irq->masked_pending);
		}
		arch_spin_unlock(&ics->lock);
		local_irq_restore(flags);
	}
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(xics_debug);

static void xics_debugfs_init(struct kvmppc_xics *xics)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "kvm-xics-%p", xics);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}

	xics->dentry = debugfs_create_file(name, 0444, powerpc_debugfs_root,
					   xics, &xics_debug_fops);

	pr_debug("%s: created %s\n", __func__, name);
	kfree(name);
}

static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
					struct kvmppc_xics *xics, int irq)
{
	struct kvmppc_ics *ics;
	int i, icsid;

	icsid = irq >> KVMPPC_XICS_ICS_SHIFT;

	mutex_lock(&kvm->lock);

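	/* Someone else raced us here and created the ICS first */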
	if (xics->ics[icsid])
		goto out;

	ics = kzalloc(sizeof(struct kvmppc_ics), GFP_KERNEL);
	if (!ics)
		goto out;

	ics->icsid = icsid;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		ics->irq_state[i].number = (icsid << KVMPPC_XICS_ICS_SHIFT) | i;
		ics->irq_state[i].priority = MASKED;
		ics->irq_state[i].saved_priority = MASKED;
	}
	smp_wmb();
	xics->ics[icsid] = ics;

	if (icsid > xics->max_icsid)
		xics->max_icsid = icsid;

out:
	mutex_unlock(&kvm->lock);
	return xics->ics[icsid];
}

static int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
{
	struct kvmppc_icp *icp;

	if (!vcpu->kvm->arch.xics)
		return -ENODEV;

	if (kvmppc_xics_find_server(vcpu->kvm, server_num))
		return -EEXIST;

	icp = kzalloc(sizeof(struct kvmppc_icp), GFP_KERNEL);
	if (!icp)
		return -ENOMEM;

	icp->vcpu = vcpu;
	icp->server_num = server_num;
	icp->state.mfrr = MASKED;
	icp->state.pending_pri = MASKED;
	vcpu->arch.icp = icp;

	XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id);

	return 0;
}

u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	union kvmppc_icp_state state;

	if (!icp)
		return 0;
	state = icp->state;
	return ((u64)state.cppr << KVM_REG_PPC_ICP_CPPR_SHIFT) |
		((u64)state.xisr << KVM_REG_PPC_ICP_XISR_SHIFT) |
		((u64)state.mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT) |
		((u64)state.pending_pri << KVM_REG_PPC_ICP_PPRI_SHIFT);
}

int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_ics *ics;
	u8 cppr, mfrr, pending_pri;
	u32 xisr;
	u16 src;
	bool resend;

	if (!icp || !xics)
		return -ENOENT;

	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
		KVM_REG_PPC_ICP_XISR_MASK;
	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
	pending_pri = icpval >> KVM_REG_PPC_ICP_PPRI_SHIFT;
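
	/* Require the new state to be internally consistent */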
	if (xisr == 0) {
		if (pending_pri != 0xff)
			return -EINVAL;
	} else if (xisr == XICS_IPI) {
		if (pending_pri != mfrr || pending_pri >= cppr)
			return -EINVAL;
	} else {
		if (pending_pri >= mfrr || pending_pri >= cppr)
			return -EINVAL;
		ics = kvmppc_xics_find_ics(xics, xisr, &src);
		if (!ics)
			return -EINVAL;
	}

	new_state.raw = 0;
	new_state.cppr = cppr;
	new_state.xisr = xisr;
	new_state.mfrr = mfrr;
	new_state.pending_pri = pending_pri;
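
	/*
	 * Deassert the CPU interrupt request; icp_try_update() below
	 * will reassert it if the restored state calls for it.
	 */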
	kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);

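	/*
	 * An interrupt displaced from old_state.xisr is not marked as
	 * rejected here; userspace is expected to restore the ICS
	 * source states consistently.  Resends for an MFRR made less
	 * favored are handled, so the state converges even when the
	 * ICS state is restored before the ICP state.
	 */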
	do {
		old_state = READ_ONCE(icp->state);

		if (new_state.mfrr <= old_state.mfrr) {
			resend = false;
			new_state.need_resend = old_state.need_resend;
		} else {
			resend = old_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, false));

	if (resend)
		icp_check_resend(xics, icp);

	return 0;
}

static int xics_get_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	int ret;
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val, prio;
	unsigned long flags;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return -ENOENT;

	irqp = &ics->irq_state[idx];
	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	ret = -ENOENT;
	if (irqp->exists) {
		val = irqp->server;
		prio = irqp->priority;
		if (prio == MASKED) {
			val |= KVM_XICS_MASKED;
			prio = irqp->saved_priority;
		}
		val |= prio << KVM_XICS_PRIORITY_SHIFT;
		if (irqp->lsi) {
			val |= KVM_XICS_LEVEL_SENSITIVE;
			if (irqp->pq_state & PQ_PRESENTED)
				val |= KVM_XICS_PENDING;
		} else if (irqp->masked_pending || irqp->resend)
			val |= KVM_XICS_PENDING;

		if (irqp->pq_state & PQ_PRESENTED)
			val |= KVM_XICS_PRESENTED;

		if (irqp->pq_state & PQ_QUEUED)
			val |= KVM_XICS_QUEUED;

		ret = 0;
	}
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	if (!ret && put_user(val, ubufp))
		ret = -EFAULT;

	return ret;
}

static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val;
	u8 prio;
	u32 server;
	unsigned long flags;

	if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
		return -ENOENT;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics) {
		ics = kvmppc_xics_create_ics(xics->kvm, xics, irq);
		if (!ics)
			return -ENOMEM;
	}
	irqp = &ics->irq_state[idx];
	if (get_user(val, ubufp))
		return -EFAULT;

	server = val & KVM_XICS_DESTINATION_MASK;
	prio = val >> KVM_XICS_PRIORITY_SHIFT;
	if (prio != MASKED &&
	    kvmppc_xics_find_server(xics->kvm, server) == NULL)
		return -EINVAL;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	irqp->server = server;
	irqp->saved_priority = prio;
	if (val & KVM_XICS_MASKED)
		prio = MASKED;
	irqp->priority = prio;
	irqp->resend = 0;
	irqp->masked_pending = 0;
	irqp->lsi = 0;
	irqp->pq_state = 0;
	if (val & KVM_XICS_LEVEL_SENSITIVE)
		irqp->lsi = 1;

	if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING)
		irqp->pq_state |= PQ_PRESENTED;
	if (val & KVM_XICS_QUEUED)
		irqp->pq_state |= PQ_QUEUED;
	irqp->exists = 1;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	if (val & KVM_XICS_PENDING)
		icp_deliver_irq(xics, NULL, irqp->number, false);

	return 0;
}

int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
			bool line_status)
{
	struct kvmppc_xics *xics = kvm->arch.xics;

	if (!xics)
		return -ENODEV;
	return ics_deliver_irq(xics, irq, level);
}

static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_set_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_get_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
		    attr->attr < KVMPPC_XICS_NR_IRQS)
			return 0;
		break;
	}
	return -ENXIO;
}

static void kvmppc_xics_free(struct kvm_device *dev)
{
	struct kvmppc_xics *xics = dev->private;
	int i;
	struct kvm *kvm = xics->kvm;

	debugfs_remove(xics->dentry);

	if (kvm)
		kvm->arch.xics = NULL;

	for (i = 0; i <= xics->max_icsid; i++)
		kfree(xics->ics[i]);
	kfree(xics);
	kfree(dev);
}

static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
{
	struct kvmppc_xics *xics;
	struct kvm *kvm = dev->kvm;
	int ret = 0;

	xics = kzalloc(sizeof(*xics), GFP_KERNEL);
	if (!xics)
		return -ENOMEM;

	dev->private = xics;
	xics->dev = dev;
	xics->kvm = kvm;

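	/* Only one XICS device per VM */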
	if (kvm->arch.xics)
		ret = -EEXIST;
	else
		kvm->arch.xics = xics;

	if (ret) {
		kfree(xics);
		return ret;
	}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (cpu_has_feature(CPU_FTR_ARCH_206) &&
	    cpu_has_feature(CPU_FTR_HVMODE)) {
		xics->real_mode = ENABLE_REALMODE;
		xics->real_mode_dbg = DEBUG_REALMODE;
	}
#endif

	return 0;
}

static void kvmppc_xics_init(struct kvm_device *dev)
{
	struct kvmppc_xics *xics = (struct kvmppc_xics *)dev->private;

	xics_debugfs_init(xics);
}

struct kvm_device_ops kvm_xics_ops = {
	.name = "kvm-xics",
	.create = kvmppc_xics_create,
	.init = kvmppc_xics_init,
	.destroy = kvmppc_xics_free,
	.set_attr = xics_set_attr,
	.get_attr = xics_get_attr,
	.has_attr = xics_has_attr,
};

int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 xcpu)
{
	struct kvmppc_xics *xics = dev->private;
	int r = -EBUSY;

	if (dev->ops != &kvm_xics_ops)
		return -EPERM;
	if (xics->kvm != vcpu->kvm)
		return -EPERM;
	if (vcpu->arch.irq_type)
		return -EBUSY;

	r = kvmppc_xics_create_icp(vcpu, xcpu);
	if (!r)
		vcpu->arch.irq_type = KVMPPC_IRQ_XICS;

	return r;
}

void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.icp)
		return;
	kfree(vcpu->arch.icp);
	vcpu->arch.icp = NULL;
	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
}

void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long irq,
			    unsigned long host_irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	u16 idx;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return;

	ics->irq_state[idx].host_irq = host_irq;
	ics->irq_state[idx].intr_cpu = -1;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_set_mapped);

void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long irq,
			    unsigned long host_irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	u16 idx;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return;

	ics->irq_state[idx].host_irq = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_clr_mapped);