This source file includes the following definitions:
- kvmppc_xive_push_vcpu
- xive_irq_trigger
- xive_esc_irq
- kvmppc_xive_attach_escalation
- xive_provision_queue
- xive_check_provisioning
- xive_inc_q_pending
- xive_try_pick_queue
- kvmppc_xive_select_target
- xive_lock_and_mask
- xive_lock_for_unmask
- xive_finish_unmask
- xive_target_interrupt
- kvmppc_xive_set_xive
- kvmppc_xive_get_xive
- kvmppc_xive_int_on
- kvmppc_xive_int_off
- xive_restore_pending_irq
- kvmppc_xive_get_icp
- kvmppc_xive_set_icp
- kvmppc_xive_set_mapped
- kvmppc_xive_clr_mapped
- kvmppc_xive_disable_vcpu_interrupts
- xive_cleanup_single_escalation
- kvmppc_xive_cleanup_vcpu
- kvmppc_xive_connect_vcpu
- xive_pre_save_set_queued
- xive_pre_save_mask_irq
- xive_pre_save_unmask_irq
- xive_pre_save_queue
- xive_pre_save_scan
- xive_post_save_scan
- xive_get_source
- kvmppc_xive_create_src_block
- xive_check_delayed_irq
- xive_set_source
- kvmppc_xive_set_irq
- xive_set_attr
- xive_get_attr
- xive_has_attr
- kvmppc_xive_cleanup_irq
- kvmppc_xive_free_sources
- kvmppc_xive_release
- kvmppc_xive_get_device
- kvmppc_xive_create
- kvmppc_xive_debug_show_queues
- xive_debug_show
- xive_debugfs_init
- kvmppc_xive_init
- kvmppc_xive_init_module
- kvmppc_xive_exit_module
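The functions above implement the XICS-on-XIVE KVM device (kvm_xive_ops, defined near the end of the file). As a rough orientation aid, the sketch below shows how userspace would typically instantiate the device and wire a vCPU to it through the standard KVM ioctls; the helper name and error handling are illustrative only, not part of this file.

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Hypothetical helper: create the in-kernel XICS device and attach one vCPU. */
static int setup_xics_on_xive(int vm_fd, int vcpu_fd, unsigned long server)
{
	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_XICS };
	struct kvm_enable_cap cap = { .cap = KVM_CAP_IRQ_XICS };
	int ret;

	/* On XIVE-capable hosts this ends up in kvmppc_xive_create() below;
	 * cd.fd receives the new device fd. */
	ret = ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
	if (ret < 0)
		return ret;

	/* args[0] = device fd, args[1] = XICS server number for this vCPU;
	 * this reaches kvmppc_xive_connect_vcpu(). */
	cap.args[0] = cd.fd;
	cap.args[1] = server;
	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}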
1
2
3
4
5
6 #define pr_fmt(fmt) "xive-kvm: " fmt
7
8 #include <linux/kernel.h>
9 #include <linux/kvm_host.h>
10 #include <linux/err.h>
11 #include <linux/gfp.h>
12 #include <linux/spinlock.h>
13 #include <linux/delay.h>
14 #include <linux/percpu.h>
15 #include <linux/cpumask.h>
16 #include <linux/uaccess.h>
17 #include <asm/kvm_book3s.h>
18 #include <asm/kvm_ppc.h>
19 #include <asm/hvcall.h>
20 #include <asm/xics.h>
21 #include <asm/xive.h>
22 #include <asm/xive-regs.h>
23 #include <asm/debug.h>
24 #include <asm/debugfs.h>
25 #include <asm/time.h>
26 #include <asm/opal.h>
27
28 #include <linux/debugfs.h>
29 #include <linux/seq_file.h>
30
31 #include "book3s_xive.h"
32
33 /*
34  * The macro block below instantiates the "virtual mode" variants of
35  * the XIVE hypercall handlers (xive_vm_h_xirr, xive_vm_h_ipi, ...)
36  * by selecting the function prefix, the TIMA mapping and the MMIO
37  * accessors used by book3s_xive_template.c, which is then included
38  * as a C file.
39  */
40
41 #define XIVE_RUNTIME_CHECKS
42 #define X_PFX xive_vm_
43 #define X_STATIC static
44 #define X_STAT_PFX stat_vm_
45 #define __x_tima xive_tima
46 #define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio))
47 #define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio))
48 #define __x_writeb __raw_writeb
49 #define __x_readw __raw_readw
50 #define __x_readq __raw_readq
51 #define __x_writeq __raw_writeq
52
53 #include "book3s_xive_template.c"
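/*
 * Illustration only (not part of this file): the X_PFX/X_STATIC macros
 * above let a single template be compiled with different prefixes and
 * MMIO accessors.  A minimal sketch of the token-pasting pattern such
 * a template relies on -- the GLUE()/XGLUE() helpers and the exact
 * signature are illustrative, not necessarily what the template uses:
 *
 *	#define GLUE(a, b)	a##b
 *	#define XGLUE(a, b)	GLUE(a, b)
 *
 *	X_STATIC unsigned long XGLUE(X_PFX, h_xirr)(struct kvm_vcpu *vcpu)
 *	{
 *		// built on __x_tima, __x_eoi_page(), __x_readq, ...
 *	}
 *
 * With the definitions above this expands to xive_vm_h_xirr(), which
 * kvmppc_xive_init_module() installs as the virtual-mode H_XIRR
 * handler at module load.
 */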
54
55 /*
56  * Keep a small gap of queue entries in reserve so that a queue is
57  * never accounted as completely full (see xive_try_pick_queue()).
58  */
59 #define XIVE_Q_GAP 2
60
61 /*
62  * Push a vcpu's interrupt presentation context into the XIVE thread
63  * management area (TIMA) on guest entry.
64  */
65 void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
66 {
67 void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;
68 u64 pq;
69
70
71 /*
72  * Nothing to do if the platform doesn't have a mapped TIMA or if
73  * this vcpu has no CAM line to push.
74  */
75 if (!tima || !vcpu->arch.xive_cam_word)
76 return;
77
78 eieio();
79 __raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS);
80 __raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2);
81 vcpu->arch.xive_pushed = 1;
82 eieio();
83
84
85 /*
86  * Clear the software "interrupt pending" flag now that the context
87  * is pushed; the escalation handler may set it again concurrently,
88  * which at worst causes a spurious wakeup on the next H_CEDE.
89  */
90
91 vcpu->arch.irq_pending = 0;
92
93 /*
94  * If the escalation interrupt was left enabled on the last exit,
95  * disable further escalations (PQ = 01) and check whether it fired.
96  */
97 if (vcpu->arch.xive_esc_on) {
98 pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
99 XIVE_ESB_SET_PQ_01));
100 mb();
101
102 /*
103  * Race note: the escalation may already have fired and be on its
104  * way to the host queue while we set PQ to 01 above.  If the load
105  * returned P set, the escalation is in flight, so leave xive_esc_on
106  * set; the escalation handler (xive_esc_irq) will clear it when it
107  * runs.
108  */
123 if (!(pq & XIVE_ESB_VAL_P))
124 /* P was clear: the escalation did not fire, drop the flag */
125 vcpu->arch.xive_esc_on = 0;
126 }
127 }
128 EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu);
129
130
131 /*
132  * Trigger an interrupt through its trigger MMIO page (MSIs only).
133  */
134 static bool xive_irq_trigger(struct xive_irq_data *xd)
135 {
136
137 if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
138 return false;
139
140
141 if (WARN_ON(!xd->trig_mmio))
142 return false;
143
144 out_be64(xd->trig_mmio, 0);
145
146 return true;
147 }
148
149 static irqreturn_t xive_esc_irq(int irq, void *data)
150 {
151 struct kvm_vcpu *vcpu = data;
152
153 vcpu->arch.irq_pending = 1;
154 smp_mb();
155 if (vcpu->arch.ceded)
156 kvmppc_fast_vcpu_kick(vcpu);
157
158
159 /*
160  * With the NO_EOI flag set on this escalation interrupt, it stays
161  * effectively disabled until it is explicitly re-enabled.  Clear
162  * xive_esc_on to record that state.
163  */
164
167 vcpu->arch.xive_esc_on = false;
168
169
170 smp_wmb();
171
172 return IRQ_HANDLED;
173 }
174
175 int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
176 bool single_escalation)
177 {
178 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
179 struct xive_q *q = &xc->queues[prio];
180 char *name = NULL;
181 int rc;
182
183
184 if (xc->esc_virq[prio])
185 return 0;
186
187
188 xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
189 if (!xc->esc_virq[prio]) {
190 pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
191 prio, xc->server_num);
192 return -EIO;
193 }
194
195 if (single_escalation)
196 name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
197 vcpu->kvm->arch.lpid, xc->server_num);
198 else
199 name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
200 vcpu->kvm->arch.lpid, xc->server_num, prio);
201 if (!name) {
202 pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
203 prio, xc->server_num);
204 rc = -ENOMEM;
205 goto error;
206 }
207
208 pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio);
209
210 rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
211 IRQF_NO_THREAD, name, vcpu);
212 if (rc) {
213 pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
214 prio, xc->server_num);
215 goto error;
216 }
217 xc->esc_virq_names[prio] = name;
218
219
220 /*
221  * In single escalation mode the guest entry/exit code manipulates
222  * the escalation ESB directly: stash its EOI page addresses in the
223  * vcpu, start with PQ set to 01 (disabled), and flag the interrupt
224  * NO_EOI so the generic flow handler never EOIs it.
225  */
226
227 if (single_escalation) {
228 struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
229 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
230
231 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
232 vcpu->arch.xive_esc_raddr = xd->eoi_page;
233 vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio;
234 xd->flags |= XIVE_IRQ_NO_EOI;
235 }
236
237 return 0;
238 error:
239 irq_dispose_mapping(xc->esc_virq[prio]);
240 xc->esc_virq[prio] = 0;
241 kfree(name);
242 return rc;
243 }
244
245 static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
246 {
247 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
248 struct kvmppc_xive *xive = xc->xive;
249 struct xive_q *q = &xc->queues[prio];
250 void *qpage;
251 int rc;
252
253 if (WARN_ON(q->qpage))
254 return 0;
255
256
257 qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
258 if (!qpage) {
259 pr_err("Failed to allocate queue %d for VCPU %d\n",
260 prio, xc->server_num);
261 return -ENOMEM;
262 }
263 memset(qpage, 0, 1 << xive->q_order);
264
265
266 /*
267  * Configure the queue in OPAL.  q->qpage is only set once the queue
268  * is fully configured, which other code relies on when deciding
269  * whether a queue can be used.
270  */
271
272 rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
273 xive->q_order, true);
274 if (rc)
275 pr_err("Failed to configure queue %d for VCPU %d\n",
276 prio, xc->server_num);
277 return rc;
278 }
279
280
281 static int xive_check_provisioning(struct kvm *kvm, u8 prio)
282 {
283 struct kvmppc_xive *xive = kvm->arch.xive;
284 struct kvm_vcpu *vcpu;
285 int i, rc;
286
287 lockdep_assert_held(&xive->lock);
288
289
290 if (xive->qmap & (1 << prio))
291 return 0;
292
293 pr_devel("Provisioning prio... %d\n", prio);
294
295
296 kvm_for_each_vcpu(i, vcpu, kvm) {
297 if (!vcpu->arch.xive_vcpu)
298 continue;
299 rc = xive_provision_queue(vcpu, prio);
300 if (rc == 0 && !xive->single_escalation)
301 kvmppc_xive_attach_escalation(vcpu, prio,
302 xive->single_escalation);
303 if (rc)
304 return rc;
305 }
306
307
308 mb();
309 xive->qmap |= (1 << prio);
310 return 0;
311 }
312
313 static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
314 {
315 struct kvm_vcpu *vcpu;
316 struct kvmppc_xive_vcpu *xc;
317 struct xive_q *q;
318
319
320 vcpu = kvmppc_xive_find_server(kvm, server);
321 if (!vcpu) {
322 pr_warn("%s: Can't find server %d\n", __func__, server);
323 return;
324 }
325 xc = vcpu->arch.xive_vcpu;
326 if (WARN_ON(!xc))
327 return;
328
329 q = &xc->queues[prio];
330 atomic_inc(&q->pending_count);
331 }
332
333 static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
334 {
335 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
336 struct xive_q *q;
337 u32 max;
338
339 if (WARN_ON(!xc))
340 return -ENXIO;
341 if (!xc->valid)
342 return -ENXIO;
343
344 q = &xc->queues[prio];
345 if (WARN_ON(!q->qpage))
346 return -ENXIO;
347
348
349 max = (q->msk + 1) - XIVE_Q_GAP;
350 return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
351 }
352
353 int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
354 {
355 struct kvm_vcpu *vcpu;
356 int i, rc;
357
358
359 vcpu = kvmppc_xive_find_server(kvm, *server);
360 if (!vcpu) {
361 pr_devel("Can't find server %d\n", *server);
362 return -EINVAL;
363 }
364
365 pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);
366
367
368 rc = xive_try_pick_queue(vcpu, prio);
369 if (rc == 0)
370 return rc;
371
372 pr_devel(" .. failed, looking up candidate...\n");
373
374
375 kvm_for_each_vcpu(i, vcpu, kvm) {
376 if (!vcpu->arch.xive_vcpu)
377 continue;
378 rc = xive_try_pick_queue(vcpu, prio);
379 if (rc == 0) {
380 *server = vcpu->arch.xive_vcpu->server_num;
381 pr_devel(" found on 0x%x/%d\n", *server, prio);
382 return rc;
383 }
384 }
385 pr_devel(" no available target !\n");
386
387
388 return -EBUSY;
389 }
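/*
 * Illustration only (not part of this file): xive_try_pick_queue()
 * above uses atomic_add_unless() to reserve a queue slot while keeping
 * XIVE_Q_GAP entries spare, e.g. a 256-entry queue (q->msk == 255) is
 * treated as full once 254 slots are accounted for.  A hypothetical
 * userspace analogue of that bounded claim, assuming C11 atomics:
 */
#include <stdatomic.h>
#include <stdbool.h>

static bool try_claim_slot(atomic_int *count, int queue_entries)
{
	int max = queue_entries - 2;		/* mirrors XIVE_Q_GAP */
	int old = atomic_load(count);

	/* Claim one slot unless that would eat into the reserved gap. */
	while (old < max) {
		if (atomic_compare_exchange_weak(count, &old, old + 1))
			return true;		/* slot reserved */
	}
	return false;				/* full: caller tries another queue */
}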
390
391 static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
392 struct kvmppc_xive_src_block *sb,
393 struct kvmppc_xive_irq_state *state)
394 {
395 struct xive_irq_data *xd;
396 u32 hw_num;
397 u8 old_prio;
398 u64 val;
399
400
401 /* Take the lock and mark the source masked, retrying as long as an
402  * H_EOI for it is in flight (state->in_eoi). */
403
404 for (;;) {
405 arch_spin_lock(&sb->lock);
406 old_prio = state->guest_priority;
407 state->guest_priority = MASKED;
408 mb();
409 if (!state->in_eoi)
410 break;
411 state->guest_priority = old_prio;
412 arch_spin_unlock(&sb->lock);
413 }
414
415
416 if (old_prio == MASKED)
417 return old_prio;
418
419
420 kvmppc_xive_select_irq(state, &hw_num, &xd);
421
422
423 /*
424  * Some HW sources cannot be masked through their ESB and need the
425  * firmware to do it (OPAL_XIVE_IRQ_MASK_VIA_FW): those get
426  * reconfigured to the "masked" priority and are assumed to end up
427  * with P=1/Q=0.
428  *
429  * Everything else is masked by setting the ESB PQ bits to 10,
430  * remembering the previous P and Q values, and syncing the source
431  * so nothing is still in flight when we return.
432  */
433
434 if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
435 xive_native_configure_irq(hw_num,
436 kvmppc_xive_vp(xive, state->act_server),
437 MASKED, state->number);
438
439 state->old_p = true;
440 state->old_q = false;
441 } else {
442
443 val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
444 state->old_p = !!(val & 2);
445 state->old_q = !!(val & 1);
446
447
448
449
450
451 xive_native_sync_source(hw_num);
452 }
453
454 return old_prio;
455 }
456
457 static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
458 struct kvmppc_xive_irq_state *state)
459 {
460
461
462
463 for (;;) {
464 arch_spin_lock(&sb->lock);
465 if (!state->in_eoi)
466 break;
467 arch_spin_unlock(&sb->lock);
468 }
469 }
470
471 static void xive_finish_unmask(struct kvmppc_xive *xive,
472 struct kvmppc_xive_src_block *sb,
473 struct kvmppc_xive_irq_state *state,
474 u8 prio)
475 {
476 struct xive_irq_data *xd;
477 u32 hw_num;
478
479
480 if (state->guest_priority != MASKED)
481 goto bail;
482
483
484 kvmppc_xive_select_irq(state, &hw_num, &xd);
485
486
487
488
489
490 if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
491 xive_native_configure_irq(hw_num,
492 kvmppc_xive_vp(xive, state->act_server),
493 state->act_priority, state->number);
494
495 if (!state->old_p)
496 xive_vm_source_eoi(hw_num, xd);
497
498 if (!(xd->flags & OPAL_XIVE_IRQ_LSI))
499 xive_irq_trigger(xd);
500 goto bail;
501 }
502
503
504 if (state->old_q)
505 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);
506
507
508
509
510
511
512 if (!state->old_p)
513 xive_vm_source_eoi(hw_num, xd);
514
515
516 mb();
517 bail:
518 state->guest_priority = prio;
519 }
520
521
522 /*
523  * Target an interrupt to a given server/priority, falling back to
524  * another server if the chosen queue is full, and update the HW
525  * routing accordingly.  Called with the source block lock held.
526  */
527
528 static int xive_target_interrupt(struct kvm *kvm,
529 struct kvmppc_xive_irq_state *state,
530 u32 server, u8 prio)
531 {
532 struct kvmppc_xive *xive = kvm->arch.xive;
533 u32 hw_num;
534 int rc;
535
536
537
538
539
540
541 rc = kvmppc_xive_select_target(kvm, &server, prio);
542
543
544
545
546
547 if (rc)
548 return rc;
549
550
551
552
553
554
555 if (state->act_priority != MASKED)
556 xive_inc_q_pending(kvm,
557 state->act_server,
558 state->act_priority);
559
560
561
562 state->act_priority = prio;
563 state->act_server = server;
564
565
566 kvmppc_xive_select_irq(state, &hw_num, NULL);
567
568 return xive_native_configure_irq(hw_num,
569 kvmppc_xive_vp(xive, server),
570 prio, state->number);
571 }
572
573 /*
574  * Targetting (masking) rules for set_xive:
575  *
576  * Masking an interrupt does not change its HW routing.  The source
577  * stays targetted at its current server/priority and only its ESB
578  * PQ bits are set to 10 (see xive_lock_and_mask()), so the guest
579  * visible "guest_priority" can be 0xff while "act_priority" keeps
580  * recording the real HW target.
581  *
582  * Changing the server or priority of an unmasked interrupt may
583  * leave stale entries in the previous queue; those are accounted
584  * in that queue's pending_count so its occupancy counter can be
585  * corrected when the stale entries are eventually consumed.
586  */
587
613 int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
614 u32 priority)
615 {
616 struct kvmppc_xive *xive = kvm->arch.xive;
617 struct kvmppc_xive_src_block *sb;
618 struct kvmppc_xive_irq_state *state;
619 u8 new_act_prio;
620 int rc = 0;
621 u16 idx;
622
623 if (!xive)
624 return -ENODEV;
625
626 pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
627 irq, server, priority);
628
629
630 if (priority != MASKED) {
631 mutex_lock(&xive->lock);
632 rc = xive_check_provisioning(xive->kvm,
633 xive_prio_from_guest(priority));
634 mutex_unlock(&xive->lock);
635 }
636 if (rc) {
637 pr_devel(" provisioning failure %d !\n", rc);
638 return rc;
639 }
640
641 sb = kvmppc_xive_find_source(xive, irq, &idx);
642 if (!sb)
643 return -EINVAL;
644 state = &sb->irq_state[idx];
645
646
647 /*
648  * Handle masking/unmasking first, since taking the lock may have to
649  * be retried because of in-flight EOIs.  Both helpers return with
650  * the source block lock held: xive_lock_and_mask() also sets
651  * guest_priority to MASKED, while xive_lock_for_unmask() only takes
652  * the lock and the actual unmask is finished after any retargetting
653  * below.
654  */
655
660 if (priority == MASKED)
661 xive_lock_and_mask(xive, sb, state);
662 else
663 xive_lock_for_unmask(sb, state);
664
665
666
667
668
669
670
671 new_act_prio = state->act_priority;
672 if (priority != MASKED)
673 new_act_prio = xive_prio_from_guest(priority);
674
675 pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
676 new_act_prio, state->act_server, state->act_priority);
677
678
679 /*
680  * Re-route the interrupt in HW only when the new priority is not
681  * "masked" and the server or priority actually changed.  When the
682  * guest is masking the interrupt, the HW routing is deliberately
683  * left untouched.
684  */
685
692 if (new_act_prio != MASKED &&
693 (state->act_server != server ||
694 state->act_priority != new_act_prio))
695 rc = xive_target_interrupt(kvm, state, server, new_act_prio);
696
697
698
699
700
701 if (priority != MASKED)
702 xive_finish_unmask(xive, sb, state, priority);
703
704
705
706
707
708 state->saved_priority = priority;
709
710 arch_spin_unlock(&sb->lock);
711 return rc;
712 }
713
714 int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
715 u32 *priority)
716 {
717 struct kvmppc_xive *xive = kvm->arch.xive;
718 struct kvmppc_xive_src_block *sb;
719 struct kvmppc_xive_irq_state *state;
720 u16 idx;
721
722 if (!xive)
723 return -ENODEV;
724
725 sb = kvmppc_xive_find_source(xive, irq, &idx);
726 if (!sb)
727 return -EINVAL;
728 state = &sb->irq_state[idx];
729 arch_spin_lock(&sb->lock);
730 *server = state->act_server;
731 *priority = state->guest_priority;
732 arch_spin_unlock(&sb->lock);
733
734 return 0;
735 }
736
737 int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
738 {
739 struct kvmppc_xive *xive = kvm->arch.xive;
740 struct kvmppc_xive_src_block *sb;
741 struct kvmppc_xive_irq_state *state;
742 u16 idx;
743
744 if (!xive)
745 return -ENODEV;
746
747 sb = kvmppc_xive_find_source(xive, irq, &idx);
748 if (!sb)
749 return -EINVAL;
750 state = &sb->irq_state[idx];
751
752 pr_devel("int_on(irq=0x%x)\n", irq);
753
754
755
756
757 if (state->act_priority == MASKED) {
758 pr_devel("int_on on untargetted interrupt\n");
759 return -EINVAL;
760 }
761
762
763 if (state->saved_priority == MASKED)
764 return 0;
765
766
767
768
769 xive_lock_for_unmask(sb, state);
770 xive_finish_unmask(xive, sb, state, state->saved_priority);
771 arch_spin_unlock(&sb->lock);
772
773 return 0;
774 }
775
776 int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
777 {
778 struct kvmppc_xive *xive = kvm->arch.xive;
779 struct kvmppc_xive_src_block *sb;
780 struct kvmppc_xive_irq_state *state;
781 u16 idx;
782
783 if (!xive)
784 return -ENODEV;
785
786 sb = kvmppc_xive_find_source(xive, irq, &idx);
787 if (!sb)
788 return -EINVAL;
789 state = &sb->irq_state[idx];
790
791 pr_devel("int_off(irq=0x%x)\n", irq);
792
793
794
795
796 state->saved_priority = xive_lock_and_mask(xive, sb, state);
797 arch_spin_unlock(&sb->lock);
798
799 return 0;
800 }
801
802 static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
803 {
804 struct kvmppc_xive_src_block *sb;
805 struct kvmppc_xive_irq_state *state;
806 u16 idx;
807
808 sb = kvmppc_xive_find_source(xive, irq, &idx);
809 if (!sb)
810 return false;
811 state = &sb->irq_state[idx];
812 if (!state->valid)
813 return false;
814
815
816
817
818
819 xive_irq_trigger(&state->ipi_data);
820
821 return true;
822 }
823
824 u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
825 {
826 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
827
828 if (!xc)
829 return 0;
830
831
832 return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
833 (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
834 (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
835 }
836
837 int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
838 {
839 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
840 struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
841 u8 cppr, mfrr;
842 u32 xisr;
843
844 if (!xc || !xive)
845 return -ENOENT;
846
847
848 cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
849 xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
850 KVM_REG_PPC_ICP_XISR_MASK;
851 mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
852
853 pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
854 xc->server_num, cppr, mfrr, xisr);
855
856
857 /*
858  * We can't update the state of a "pushed" vcpu: its context must
859  * have been pulled from the HW first.
860  */
861 if (WARN_ON(vcpu->arch.xive_pushed))
862 return -EIO;
863
864
865 vcpu->arch.xive_saved_state.cppr = cppr;
866 xc->hw_cppr = xc->cppr = cppr;
867
868
869 /*
870  * Update the MFRR state and raise the backing IPI if it is now more
871  * favoured than the current CPPR.
872  */
873
874 xc->mfrr = mfrr;
875 if (mfrr < cppr)
876 xive_irq_trigger(&xc->vp_ipi_data);
877
878
879 /*
880  * Restore a pending XISR by re-triggering the corresponding source.
881  * If the source can't be found (it may not have been restored yet),
882  * remember it as a delayed irq so it is forced pending when the
883  * source state is restored later (see xive_check_delayed_irq()).
884  */
885
887 if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
888 xc->delayed_irq = xisr;
889 xive->delayed_irqs++;
890 pr_devel(" xisr restore delayed\n");
891 }
892
893 return 0;
894 }
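/*
 * Illustration only (not part of this file): kvmppc_xive_get_icp() and
 * kvmppc_xive_set_icp() back the KVM_REG_PPC_ICP_STATE one-reg, which
 * is how userspace saves/restores the per-vCPU presentation state.  A
 * hypothetical sketch of reading and decoding it, assuming the uapi
 * definitions pulled in by <linux/kvm.h>:
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

static int get_icp_cppr_mfrr(int vcpu_fd, uint8_t *cppr, uint8_t *mfrr)
{
	uint64_t icp;
	struct kvm_one_reg reg = {
		.id = KVM_REG_PPC_ICP_STATE,
		.addr = (uintptr_t)&icp,
	};
	int ret = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);

	if (ret < 0)
		return ret;

	/* Field layout mirrors kvmppc_xive_get_icp() above. */
	*cppr = icp >> KVM_REG_PPC_ICP_CPPR_SHIFT;
	*mfrr = icp >> KVM_REG_PPC_ICP_MFRR_SHIFT;
	return 0;
}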
895
896 int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
897 struct irq_desc *host_desc)
898 {
899 struct kvmppc_xive *xive = kvm->arch.xive;
900 struct kvmppc_xive_src_block *sb;
901 struct kvmppc_xive_irq_state *state;
902 struct irq_data *host_data = irq_desc_get_irq_data(host_desc);
903 unsigned int host_irq = irq_desc_get_irq(host_desc);
904 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
905 u16 idx;
906 u8 prio;
907 int rc;
908
909 if (!xive)
910 return -ENODEV;
911
912 pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n", guest_irq, hw_irq);
913
914 sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
915 if (!sb)
916 return -EINVAL;
917 state = &sb->irq_state[idx];
918
919
920 /*
921  * Hand the host interrupt over to the vcpu: irq_set_vcpu_affinity()
922  * tells the host XIVE driver that this source is now forwarded to a
923  * guest, so the normal host flow handling no longer applies to it.
924  */
925
932 rc = irq_set_vcpu_affinity(host_irq, state);
933 if (rc) {
934 pr_err("Failed to set VCPU affinity for irq %d\n", host_irq);
935 return rc;
936 }
937
938
939 /* Mask and read the state of the internal IPI: if its P bit is set,
940  * it may already occupy a queue entry on the target, which matters
941  * for how the passed-through source is primed below. */
942
943 prio = xive_lock_and_mask(xive, sb, state);
944 pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
945 state->old_p, state->old_q);
946
947
948 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
949
950
951
952
953
954 if (xive->ops && xive->ops->reset_mapped)
955 xive->ops->reset_mapped(kvm, guest_irq);
956
957
958 state->pt_number = hw_irq;
959 state->pt_data = irq_data_get_irq_handler_data(host_data);
960
961
962
963
964
965
966
967 xive_native_configure_irq(hw_irq,
968 kvmppc_xive_vp(xive, state->act_server),
969 state->act_priority, state->number);
970
971
972
973
974
975
976
977
978 if (prio != MASKED && !state->old_p)
979 xive_vm_source_eoi(hw_irq, state->pt_data);
980
981
982 state->old_p = state->old_q = false;
983
984
985 mb();
986 state->guest_priority = prio;
987 arch_spin_unlock(&sb->lock);
988
989 return 0;
990 }
991 EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);
992
993 int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
994 struct irq_desc *host_desc)
995 {
996 struct kvmppc_xive *xive = kvm->arch.xive;
997 struct kvmppc_xive_src_block *sb;
998 struct kvmppc_xive_irq_state *state;
999 unsigned int host_irq = irq_desc_get_irq(host_desc);
1000 u16 idx;
1001 u8 prio;
1002 int rc;
1003
1004 if (!xive)
1005 return -ENODEV;
1006
1007 pr_devel("clr_mapped girq 0x%lx...\n", guest_irq);
1008
1009 sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
1010 if (!sb)
1011 return -EINVAL;
1012 state = &sb->irq_state[idx];
1013
1014
1015
1016
1017
1018
1019 prio = xive_lock_and_mask(xive, sb, state);
1020 pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
1021 state->old_p, state->old_q);
1022
1023
1024
1025
1026
1027
1028 if (state->old_p)
1029 xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);
1030
1031
1032 rc = irq_set_vcpu_affinity(host_irq, NULL);
1033 if (rc) {
1034 pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq);
1035 return rc;
1036 }
1037
1038
1039 state->pt_number = 0;
1040 state->pt_data = NULL;
1041
1042
1043
1044
1045
1046 if (xive->ops && xive->ops->reset_mapped) {
1047 xive->ops->reset_mapped(kvm, guest_irq);
1048 }
1049
1050
1051 xive_native_configure_irq(state->ipi_number,
1052 kvmppc_xive_vp(xive, state->act_server),
1053 state->act_priority, state->number);
1054
1055
1056
1057
1058
1059
1060 if (prio == MASKED || state->old_p)
1061 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
1062 else
1063 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);
1064
1065
1066 mb();
1067 state->guest_priority = prio;
1068 arch_spin_unlock(&sb->lock);
1069
1070 return 0;
1071 }
1072 EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);
1073
1074 void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
1075 {
1076 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1077 struct kvm *kvm = vcpu->kvm;
1078 struct kvmppc_xive *xive = kvm->arch.xive;
1079 int i, j;
1080
1081 for (i = 0; i <= xive->max_sbid; i++) {
1082 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1083
1084 if (!sb)
1085 continue;
1086 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
1087 struct kvmppc_xive_irq_state *state = &sb->irq_state[j];
1088
1089 if (!state->valid)
1090 continue;
1091 if (state->act_priority == MASKED)
1092 continue;
1093 if (state->act_server != xc->server_num)
1094 continue;
1095
1096
1097 arch_spin_lock(&sb->lock);
1098 state->act_priority = MASKED;
1099 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
1100 xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
1101 if (state->pt_number) {
1102 xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
1103 xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
1104 }
1105 arch_spin_unlock(&sb->lock);
1106 }
1107 }
1108
1109
1110 if (vcpu->arch.xive_esc_on) {
1111 __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
1112 XIVE_ESB_SET_PQ_01));
1113 vcpu->arch.xive_esc_on = false;
1114 }
1115
1116
1117
1118
1119
1120
1121 vcpu->arch.xive_esc_vaddr = 0;
1122 vcpu->arch.xive_esc_raddr = 0;
1123 }
1124
1125
1126 /*
1127  * Common cleanup for a single-escalation escalation interrupt: it is
1128  * never EOIed (NO_EOI), so record whether its P bit should be
1129  * treated as stale before the irq is freed.
1130  */
1131
1132
1133 void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu,
1134 struct kvmppc_xive_vcpu *xc, int irq)
1135 {
1136 struct irq_data *d = irq_get_irq_data(irq);
1137 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
1138
1139
1140 /* This slightly odd sequence gives the right result (stale_p set
1141  * when xive_esc_on is false) even if we race with xive_esc_irq()
1142  * and xive_irq_eoi(). */
1143
1144 xd->stale_p = false;
1145 smp_mb();
1146 if (!vcpu->arch.xive_esc_on)
1147 xd->stale_p = true;
1148 }
1149
1150 void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
1151 {
1152 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1153 struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
1154 int i;
1155
1156 if (!kvmppc_xics_enabled(vcpu))
1157 return;
1158
1159 if (!xc)
1160 return;
1161
1162 pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);
1163
1164
1165 xc->valid = false;
1166 kvmppc_xive_disable_vcpu_interrupts(vcpu);
1167
1168
1169 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);
1170
1171
1172 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1173 if (xc->esc_virq[i]) {
1174 if (xc->xive->single_escalation)
1175 xive_cleanup_single_escalation(vcpu, xc,
1176 xc->esc_virq[i]);
1177 free_irq(xc->esc_virq[i], vcpu);
1178 irq_dispose_mapping(xc->esc_virq[i]);
1179 kfree(xc->esc_virq_names[i]);
1180 }
1181 }
1182
1183
1184 xive_native_disable_vp(xc->vp_id);
1185
1186
1187 vcpu->arch.xive_cam_word = 0;
1188
1189
1190 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1191 struct xive_q *q = &xc->queues[i];
1192
1193 xive_native_disable_queue(xc->vp_id, q, i);
1194 if (q->qpage) {
1195 free_pages((unsigned long)q->qpage,
1196 xive->q_page_order);
1197 q->qpage = NULL;
1198 }
1199 }
1200
1201
1202 if (xc->vp_ipi) {
1203 xive_cleanup_irq_data(&xc->vp_ipi_data);
1204 xive_native_free_irq(xc->vp_ipi);
1205 }
1206
1207 kfree(xc);
1208
1209
1210 vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
1211 vcpu->arch.xive_vcpu = NULL;
1212 }
1213
1214 int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
1215 struct kvm_vcpu *vcpu, u32 cpu)
1216 {
1217 struct kvmppc_xive *xive = dev->private;
1218 struct kvmppc_xive_vcpu *xc;
1219 int i, r = -EBUSY;
1220 u32 vp_id;
1221
1222 pr_devel("connect_vcpu(cpu=%d)\n", cpu);
1223
1224 if (dev->ops != &kvm_xive_ops) {
1225 pr_devel("Wrong ops !\n");
1226 return -EPERM;
1227 }
1228 if (xive->kvm != vcpu->kvm)
1229 return -EPERM;
1230 if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
1231 return -EBUSY;
1232 if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
1233 pr_devel("Out of bounds !\n");
1234 return -EINVAL;
1235 }
1236
1237
1238 mutex_lock(&xive->lock);
1239
1240 vp_id = kvmppc_xive_vp(xive, cpu);
1241 if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
1242 pr_devel("Duplicate !\n");
1243 r = -EEXIST;
1244 goto bail;
1245 }
1246
1247 xc = kzalloc(sizeof(*xc), GFP_KERNEL);
1248 if (!xc) {
1249 r = -ENOMEM;
1250 goto bail;
1251 }
1252
1253 vcpu->arch.xive_vcpu = xc;
1254 xc->xive = xive;
1255 xc->vcpu = vcpu;
1256 xc->server_num = cpu;
1257 xc->vp_id = vp_id;
1258 xc->mfrr = 0xff;
1259 xc->valid = true;
1260
1261 r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
1262 if (r)
1263 goto bail;
1264
1265
1266 vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
1267 vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);
1268
1269
1270 xc->vp_ipi = xive_native_alloc_irq();
1271 if (!xc->vp_ipi) {
1272 pr_err("Failed to allocate xive irq for VCPU IPI\n");
1273 r = -EIO;
1274 goto bail;
1275 }
1276 pr_devel(" IPI=0x%x\n", xc->vp_ipi);
1277
1278 r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
1279 if (r)
1280 goto bail;
1281
1282
1283
1284
1285
1286 r = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
1287 if (r) {
1288 pr_err("Failed to enable VP in OPAL, err %d\n", r);
1289 goto bail;
1290 }
1291
1292
1293
1294
1295
1296
1297
1298
1299 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1300 struct xive_q *q = &xc->queues[i];
1301
1302
1303 if (i == 7 && xive->single_escalation)
1304 break;
1305
1306
1307 if (xive->qmap & (1 << i)) {
1308 r = xive_provision_queue(vcpu, i);
1309 if (r == 0 && !xive->single_escalation)
1310 kvmppc_xive_attach_escalation(
1311 vcpu, i, xive->single_escalation);
1312 if (r)
1313 goto bail;
1314 } else {
1315 r = xive_native_configure_queue(xc->vp_id,
1316 q, i, NULL, 0, true);
1317 if (r) {
1318 pr_err("Failed to configure queue %d for VCPU %d\n",
1319 i, cpu);
1320 goto bail;
1321 }
1322 }
1323 }
1324
1325
1326 r = kvmppc_xive_attach_escalation(vcpu, 0, xive->single_escalation);
1327 if (r)
1328 goto bail;
1329
1330
1331 r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
1332 if (!r)
1333 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);
1334
1335 bail:
1336 mutex_unlock(&xive->lock);
1337 if (r) {
1338 kvmppc_xive_cleanup_vcpu(vcpu);
1339 return r;
1340 }
1341
1342 vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
1343 return 0;
1344 }
1345
1346
1347
1348
1349 static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
1350 {
1351 struct kvmppc_xive_src_block *sb;
1352 struct kvmppc_xive_irq_state *state;
1353 u16 idx;
1354
1355 sb = kvmppc_xive_find_source(xive, irq, &idx);
1356 if (!sb)
1357 return;
1358
1359 state = &sb->irq_state[idx];
1360
1361
1362 if (!state->valid) {
1363 pr_err("invalid irq 0x%x in cpu queue!\n", irq);
1364 return;
1365 }
1366
1367
1368
1369
1370
1371
1372 if (!state->saved_p)
1373 pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);
1374
1375
1376 state->in_queue = true;
1377 }
1378
1379 static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
1380 struct kvmppc_xive_src_block *sb,
1381 u32 irq)
1382 {
1383 struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
1384
1385 if (!state->valid)
1386 return;
1387
1388
1389 state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);
1390
1391
1392 state->saved_p = state->old_p;
1393 state->saved_q = state->old_q;
1394
1395
1396 arch_spin_unlock(&sb->lock);
1397 }
1398
1399 static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
1400 struct kvmppc_xive_src_block *sb,
1401 u32 irq)
1402 {
1403 struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
1404
1405 if (!state->valid)
1406 return;
1407
1408
1409
1410
1411
1412
1413 xive_lock_for_unmask(sb, state);
1414
1415
1416 if (state->saved_scan_prio != MASKED)
1417 xive_finish_unmask(xive, sb, state, state->saved_scan_prio);
1418
1419
1420 arch_spin_unlock(&sb->lock);
1421 }
1422
1423 static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
1424 {
1425 u32 idx = q->idx;
1426 u32 toggle = q->toggle;
1427 u32 irq;
1428
1429 do {
1430 irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
1431 if (irq > XICS_IPI)
1432 xive_pre_save_set_queued(xive, irq);
1433 } while (irq);
1434 }
1435
1436 static void xive_pre_save_scan(struct kvmppc_xive *xive)
1437 {
1438 struct kvm_vcpu *vcpu = NULL;
1439 int i, j;
1440
1441
1442
1443
1444
1445 for (i = 0; i <= xive->max_sbid; i++) {
1446 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1447 if (!sb)
1448 continue;
1449 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
1450 xive_pre_save_mask_irq(xive, sb, j);
1451 }
1452
1453
1454 kvm_for_each_vcpu(i, vcpu, xive->kvm) {
1455 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1456 if (!xc)
1457 continue;
1458 for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
1459 if (xc->queues[j].qpage)
1460 xive_pre_save_queue(xive, &xc->queues[j]);
1461 }
1462 }
1463
1464
1465 for (i = 0; i <= xive->max_sbid; i++) {
1466 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1467 if (!sb)
1468 continue;
1469 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
1470 xive_pre_save_unmask_irq(xive, sb, j);
1471 }
1472 }
1473
1474 static void xive_post_save_scan(struct kvmppc_xive *xive)
1475 {
1476 u32 i, j;
1477
1478
1479 for (i = 0; i <= xive->max_sbid; i++) {
1480 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1481 if (!sb)
1482 continue;
1483 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
1484 sb->irq_state[j].in_queue = false;
1485 }
1486
1487
1488 xive->saved_src_count = 0;
1489 }
1490
1491
1492
1493
1494 static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
1495 {
1496 struct kvmppc_xive_src_block *sb;
1497 struct kvmppc_xive_irq_state *state;
1498 u64 __user *ubufp = (u64 __user *) addr;
1499 u64 val, prio;
1500 u16 idx;
1501
1502 sb = kvmppc_xive_find_source(xive, irq, &idx);
1503 if (!sb)
1504 return -ENOENT;
1505
1506 state = &sb->irq_state[idx];
1507
1508 if (!state->valid)
1509 return -ENOENT;
1510
1511 pr_devel("get_source(%ld)...\n", irq);
1512
1513
1514 /*
1515  * Saving sources into a XICS-style stream can't be done one
1516  * interrupt at a time.  On the first get_source call of a save
1517  * cycle, every source is masked (recording its PQ state), all the
1518  * queues are scanned to mark which interrupts still have entries
1519  * queued, and everything is unmasked again.  The state captured by
1520  * that scan is what is returned below; the scan bookkeeping is
1521  * reset once the last source has been read out.
1522  */
1523
1529 if (xive->saved_src_count == 0)
1530 xive_pre_save_scan(xive);
1531 xive->saved_src_count++;
1532
1533
1534 val = state->act_server;
1535 prio = state->saved_scan_prio;
1536
1537 if (prio == MASKED) {
1538 val |= KVM_XICS_MASKED;
1539 prio = state->saved_priority;
1540 }
1541 val |= prio << KVM_XICS_PRIORITY_SHIFT;
1542 if (state->lsi) {
1543 val |= KVM_XICS_LEVEL_SENSITIVE;
1544 if (state->saved_p)
1545 val |= KVM_XICS_PENDING;
1546 } else {
1547 if (state->saved_p)
1548 val |= KVM_XICS_PRESENTED;
1549
1550 if (state->saved_q)
1551 val |= KVM_XICS_QUEUED;
1552
1553
1554
1555
1556
1557
1558
1559 if (state->in_queue || (prio == MASKED && state->saved_q))
1560 val |= KVM_XICS_PENDING;
1561 }
1562
1563
1564
1565
1566
1567 if (xive->saved_src_count == xive->src_count)
1568 xive_post_save_scan(xive);
1569
1570
1571 if (put_user(val, ubufp))
1572 return -EFAULT;
1573
1574 return 0;
1575 }
1576
1577 struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
1578 struct kvmppc_xive *xive, int irq)
1579 {
1580 struct kvmppc_xive_src_block *sb;
1581 int i, bid;
1582
1583 bid = irq >> KVMPPC_XICS_ICS_SHIFT;
1584
1585 mutex_lock(&xive->lock);
1586
1587
1588 if (xive->src_blocks[bid])
1589 goto out;
1590
1591
1592 sb = kzalloc(sizeof(*sb), GFP_KERNEL);
1593 if (!sb)
1594 goto out;
1595
1596 sb->id = bid;
1597
1598 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
1599 sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
1600 sb->irq_state[i].eisn = 0;
1601 sb->irq_state[i].guest_priority = MASKED;
1602 sb->irq_state[i].saved_priority = MASKED;
1603 sb->irq_state[i].act_priority = MASKED;
1604 }
1605 smp_wmb();
1606 xive->src_blocks[bid] = sb;
1607
1608 if (bid > xive->max_sbid)
1609 xive->max_sbid = bid;
1610
1611 out:
1612 mutex_unlock(&xive->lock);
1613 return xive->src_blocks[bid];
1614 }
1615
1616 static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
1617 {
1618 struct kvm *kvm = xive->kvm;
1619 struct kvm_vcpu *vcpu = NULL;
1620 int i;
1621
1622 kvm_for_each_vcpu(i, vcpu, kvm) {
1623 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1624
1625 if (!xc)
1626 continue;
1627
1628 if (xc->delayed_irq == irq) {
1629 xc->delayed_irq = 0;
1630 xive->delayed_irqs--;
1631 return true;
1632 }
1633 }
1634 return false;
1635 }
1636
1637 static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
1638 {
1639 struct kvmppc_xive_src_block *sb;
1640 struct kvmppc_xive_irq_state *state;
1641 u64 __user *ubufp = (u64 __user *) addr;
1642 u16 idx;
1643 u64 val;
1644 u8 act_prio, guest_prio;
1645 u32 server;
1646 int rc = 0;
1647
1648 if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
1649 return -ENOENT;
1650
1651 pr_devel("set_source(irq=0x%lx)\n", irq);
1652
1653
1654 sb = kvmppc_xive_find_source(xive, irq, &idx);
1655 if (!sb) {
1656 pr_devel("No source, creating source block...\n");
1657 sb = kvmppc_xive_create_src_block(xive, irq);
1658 if (!sb) {
1659 pr_devel("Failed to create block...\n");
1660 return -ENOMEM;
1661 }
1662 }
1663 state = &sb->irq_state[idx];
1664
1665
1666 if (get_user(val, ubufp)) {
1667 pr_devel("fault getting user info !\n");
1668 return -EFAULT;
1669 }
1670
1671 server = val & KVM_XICS_DESTINATION_MASK;
1672 guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;
1673
1674 pr_devel(" val=0x%016llx (server=0x%x, guest_prio=%d)\n",
1675 val, server, guest_prio);
1676
1677
1678
1679
1680
1681 if (!state->ipi_number) {
1682 state->ipi_number = xive_native_alloc_irq();
1683 if (state->ipi_number == 0) {
1684 pr_devel("Failed to allocate IPI !\n");
1685 return -ENOMEM;
1686 }
1687 xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
1688 pr_devel(" src_ipi=0x%x\n", state->ipi_number);
1689 }
1690
1691
1692 /*
1693  * Use lock_and_mask() to put the source into a known masked state
1694  * (this also covers FW-masked sources).  guest_priority is set to 0
1695  * first so the masking is really performed; the actual state is
1696  * restored from the saved word further down.
1697  */
1698 state->guest_priority = 0;
1699 xive_lock_and_mask(xive, sb, state);
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709 act_prio = xive_prio_from_guest(guest_prio);
1710 state->act_priority = MASKED;
1711
1712
1713
1714
1715
1716
1717 arch_spin_unlock(&sb->lock);
1718
1719
1720 if (act_prio != MASKED) {
1721
1722 mutex_lock(&xive->lock);
1723 rc = xive_check_provisioning(xive->kvm, act_prio);
1724 mutex_unlock(&xive->lock);
1725
1726
1727 if (rc == 0)
1728 rc = xive_target_interrupt(xive->kvm, state,
1729 server, act_prio);
1730
1731
1732
1733
1734
1735 }
1736
1737
1738
1739
1740
1741 if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
1742 val |= KVM_XICS_PENDING;
1743 pr_devel(" Found delayed ! forcing PENDING !\n");
1744 }
1745
1746
1747 state->old_p = false;
1748 state->old_q = false;
1749 state->lsi = false;
1750 state->asserted = false;
1751
1752
1753 if (val & KVM_XICS_LEVEL_SENSITIVE) {
1754 state->lsi = true;
1755 if (val & KVM_XICS_PENDING)
1756 state->asserted = true;
1757 pr_devel(" LSI ! Asserted=%d\n", state->asserted);
1758 }
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770 if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
1771 state->old_p = true;
1772 if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
1773 state->old_q = true;
1774
1775 pr_devel(" P=%d, Q=%d\n", state->old_p, state->old_q);
1776
1777
1778
1779
1780
1781
1782 if (val & KVM_XICS_MASKED) {
1783 pr_devel(" masked, saving prio\n");
1784 state->guest_priority = MASKED;
1785 state->saved_priority = guest_prio;
1786 } else {
1787 pr_devel(" unmasked, restoring to prio %d\n", guest_prio);
1788 xive_finish_unmask(xive, sb, state, guest_prio);
1789 state->saved_priority = guest_prio;
1790 }
1791
1792
1793 if (!state->valid)
1794 xive->src_count++;
1795 state->valid = true;
1796
1797 return 0;
1798 }
1799
1800 int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
1801 bool line_status)
1802 {
1803 struct kvmppc_xive *xive = kvm->arch.xive;
1804 struct kvmppc_xive_src_block *sb;
1805 struct kvmppc_xive_irq_state *state;
1806 u16 idx;
1807
1808 if (!xive)
1809 return -ENODEV;
1810
1811 sb = kvmppc_xive_find_source(xive, irq, &idx);
1812 if (!sb)
1813 return -EINVAL;
1814
1815
1816 state = &sb->irq_state[idx];
1817 if (!state->valid)
1818 return -EINVAL;
1819
1820
1821 if (state->pt_number)
1822 return -EINVAL;
1823
1824 if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
1825 state->asserted = 1;
1826 else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
1827 state->asserted = 0;
1828 return 0;
1829 }
1830
1831
1832 xive_irq_trigger(&state->ipi_data);
1833
1834 return 0;
1835 }
1836
1837 static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1838 {
1839 struct kvmppc_xive *xive = dev->private;
1840
1841
1842 switch (attr->group) {
1843 case KVM_DEV_XICS_GRP_SOURCES:
1844 return xive_set_source(xive, attr->attr, attr->addr);
1845 }
1846 return -ENXIO;
1847 }
1848
1849 static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1850 {
1851 struct kvmppc_xive *xive = dev->private;
1852
1853
1854 switch (attr->group) {
1855 case KVM_DEV_XICS_GRP_SOURCES:
1856 return xive_get_source(xive, attr->attr, attr->addr);
1857 }
1858 return -ENXIO;
1859 }
1860
1861 static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1862 {
1863
1864 switch (attr->group) {
1865 case KVM_DEV_XICS_GRP_SOURCES:
1866 if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
1867 attr->attr < KVMPPC_XICS_NR_IRQS)
1868 return 0;
1869 break;
1870 }
1871 return -ENXIO;
1872 }
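/*
 * Illustration only (not part of this file): the KVM_DEV_XICS_GRP_SOURCES
 * attribute group handled above is what userspace uses to save and
 * restore per-source state across migration, one 64-bit word per
 * source as built by xive_get_source()/xive_set_source().  A
 * hypothetical sketch:
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

static int save_xics_source(int xics_dev_fd, uint32_t irq, uint64_t *state)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_XICS_GRP_SOURCES,
		.attr = irq,			/* source number */
		.addr = (uintptr_t)state,	/* receives the state word */
	};

	return ioctl(xics_dev_fd, KVM_GET_DEVICE_ATTR, &attr);
}

static int restore_xics_source(int xics_dev_fd, uint32_t irq, uint64_t state)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_XICS_GRP_SOURCES,
		.attr = irq,
		.addr = (uintptr_t)&state,
	};

	return ioctl(xics_dev_fd, KVM_SET_DEVICE_ATTR, &attr);
}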
1873
1874 static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
1875 {
1876 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
1877 xive_native_configure_irq(hw_num, 0, MASKED, 0);
1878 }
1879
1880 void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
1881 {
1882 int i;
1883
1884 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
1885 struct kvmppc_xive_irq_state *state = &sb->irq_state[i];
1886
1887 if (!state->valid)
1888 continue;
1889
1890 kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
1891 xive_cleanup_irq_data(&state->ipi_data);
1892 xive_native_free_irq(state->ipi_number);
1893
1894
1895 if (state->pt_number)
1896 kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);
1897
1898 state->valid = false;
1899 }
1900 }
1901
1902
1903 /* Called when the device fd is closed.  kvm->lock is held. */
1904
1905 static void kvmppc_xive_release(struct kvm_device *dev)
1906 {
1907 struct kvmppc_xive *xive = dev->private;
1908 struct kvm *kvm = xive->kvm;
1909 struct kvm_vcpu *vcpu;
1910 int i;
1911
1912 pr_devel("Releasing xive device\n");
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923 debugfs_remove(xive->dentry);
1924
1925
1926
1927
1928 kvm_for_each_vcpu(i, vcpu, kvm) {
1929
1930
1931
1932
1933
1934
1935
1936
1937 mutex_lock(&vcpu->mutex);
1938 kvmppc_xive_cleanup_vcpu(vcpu);
1939 mutex_unlock(&vcpu->mutex);
1940 }
1941
1942
1943
1944
1945
1946
1947
1948 kvm->arch.xive = NULL;
1949
1950
1951 for (i = 0; i <= xive->max_sbid; i++) {
1952 if (xive->src_blocks[i])
1953 kvmppc_xive_free_sources(xive->src_blocks[i]);
1954 kfree(xive->src_blocks[i]);
1955 xive->src_blocks[i] = NULL;
1956 }
1957
1958 if (xive->vp_base != XIVE_INVALID_VP)
1959 xive_native_free_vp_block(xive->vp_base);
1960
1961
1962
1963
1964
1965
1966
1967
1968 kfree(dev);
1969 }
1970
1971
1972 /*
1973  * When the guest changes interrupt mode (XICS legacy vs. XIVE
1974  * native) the VM switches KVM device: the old device is released
1975  * before the new one is created.  As a transitional safety net the
1976  * kvmppc_xive structure itself is recycled instead of freed, so a
1977  * stale pointer can never reference freed memory.
1978  */
1979
1980 struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type)
1981 {
1982 struct kvmppc_xive **kvm_xive_device = type == KVM_DEV_TYPE_XIVE ?
1983 &kvm->arch.xive_devices.native :
1984 &kvm->arch.xive_devices.xics_on_xive;
1985 struct kvmppc_xive *xive = *kvm_xive_device;
1986
1987 if (!xive) {
1988 xive = kzalloc(sizeof(*xive), GFP_KERNEL);
1989 *kvm_xive_device = xive;
1990 } else {
1991 memset(xive, 0, sizeof(*xive));
1992 }
1993
1994 return xive;
1995 }
1996
1997
1998
1999
2000 static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
2001 {
2002 struct kvmppc_xive *xive;
2003 struct kvm *kvm = dev->kvm;
2004 int ret = 0;
2005
2006 pr_devel("Creating xive for partition\n");
2007
2008
2009 if (kvm->arch.xive)
2010 return -EEXIST;
2011
2012 xive = kvmppc_xive_get_device(kvm, type);
2013 if (!xive)
2014 return -ENOMEM;
2015
2016 dev->private = xive;
2017 xive->dev = dev;
2018 xive->kvm = kvm;
2019 mutex_init(&xive->lock);
2020
2021
2022 xive->q_order = xive_native_default_eq_shift();
2023 if (xive->q_order < PAGE_SHIFT)
2024 xive->q_page_order = 0;
2025 else
2026 xive->q_page_order = xive->q_order - PAGE_SHIFT;
2027
2028
2029 xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS);
2030 pr_devel("VP_Base=%x\n", xive->vp_base);
2031
2032 if (xive->vp_base == XIVE_INVALID_VP)
2033 ret = -ENOMEM;
2034
2035 xive->single_escalation = xive_native_has_single_escalation();
2036
2037 if (ret)
2038 return ret;
2039
2040 kvm->arch.xive = xive;
2041 return 0;
2042 }
2043
2044 int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
2045 {
2046 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
2047 unsigned int i;
2048
2049 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
2050 struct xive_q *q = &xc->queues[i];
2051 u32 i0, i1, idx;
2052
2053 if (!q->qpage && !xc->esc_virq[i])
2054 continue;
2055
2056 seq_printf(m, " [q%d]: ", i);
2057
2058 if (q->qpage) {
2059 idx = q->idx;
2060 i0 = be32_to_cpup(q->qpage + idx);
2061 idx = (idx + 1) & q->msk;
2062 i1 = be32_to_cpup(q->qpage + idx);
2063 seq_printf(m, "T=%d %08x %08x...\n", q->toggle,
2064 i0, i1);
2065 }
2066 if (xc->esc_virq[i]) {
2067 struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
2068 struct xive_irq_data *xd =
2069 irq_data_get_irq_handler_data(d);
2070 u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
2071
2072 seq_printf(m, "E:%c%c I(%d:%llx:%llx)",
2073 (pq & XIVE_ESB_VAL_P) ? 'P' : 'p',
2074 (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q',
2075 xc->esc_virq[i], pq, xd->eoi_page);
2076 seq_puts(m, "\n");
2077 }
2078 }
2079 return 0;
2080 }
2081
2082 static int xive_debug_show(struct seq_file *m, void *private)
2083 {
2084 struct kvmppc_xive *xive = m->private;
2085 struct kvm *kvm = xive->kvm;
2086 struct kvm_vcpu *vcpu;
2087 u64 t_rm_h_xirr = 0;
2088 u64 t_rm_h_ipoll = 0;
2089 u64 t_rm_h_cppr = 0;
2090 u64 t_rm_h_eoi = 0;
2091 u64 t_rm_h_ipi = 0;
2092 u64 t_vm_h_xirr = 0;
2093 u64 t_vm_h_ipoll = 0;
2094 u64 t_vm_h_cppr = 0;
2095 u64 t_vm_h_eoi = 0;
2096 u64 t_vm_h_ipi = 0;
2097 unsigned int i;
2098
2099 if (!kvm)
2100 return 0;
2101
2102 seq_printf(m, "=========\nVCPU state\n=========\n");
2103
2104 kvm_for_each_vcpu(i, vcpu, kvm) {
2105 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
2106
2107 if (!xc)
2108 continue;
2109
2110 seq_printf(m, "cpu server %#x CPPR:%#x HWCPPR:%#x"
2111 " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
2112 xc->server_num, xc->cppr, xc->hw_cppr,
2113 xc->mfrr, xc->pending,
2114 xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
2115
2116 kvmppc_xive_debug_show_queues(m, vcpu);
2117
2118 t_rm_h_xirr += xc->stat_rm_h_xirr;
2119 t_rm_h_ipoll += xc->stat_rm_h_ipoll;
2120 t_rm_h_cppr += xc->stat_rm_h_cppr;
2121 t_rm_h_eoi += xc->stat_rm_h_eoi;
2122 t_rm_h_ipi += xc->stat_rm_h_ipi;
2123 t_vm_h_xirr += xc->stat_vm_h_xirr;
2124 t_vm_h_ipoll += xc->stat_vm_h_ipoll;
2125 t_vm_h_cppr += xc->stat_vm_h_cppr;
2126 t_vm_h_eoi += xc->stat_vm_h_eoi;
2127 t_vm_h_ipi += xc->stat_vm_h_ipi;
2128 }
2129
2130 seq_printf(m, "Hcalls totals\n");
2131 seq_printf(m, " H_XIRR R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
2132 seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
2133 seq_printf(m, " H_CPPR R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
2134 seq_printf(m, " H_EOI R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
2135 seq_printf(m, " H_IPI R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);
2136
2137 return 0;
2138 }
2139
2140 DEFINE_SHOW_ATTRIBUTE(xive_debug);
2141
2142 static void xive_debugfs_init(struct kvmppc_xive *xive)
2143 {
2144 char *name;
2145
2146 name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
2147 if (!name) {
2148 pr_err("%s: no memory for name\n", __func__);
2149 return;
2150 }
2151
2152 xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
2153 xive, &xive_debug_fops);
2154
2155 pr_debug("%s: created %s\n", __func__, name);
2156 kfree(name);
2157 }
2158
2159 static void kvmppc_xive_init(struct kvm_device *dev)
2160 {
2161 struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;
2162
2163
2164 xive_debugfs_init(xive);
2165 }
2166
2167 struct kvm_device_ops kvm_xive_ops = {
2168 .name = "kvm-xive",
2169 .create = kvmppc_xive_create,
2170 .init = kvmppc_xive_init,
2171 .release = kvmppc_xive_release,
2172 .set_attr = xive_set_attr,
2173 .get_attr = xive_get_attr,
2174 .has_attr = xive_has_attr,
2175 };
2176
2177 void kvmppc_xive_init_module(void)
2178 {
2179 __xive_vm_h_xirr = xive_vm_h_xirr;
2180 __xive_vm_h_ipoll = xive_vm_h_ipoll;
2181 __xive_vm_h_ipi = xive_vm_h_ipi;
2182 __xive_vm_h_cppr = xive_vm_h_cppr;
2183 __xive_vm_h_eoi = xive_vm_h_eoi;
2184 }
2185
2186 void kvmppc_xive_exit_module(void)
2187 {
2188 __xive_vm_h_xirr = NULL;
2189 __xive_vm_h_ipoll = NULL;
2190 __xive_vm_h_ipi = NULL;
2191 __xive_vm_h_cppr = NULL;
2192 __xive_vm_h_eoi = NULL;
2193 }