#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)

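/*
 * Dummy interrupt value used to flag queue entries whose interrupt
 * has been rerouted to another vCPU; such entries are skipped.
 */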
#define XICS_DUMMY	1

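/*
 * Acknowledge any interrupt pending in the TIMA OS ring and latch its
 * priority into xc->pending.
 */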
static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
{
	u8 cppr;
	u16 ack;

	/*
	 * Ensure any previous store to CPPR is ordered vs. the
	 * ACK register access below.
	 */
	eieio();

	/* Perform the acknowledge OS-to-register cycle */
	ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));

	/* Synchronize subsequent queue accesses */
	mb();

	/* Anything pending ? The high byte of the ack holds the NSR */
	if (!((ack >> 8) & TM_QW1_NSR_EO))
		return;

	/* Grab the CPPR of the most favored pending interrupt */
	cppr = ack & 0xff;
	if (cppr < 8)
		xc->pending |= 1 << cppr;

#ifdef XIVE_RUNTIME_CHECKS
	/* Check consistency with our cached HW CPPR */
	if (cppr >= xc->hw_cppr)
		pr_warn("KVM-XIVE: CPU %d odd ack CPPR, got %d at %d\n",
			smp_processor_id(), cppr, xc->hw_cppr);
#endif

	/*
	 * Update our image of the HW CPPR. xc->cppr itself is only
	 * updated when the queues are scanned for interrupts.
	 */
	xc->hw_cppr = cppr;
}

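/*
 * MMIO load from an interrupt's ESB management page, returning the
 * single byte of response (PQ state).
 */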
static u8 GLUE(X_PFX,esb_load)(struct xive_irq_data *xd, u32 offset)
{
	u64 val;

	/* Handle HW errata on the ESB page offset */
	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
		offset |= offset << 4;

	val = __x_readq(__x_eoi_page(xd) + offset);
#ifdef __LITTLE_ENDIAN__
	val >>= 64 - 8;
#endif
	return (u8)val;
}

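/* EOI an interrupt source, using whichever method its flags require */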
static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
{
	/* If the source supports the "store EOI" facility, use it */
	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
		__x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW)
		opal_int_eoi(hw_irq);
	else if (xd->flags & XIVE_IRQ_FLAG_LSI) {
		/*
		 * LSIs use the HW EOI cycle rather than PQ bits, as they
		 * are automatically re-triggered in HW when still pending.
		 */
		__x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
	} else {
		uint64_t eoi_val;

		/*
		 * Otherwise, EOI by clearing both P and Q via the special
		 * "set PQ to 00" MMIO load, which returns the old Q bit...
		 */
		eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);

		/* ...and re-trigger the interrupt if Q was set */
		if ((eoi_val & 1) && __x_trig_page(xd))
			__x_writeq(0, __x_trig_page(xd));
	}
}

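/* Scan types for GLUE(X_PFX,scan_interrupts) below */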
enum {
	scan_fetch,
	scan_poll,
	scan_eoi,
};

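/*
 * Scan the queues of all "pending" priorities (plus a pending IPI
 * signalled via MFRR) for the most favored interrupt to present to
 * the guest. scan_fetch consumes what it finds and updates the CPPR,
 * scan_poll only peeks, and scan_eoi refreshes xc->pending without
 * consuming anything.
 */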
static u32 GLUE(X_PFX,scan_interrupts)(struct kvmppc_xive_vcpu *xc,
				       u8 pending, int scan_type)
{
	u32 hirq = 0;
	u8 prio = 0xff;

	while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) {
		struct xive_q *q;
		u32 idx, toggle;
		__be32 *qpage;

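		/*
		 * The most favored pending priority is the lowest set
		 * bit in the "pending" bitmap.
		 */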
		prio = ffs(pending) - 1;

		/*
		 * If that priority is not more favored than the current
		 * CPPR (or is out of range), the only remaining candidate
		 * is a pending IPI signalled via MFRR.
		 */
		if (prio >= xc->cppr || prio > 7) {
			if (xc->mfrr < xc->cppr) {
				prio = xc->mfrr;
				hirq = XICS_IPI;
			}
			break;
		}

		/* Grab the queue and its local pointers */
		q = &xc->queues[prio];
		idx = q->idx;
		toggle = q->toggle;

		/*
		 * Snapshot the queue page; all checks below must be done
		 * against this same snapshot.
		 */
		qpage = READ_ONCE(q->qpage);

skip_ipi:
		/* Try to fetch an entry from the queue at this priority */
		hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle);

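		/*
		 * An IPI entry (or a priority 0 scan with no queue page)
		 * is handled here: when fetching, EOI it at the source and
		 * commit the queue pointers, then keep scanning past it.
		 * The IPI itself is presented via the MFRR check below.
		 */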
		if (hirq == XICS_IPI || (prio == 0 && !qpage)) {
			if (scan_type == scan_fetch) {
				GLUE(X_PFX,source_eoi)(xc->vp_ipi,
						       &xc->vp_ipi_data);
				q->idx = idx;
				q->toggle = toggle;
			}
#ifdef XIVE_RUNTIME_CHECKS
			WARN_ON(hirq && hirq != XICS_IPI);
#endif
			if (hirq)
				goto skip_ipi;
		}

		/* A "dummy" entry flags a rerouted interrupt, skip it */
		if (hirq == XICS_DUMMY)
			goto skip_ipi;

		/* Clear the pending bit if the queue is now empty */
		if (!hirq) {
			pending &= ~(1 << prio);

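			/*
			 * The queue is empty at this priority: fold any
			 * deferred count adjustment recorded in
			 * pending_count back into the queue count.
			 */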
			if (atomic_read(&q->pending_count)) {
				int p = atomic_xchg(&q->pending_count, 0);
				if (p) {
#ifdef XIVE_RUNTIME_CHECKS
					WARN_ON(p > atomic_read(&q->count));
#endif
					atomic_sub(p, &q->count);
				}
			}
		}

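		/*
		 * If a pending IPI (MFRR) is at least as favored as what
		 * we found, and more favored than the CPPR, present the
		 * IPI instead.
		 */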
		if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
			prio = xc->mfrr;
			hirq = XICS_IPI;
			break;
		}

		/* Commit the queue pointers if we are consuming entries */
		if (scan_type == scan_fetch) {
			q->idx = idx;
			q->toggle = toggle;
		}
	}

	/* A poll (H_IPOLL) doesn't change any state */
	if (scan_type == scan_poll)
		return hirq;

	/* Update the pending bitmap */
	xc->pending = pending;

	/*
	 * An EOI-time scan stops here: the pending mask has been
	 * refreshed but the CPPR is left alone.
	 */
	if (scan_type == scan_eoi)
		return hirq;

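	/*
	 * A fetch raises the CPPR to the priority of the interrupt we
	 * are about to present to the guest.
	 */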
	if (hirq)
		xc->cppr = prio;

	/* Push the new CPPR to the HW if it changed */
	if (xc->cppr != xc->hw_cppr) {
		xc->hw_cppr = xc->cppr;
		__x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);
	}

	return hirq;
}

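/* H_XIRR: acknowledge the TIMA and fetch the next interrupt for the guest */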
X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	u8 old_cppr;
	u32 hirq;

	pr_devel("H_XIRR\n");

	xc->GLUE(X_STAT_PFX,h_xirr)++;

	/* First collect pending bits from HW */
	GLUE(X_PFX,ack_pending)(xc);

	pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n",
		 xc->pending, xc->hw_cppr, xc->cppr);

	/* Grab the previous CPPR and reverse-map it to a guest value */
	old_cppr = xive_prio_to_guest(xc->cppr);

	/* Scan for actual interrupts */
	hirq = GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_fetch);

	pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n",
		 hirq, xc->hw_cppr, xc->cppr);

#ifdef XIVE_RUNTIME_CHECKS
	/* That should never hit, guest interrupt numbers are 24-bit */
	if (hirq & 0xff000000)
		pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq);
#endif

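	/*
	 * Return the interrupt and the previous CPPR packed as an XIRR
	 * value in GPR4, as the XICS hcall ABI expects.
	 */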
	vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24);

	return H_SUCCESS;
}

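/* H_IPOLL: poll for a pending interrupt on a server without consuming it */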
X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	u8 pending = xc->pending;
	u32 hirq;

	pr_devel("H_IPOLL(server=%ld)\n", server);

	xc->GLUE(X_STAT_PFX,h_ipoll)++;

	/* Grab the target VCPU if not the calling one */
	if (xc->server_num != server) {
		vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
		if (!vcpu)
			return H_PARAMETER;
		xc = vcpu->arch.xive_vcpu;

		/* We can't peek at another vCPU's TIMA, scan all priorities */
		pending = 0xff;
	} else {
		/* Grab the PIPR (most favored pending priority) from the TIMA */
		__be64 qw1 = __x_readq(__x_tima + TM_QW1_OS);
		u8 pipr = be64_to_cpu(qw1) & 0xff;
		if (pipr < 8)
			pending |= 1 << pipr;
	}

	hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll);

	/* Return the interrupt and the current CPPR packed in GPR4 */
	vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24);

	return H_SUCCESS;
}

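/*
 * Push the most favored pending priority (including a pending IPI
 * signalled via MFRR) into the TIMA "set OS pending" register.
 */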
static void GLUE(X_PFX,push_pending_to_hw)(struct kvmppc_xive_vcpu *xc)
{
	u8 pending, prio;

	pending = xc->pending;
	if (xc->mfrr != 0xff) {
		if (xc->mfrr < 8)
			pending |= 1 << xc->mfrr;
		else
			pending |= 0x80;
	}
	if (!pending)
		return;
	prio = ffs(pending) - 1;

	__x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING);
}

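/*
 * Scan this vCPU's queues for interrupts that have been rerouted to
 * another vCPU: replace their entries with XICS_DUMMY and re-trigger
 * them at the source so they get delivered to their new target.
 */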
static void GLUE(X_PFX,scan_for_rerouted_irqs)(struct kvmppc_xive *xive,
					       struct kvmppc_xive_vcpu *xc)
{
	unsigned int prio;

	/* For each priority that is now masked */
	for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
		struct xive_q *q = &xc->queues[prio];
		struct kvmppc_xive_irq_state *state;
		struct kvmppc_xive_src_block *sb;
		u32 idx, toggle, entry, irq, hw_num;
		struct xive_irq_data *xd;
		__be32 *qpage;
		u16 src;

		idx = q->idx;
		toggle = q->toggle;
		qpage = READ_ONCE(q->qpage);
		if (!qpage)
			continue;

		/* For each interrupt in the queue */
		for (;;) {
			entry = be32_to_cpup(qpage + idx);

			/* No more valid entries ? */
			if ((entry >> 31) == toggle)
				break;
			irq = entry & 0x7fffffff;

			/* Skip dummies and IPIs */
			if (irq == XICS_DUMMY || irq == XICS_IPI)
				goto next;
			sb = kvmppc_xive_find_source(xive, irq, &src);
			if (!sb)
				goto next;
			state = &sb->irq_state[src];

			/* Still routed to us ? Leave it alone */
			if (xc->server_num == state->act_server)
				goto next;

			/* It has been rerouted: take it out of the queue */
			qpage[idx] = cpu_to_be32((entry & 0x80000000) | XICS_DUMMY);

			/* Find the HW interrupt */
			kvmppc_xive_select_irq(state, &hw_num, &xd);

			/* For MSIs, set PQ to 11 so the EOI below forces a re-trigger */
			if (!(xd->flags & XIVE_IRQ_FLAG_LSI))
				GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_11);

			/* EOI the source */
			GLUE(X_PFX,source_eoi)(hw_num, xd);

next:
			idx = (idx + 1) & q->msk;
			if (idx == 0)
				toggle ^= 1;
		}
	}
}

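/* H_CPPR: the guest is changing its Current Processor Priority Register */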
X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	u8 old_cppr;

	pr_devel("H_CPPR(cppr=%ld)\n", cppr);

	xc->GLUE(X_STAT_PFX,h_cppr)++;

	/* Map the guest CPPR to a HW priority */
	cppr = xive_prio_from_guest(cppr);

	/* Remember the old value and update the SW state */
	old_cppr = xc->cppr;
	xc->cppr = cppr;

	/*
	 * Order the CPPR update above vs. the reads of MFRR done
	 * below; this pairs with the barrier in H_IPI.
	 */
	smp_mb();

	if (cppr > old_cppr) {
		/*
		 * We are masking less: look for pending interrupts and
		 * set the pending bits in the TIMA so they get presented.
		 */
		GLUE(X_PFX,push_pending_to_hw)(xc);
	} else {
		/*
		 * We are masking more: interrupts sitting in the newly
		 * masked queues that have been rerouted to another vCPU
		 * would otherwise be stuck there until this vCPU restores
		 * its CPPR, so take them out and re-trigger them.
		 */
		GLUE(X_PFX,scan_for_rerouted_irqs)(xive, xc);
	}

	/* Apply the new CPPR to the HW */
	xc->hw_cppr = cppr;
	__x_writeb(cppr, __x_tima + TM_QW1_OS + TM_CPPR);

	return H_SUCCESS;
}

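/* H_EOI: the guest is done with an interrupt; EOI it at the source and rescan */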
X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_irq_data *xd;
	u8 new_cppr = xirr >> 24;
	u32 irq = xirr & 0x00ffffff, hw_num;
	u16 src;
	int rc = 0;

	pr_devel("H_EOI(xirr=%08lx)\n", xirr);

	xc->GLUE(X_STAT_PFX,h_eoi)++;

	xc->cppr = xive_prio_from_guest(new_cppr);

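	/*
	 * IPIs are synthesized from MFRR and need no source EOI here;
	 * the underlying IPI interrupt is EOIed when it is fetched from
	 * the queue.
	 */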
	if (irq == XICS_IPI || irq == 0) {
		/*
		 * This barrier orders the setting of xc->cppr vs. the
		 * subsequent tests of xc->mfrr done inside
		 * scan_interrupts and push_pending_to_hw.
		 */
		smp_mb();
		goto bail;
	}

	/* Find the interrupt source */
	sb = kvmppc_xive_find_source(xive, irq, &src);
	if (!sb) {
		pr_devel(" source not found !\n");
		rc = H_PARAMETER;
		/* Same barrier as above */
		smp_mb();
		goto bail;
	}
	state = &sb->irq_state[src];
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	state->in_eoi = true;

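	/*
	 * This barrier orders the setting of in_eoi above vs. the
	 * guest_priority test below, and the earlier CPPR update vs.
	 * the MFRR tests done after "bail".
	 */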
	smp_mb();

again:
	if (state->guest_priority == MASKED) {
		arch_spin_lock(&sb->lock);
		if (state->guest_priority != MASKED) {
			arch_spin_unlock(&sb->lock);
			goto again;
		}
		pr_devel(" EOI on saved P...\n");

		/* Clear old_p, that will cause unmask to perform an EOI */
		state->old_p = false;

		arch_spin_unlock(&sb->lock);
	} else {
		pr_devel(" EOI on source...\n");

		/* Perform the EOI on the source */
		GLUE(X_PFX,source_eoi)(hw_num, xd);

		/* If the LSI is still asserted, re-trigger it */
		if (state->lsi && state->asserted)
			__x_writeq(0, __x_trig_page(xd));
	}

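	/* Order the EOI accesses above vs. the clearing of in_eoi below */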
	mb();
	state->in_eoi = false;
bail:

	/* Re-evaluate pending IRQs and update HW */
	GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_eoi);
	GLUE(X_PFX,push_pending_to_hw)(xc);
	pr_devel(" after scan pending=%02x\n", xc->pending);

	/* Apply the new CPPR */
	xc->hw_cppr = xc->cppr;
	__x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);

	return rc;
}

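/* H_IPI: send an IPI to another vCPU by updating its MFRR */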
X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
			       unsigned long mfrr)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

	pr_devel("H_IPI(server=%08lx,mfrr=%ld)\n", server, mfrr);

	xc->GLUE(X_STAT_PFX,h_ipi)++;

	/* Find the target vCPU */
	vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
	if (!vcpu)
		return H_PARAMETER;
	xc = vcpu->arch.xive_vcpu;

	/* Locklessly write over MFRR */
	xc->mfrr = mfrr;

	/*
	 * Make the store to xc->mfrr visible before the read of
	 * xc->cppr below; this pairs with the barriers in H_CPPR and
	 * H_EOI so that a pending IPI is never lost.
	 */
	mb();

	/* Trigger the IPI if it is more favored than the target's CPPR */
	if (mfrr < xc->cppr)
		__x_writeq(0, __x_trig_page(&xc->vp_ipi_data));

	return H_SUCCESS;
}