This source file includes the following definitions:
- kvm_pmu_idx_is_64bit
- kvm_pmc_to_vcpu
- kvm_pmu_pmc_is_chained
- kvm_pmu_idx_is_high_counter
- kvm_pmu_get_canonical_pmc
- kvm_pmu_idx_has_chain_evtype
- kvm_pmu_get_pair_counter_value
- kvm_pmu_get_counter_value
- kvm_pmu_set_counter_value
- kvm_pmu_release_perf_event
- kvm_pmu_stop_counter
- kvm_pmu_vcpu_init
- kvm_pmu_vcpu_reset
- kvm_pmu_vcpu_destroy
- kvm_pmu_valid_counter_mask
- kvm_pmu_enable_counter_mask
- kvm_pmu_disable_counter_mask
- kvm_pmu_overflow_status
- kvm_pmu_update_state
- kvm_pmu_should_notify_user
- kvm_pmu_update_run
- kvm_pmu_flush_hwstate
- kvm_pmu_sync_hwstate
- kvm_pmu_perf_overflow
- kvm_pmu_software_increment
- kvm_pmu_handle_pmcr
- kvm_pmu_counter_is_enabled
- kvm_pmu_create_perf_event
- kvm_pmu_update_pmc_chained
- kvm_pmu_set_counter_event_type
- kvm_arm_support_pmu_v3
- kvm_arm_pmu_v3_enable
- kvm_arm_pmu_v3_init
- pmu_irq_is_valid
- kvm_arm_pmu_v3_set_attr
- kvm_arm_pmu_v3_get_attr
- kvm_arm_pmu_v3_has_attr
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>

static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);

#define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1

/**
 * kvm_pmu_idx_is_64bit - determine if select_idx is a 64bit counter
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
{
        return (select_idx == ARMV8_PMU_CYCLE_IDX &&
                __vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
}

static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu;
        struct kvm_vcpu_arch *vcpu_arch;

        pmc -= pmc->idx;
        pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
        vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
        return container_of(vcpu_arch, struct kvm_vcpu, arch);
}

/**
 * kvm_pmu_pmc_is_chained - determine if the pmc is chained
 * @pmc: The PMU counter pointer
 */
static bool kvm_pmu_pmc_is_chained(struct kvm_pmc *pmc)
{
        struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);

        return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
}

/**
 * kvm_pmu_idx_is_high_counter - determine if select_idx is the high counter
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_is_high_counter(u64 select_idx)
{
        return select_idx & 0x1;
}

/**
 * kvm_pmu_get_canonical_pmc - obtain the canonical pmc
 * @pmc: The PMU counter pointer
 *
 * When a pair of PMCs are chained together, the low/even counter is the
 * canonical one; it holds the shared perf event for the pair.
 */
static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
{
        if (kvm_pmu_pmc_is_chained(pmc) &&
            kvm_pmu_idx_is_high_counter(pmc->idx))
                return pmc - 1;

        return pmc;
}

/**
 * kvm_pmu_idx_has_chain_evtype - determine if the event type is CHAIN
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx)
{
        u64 eventsel, reg;

        select_idx |= 0x1;

        if (select_idx == ARMV8_PMU_CYCLE_IDX)
                return false;

        reg = PMEVTYPER0_EL0 + select_idx;
        eventsel = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_EVENT;

        return eventsel == ARMV8_PMUV3_PERFCTR_CHAIN;
}

/**
 * kvm_pmu_get_pair_counter_value - get the value of the counter (pair)
 * @vcpu: The vcpu pointer
 * @pmc: The PMU counter pointer
 */
static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
                                          struct kvm_pmc *pmc)
{
        u64 counter, counter_high, reg, enabled, running;

        if (kvm_pmu_pmc_is_chained(pmc)) {
                pmc = kvm_pmu_get_canonical_pmc(pmc);
                reg = PMEVCNTR0_EL0 + pmc->idx;

                counter = __vcpu_sys_reg(vcpu, reg);
                counter_high = __vcpu_sys_reg(vcpu, reg + 1);

                counter = lower_32_bits(counter) | (counter_high << 32);
        } else {
                reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
                      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
                counter = __vcpu_sys_reg(vcpu, reg);
        }

        /*
         * The real counter value is equal to the value of the counter
         * register plus the value the perf event has counted.
         */
        if (pmc->perf_event)
                counter += perf_event_read_value(pmc->perf_event, &enabled,
                                                 &running);

        return counter;
}

/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
        u64 counter;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc = &pmu->pmc[select_idx];

        counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

        if (kvm_pmu_pmc_is_chained(pmc) &&
            kvm_pmu_idx_is_high_counter(select_idx))
                counter = upper_32_bits(counter);
        else if (select_idx != ARMV8_PMU_CYCLE_IDX)
                counter = lower_32_bits(counter);

        return counter;
}

/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
        u64 reg;

        reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
              ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
        __vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);

        /* Recreate the perf event to reflect the updated sample_period */
        kvm_pmu_create_perf_event(vcpu, select_idx);
}

/**
 * kvm_pmu_release_perf_event - remove the perf event
 * @pmc: The PMU counter pointer
 */
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
        pmc = kvm_pmu_get_canonical_pmc(pmc);
        if (pmc->perf_event) {
                perf_event_disable(pmc->perf_event);
                perf_event_release_kernel(pmc->perf_event);
                pmc->perf_event = NULL;
        }
}

/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @vcpu: The vcpu pointer
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
        u64 counter, reg, val;

        pmc = kvm_pmu_get_canonical_pmc(pmc);
        if (!pmc->perf_event)
                return;

        counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

        if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
                reg = PMCCNTR_EL0;
                val = counter;
        } else {
                reg = PMEVCNTR0_EL0 + pmc->idx;
                val = lower_32_bits(counter);
        }

        __vcpu_sys_reg(vcpu, reg) = val;

        if (kvm_pmu_pmc_is_chained(pmc))
                __vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);

        kvm_pmu_release_perf_event(pmc);
}

/**
 * kvm_pmu_vcpu_init - assign the pmu counter indices
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
                pmu->pmc[i].idx = i;
}

/**
 * kvm_pmu_vcpu_reset - reset pmu state for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
                kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);

        bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
}

/**
 * kvm_pmu_vcpu_destroy - free the perf events of the PMU for this cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
                kvm_pmu_release_perf_event(&pmu->pmc[i]);
}

u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
        u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

        val &= ARMV8_PMU_PMCR_N_MASK;
        if (val == 0)
                return BIT(ARMV8_PMU_CYCLE_IDX);
        else
                return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}

/**
 * kvm_pmu_enable_counter_mask - enable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc;

        if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
                return;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
                if (!(val & BIT(i)))
                        continue;

                pmc = &pmu->pmc[i];

                /*
                 * For the high counter of a chained pair, recreate the perf
                 * event so its chained (64-bit) attribute matches the new
                 * enable state; the low counter owns the event.
                 */
                if (kvm_pmu_pmc_is_chained(pmc) &&
                    kvm_pmu_idx_is_high_counter(i)) {
                        kvm_pmu_create_perf_event(vcpu, i);
                        continue;
                }

                /* At this point, pmc must be the canonical */
                if (pmc->perf_event) {
                        perf_event_enable(pmc->perf_event);
                        if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
                                kvm_debug("fail to enable perf event\n");
                }
        }
}

/**
 * kvm_pmu_disable_counter_mask - disable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc;

        if (!val)
                return;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
                if (!(val & BIT(i)))
                        continue;

                pmc = &pmu->pmc[i];

                /*
                 * For the high counter of a chained pair, recreate the perf
                 * event so its chained (64-bit) attribute matches the new
                 * enable state; the low counter owns the event.
                 */
                if (kvm_pmu_pmc_is_chained(pmc) &&
                    kvm_pmu_idx_is_high_counter(i)) {
                        kvm_pmu_create_perf_event(vcpu, i);
                        continue;
                }

                /* At this point, pmc must be the canonical */
                if (pmc->perf_event)
                        perf_event_disable(pmc->perf_event);
        }
}

static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
        u64 reg = 0;

        if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
                reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
                reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
                reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
                reg &= kvm_pmu_valid_counter_mask(vcpu);
        }

        return reg;
}

static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        bool overflow;

        if (!kvm_arm_pmu_v3_ready(vcpu))
                return;

        overflow = !!kvm_pmu_overflow_status(vcpu);
        if (pmu->irq_level == overflow)
                return;

        pmu->irq_level = overflow;

        if (likely(irqchip_in_kernel(vcpu->kvm))) {
                int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
                                              pmu->irq_num, overflow, pmu);
                WARN_ON(ret);
        }
}

bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
        bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

        if (likely(irqchip_in_kernel(vcpu->kvm)))
                return false;

        return pmu->irq_level != run_level;
}

/*
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
        struct kvm_sync_regs *regs = &vcpu->run->s.regs;

        /* Populate the PMU overflow bit for user space */
        regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
        if (vcpu->arch.pmu.irq_level)
                regs->device_irq_level |= KVM_ARM_DEV_PMU;
}

/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
        kvm_pmu_update_state(vcpu);
}

/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
        kvm_pmu_update_state(vcpu);
}

/*
 * When the perf event overflows, set the overflow status and inform the vcpu.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
                                  struct perf_sample_data *data,
                                  struct pt_regs *regs)
{
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;
        struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
        struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
        int idx = pmc->idx;
        u64 period;

        cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);

        /*
         * Reset the sample period to the architectural limit,
         * i.e. the point where the counter overflows.
         */
        period = -(local64_read(&perf_event->count));

        if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
                period &= GENMASK(31, 0);

        local64_set(&perf_event->hw.period_left, 0);
        perf_event->attr.sample_period = period;
        perf_event->hw.sample_period = period;

        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);

        if (kvm_pmu_overflow_status(vcpu)) {
                kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
                kvm_vcpu_kick(vcpu);
        }

        cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
}

/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        int i;

        if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
                return;

        /* Weed out disabled counters */
        val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);

        for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
                u64 type, reg;

                if (!(val & BIT(i)))
                        continue;

                /* Only counters programmed with the SW_INCR event count */
                type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
                type &= ARMV8_PMU_EVTYPE_EVENT;
                if (type != ARMV8_PMUV3_PERFCTR_SW_INCR)
                        continue;

                /* increment the (32-bit) low counter */
                reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
                reg = lower_32_bits(reg);
                __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;

                if (reg) /* no overflow on the low part */
                        continue;

                if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
                        /* increment the high counter */
                        reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1;
                        reg = lower_32_bits(reg);
                        __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg;
                        if (!reg) /* mark overflow on the high counter */
                                __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1);
                } else {
                        /* mark overflow on the low counter */
                        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
                }
        }
}

/**
 * kvm_pmu_handle_pmcr - handle the writes to PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
        u64 mask;
        int i;

        mask = kvm_pmu_valid_counter_mask(vcpu);
        if (val & ARMV8_PMU_PMCR_E) {
                kvm_pmu_enable_counter_mask(vcpu,
                       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
        } else {
                kvm_pmu_disable_counter_mask(vcpu, mask);
        }

        if (val & ARMV8_PMU_PMCR_C)
                kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

        if (val & ARMV8_PMU_PMCR_P) {
                for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++)
                        kvm_pmu_set_counter_value(vcpu, i, 0);
        }
}

static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
{
        return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
               (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
}

/**
 * kvm_pmu_create_perf_event - create a perf event for a counter
 * @vcpu: The vcpu pointer
 * @select_idx: The number of selected counter
 */
static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc;
        struct perf_event *event;
        struct perf_event_attr attr;
        u64 eventsel, counter, reg, data;

        /*
         * For chained counters the event type and filtering attributes are
         * obtained from the low/even counter. We also use this counter to
         * determine if the event is enabled/disabled.
         */
        pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]);

        reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
              ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
        data = __vcpu_sys_reg(vcpu, reg);

        kvm_pmu_stop_counter(vcpu, pmc);
        eventsel = data & ARMV8_PMU_EVTYPE_EVENT;

        /* Software increment events don't need to be backed by a perf event */
        if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR &&
            pmc->idx != ARMV8_PMU_CYCLE_IDX)
                return;

        memset(&attr, 0, sizeof(struct perf_event_attr));
        attr.type = PERF_TYPE_RAW;
        attr.size = sizeof(attr);
        attr.pinned = 1;
        attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
        attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
        attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
        attr.exclude_hv = 1; /* Don't count EL2 events */
        attr.exclude_host = 1; /* Don't count host events */
        attr.config = (pmc->idx == ARMV8_PMU_CYCLE_IDX) ?
                ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel;

        counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

        if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
                /*
                 * The initial sample period (overflow count) of an event. For
                 * chained counters we only support overflow interrupts on the
                 * high counter.
                 */
                attr.sample_period = (-counter) & GENMASK(63, 0);
                if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
                        attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;

                event = perf_event_create_kernel_counter(&attr, -1, current,
                                                         kvm_pmu_perf_overflow,
                                                         pmc + 1);
        } else {
                /* The initial sample period (overflow count) of an event. */
                if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
                        attr.sample_period = (-counter) & GENMASK(63, 0);
                else
                        attr.sample_period = (-counter) & GENMASK(31, 0);

                event = perf_event_create_kernel_counter(&attr, -1, current,
                                                         kvm_pmu_perf_overflow, pmc);
        }

        if (IS_ERR(event)) {
                pr_err_once("kvm: pmu event creation failed %ld\n",
                            PTR_ERR(event));
                return;
        }

        pmc->perf_event = event;
}

/**
 * kvm_pmu_update_pmc_chained - update chained bitmap
 * @vcpu: The vcpu pointer
 * @select_idx: The number of selected counter
 *
 * Update the chained bitmap based on the event type written in the
 * typer register.
 */
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc = &pmu->pmc[select_idx];

        if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
                /*
                 * During promotion from !chained to chained we must ensure
                 * the adjacent counter is stopped and its event destroyed
                 */
                if (!kvm_pmu_pmc_is_chained(pmc))
                        kvm_pmu_stop_counter(vcpu, pmc);

                set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
        } else {
                clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
        }
}

/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of selected counter
 *
 * When the guest accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
 * event with the given hardware event number. Here we call the perf_event API
 * to emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
                                    u64 select_idx)
{
        u64 reg, event_type = data & ARMV8_PMU_EVTYPE_MASK;

        reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
              ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + select_idx;

        __vcpu_sys_reg(vcpu, reg) = event_type;

        kvm_pmu_update_pmc_chained(vcpu, select_idx);
        kvm_pmu_create_perf_event(vcpu, select_idx);
}

bool kvm_arm_support_pmu_v3(void)
{
        /*
         * Check if HW_PERF_EVENTS are supported by checking the number of
         * hardware performance counters. This ensures the presence of a
         * physical PMU and that CONFIG_PERF_EVENTS is selected.
         */
        return (perf_num_counters() > 0);
}

int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
        if (!vcpu->arch.pmu.created)
                return 0;

        /*
         * A valid interrupt configuration for the PMU is either to have a
         * properly configured interrupt number and using an in-kernel
         * irqchip, or to not have an in-kernel GIC and not set an IRQ.
         */
        if (irqchip_in_kernel(vcpu->kvm)) {
                int irq = vcpu->arch.pmu.irq_num;
                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        return -EINVAL;

                /*
                 * If we are using an in-kernel vgic, at this point we know
                 * the vgic will be initialized, so we can check the PMU irq
                 * number against the dimensions of the vgic and make sure
                 * it's valid.
                 */
                if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
                        return -EINVAL;
        } else if (kvm_arm_pmu_irq_initialized(vcpu)) {
                return -EINVAL;
        }

        kvm_pmu_vcpu_reset(vcpu);
        vcpu->arch.pmu.ready = true;

        return 0;
}

static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
        if (!kvm_arm_support_pmu_v3())
                return -ENODEV;

        if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
                return -ENXIO;

        if (vcpu->arch.pmu.created)
                return -EBUSY;

        if (irqchip_in_kernel(vcpu->kvm)) {
                int ret;

                /*
                 * If using the PMU with an in-kernel virtual GIC
                 * implementation, we require the GIC to be already
                 * initialized when initializing the PMU.
                 */
                if (!vgic_initialized(vcpu->kvm))
                        return -ENODEV;

                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        return -ENXIO;

                ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
                                         &vcpu->arch.pmu);
                if (ret)
                        return ret;
        }

        vcpu->arch.pmu.created = true;
        return 0;
}

/*
 * For one VM the interrupt type must be same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
        int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        continue;

                if (irq_is_ppi(irq)) {
                        if (vcpu->arch.pmu.irq_num != irq)
                                return false;
                } else {
                        if (vcpu->arch.pmu.irq_num == irq)
                                return false;
                }
        }

        return true;
}

int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ: {
                int __user *uaddr = (int __user *)(long)attr->addr;
                int irq;

                if (!irqchip_in_kernel(vcpu->kvm))
                        return -EINVAL;

                if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
                        return -ENODEV;

                if (get_user(irq, uaddr))
                        return -EFAULT;

                /* The PMU overflow interrupt can be a PPI or a valid SPI. */
                if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
                        return -EINVAL;

                if (!pmu_irq_is_valid(vcpu->kvm, irq))
                        return -EINVAL;

                if (kvm_arm_pmu_irq_initialized(vcpu))
                        return -EBUSY;

                kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
                vcpu->arch.pmu.irq_num = irq;
                return 0;
        }
        case KVM_ARM_VCPU_PMU_V3_INIT:
                return kvm_arm_pmu_v3_init(vcpu);
        }

        return -ENXIO;
}

int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ: {
                int __user *uaddr = (int __user *)(long)attr->addr;
                int irq;

                if (!irqchip_in_kernel(vcpu->kvm))
                        return -EINVAL;

                if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
                        return -ENODEV;

                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        return -ENXIO;

                irq = vcpu->arch.pmu.irq_num;
                return put_user(irq, uaddr);
        }
        }

        return -ENXIO;
}

int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ:
        case KVM_ARM_VCPU_PMU_V3_INIT:
                if (kvm_arm_support_pmu_v3() &&
                    test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
                        return 0;
        }

        return -ENXIO;
}
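
For context: user space reaches kvm_arm_pmu_v3_set_attr() (and, via the KVM_ARM_VCPU_PMU_V3_INIT attribute, kvm_arm_pmu_v3_init()) through the KVM_SET_DEVICE_ATTR ioctl on a vcpu file descriptor, using the KVM_ARM_VCPU_PMU_V3_CTRL attribute group. The sketch below is illustrative only and is not part of this file: it assumes a vcpu created with the KVM_ARM_VCPU_PMU_V3 feature bit and an already-initialized in-kernel GIC, it picks PPI 23 purely as an example interrupt number, and the helper name vcpu_pmu_setup is hypothetical.

#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

/* Minimal sketch: configure the vPMU overflow interrupt, then initialize it. */
static int vcpu_pmu_setup(int vcpu_fd)
{
        int irq = 23;   /* example PPI; any interrupt accepted by pmu_irq_is_valid() works */
        struct kvm_device_attr attr = {
                .group = KVM_ARM_VCPU_PMU_V3_CTRL,
                .attr  = KVM_ARM_VCPU_PMU_V3_IRQ,
                .addr  = (__u64)(unsigned long)&irq,
        };

        /* Handled by kvm_arm_pmu_v3_set_attr(), which records pmu.irq_num */
        if (ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr)) {
                perror("KVM_ARM_VCPU_PMU_V3_IRQ");
                return -1;
        }

        /* The INIT attribute ends up in kvm_arm_pmu_v3_init() */
        attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
        attr.addr = 0;
        if (ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr)) {
                perror("KVM_ARM_VCPU_PMU_V3_INIT");
                return -1;
        }

        return 0;
}

Note that the IRQ attribute must be set before INIT: with an in-kernel irqchip, kvm_arm_pmu_v3_init() above refuses to complete until kvm_arm_pmu_irq_initialized() is true.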