This source file includes the following definitions:
- amd_pmu_event_map
- amd_pmu_addr_offset
- amd_get_event_code
- amd_is_pair_event_code
- amd_core_hw_config
- amd_is_nb_event
- amd_has_nb
- amd_pmu_hw_config
- __amd_put_nb_event_constraints
- __amd_get_nb_event_constraints
- amd_alloc_nb
- amd_pmu_cpu_prepare
- amd_pmu_cpu_starting
- amd_pmu_cpu_dead
- amd_pmu_wait_on_overflow
- amd_pmu_disable_all
- amd_pmu_disable_event
- amd_pmu_handle_irq
- amd_get_event_constraints
- amd_put_event_constraints
- amd_get_event_constraints_f15h
- amd_get_event_constraints_f17h
- amd_event_sysfs_show
- amd_core_pmu_init
- amd_pmu_init
- amd_pmu_enable_virt
- amd_pmu_disable_virt

#include <linux/perf_event.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <asm/apicdef.h>
#include <asm/nmi.h>

#include "../perf_event.h"

static DEFINE_PER_CPU(unsigned long, perf_nmi_tstamp);
static unsigned long perf_nmi_window;
static __initconst const u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040,
		[ C(RESULT_MISS)   ] = 0x0141,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267,
		[ C(RESULT_MISS)   ] = 0x0167,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080,
		[ C(RESULT_MISS)   ] = 0x0081,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D,
		[ C(RESULT_MISS)   ] = 0x037E,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040,
		[ C(RESULT_MISS)   ] = 0x0746,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080,
		[ C(RESULT_MISS)   ] = 0x0385,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2,
		[ C(RESULT_MISS)   ] = 0x00c3,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xb8e9,
		[ C(RESULT_MISS)   ] = 0x98e9,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static __initconst const u64 amd_hw_cache_event_ids_f17h
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x0040,
		[C(RESULT_MISS)]   = 0xc860,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = 0xff5a,
		[C(RESULT_MISS)]   = 0,
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x0080,
		[C(RESULT_MISS)]   = 0x0081,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0xff45,
		[C(RESULT_MISS)]   = 0xf045,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x0084,
		[C(RESULT_MISS)]   = 0xff85,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x00c2,
		[C(RESULT_MISS)]   = 0x00c3,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
},
};

/*
 * AMD Performance Monitor K7 and later, up to and including Family 16h:
 */
static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x077d,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x077e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x00d0,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x00d1,
};

/*
 * AMD Performance Monitor Family 17h and later:
 */
static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0xff60,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x0964,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x0287,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x0187,
};

static u64 amd_pmu_event_map(int hw_event)
{
	if (boot_cpu_data.x86 >= 0x17)
		return amd_f17h_perfmon_event_map[hw_event];

	return amd_perfmon_event_map[hw_event];
}
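
/*
 * Illustrative sketch, not part of the original source: the mapping above
 * means the same generic event resolves to different hardware event codes
 * depending on family, e.g. PERF_COUNT_HW_CACHE_MISSES is 0x077e up to
 * Family 16h but 0x0964 from Family 17h on.
 */
static u64 __maybe_unused example_cache_miss_code(void)
{
	return amd_pmu_event_map(PERF_COUNT_HW_CACHE_MISSES);
}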

/*
 * Previously calculated offsets
 */
static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;

/*
 * Legacy CPUs:
 *   4 counters starting at 0xc0010000 each offset by 1
 *
 * CPUs with core performance counter extensions:
 *   6 counters starting at 0xc0010200 each offset by 2
 */
static inline int amd_pmu_addr_offset(int index, bool eventsel)
{
	int offset;

	if (!index)
		return index;

	if (eventsel)
		offset = event_offsets[index];
	else
		offset = count_offsets[index];

	if (offset)
		return offset;

	if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
		offset = index;
	else
		offset = index << 1;

	if (eventsel)
		event_offsets[index] = offset;
	else
		count_offsets[index] = offset;

	return offset;
}
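
/*
 * Illustrative sketch, not part of the original source: the core perf code
 * resolves counter MSR addresses as base + amd_pmu_addr_offset(). On a
 * legacy CPU, counter 3's event select is MSR_K7_EVNTSEL0 + 3 (0xc0010003);
 * with X86_FEATURE_PERFCTR_CORE it is MSR_F15H_PERF_CTL + 6 (0xc0010206),
 * since core counters are spaced two MSRs apart (CTL/CTR pairs).
 */
static unsigned int __maybe_unused example_eventsel_addr(int index)
{
	return x86_pmu.eventsel + amd_pmu_addr_offset(index, true);
}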

/*
 * AMD64 events are detected based on their event codes.
 */
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
	return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}

static inline bool amd_is_pair_event_code(struct hw_perf_event *hwc)
{
	if (!(x86_pmu.flags & PMU_FL_PAIR))
		return false;

	switch (amd_get_event_code(hwc)) {
	case 0x003:	return true;	/* Retired SSE/AVX FLOPs */
	default:	return false;
	}
}
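
/*
 * Illustrative sketch, not part of the original source: AMD event select
 * codes are 12 bits wide, split across config[7:0] and config[35:32], which
 * amd_get_event_code() reassembles. A raw config of 0x100000040 therefore
 * yields event code 0x140. On Family 17h, only code 0x003 is treated as a
 * paired (Large Increment per Cycle) event by amd_is_pair_event_code().
 */
static unsigned int __maybe_unused example_event_code(void)
{
	struct hw_perf_event hwc = { .config = 0x100000040ULL };

	return amd_get_event_code(&hwc);	/* 0x140 */
}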

static int amd_core_hw_config(struct perf_event *event)
{
	if (event->attr.exclude_host && event->attr.exclude_guest)
		/*
		 * When HO == GO == 1 the hardware treats that as GO == HO == 0
		 * and will count in both modes. We don't want to count in that
		 * case so we emulate no-counting by setting US = OS = 0.
		 */
		event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
				      ARCH_PERFMON_EVENTSEL_OS);
	else if (event->attr.exclude_host)
		event->hw.config |= AMD64_EVENTSEL_GUESTONLY;
	else if (event->attr.exclude_guest)
		event->hw.config |= AMD64_EVENTSEL_HOSTONLY;

	return 0;
}

static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
	return (hwc->config & 0xe0) == 0xe0;
}

static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
	struct amd_nb *nb = cpuc->amd_nb;

	return nb && nb->nb_id != -1;
}

static int amd_pmu_hw_config(struct perf_event *event)
{
	int ret;

	/* pass precise event sampling to ibs: */
	if (event->attr.precise_ip && get_ibs_caps())
		return -ENOENT;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	ret = x86_pmu_hw_config(event);
	if (ret)
		return ret;

	if (event->attr.type == PERF_TYPE_RAW)
		event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

	return amd_core_hw_config(event);
}
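
/*
 * Illustrative note, not part of the original source: with the above, a raw
 * event opened with attr.exclude_guest = 1 gets AMD64_EVENTSEL_HOSTONLY set
 * in its event select, so it only counts while the CPU is not running an
 * SVM guest; attr.exclude_host = 1 sets the Guest-only bit instead; and
 * setting both disables counting entirely by clearing the USR and OS bits.
 */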

static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
					   struct perf_event *event)
{
	struct amd_nb *nb = cpuc->amd_nb;
	int i;

	/*
	 * need to scan whole list because event may not have
	 * been assigned during scheduling
	 *
	 * no race condition possible because event can only
	 * be removed on one CPU at a time AND PMU is disabled
	 * when we come here
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (cmpxchg(nb->owners + i, event, NULL) == event)
			break;
	}
}

/*
 * AMD64 NorthBridge events need special treatment because
 * counter access needs to be synchronized across all cores
 * of a package. Refer to BKDG section 3.12.
 *
 * NB events measure L3 cache and HyperTransport traffic. They are
 * identified by an event code with all of bits 5-7 set (0x0E0-0x0FF,
 * see amd_is_nb_event()). They count on a set of counters shared by
 * all cores on a package: when an NB event is programmed into a core
 * counter, the data actually comes from the shared counter, so access
 * to those counters needs to be synchronized.
 *
 * We implement the synchronization such that no two cores can be
 * measuring NB events using the same counters. To that end we maintain
 * a per-NB allocation table (nb->owners) and propagate the chosen slot
 * through the event_constraint structure.
 *
 * We provide only one counter choice for each NB event. Consequently,
 * if a counter is available, the NB event is guaranteed to be assigned
 * to it; if no slot is available, an empty constraint is returned and
 * scheduling will eventually fail for this event.
 *
 * Note that all cores attached to the same NB compete for the same
 * counters to host NB events; this is why we use atomic ops here. Some
 * multi-chip CPUs like Family 10h have more than one NB.
 *
 * Given that resources are allocated via cmpxchg(), they must
 * eventually be freed for others to use; this happens in
 * __amd_put_nb_event_constraints().
 *
 * Non-NB events are not affected by this restriction.
 */
static struct event_constraint *
__amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
			       struct event_constraint *c)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	struct perf_event *old;
	int idx, new = -1;

	if (!c)
		c = &unconstrained;

	if (cpuc->is_fake)
		return c;

	/*
	 * detect if the event is already present in the table, if so reuse
	 * its slot
	 *
	 * this cannot be merged with the actual allocation below because of
	 * possible holes in the mask
	 *
	 * the event can already be present yet not assigned (in hwc->idx)
	 * because of successive calls to x86_schedule_events() from
	 * hw_perf_group_sched_in() without hw_perf_enable()
	 */
	for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
		if (new == -1 || hwc->idx == idx)
			/* assign free slot, prefer hwc->idx */
			old = cmpxchg(nb->owners + idx, NULL, event);
		else if (nb->owners[idx] == event)
			/* event already present */
			old = event;
		else
			continue;

		if (old && old != event)
			continue;

		/* reassign to this slot */
		if (new != -1)
			cmpxchg(nb->owners + new, event, NULL);
		new = idx;

		/* already present, reuse */
		if (old == event)
			break;
	}

	if (new == -1)
		return &emptyconstraint;

	return &nb->event_constraints[new];
}
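
/*
 * Illustrative walk-through, not part of the original source, assuming four
 * NB counters and an event with no prior assignment: if another core's
 * events already own slots 0 and 1 of nb->owners, the cmpxchg() claim first
 * succeeds at slot 2 and &nb->event_constraints[2] is returned, pinning
 * this event to counter 2 package-wide. If every slot is owned by other
 * events, new stays -1 and &emptyconstraint makes scheduling fail.
 */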

static struct amd_nb *amd_alloc_nb(int cpu)
{
	struct amd_nb *nb;
	int i;

	nb = kzalloc_node(sizeof(struct amd_nb), GFP_KERNEL, cpu_to_node(cpu));
	if (!nb)
		return NULL;

	nb->nb_id = -1;

	/*
	 * initialize all possible NB constraints
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		__set_bit(i, nb->event_constraints[i].idxmsk);
		nb->event_constraints[i].weight = 1;
	}
	return nb;
}

static int amd_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	WARN_ON_ONCE(cpuc->amd_nb);

	if (!x86_pmu.amd_nb_constraints)
		return 0;

	cpuc->amd_nb = amd_alloc_nb(cpu);
	if (!cpuc->amd_nb)
		return -ENOMEM;

	return 0;
}

static void amd_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
	struct amd_nb *nb;
	int i, nb_id;

	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;

	if (!x86_pmu.amd_nb_constraints)
		return;

	nb_id = amd_get_nb_id(cpu);
	WARN_ON_ONCE(nb_id == BAD_APICID);

	/*
	 * If another online CPU sits on the same northbridge, share its
	 * amd_nb structure and queue the one allocated in
	 * amd_pmu_cpu_prepare() for freeing.
	 */
	for_each_online_cpu(i) {
		nb = per_cpu(cpu_hw_events, i).amd_nb;
		if (WARN_ON_ONCE(!nb))
			continue;

		if (nb->nb_id == nb_id) {
			*onln = cpuc->amd_nb;
			cpuc->amd_nb = nb;
			break;
		}
	}

	cpuc->amd_nb->nb_id = nb_id;
	cpuc->amd_nb->refcnt++;
}

static void amd_pmu_cpu_dead(int cpu)
{
	struct cpu_hw_events *cpuhw;

	if (!x86_pmu.amd_nb_constraints)
		return;

	cpuhw = &per_cpu(cpu_hw_events, cpu);

	if (cpuhw->amd_nb) {
		struct amd_nb *nb = cpuhw->amd_nb;

		if (nb->nb_id == -1 || --nb->refcnt == 0)
			kfree(nb);

		cpuhw->amd_nb = NULL;
	}
}

/*
 * When a PMC counter overflows, an NMI is used to process the event and
 * reset the counter. NMI latency can result in the counter being updated
 * before the NMI can run, which can result in what appear to be spurious
 * NMIs. This function is intended to wait for the NMI to run and reset
 * the counter to avoid possible unhandled NMI messages.
 */
#define OVERFLOW_WAIT_COUNT	50

static void amd_pmu_wait_on_overflow(int idx)
{
	unsigned int i;
	u64 counter;

	/*
	 * Wait for the counter to be reset if it has overflowed. This loop
	 * should exit very, very quickly, but just in case, don't wait
	 * more than OVERFLOW_WAIT_COUNT attempts.
	 */
	for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
		rdmsrl(x86_pmu_event_addr(idx), counter);
		if (counter & (1ULL << (x86_pmu.cntval_bits - 1)))
			break;

		/* Might be in IRQ context, so can't sleep */
		udelay(1);
	}
}
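
/*
 * Illustrative sketch, not part of the original source: with 48-bit
 * counters (x86_pmu.cntval_bits == 48), the loop above polls bit 47. A
 * counter that has wrapped past its overflow point reads with bit 47
 * clear; once the NMI handler reloads it with a sign-bit-set period value,
 * bit 47 reads as set again and the wait ends.
 */
static bool __maybe_unused example_counter_was_reset(u64 counter)
{
	return counter & (1ULL << 47);	/* assumes cntval_bits == 48 */
}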

static void amd_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx;

	x86_pmu_disable_all();

	/*
	 * This shouldn't be called from NMI context, but add a safeguard here
	 * to return, since if we're in NMI context we can't wait for an NMI
	 * to reset an overflowed counter value.
	 */
	if (in_nmi())
		return;

	/*
	 * Check each counter for overflow and wait for it to be reset by the
	 * NMI if it has overflowed. This relies on the fact that all active
	 * counters are always enabled when this function is called and
	 * ARCH_PERFMON_EVENTSEL_INT is always set.
	 */
	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		amd_pmu_wait_on_overflow(idx);
	}
}

static void amd_pmu_disable_event(struct perf_event *event)
{
	x86_pmu_disable_event(event);

	/*
	 * This can be called from NMI context (via x86_pmu_stop). The counter
	 * may have overflowed, but either way, we'll never see it get reset
	 * by the NMI if we're already in the NMI, so don't wait in that case.
	 */
	if (in_nmi())
		return;

	amd_pmu_wait_on_overflow(event->hw.idx);
}

/*
 * Because of NMI latency, an overflowed PMC counter may already have been
 * handled by the time its NMI is delivered, making that NMI look spurious
 * and eventually triggering "unknown NMI" messages. To mitigate this,
 * whenever the handler processes at least one overflow it opens a per-CPU
 * window (perf_nmi_tstamp = jiffies + perf_nmi_window) during which any
 * subsequent unclaimed NMI is assumed to be such a late PMC NMI and is
 * claimed as handled.
 */
static int amd_pmu_handle_irq(struct pt_regs *regs)
{
	int handled;

	/* Process any counter overflows */
	handled = x86_pmu_handle_irq(regs);

	/*
	 * If a counter was handled, record a timestamp such that un-handled
	 * NMIs will be claimed if arriving within that window.
	 */
	if (handled) {
		this_cpu_write(perf_nmi_tstamp, jiffies + perf_nmi_window);

		return handled;
	}

	if (time_after(jiffies, this_cpu_read(perf_nmi_tstamp)))
		return NMI_DONE;

	return NMI_HANDLED;
}
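
/*
 * Illustrative timeline, not part of the original source, with the 100 ms
 * window configured in amd_core_pmu_init(): an NMI handled at time t sets
 * perf_nmi_tstamp to t + perf_nmi_window. A second, otherwise-unclaimed NMI
 * arriving shortly after t is assumed to be a late PMC NMI and reported as
 * NMI_HANDLED; the same NMI arriving after the window has expired is passed
 * on as NMI_DONE and may legitimately trigger unknown-NMI handling.
 */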

static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event)
{
	/*
	 * if not NB event or no NB, then no constraints
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
		return &unconstrained;

	return __amd_get_nb_event_constraints(cpuc, event, NULL);
}

static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
				      struct perf_event *event)
{
	if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))
		__amd_put_nb_event_constraints(cpuc, event);
}

PMU_FORMAT_ATTR(event,	"config:0-7,32-35");
PMU_FORMAT_ATTR(umask,	"config:8-15");
PMU_FORMAT_ATTR(edge,	"config:18");
PMU_FORMAT_ATTR(inv,	"config:23");
PMU_FORMAT_ATTR(cmask,	"config:24-31");

static struct attribute *amd_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	NULL,
};

/* AMD Family 15h */

#define AMD_EVENT_TYPE_MASK	0x000000F0ULL

#define AMD_EVENT_FP		0x00000000ULL ... 0x00000010ULL
#define AMD_EVENT_LS		0x00000020ULL ... 0x00000030ULL
#define AMD_EVENT_DC		0x00000040ULL ... 0x00000050ULL
#define AMD_EVENT_CU		0x00000060ULL ... 0x00000070ULL
#define AMD_EVENT_IC_DE		0x00000080ULL ... 0x00000090ULL
#define AMD_EVENT_EX_LS		0x000000C0ULL
#define AMD_EVENT_DE		0x000000D0ULL
#define AMD_EVENT_NB		0x000000E0ULL ... 0x000000F0ULL

/*
 * AMD family 15h event code/PMC mappings, as implemented by
 * amd_get_event_constraints_f15h() below.
 *
 * Default counters per type (type = event_code & 0x0F0):
 *
 * 0x000 ... 0x010	FP	PERF_CTL[5:3]
 * 0x020 ... 0x030	LS	PERF_CTL[5:0]
 * 0x040 ... 0x050	DC	PERF_CTL[5:0]
 * 0x060 ... 0x070	CU	PERF_CTL[2:0]
 * 0x080 ... 0x090	IC/DE	PERF_CTL[2:0]
 * 0x0C0		EX/LS	PERF_CTL[5:0]
 * 0x0D0		DE	PERF_CTL[2:0]
 * 0x0E0 ... 0x0F0	NB	(handled by the amd/uncore.c driver)
 *
 * Exceptions:
 *
 * 0x000	PERF_CTL[3] if both umask nibbles are set, else PERF_CTL[5:3]
 * 0x003	PERF_CTL[3]
 * 0x004	PERF_CTL[3] if more than one umask bit is set, else PERF_CTL[5:3]
 * 0x00B	PERF_CTL[3]
 * 0x00D	PERF_CTL[3]
 * 0x023	PERF_CTL[2:0]
 * 0x02D	PERF_CTL[3]
 * 0x02E	PERF_CTL[3,0]
 * 0x031	PERF_CTL[2:0] with at most one umask bit set, else unschedulable
 * 0x043	PERF_CTL[2:0]
 * 0x045	PERF_CTL[2:0]
 * 0x046	PERF_CTL[2:0]
 * 0x054	PERF_CTL[2:0]
 * 0x055	PERF_CTL[2:0]
 * 0x08F	PERF_CTL[0]
 * 0x0DB ... 0x0DF	PERF_CTL[5:0]
 * 0x187	PERF_CTL[0]
 * 0x188	PERF_CTL[0]
 * 0x1C0	PERF_CTL[5:3]
 * 0x1D6	PERF_CTL[5:0]
 * 0x1D8	PERF_CTL[5:0]
 */

static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);

static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx,
			       struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int event_code = amd_get_event_code(hwc);

	switch (event_code & AMD_EVENT_TYPE_MASK) {
	case AMD_EVENT_FP:
		switch (event_code) {
		case 0x000:
			if (!(hwc->config & 0x0000F000ULL))
				break;
			if (!(hwc->config & 0x00000F00ULL))
				break;
			return &amd_f15_PMC3;
		case 0x004:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				break;
			return &amd_f15_PMC3;
		case 0x003:
		case 0x00B:
		case 0x00D:
			return &amd_f15_PMC3;
		}
		return &amd_f15_PMC53;
	case AMD_EVENT_LS:
	case AMD_EVENT_DC:
	case AMD_EVENT_EX_LS:
		switch (event_code) {
		case 0x023:
		case 0x043:
		case 0x045:
		case 0x046:
		case 0x054:
		case 0x055:
			return &amd_f15_PMC20;
		case 0x02D:
			return &amd_f15_PMC3;
		case 0x02E:
			return &amd_f15_PMC30;
		case 0x031:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				return &amd_f15_PMC20;
			return &emptyconstraint;
		case 0x1C0:
			return &amd_f15_PMC53;
		default:
			return &amd_f15_PMC50;
		}
	case AMD_EVENT_CU:
	case AMD_EVENT_IC_DE:
	case AMD_EVENT_DE:
		switch (event_code) {
		case 0x08F:
		case 0x187:
		case 0x188:
			return &amd_f15_PMC0;
		case 0x0DB ... 0x0DF:
		case 0x1D6:
		case 0x1D8:
			return &amd_f15_PMC50;
		default:
			return &amd_f15_PMC20;
		}
	case AMD_EVENT_NB:
		/* moved to uncore.c */
		return &emptyconstraint;
	default:
		return &emptyconstraint;
	}
}
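
/*
 * Illustrative sketch, not part of the original source: the constraint
 * masks above are counter bitmaps, e.g. amd_f15_PMC20 (0x07) allows
 * PERF_CTL[2:0] and amd_f15_PMC53 (0x38) allows PERF_CTL[5:3]. Event 0x02E
 * maps to amd_f15_PMC30 (0x09, counters 0 and 3), which is defined with
 * EVENT_CONSTRAINT_OVERLAP because its mask overlaps differently-weighted
 * constraints, forcing the scheduler to revisit earlier assignments.
 */
static __maybe_unused struct event_constraint *example_f15h_constraint(void)
{
	struct perf_event event = {
		.hw = { .config = 0x02E },	/* an LS-type event */
	};

	return amd_get_event_constraints_f15h(NULL, 0, &event);	/* &amd_f15_PMC30 */
}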

static struct event_constraint pair_constraint;

static struct event_constraint *
amd_get_event_constraints_f17h(struct cpu_hw_events *cpuc, int idx,
			       struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (amd_is_pair_event_code(hwc))
		return &pair_constraint;

	return &unconstrained;
}
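
/*
 * Illustrative sketch, not part of the original source, assuming
 * PMU_FL_PAIR has been set by amd_core_pmu_init(): only event code 0x003
 * gets pair_constraint, which restricts it to even-numbered counters
 * because a Large Increment per Cycle event also occupies the odd counter
 * above the one it is scheduled on.
 */
static __maybe_unused struct event_constraint *example_f17h_constraint(void)
{
	struct perf_event event = {
		.hw = { .config = 0x003 },	/* Large Increment per Cycle */
	};

	return amd_get_event_constraints_f17h(NULL, 0, &event);	/* &pair_constraint */
}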

static ssize_t amd_event_sysfs_show(char *page, u64 config)
{
	u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) |
		    (config & AMD64_EVENTSEL_EVENT) >> 24;

	return x86_event_sysfs_show(page, config, event);
}

static __initconst const struct x86_pmu amd_pmu = {
	.name			= "AMD",
	.handle_irq		= amd_pmu_handle_irq,
	.disable_all		= amd_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= amd_pmu_disable_event,
	.hw_config		= amd_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.addr_offset		= amd_pmu_addr_offset,
	.event_map		= amd_pmu_event_map,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters		= AMD64_NUM_COUNTERS,
	.cntval_bits		= 48,
	.cntval_mask		= (1ULL << 48) - 1,
	.apic			= 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= amd_get_event_constraints,
	.put_event_constraints	= amd_put_event_constraints,

	.format_attrs		= amd_format_attr,
	.events_sysfs_show	= amd_event_sysfs_show,

	.cpu_prepare		= amd_pmu_cpu_prepare,
	.cpu_starting		= amd_pmu_cpu_starting,
	.cpu_dead		= amd_pmu_cpu_dead,

	.amd_nb_constraints	= 1,
};

static int __init amd_core_pmu_init(void)
{
	u64 even_ctr_mask = 0ULL;
	int i;

	if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
		return 0;

	/* Avoid calculating the value each time in the NMI handler */
	perf_nmi_window = msecs_to_jiffies(100);

	/*
	 * If core performance counter extensions exist, we must use
	 * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also
	 * amd_pmu_addr_offset().
	 */
	x86_pmu.eventsel	= MSR_F15H_PERF_CTL;
	x86_pmu.perfctr		= MSR_F15H_PERF_CTR;
	x86_pmu.num_counters	= AMD64_NUM_COUNTERS_CORE;

	/*
	 * AMD Core perfctr has separate MSRs for the NB events, see
	 * the amd/uncore.c driver.
	 */
	x86_pmu.amd_nb_constraints = 0;

	if (boot_cpu_data.x86 == 0x15) {
		pr_cont("Fam15h ");
		x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
	}
	if (boot_cpu_data.x86 >= 0x17) {
		pr_cont("Fam17h+ ");
		/*
		 * Family 17h and compatibles have constraints for Large
		 * Increment per Cycle events: they may only be scheduled on
		 * even-numbered counters, since each such event also takes
		 * over the odd counter above it when active.
		 */
		for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
			even_ctr_mask |= 1 << i;

		pair_constraint = (struct event_constraint)
				  __EVENT_CONSTRAINT(0, even_ctr_mask, 0,
						     x86_pmu.num_counters / 2, 0,
						     PERF_X86_EVENT_PAIR);

		x86_pmu.get_event_constraints = amd_get_event_constraints_f17h;
		x86_pmu.flags |= PMU_FL_PAIR;
	}

	pr_cont("core perfctr, ");
	return 0;
}
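
/*
 * Illustrative worked example, not part of the original source: with
 * AMD64_NUM_COUNTERS_CORE == 6, the loop above visits i = 0, 2, 4 and
 * yields even_ctr_mask == 0x15, so paired events may start on counters 0,
 * 2 or 4, and the constraint weight of num_counters / 2 == 3 reflects that
 * each pair consumes two of the six counters.
 */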

__init int amd_pmu_init(void)
{
	int ret;

	/* Performance-monitoring supported from K7 and later: */
	if (boot_cpu_data.x86 < 6)
		return -ENODEV;

	x86_pmu = amd_pmu;

	ret = amd_core_pmu_init();
	if (ret)
		return ret;

	if (num_possible_cpus() == 1) {
		/*
		 * No point in allocating data structures to serialize
		 * against other CPUs, when there is only the one CPU.
		 */
		x86_pmu.amd_nb_constraints = 0;
	}

	if (boot_cpu_data.x86 >= 0x17)
		memcpy(hw_cache_event_ids, amd_hw_cache_event_ids_f17h, sizeof(hw_cache_event_ids));
	else
		memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, sizeof(hw_cache_event_ids));

	return 0;
}

void amd_pmu_enable_virt(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	cpuc->perf_ctr_virt_mask = 0;

	/* Reload all events */
	amd_pmu_disable_all();
	x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);

void amd_pmu_disable_virt(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	/*
	 * We only mask out the Host-only bit so that host-only counting works
	 * when SVM is disabled. If someone sets up a guest-only counter when
	 * SVM is disabled the Guest-only bits still gets set and the counter
	 * will not count anything.
	 */
	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;

	/* Reload all events */
	amd_pmu_disable_all();
	x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);