This source file includes the following definitions:
- uncore_pcibus_to_physid
- uncore_free_pcibus_map
- __find_pci2phy_map
- uncore_event_show
- uncore_pmu_to_box
- uncore_msr_read_counter
- uncore_mmio_exit_box
- uncore_mmio_read_counter
- uncore_get_constraint
- uncore_put_constraint
- uncore_shared_reg_config
- uncore_assign_hw_event
- uncore_perf_event_update
- uncore_pmu_hrtimer
- uncore_pmu_start_hrtimer
- uncore_pmu_cancel_hrtimer
- uncore_pmu_init_hrtimer
- uncore_alloc_box
- is_box_event
- uncore_collect_events
- uncore_get_event_constraint
- uncore_put_event_constraint
- uncore_assign_events
- uncore_pmu_event_start
- uncore_pmu_event_stop
- uncore_pmu_event_add
- uncore_pmu_event_del
- uncore_pmu_event_read
- uncore_validate_group
- uncore_pmu_event_init
- uncore_pmu_enable
- uncore_pmu_disable
- uncore_get_attr_cpumask
- uncore_pmu_register
- uncore_pmu_unregister
- uncore_free_boxes
- uncore_type_exit
- uncore_types_exit
- uncore_type_init
- uncore_types_init
- uncore_pci_probe
- uncore_pci_remove
- uncore_pci_init
- uncore_pci_exit
- uncore_change_type_ctx
- uncore_change_context
- uncore_box_unref
- uncore_event_cpu_offline
- allocate_boxes
- uncore_box_ref
- uncore_event_cpu_online
- type_pmu_register
- uncore_msr_pmus_register
- uncore_cpu_init
- uncore_mmio_init
- intel_uncore_init
- intel_uncore_exit
1
2 #include <linux/module.h>
3
4 #include <asm/cpu_device_id.h>
5 #include <asm/intel-family.h>
6 #include "uncore.h"
7
8 static struct intel_uncore_type *empty_uncore[] = { NULL, };
9 struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
10 struct intel_uncore_type **uncore_pci_uncores = empty_uncore;
11 struct intel_uncore_type **uncore_mmio_uncores = empty_uncore;
12
13 static bool pcidrv_registered;
14 struct pci_driver *uncore_pci_driver;
15 /* pci bus to socket mapping */
16 DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
17 struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
18 struct pci_extra_dev *uncore_extra_pci_dev;
19 static int max_dies;
20
21 /* mask of cpus that collect uncore events */
22 static cpumask_t uncore_cpu_mask;
23
24 /* constraint for the fixed counter */
25 static struct event_constraint uncore_constraint_fixed =
26 EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
27 struct event_constraint uncore_constraint_empty =
28 EVENT_CONSTRAINT(0, 0, 0);
29
30 MODULE_LICENSE("GPL");
31
32 int uncore_pcibus_to_physid(struct pci_bus *bus)
33 {
34 struct pci2phy_map *map;
35 int phys_id = -1;
36
37 raw_spin_lock(&pci2phy_map_lock);
38 list_for_each_entry(map, &pci2phy_map_head, list) {
39 if (map->segment == pci_domain_nr(bus)) {
40 phys_id = map->pbus_to_physid[bus->number];
41 break;
42 }
43 }
44 raw_spin_unlock(&pci2phy_map_lock);
45
46 return phys_id;
47 }
48
49 static void uncore_free_pcibus_map(void)
50 {
51 struct pci2phy_map *map, *tmp;
52
53 list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
54 list_del(&map->list);
55 kfree(map);
56 }
57 }
58
59 struct pci2phy_map *__find_pci2phy_map(int segment)
60 {
61 struct pci2phy_map *map, *alloc = NULL;
62 int i;
63
64 lockdep_assert_held(&pci2phy_map_lock);
65
66 lookup:
67 list_for_each_entry(map, &pci2phy_map_head, list) {
68 if (map->segment == segment)
69 goto end;
70 }
71
72 if (!alloc) {
73 raw_spin_unlock(&pci2phy_map_lock);
74 alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
75 raw_spin_lock(&pci2phy_map_lock);
76
77 if (!alloc)
78 return NULL;
79
80 goto lookup;
81 }
82
83 map = alloc;
84 alloc = NULL;
85 map->segment = segment;
86 for (i = 0; i < 256; i++)
87 map->pbus_to_physid[i] = -1;
88 list_add_tail(&map->list, &pci2phy_map_head);
89
90 end:
91 kfree(alloc);
92 return map;
93 }
94
95 ssize_t uncore_event_show(struct kobject *kobj,
96 struct kobj_attribute *attr, char *buf)
97 {
98 struct uncore_event_desc *event =
99 container_of(attr, struct uncore_event_desc, attr);
100 return sprintf(buf, "%s", event->config);
101 }
102
103 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
104 {
105 unsigned int dieid = topology_logical_die_id(cpu);
106
107 /*
108  * The unsigned check also catches the '-1' return value for non
109  * existent mappings in the topology map.
110  */
111 return dieid < max_dies ? pmu->boxes[dieid] : NULL;
112 }
113
114 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
115 {
116 u64 count;
117
118 rdmsrl(event->hw.event_base, count);
119
120 return count;
121 }
122
123 void uncore_mmio_exit_box(struct intel_uncore_box *box)
124 {
125 if (box->io_addr)
126 iounmap(box->io_addr);
127 }
128
129 u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
130 struct perf_event *event)
131 {
132 if (!box->io_addr)
133 return 0;
134
135 return readq(box->io_addr + event->hw.event_base);
136 }
137
138 /*
139  * generic get constraint function for shared match/mask registers.
140  */
141 struct event_constraint *
142 uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
143 {
144 struct intel_uncore_extra_reg *er;
145 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
146 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
147 unsigned long flags;
148 bool ok = false;
149
150 /*
151  * reg->alloc can be set due to existing state, so for fake box we
152  * need to ignore this, otherwise we might fail to allocate proper
153  * fake state for this extra reg constraint.
154  */
155 if (reg1->idx == EXTRA_REG_NONE ||
156 (!uncore_box_is_fake(box) && reg1->alloc))
157 return NULL;
158
159 er = &box->shared_regs[reg1->idx];
160 raw_spin_lock_irqsave(&er->lock, flags);
161 if (!atomic_read(&er->ref) ||
162 (er->config1 == reg1->config && er->config2 == reg2->config)) {
163 atomic_inc(&er->ref);
164 er->config1 = reg1->config;
165 er->config2 = reg2->config;
166 ok = true;
167 }
168 raw_spin_unlock_irqrestore(&er->lock, flags);
169
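/*
 * Returning NULL means the shared register was claimed and no extra
 * constraint applies; otherwise the empty constraint below (which allows
 * no counters at all) prevents the event from being scheduled.
 */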
170 if (ok) {
171 if (!uncore_box_is_fake(box))
172 reg1->alloc = 1;
173 return NULL;
174 }
175
176 return &uncore_constraint_empty;
177 }
178
179 void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
180 {
181 struct intel_uncore_extra_reg *er;
182 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
183
184 /*
185  * Only put constraint if extra reg was actually allocated. Also takes
186  * care of events which do not use an extra shared reg.
187  *
188  * Also, if this is a fake box we shouldn't touch any event state
189  * (reg->alloc) and we don't care about leaving inconsistent box state
190  * (the destroy-fake-box callback will fix it).
191  */
192 if (uncore_box_is_fake(box) || !reg1->alloc)
193 return;
194
195 er = &box->shared_regs[reg1->idx];
196 atomic_dec(&er->ref);
197 reg1->alloc = 0;
198 }
199
200 u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
201 {
202 struct intel_uncore_extra_reg *er;
203 unsigned long flags;
204 u64 config;
205
206 er = &box->shared_regs[idx];
207
208 raw_spin_lock_irqsave(&er->lock, flags);
209 config = er->config;
210 raw_spin_unlock_irqrestore(&er->lock, flags);
211
212 return config;
213 }
214
215 static void uncore_assign_hw_event(struct intel_uncore_box *box,
216 struct perf_event *event, int idx)
217 {
218 struct hw_perf_event *hwc = &event->hw;
219
220 hwc->idx = idx;
221 hwc->last_tag = ++box->tags[idx];
222
223 if (uncore_pmc_fixed(hwc->idx)) {
224 hwc->event_base = uncore_fixed_ctr(box);
225 hwc->config_base = uncore_fixed_ctl(box);
226 return;
227 }
228
229 hwc->config_base = uncore_event_ctl(box, hwc->idx);
230 hwc->event_base = uncore_perf_ctr(box, hwc->idx);
231 }
232
233 void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
234 {
235 u64 prev_count, new_count, delta;
236 int shift;
237
238 if (uncore_pmc_freerunning(event->hw.idx))
239 shift = 64 - uncore_freerunning_bits(box, event);
240 else if (uncore_pmc_fixed(event->hw.idx))
241 shift = 64 - uncore_fixed_ctr_bits(box);
242 else
243 shift = 64 - uncore_perf_ctr_bits(box);
244
245
246 again:
247 prev_count = local64_read(&event->hw.prev_count);
248 new_count = uncore_read_counter(box, event);
249 if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
250 goto again;
251
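/*
 * The counter is only (64 - shift) bits wide. Shifting both values up and
 * the difference back down computes the delta modulo the counter width,
 * which also handles wrap-around of the hardware counter.
 */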
252 delta = (new_count << shift) - (prev_count << shift);
253 delta >>= shift;
254
255 local64_add(delta, &event->count);
256 }
257
258 /*
259  * The overflow interrupt is unavailable for SandyBridge-EP, is broken
260  * for SandyBridge. So we use hrtimer to periodically poll the counter
261  * to avoid overflow.
262  */
263 static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
264 {
265 struct intel_uncore_box *box;
266 struct perf_event *event;
267 unsigned long flags;
268 int bit;
269
270 box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
271 if (!box->n_active || box->cpu != smp_processor_id())
272 return HRTIMER_NORESTART;
273
274 /* disable local interrupts to prevent uncore_pmu_event_start/stop
275  * from interrupting the update process
276  */
277 local_irq_save(flags);
278
279 /*
280  * handle boxes with an active event list as opposed to active
281  * counters
282  */
283 list_for_each_entry(event, &box->active_list, active_entry) {
284 uncore_perf_event_update(box, event);
285 }
286
287 for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
288 uncore_perf_event_update(box, box->events[bit]);
289
290 local_irq_restore(flags);
291
292 hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
293 return HRTIMER_RESTART;
294 }
295
296 void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
297 {
298 hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
299 HRTIMER_MODE_REL_PINNED);
300 }
301
302 void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
303 {
304 hrtimer_cancel(&box->hrtimer);
305 }
306
307 static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
308 {
309 hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
310 box->hrtimer.function = uncore_pmu_hrtimer;
311 }
312
313 static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
314 int node)
315 {
316 int i, size, numshared = type->num_shared_regs;
317 struct intel_uncore_box *box;
318
319 size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);
320
321 box = kzalloc_node(size, GFP_KERNEL, node);
322 if (!box)
323 return NULL;
324
325 for (i = 0; i < numshared; i++)
326 raw_spin_lock_init(&box->shared_regs[i].lock);
327
328 uncore_pmu_init_hrtimer(box);
329 box->cpu = -1;
330 box->pci_phys_id = -1;
331 box->dieid = -1;
332
333 /* set default hrtimer timeout */
334 box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;
335
336 INIT_LIST_HEAD(&box->active_list);
337
338 return box;
339 }
340
341 /*
342  * Using uncore_pmu_event_init pmu event_init callback
343  * as a detection point for uncore events.
344  */
345 static int uncore_pmu_event_init(struct perf_event *event);
346
347 static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
348 {
349 return &box->pmu->pmu == event->pmu;
350 }
351
352 static int
353 uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
354 bool dogrp)
355 {
356 struct perf_event *event;
357 int n, max_count;
358
359 max_count = box->pmu->type->num_counters;
360 if (box->pmu->type->fixed_ctl)
361 max_count++;
362
363 if (box->n_events >= max_count)
364 return -EINVAL;
365
366 n = box->n_events;
367
368 if (is_box_event(box, leader)) {
369 box->event_list[n] = leader;
370 n++;
371 }
372
373 if (!dogrp)
374 return n;
375
376 for_each_sibling_event(event, leader) {
377 if (!is_box_event(box, event) ||
378 event->state <= PERF_EVENT_STATE_OFF)
379 continue;
380
381 if (n >= max_count)
382 return -EINVAL;
383
384 box->event_list[n] = event;
385 n++;
386 }
387 return n;
388 }
389
390 static struct event_constraint *
391 uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
392 {
393 struct intel_uncore_type *type = box->pmu->type;
394 struct event_constraint *c;
395
396 if (type->ops->get_constraint) {
397 c = type->ops->get_constraint(box, event);
398 if (c)
399 return c;
400 }
401
402 if (event->attr.config == UNCORE_FIXED_EVENT)
403 return &uncore_constraint_fixed;
404
405 if (type->constraints) {
406 for_each_event_constraint(c, type->constraints) {
407 if ((event->hw.config & c->cmask) == c->code)
408 return c;
409 }
410 }
411
412 return &type->unconstrainted;
413 }
414
415 static void uncore_put_event_constraint(struct intel_uncore_box *box,
416 struct perf_event *event)
417 {
418 if (box->pmu->type->ops->put_constraint)
419 box->pmu->type->ops->put_constraint(box, event);
420 }
421
422 static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
423 {
424 unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
425 struct event_constraint *c;
426 int i, wmin, wmax, ret = 0;
427 struct hw_perf_event *hwc;
428
429 bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
430
431 for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
432 c = uncore_get_event_constraint(box, box->event_list[i]);
433 box->event_constraint[i] = c;
434 wmin = min(wmin, c->weight);
435 wmax = max(wmax, c->weight);
436 }
437
438 /* fastpath, try to reuse previous register */
439 for (i = 0; i < n; i++) {
440 hwc = &box->event_list[i]->hw;
441 c = box->event_constraint[i];
442
443 /* never assigned */
444 if (hwc->idx == -1)
445 break;
446
447 /* constraint still honored */
448 if (!test_bit(hwc->idx, c->idxmsk))
449 break;
450
451 /* not already used */
452 if (test_bit(hwc->idx, used_mask))
453 break;
454
455 __set_bit(hwc->idx, used_mask);
456 if (assign)
457 assign[i] = hwc->idx;
458 }
459 /* slow path */
460 if (i != n)
461 ret = perf_assign_events(box->event_constraint, n,
462 wmin, wmax, n, assign);
463
464 if (!assign || ret) {
465 for (i = 0; i < n; i++)
466 uncore_put_event_constraint(box, box->event_list[i]);
467 }
468 return ret ? -EINVAL : 0;
469 }
470
471 void uncore_pmu_event_start(struct perf_event *event, int flags)
472 {
473 struct intel_uncore_box *box = uncore_event_to_box(event);
474 int idx = event->hw.idx;
475
476 if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
477 return;
478
479 /*
480  * Free running counter is read-only and always active.
481  * Use the current counter value as start point.
482  * There is no overflow interrupt for free running counter.
483  * Use hrtimer to periodically poll the counter to avoid overflow.
484  */
485 if (uncore_pmc_freerunning(event->hw.idx)) {
486 list_add_tail(&event->active_entry, &box->active_list);
487 local64_set(&event->hw.prev_count,
488 uncore_read_counter(box, event));
489 if (box->n_active++ == 0)
490 uncore_pmu_start_hrtimer(box);
491 return;
492 }
493
494 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
495 return;
496
497 event->hw.state = 0;
498 box->events[idx] = event;
499 box->n_active++;
500 __set_bit(idx, box->active_mask);
501
502 local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
503 uncore_enable_event(box, event);
504
505 if (box->n_active == 1)
506 uncore_pmu_start_hrtimer(box);
507 }
508
509 void uncore_pmu_event_stop(struct perf_event *event, int flags)
510 {
511 struct intel_uncore_box *box = uncore_event_to_box(event);
512 struct hw_perf_event *hwc = &event->hw;
513
514 /* Cannot disable free running counter which is read-only */
515 if (uncore_pmc_freerunning(hwc->idx)) {
516 list_del(&event->active_entry);
517 if (--box->n_active == 0)
518 uncore_pmu_cancel_hrtimer(box);
519 uncore_perf_event_update(box, event);
520 return;
521 }
522
523 if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
524 uncore_disable_event(box, event);
525 box->n_active--;
526 box->events[hwc->idx] = NULL;
527 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
528 hwc->state |= PERF_HES_STOPPED;
529
530 if (box->n_active == 0)
531 uncore_pmu_cancel_hrtimer(box);
532 }
533
534 if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
535 /*
536  * Drain the remaining delta count out of an event
537  * that we are disabling:
538  */
539 uncore_perf_event_update(box, event);
540 hwc->state |= PERF_HES_UPTODATE;
541 }
542 }
543
544 int uncore_pmu_event_add(struct perf_event *event, int flags)
545 {
546 struct intel_uncore_box *box = uncore_event_to_box(event);
547 struct hw_perf_event *hwc = &event->hw;
548 int assign[UNCORE_PMC_IDX_MAX];
549 int i, n, ret;
550
551 if (!box)
552 return -ENODEV;
553
554 /*
555  * The free running counter is assigned in event_init().
556  * The free running counter event and free running counter
557  * are 1:1 mapped. It doesn't need to be tracked in event_list.
558  */
559 if (uncore_pmc_freerunning(hwc->idx)) {
560 if (flags & PERF_EF_START)
561 uncore_pmu_event_start(event, 0);
562 return 0;
563 }
564
565 ret = n = uncore_collect_events(box, event, false);
566 if (ret < 0)
567 return ret;
568
569 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
570 if (!(flags & PERF_EF_START))
571 hwc->state |= PERF_HES_ARCH;
572
573 ret = uncore_assign_events(box, assign, n);
574 if (ret)
575 return ret;
576
577 /* save events moving to new counters */
578 for (i = 0; i < box->n_events; i++) {
579 event = box->event_list[i];
580 hwc = &event->hw;
581
582 if (hwc->idx == assign[i] &&
583 hwc->last_tag == box->tags[assign[i]])
584 continue;
585 /*
586  * Ensure we don't accidentally enable a stopped
587  * counter simply because we rescheduled.
588  */
589 if (hwc->state & PERF_HES_STOPPED)
590 hwc->state |= PERF_HES_ARCH;
591
592 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
593 }
594
595 /* reprogram moved events into new counters */
596 for (i = 0; i < n; i++) {
597 event = box->event_list[i];
598 hwc = &event->hw;
599
600 if (hwc->idx != assign[i] ||
601 hwc->last_tag != box->tags[assign[i]])
602 uncore_assign_hw_event(box, event, assign[i]);
603 else if (i < box->n_events)
604 continue;
605
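/* do not restart events meant to stay stopped (PERF_HES_ARCH set above or at add time) */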
606 if (hwc->state & PERF_HES_ARCH)
607 continue;
608
609 uncore_pmu_event_start(event, 0);
610 }
611 box->n_events = n;
612
613 return 0;
614 }
615
616 void uncore_pmu_event_del(struct perf_event *event, int flags)
617 {
618 struct intel_uncore_box *box = uncore_event_to_box(event);
619 int i;
620
621 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
622
623 /*
624  * The event for free running counter is not tracked by event_list.
625  * It doesn't need to force event->hw.idx = -1 to reassign the counter.
626  * Because the event and the free running counter are 1:1 mapped.
627  */
628 if (uncore_pmc_freerunning(event->hw.idx))
629 return;
630
631 for (i = 0; i < box->n_events; i++) {
632 if (event == box->event_list[i]) {
633 uncore_put_event_constraint(box, event);
634
635 for (++i; i < box->n_events; i++)
636 box->event_list[i - 1] = box->event_list[i];
637
638 --box->n_events;
639 break;
640 }
641 }
642
643 event->hw.idx = -1;
644 event->hw.last_tag = ~0ULL;
645 }
646
647 void uncore_pmu_event_read(struct perf_event *event)
648 {
649 struct intel_uncore_box *box = uncore_event_to_box(event);
650 uncore_perf_event_update(box, event);
651 }
652
653 /*
654  * validation ensures the group can be loaded onto the
655  * PMU if it was the only group available.
656  */
657 static int uncore_validate_group(struct intel_uncore_pmu *pmu,
658 struct perf_event *event)
659 {
660 struct perf_event *leader = event->group_leader;
661 struct intel_uncore_box *fake_box;
662 int ret = -EINVAL, n;
663
664 /* The free running counter is always active. */
665 if (uncore_pmc_freerunning(event->hw.idx))
666 return 0;
667
668 fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
669 if (!fake_box)
670 return -ENOMEM;
671
672 fake_box->pmu = pmu;
673 /*
674  * the event is not yet connected with its
675  * siblings therefore we must first collect
676  * existing siblings, then add the new event
677  * before we can simulate the scheduling
678  */
679 n = uncore_collect_events(fake_box, leader, true);
680 if (n < 0)
681 goto out;
682
683 fake_box->n_events = n;
684 n = uncore_collect_events(fake_box, event, false);
685 if (n < 0)
686 goto out;
687
688 fake_box->n_events = n;
689
690 ret = uncore_assign_events(fake_box, NULL, n);
691 out:
692 kfree(fake_box);
693 return ret;
694 }
695
696 static int uncore_pmu_event_init(struct perf_event *event)
697 {
698 struct intel_uncore_pmu *pmu;
699 struct intel_uncore_box *box;
700 struct hw_perf_event *hwc = &event->hw;
701 int ret;
702
703 if (event->attr.type != event->pmu->type)
704 return -ENOENT;
705
706 pmu = uncore_event_to_pmu(event);
707
708 if (pmu->func_id < 0)
709 return -ENOENT;
710
711 /* Sampling not supported yet */
712 if (hwc->sample_period)
713 return -EINVAL;
714
715 /*
716  * Place all uncore events for a particular physical package
717  * onto a single cpu
718  */
719 if (event->cpu < 0)
720 return -EINVAL;
721 box = uncore_pmu_to_box(pmu, event->cpu);
722 if (!box || box->cpu < 0)
723 return -EINVAL;
724 event->cpu = box->cpu;
725 event->pmu_private = box;
726
727 event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;
728
729 event->hw.idx = -1;
730 event->hw.last_tag = ~0ULL;
731 event->hw.extra_reg.idx = EXTRA_REG_NONE;
732 event->hw.branch_reg.idx = EXTRA_REG_NONE;
733
734 if (event->attr.config == UNCORE_FIXED_EVENT) {
735 /* no fixed counter */
736 if (!pmu->type->fixed_ctl)
737 return -EINVAL;
738 /*
739  * if there's only one fixed counter, only the first pmu
740  * can access the fixed counter
741  */
742 if (pmu->type->single_fixed && pmu->pmu_idx > 0)
743 return -EINVAL;
744
745 /* fixed counters have event field hardcoded to zero */
746 hwc->config = 0ULL;
747 } else if (is_freerunning_event(event)) {
748 hwc->config = event->attr.config;
749 if (!check_valid_freerunning_event(box, event))
750 return -EINVAL;
751 event->hw.idx = UNCORE_PMC_IDX_FREERUNNING;
752 /*
753  * The free running counter event and free running counter
754  * are always 1:1 mapped.
755  * The free running counter is always active.
756  * Assign the free running counter here.
757  */
758 event->hw.event_base = uncore_freerunning_counter(box, event);
759 } else {
760 hwc->config = event->attr.config &
761 (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32));
762 if (pmu->type->ops->hw_config) {
763 ret = pmu->type->ops->hw_config(box, event);
764 if (ret)
765 return ret;
766 }
767 }
768
769 if (event->group_leader != event)
770 ret = uncore_validate_group(pmu, event);
771 else
772 ret = 0;
773
774 return ret;
775 }
776
777 static void uncore_pmu_enable(struct pmu *pmu)
778 {
779 struct intel_uncore_pmu *uncore_pmu;
780 struct intel_uncore_box *box;
781
782 uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
783 if (!uncore_pmu)
784 return;
785
786 box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
787 if (!box)
788 return;
789
790 if (uncore_pmu->type->ops->enable_box)
791 uncore_pmu->type->ops->enable_box(box);
792 }
793
794 static void uncore_pmu_disable(struct pmu *pmu)
795 {
796 struct intel_uncore_pmu *uncore_pmu;
797 struct intel_uncore_box *box;
798
799 uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
800 if (!uncore_pmu)
801 return;
802
803 box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
804 if (!box)
805 return;
806
807 if (uncore_pmu->type->ops->disable_box)
808 uncore_pmu->type->ops->disable_box(box);
809 }
810
811 static ssize_t uncore_get_attr_cpumask(struct device *dev,
812 struct device_attribute *attr, char *buf)
813 {
814 return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
815 }
816
817 static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);
818
819 static struct attribute *uncore_pmu_attrs[] = {
820 &dev_attr_cpumask.attr,
821 NULL,
822 };
823
824 static const struct attribute_group uncore_pmu_attr_group = {
825 .attrs = uncore_pmu_attrs,
826 };
827
828 static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
829 {
830 int ret;
831
832 if (!pmu->type->pmu) {
833 pmu->pmu = (struct pmu) {
834 .attr_groups = pmu->type->attr_groups,
835 .task_ctx_nr = perf_invalid_context,
836 .pmu_enable = uncore_pmu_enable,
837 .pmu_disable = uncore_pmu_disable,
838 .event_init = uncore_pmu_event_init,
839 .add = uncore_pmu_event_add,
840 .del = uncore_pmu_event_del,
841 .start = uncore_pmu_event_start,
842 .stop = uncore_pmu_event_stop,
843 .read = uncore_pmu_event_read,
844 .module = THIS_MODULE,
845 .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
846 };
847 } else {
848 pmu->pmu = *pmu->type->pmu;
849 pmu->pmu.attr_groups = pmu->type->attr_groups;
850 }
851
852 if (pmu->type->num_boxes == 1) {
853 if (strlen(pmu->type->name) > 0)
854 sprintf(pmu->name, "uncore_%s", pmu->type->name);
855 else
856 sprintf(pmu->name, "uncore");
857 } else {
858 sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
859 pmu->pmu_idx);
860 }
861
862 ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
863 if (!ret)
864 pmu->registered = true;
865 return ret;
866 }
867
868 static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
869 {
870 if (!pmu->registered)
871 return;
872 perf_pmu_unregister(&pmu->pmu);
873 pmu->registered = false;
874 }
875
876 static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
877 {
878 int die;
879
880 for (die = 0; die < max_dies; die++)
881 kfree(pmu->boxes[die]);
882 kfree(pmu->boxes);
883 }
884
885 static void uncore_type_exit(struct intel_uncore_type *type)
886 {
887 struct intel_uncore_pmu *pmu = type->pmus;
888 int i;
889
890 if (pmu) {
891 for (i = 0; i < type->num_boxes; i++, pmu++) {
892 uncore_pmu_unregister(pmu);
893 uncore_free_boxes(pmu);
894 }
895 kfree(type->pmus);
896 type->pmus = NULL;
897 }
898 kfree(type->events_group);
899 type->events_group = NULL;
900 }
901
902 static void uncore_types_exit(struct intel_uncore_type **types)
903 {
904 for (; *types; types++)
905 uncore_type_exit(*types);
906 }
907
908 static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
909 {
910 struct intel_uncore_pmu *pmus;
911 size_t size;
912 int i, j;
913
914 pmus = kcalloc(type->num_boxes, sizeof(*pmus), GFP_KERNEL);
915 if (!pmus)
916 return -ENOMEM;
917
918 size = max_dies * sizeof(struct intel_uncore_box *);
919
920 for (i = 0; i < type->num_boxes; i++) {
921 pmus[i].func_id = setid ? i : -1;
922 pmus[i].pmu_idx = i;
923 pmus[i].type = type;
924 pmus[i].boxes = kzalloc(size, GFP_KERNEL);
925 if (!pmus[i].boxes)
926 goto err;
927 }
928
929 type->pmus = pmus;
930 type->unconstrainted = (struct event_constraint)
931 __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
932 0, type->num_counters, 0, 0);
933
934 if (type->event_descs) {
935 struct {
936 struct attribute_group group;
937 struct attribute *attrs[];
938 } *attr_group;
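/* count the event descriptors; the array is terminated by a NULL name */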
939 for (i = 0; type->event_descs[i].attr.attr.name; i++);
940
941 attr_group = kzalloc(struct_size(attr_group, attrs, i + 1),
942 GFP_KERNEL);
943 if (!attr_group)
944 goto err;
945
946 attr_group->group.name = "events";
947 attr_group->group.attrs = attr_group->attrs;
948
949 for (j = 0; j < i; j++)
950 attr_group->attrs[j] = &type->event_descs[j].attr.attr;
951
952 type->events_group = &attr_group->group;
953 }
954
955 type->pmu_group = &uncore_pmu_attr_group;
956
957 return 0;
958
959 err:
960 for (i = 0; i < type->num_boxes; i++)
961 kfree(pmus[i].boxes);
962 kfree(pmus);
963
964 return -ENOMEM;
965 }
966
967 static int __init
968 uncore_types_init(struct intel_uncore_type **types, bool setid)
969 {
970 int ret;
971
972 for (; *types; types++) {
973 ret = uncore_type_init(*types, setid);
974 if (ret)
975 return ret;
976 }
977 return 0;
978 }
979
980 /*
981  * add a pci uncore device
982  */
983 static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
984 {
985 struct intel_uncore_type *type;
986 struct intel_uncore_pmu *pmu = NULL;
987 struct intel_uncore_box *box;
988 int phys_id, die, ret;
989
990 phys_id = uncore_pcibus_to_physid(pdev->bus);
991 if (phys_id < 0)
992 return -ENODEV;
993
994 die = (topology_max_die_per_package() > 1) ? phys_id :
995 topology_phys_to_logical_pkg(phys_id);
996 if (die < 0)
997 return -EINVAL;
998
999 if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
1000 int idx = UNCORE_PCI_DEV_IDX(id->driver_data);
1001
1002 uncore_extra_pci_dev[die].dev[idx] = pdev;
1003 pci_set_drvdata(pdev, NULL);
1004 return 0;
1005 }
1006
1007 type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
1008
1009 /*
1010  * Some platforms, e.g. Knights Landing, use a common PCI device ID
1011  * for multiple instances of an uncore PMU device type. We should check
1012  * PCI slot and func to indicate the uncore box.
1013  */
1014 if (id->driver_data & ~0xffff) {
1015 struct pci_driver *pci_drv = pdev->driver;
1016 const struct pci_device_id *ids = pci_drv->id_table;
1017 unsigned int devfn;
1018
1019 while (ids && ids->vendor) {
1020 if ((ids->vendor == pdev->vendor) &&
1021 (ids->device == pdev->device)) {
1022 devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
1023 UNCORE_PCI_DEV_FUNC(ids->driver_data));
1024 if (devfn == pdev->devfn) {
1025 pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
1026 break;
1027 }
1028 }
1029 ids++;
1030 }
1031 if (pmu == NULL)
1032 return -ENODEV;
1033 } else {
1034 /*
1035  * for performance monitoring unit with multiple boxes,
1036  * each box has a different function id.
1037  */
1038 pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
1039 }
1040
1041 if (WARN_ON_ONCE(pmu->boxes[die] != NULL))
1042 return -EINVAL;
1043
1044 box = uncore_alloc_box(type, NUMA_NO_NODE);
1045 if (!box)
1046 return -ENOMEM;
1047
1048 if (pmu->func_id < 0)
1049 pmu->func_id = pdev->devfn;
1050 else
1051 WARN_ON_ONCE(pmu->func_id != pdev->devfn);
1052
1053 atomic_inc(&box->refcnt);
1054 box->pci_phys_id = phys_id;
1055 box->dieid = die;
1056 box->pci_dev = pdev;
1057 box->pmu = pmu;
1058 uncore_box_init(box);
1059 pci_set_drvdata(pdev, box);
1060
1061 pmu->boxes[die] = box;
1062 if (atomic_inc_return(&pmu->activeboxes) > 1)
1063 return 0;
1064
1065 /* First active box registers the pmu */
1066 ret = uncore_pmu_register(pmu);
1067 if (ret) {
1068 pci_set_drvdata(pdev, NULL);
1069 pmu->boxes[die] = NULL;
1070 uncore_box_exit(box);
1071 kfree(box);
1072 }
1073 return ret;
1074 }
1075
1076 static void uncore_pci_remove(struct pci_dev *pdev)
1077 {
1078 struct intel_uncore_box *box;
1079 struct intel_uncore_pmu *pmu;
1080 int i, phys_id, die;
1081
1082 phys_id = uncore_pcibus_to_physid(pdev->bus);
1083
1084 box = pci_get_drvdata(pdev);
1085 if (!box) {
1086 die = (topology_max_die_per_package() > 1) ? phys_id :
1087 topology_phys_to_logical_pkg(phys_id);
1088 for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
1089 if (uncore_extra_pci_dev[die].dev[i] == pdev) {
1090 uncore_extra_pci_dev[die].dev[i] = NULL;
1091 break;
1092 }
1093 }
1094 WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
1095 return;
1096 }
1097
1098 pmu = box->pmu;
1099 if (WARN_ON_ONCE(phys_id != box->pci_phys_id))
1100 return;
1101
1102 pci_set_drvdata(pdev, NULL);
1103 pmu->boxes[box->dieid] = NULL;
1104 if (atomic_dec_return(&pmu->activeboxes) == 0)
1105 uncore_pmu_unregister(pmu);
1106 uncore_box_exit(box);
1107 kfree(box);
1108 }
1109
1110 static int __init uncore_pci_init(void)
1111 {
1112 size_t size;
1113 int ret;
1114
1115 size = max_dies * sizeof(struct pci_extra_dev);
1116 uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
1117 if (!uncore_extra_pci_dev) {
1118 ret = -ENOMEM;
1119 goto err;
1120 }
1121
1122 ret = uncore_types_init(uncore_pci_uncores, false);
1123 if (ret)
1124 goto errtype;
1125
1126 uncore_pci_driver->probe = uncore_pci_probe;
1127 uncore_pci_driver->remove = uncore_pci_remove;
1128
1129 ret = pci_register_driver(uncore_pci_driver);
1130 if (ret)
1131 goto errtype;
1132
1133 pcidrv_registered = true;
1134 return 0;
1135
1136 errtype:
1137 uncore_types_exit(uncore_pci_uncores);
1138 kfree(uncore_extra_pci_dev);
1139 uncore_extra_pci_dev = NULL;
1140 uncore_free_pcibus_map();
1141 err:
1142 uncore_pci_uncores = empty_uncore;
1143 return ret;
1144 }
1145
1146 static void uncore_pci_exit(void)
1147 {
1148 if (pcidrv_registered) {
1149 pcidrv_registered = false;
1150 pci_unregister_driver(uncore_pci_driver);
1151 uncore_types_exit(uncore_pci_uncores);
1152 kfree(uncore_extra_pci_dev);
1153 uncore_free_pcibus_map();
1154 }
1155 }
1156
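/*
 * Move the boxes of one uncore type from old_cpu to new_cpu: update the
 * owning cpu and, for a real migration, cancel the hrtimer and move the
 * perf event context as well.
 */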
1157 static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
1158 int new_cpu)
1159 {
1160 struct intel_uncore_pmu *pmu = type->pmus;
1161 struct intel_uncore_box *box;
1162 int i, die;
1163
1164 die = topology_logical_die_id(old_cpu < 0 ? new_cpu : old_cpu);
1165 for (i = 0; i < type->num_boxes; i++, pmu++) {
1166 box = pmu->boxes[die];
1167 if (!box)
1168 continue;
1169
1170 if (old_cpu < 0) {
1171 WARN_ON_ONCE(box->cpu != -1);
1172 box->cpu = new_cpu;
1173 continue;
1174 }
1175
1176 WARN_ON_ONCE(box->cpu != old_cpu);
1177 box->cpu = -1;
1178 if (new_cpu < 0)
1179 continue;
1180
1181 uncore_pmu_cancel_hrtimer(box);
1182 perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
1183 box->cpu = new_cpu;
1184 }
1185 }
1186
1187 static void uncore_change_context(struct intel_uncore_type **uncores,
1188 int old_cpu, int new_cpu)
1189 {
1190 for (; *uncores; uncores++)
1191 uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
1192 }
1193
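/*
 * Drop the per-die reference on every box of the given types; the last
 * reference triggers uncore_box_exit().
 */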
1194 static void uncore_box_unref(struct intel_uncore_type **types, int id)
1195 {
1196 struct intel_uncore_type *type;
1197 struct intel_uncore_pmu *pmu;
1198 struct intel_uncore_box *box;
1199 int i;
1200
1201 for (; *types; types++) {
1202 type = *types;
1203 pmu = type->pmus;
1204 for (i = 0; i < type->num_boxes; i++, pmu++) {
1205 box = pmu->boxes[id];
1206 if (box && atomic_dec_return(&box->refcnt) == 0)
1207 uncore_box_exit(box);
1208 }
1209 }
1210 }
1211
1212 static int uncore_event_cpu_offline(unsigned int cpu)
1213 {
1214 int die, target;
1215
1216 /* Check if exiting cpu is used for collecting uncore events */
1217 if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
1218 goto unref;
1219 /* Find a new cpu to collect uncore events */
1220 target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
1221
1222 /* Migrate uncore events to the new target */
1223 if (target < nr_cpu_ids)
1224 cpumask_set_cpu(target, &uncore_cpu_mask);
1225 else
1226 target = -1;
1227
1228 uncore_change_context(uncore_msr_uncores, cpu, target);
1229 uncore_change_context(uncore_mmio_uncores, cpu, target);
1230 uncore_change_context(uncore_pci_uncores, cpu, target);
1231
1232 unref:
1233 /* Clear the references */
1234 die = topology_logical_die_id(cpu);
1235 uncore_box_unref(uncore_msr_uncores, die);
1236 uncore_box_unref(uncore_mmio_uncores, die);
1237 return 0;
1238 }
1239
1240 static int allocate_boxes(struct intel_uncore_type **types,
1241 unsigned int die, unsigned int cpu)
1242 {
1243 struct intel_uncore_box *box, *tmp;
1244 struct intel_uncore_type *type;
1245 struct intel_uncore_pmu *pmu;
1246 LIST_HEAD(allocated);
1247 int i;
1248
1249 /* Allocate the boxes that do not yet exist for this die */
1250 for (; *types; types++) {
1251 type = *types;
1252 pmu = type->pmus;
1253 for (i = 0; i < type->num_boxes; i++, pmu++) {
1254 if (pmu->boxes[die])
1255 continue;
1256 box = uncore_alloc_box(type, cpu_to_node(cpu));
1257 if (!box)
1258 goto cleanup;
1259 box->pmu = pmu;
1260 box->dieid = die;
1261 list_add(&box->active_list, &allocated);
1262 }
1263 }
1264
1265 list_for_each_entry_safe(box, tmp, &allocated, active_list) {
1266 list_del_init(&box->active_list);
1267 box->pmu->boxes[die] = box;
1268 }
1269 return 0;
1270
1271 cleanup:
1272 list_for_each_entry_safe(box, tmp, &allocated, active_list) {
1273 list_del_init(&box->active_list);
1274 kfree(box);
1275 }
1276 return -ENOMEM;
1277 }
1278
1279 static int uncore_box_ref(struct intel_uncore_type **types,
1280 int id, unsigned int cpu)
1281 {
1282 struct intel_uncore_type *type;
1283 struct intel_uncore_pmu *pmu;
1284 struct intel_uncore_box *box;
1285 int i, ret;
1286
1287 ret = allocate_boxes(types, id, cpu);
1288 if (ret)
1289 return ret;
1290
1291 for (; *types; types++) {
1292 type = *types;
1293 pmu = type->pmus;
1294 for (i = 0; i < type->num_boxes; i++, pmu++) {
1295 box = pmu->boxes[id];
1296 if (box && atomic_inc_return(&box->refcnt) == 1)
1297 uncore_box_init(box);
1298 }
1299 }
1300 return 0;
1301 }
1302
1303 static int uncore_event_cpu_online(unsigned int cpu)
1304 {
1305 int die, target, msr_ret, mmio_ret;
1306
1307 die = topology_logical_die_id(cpu);
1308 msr_ret = uncore_box_ref(uncore_msr_uncores, die, cpu);
1309 mmio_ret = uncore_box_ref(uncore_mmio_uncores, die, cpu);
1310 if (msr_ret && mmio_ret)
1311 return -ENOMEM;
1312
1313 /*
1314  * Check if there is an online cpu in the package
1315  * which collects uncore events already.
1316  */
1317 target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
1318 if (target < nr_cpu_ids)
1319 return 0;
1320
1321 cpumask_set_cpu(cpu, &uncore_cpu_mask);
1322
1323 if (!msr_ret)
1324 uncore_change_context(uncore_msr_uncores, -1, cpu);
1325 if (!mmio_ret)
1326 uncore_change_context(uncore_mmio_uncores, -1, cpu);
1327 uncore_change_context(uncore_pci_uncores, -1, cpu);
1328 return 0;
1329 }
1330
1331 static int __init type_pmu_register(struct intel_uncore_type *type)
1332 {
1333 int i, ret;
1334
1335 for (i = 0; i < type->num_boxes; i++) {
1336 ret = uncore_pmu_register(&type->pmus[i]);
1337 if (ret)
1338 return ret;
1339 }
1340 return 0;
1341 }
1342
1343 static int __init uncore_msr_pmus_register(void)
1344 {
1345 struct intel_uncore_type **types = uncore_msr_uncores;
1346 int ret;
1347
1348 for (; *types; types++) {
1349 ret = type_pmu_register(*types);
1350 if (ret)
1351 return ret;
1352 }
1353 return 0;
1354 }
1355
1356 static int __init uncore_cpu_init(void)
1357 {
1358 int ret;
1359
1360 ret = uncore_types_init(uncore_msr_uncores, true);
1361 if (ret)
1362 goto err;
1363
1364 ret = uncore_msr_pmus_register();
1365 if (ret)
1366 goto err;
1367 return 0;
1368 err:
1369 uncore_types_exit(uncore_msr_uncores);
1370 uncore_msr_uncores = empty_uncore;
1371 return ret;
1372 }
1373
1374 static int __init uncore_mmio_init(void)
1375 {
1376 struct intel_uncore_type **types = uncore_mmio_uncores;
1377 int ret;
1378
1379 ret = uncore_types_init(types, true);
1380 if (ret)
1381 goto err;
1382
1383 for (; *types; types++) {
1384 ret = type_pmu_register(*types);
1385 if (ret)
1386 goto err;
1387 }
1388 return 0;
1389 err:
1390 uncore_types_exit(uncore_mmio_uncores);
1391 uncore_mmio_uncores = empty_uncore;
1392 return ret;
1393 }
1394
1395
1396 #define X86_UNCORE_MODEL_MATCH(model, init) \
1397 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }
1398
1399 struct intel_uncore_init_fun {
1400 void (*cpu_init)(void);
1401 int (*pci_init)(void);
1402 void (*mmio_init)(void);
1403 };
1404
1405 static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
1406 .cpu_init = nhm_uncore_cpu_init,
1407 };
1408
1409 static const struct intel_uncore_init_fun snb_uncore_init __initconst = {
1410 .cpu_init = snb_uncore_cpu_init,
1411 .pci_init = snb_uncore_pci_init,
1412 };
1413
1414 static const struct intel_uncore_init_fun ivb_uncore_init __initconst = {
1415 .cpu_init = snb_uncore_cpu_init,
1416 .pci_init = ivb_uncore_pci_init,
1417 };
1418
1419 static const struct intel_uncore_init_fun hsw_uncore_init __initconst = {
1420 .cpu_init = snb_uncore_cpu_init,
1421 .pci_init = hsw_uncore_pci_init,
1422 };
1423
1424 static const struct intel_uncore_init_fun bdw_uncore_init __initconst = {
1425 .cpu_init = snb_uncore_cpu_init,
1426 .pci_init = bdw_uncore_pci_init,
1427 };
1428
1429 static const struct intel_uncore_init_fun snbep_uncore_init __initconst = {
1430 .cpu_init = snbep_uncore_cpu_init,
1431 .pci_init = snbep_uncore_pci_init,
1432 };
1433
1434 static const struct intel_uncore_init_fun nhmex_uncore_init __initconst = {
1435 .cpu_init = nhmex_uncore_cpu_init,
1436 };
1437
1438 static const struct intel_uncore_init_fun ivbep_uncore_init __initconst = {
1439 .cpu_init = ivbep_uncore_cpu_init,
1440 .pci_init = ivbep_uncore_pci_init,
1441 };
1442
1443 static const struct intel_uncore_init_fun hswep_uncore_init __initconst = {
1444 .cpu_init = hswep_uncore_cpu_init,
1445 .pci_init = hswep_uncore_pci_init,
1446 };
1447
1448 static const struct intel_uncore_init_fun bdx_uncore_init __initconst = {
1449 .cpu_init = bdx_uncore_cpu_init,
1450 .pci_init = bdx_uncore_pci_init,
1451 };
1452
1453 static const struct intel_uncore_init_fun knl_uncore_init __initconst = {
1454 .cpu_init = knl_uncore_cpu_init,
1455 .pci_init = knl_uncore_pci_init,
1456 };
1457
1458 static const struct intel_uncore_init_fun skl_uncore_init __initconst = {
1459 .cpu_init = skl_uncore_cpu_init,
1460 .pci_init = skl_uncore_pci_init,
1461 };
1462
1463 static const struct intel_uncore_init_fun skx_uncore_init __initconst = {
1464 .cpu_init = skx_uncore_cpu_init,
1465 .pci_init = skx_uncore_pci_init,
1466 };
1467
1468 static const struct intel_uncore_init_fun icl_uncore_init __initconst = {
1469 .cpu_init = icl_uncore_cpu_init,
1470 .pci_init = skl_uncore_pci_init,
1471 };
1472
1473 static const struct intel_uncore_init_fun snr_uncore_init __initconst = {
1474 .cpu_init = snr_uncore_cpu_init,
1475 .pci_init = snr_uncore_pci_init,
1476 .mmio_init = snr_uncore_mmio_init,
1477 };
1478
1479 static const struct x86_cpu_id intel_uncore_match[] __initconst = {
1480 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP, nhm_uncore_init),
1481 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM, nhm_uncore_init),
1482 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE, nhm_uncore_init),
1483 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EP, nhm_uncore_init),
1484 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE, snb_uncore_init),
1485 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE, ivb_uncore_init),
1486 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL, hsw_uncore_init),
1487 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_L, hsw_uncore_init),
1488 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_G, hsw_uncore_init),
1489 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL, bdw_uncore_init),
1490 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_G, bdw_uncore_init),
1491 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X, snbep_uncore_init),
1492 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EX, nhmex_uncore_init),
1493 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EX, nhmex_uncore_init),
1494 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, ivbep_uncore_init),
1495 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_X, hswep_uncore_init),
1496 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, bdx_uncore_init),
1497 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_D, bdx_uncore_init),
1498 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_uncore_init),
1499 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_uncore_init),
1500 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE, skl_uncore_init),
1501 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_L, skl_uncore_init),
1502 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X, skx_uncore_init),
1503 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_L, skl_uncore_init),
1504 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE, skl_uncore_init),
1505 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_L, icl_uncore_init),
1506 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_NNPI, icl_uncore_init),
1507 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE, icl_uncore_init),
1508 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ATOM_TREMONT_D, snr_uncore_init),
1509 {},
1510 };
1511
1512 MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);
1513
1514 static int __init intel_uncore_init(void)
1515 {
1516 const struct x86_cpu_id *id;
1517 struct intel_uncore_init_fun *uncore_init;
1518 int pret = 0, cret = 0, mret = 0, ret;
1519
1520 id = x86_match_cpu(intel_uncore_match);
1521 if (!id)
1522 return -ENODEV;
1523
1524 if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
1525 return -ENODEV;
1526
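/* upper bound on logical die ids; used to size the per-die box arrays */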
1527 max_dies = topology_max_packages() * topology_max_die_per_package();
1528
1529 uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
1530 if (uncore_init->pci_init) {
1531 pret = uncore_init->pci_init();
1532 if (!pret)
1533 pret = uncore_pci_init();
1534 }
1535
1536 if (uncore_init->cpu_init) {
1537 uncore_init->cpu_init();
1538 cret = uncore_cpu_init();
1539 }
1540
1541 if (uncore_init->mmio_init) {
1542 uncore_init->mmio_init();
1543 mret = uncore_mmio_init();
1544 }
1545
1546 if (cret && pret && mret)
1547 return -ENODEV;
1548
1549 /* Install hotplug callbacks to setup the targets for each package */
1550 ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
1551 "perf/x86/intel/uncore:online",
1552 uncore_event_cpu_online,
1553 uncore_event_cpu_offline);
1554 if (ret)
1555 goto err;
1556 return 0;
1557
1558 err:
1559 uncore_types_exit(uncore_msr_uncores);
1560 uncore_types_exit(uncore_mmio_uncores);
1561 uncore_pci_exit();
1562 return ret;
1563 }
1564 module_init(intel_uncore_init);
1565
1566 static void __exit intel_uncore_exit(void)
1567 {
1568 cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
1569 uncore_types_exit(uncore_msr_uncores);
1570 uncore_types_exit(uncore_mmio_uncores);
1571 uncore_pci_exit();
1572 }
1573 module_exit(intel_uncore_exit);