This source file includes the following definitions:
- uncore_pmc_fixed
- uncore_pmc_freerunning
- uncore_mmio_box_ctl
- uncore_pci_box_ctl
- uncore_pci_fixed_ctl
- uncore_pci_fixed_ctr
- uncore_pci_event_ctl
- uncore_pci_perf_ctr
- uncore_msr_box_offset
- uncore_msr_box_ctl
- uncore_msr_fixed_ctl
- uncore_msr_fixed_ctr
- uncore_freerunning_idx
- uncore_freerunning_type
- uncore_freerunning_counter
- uncore_msr_event_ctl
- uncore_msr_perf_ctr
- uncore_fixed_ctl
- uncore_fixed_ctr
- uncore_event_ctl
- uncore_perf_ctr
- uncore_perf_ctr_bits
- uncore_fixed_ctr_bits
- uncore_freerunning_bits
- uncore_num_freerunning
- uncore_num_freerunning_types
- check_valid_freerunning_event
- uncore_num_counters
- is_freerunning_event
- uncore_freerunning_hw_config
- uncore_disable_event
- uncore_enable_event
- uncore_read_counter
- uncore_box_init
- uncore_box_exit
- uncore_box_is_fake
- uncore_event_to_pmu
- uncore_event_to_box

#include <linux/slab.h>
#include <linux/pci.h>
#include <asm/apicdef.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include <linux/perf_event.h>
#include "../perf_event.h"

#define UNCORE_PMU_NAME_LEN		32
#define UNCORE_PMU_HRTIMER_INTERVAL	(60LL * NSEC_PER_SEC)
#define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC)

#define UNCORE_FIXED_EVENT		0xff
#define UNCORE_PMC_IDX_MAX_GENERIC	8
#define UNCORE_PMC_IDX_MAX_FIXED	1
#define UNCORE_PMC_IDX_MAX_FREERUNNING	1
#define UNCORE_PMC_IDX_FIXED		UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_FREERUNNING	(UNCORE_PMC_IDX_FIXED + \
					 UNCORE_PMC_IDX_MAX_FIXED)
#define UNCORE_PMC_IDX_MAX		(UNCORE_PMC_IDX_FREERUNNING + \
					 UNCORE_PMC_IDX_MAX_FREERUNNING)
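
/*
 * With the values above, the per-box counter index space works out to:
 * generic counters at indices 0..7, the fixed counter at index 8
 * (UNCORE_PMC_IDX_FIXED), the freerunning pseudo-counter at index 9
 * (UNCORE_PMC_IDX_FREERUNNING), and UNCORE_PMC_IDX_MAX == 10.
 */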

#define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx)	\
		((dev << 24) | (func << 16) | (type << 8) | idx)
#define UNCORE_PCI_DEV_DATA(type, idx)	((type << 8) | idx)
#define UNCORE_PCI_DEV_DEV(data)	((data >> 24) & 0xff)
#define UNCORE_PCI_DEV_FUNC(data)	((data >> 16) & 0xff)
#define UNCORE_PCI_DEV_TYPE(data)	((data >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data)	(data & 0xff)
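
/*
 * Worked example (illustrative values, not a real device table entry):
 * UNCORE_PCI_DEV_FULL_DATA(0x1e, 3, 1, 0) packs to 0x1e030100, and the
 * accessors recover each field: UNCORE_PCI_DEV_DEV() == 0x1e,
 * UNCORE_PCI_DEV_FUNC() == 3, UNCORE_PCI_DEV_TYPE() == 1,
 * UNCORE_PCI_DEV_IDX() == 0.
 */
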
#define UNCORE_EXTRA_PCI_DEV		0xff
#define UNCORE_EXTRA_PCI_DEV_MAX	4

#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)

struct pci_extra_dev {
	struct pci_dev *dev[UNCORE_EXTRA_PCI_DEV_MAX];
};

struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;
struct freerunning_counters;

struct intel_uncore_type {
	const char *name;
	int num_counters;
	int num_boxes;
	int perf_ctr_bits;
	int fixed_ctr_bits;
	int num_freerunning_types;
	unsigned perf_ctr;
	unsigned event_ctl;
	unsigned event_mask;
	unsigned event_mask_ext;
	unsigned fixed_ctr;
	unsigned fixed_ctl;
	unsigned box_ctl;
	union {
		unsigned msr_offset;
		unsigned mmio_offset;
	};
	unsigned num_shared_regs:8;
	unsigned single_fixed:1;
	unsigned pair_ctr_ctl:1;
	unsigned *msr_offsets;
	struct event_constraint unconstrainted;
	struct event_constraint *constraints;
	struct intel_uncore_pmu *pmus;
	struct intel_uncore_ops *ops;
	struct uncore_event_desc *event_descs;
	struct freerunning_counters *freerunning;
	const struct attribute_group *attr_groups[4];
	struct pmu *pmu;
};

#define pmu_group attr_groups[0]
#define format_group attr_groups[1]
#define events_group attr_groups[2]

struct intel_uncore_ops {
	void (*init_box)(struct intel_uncore_box *);
	void (*exit_box)(struct intel_uncore_box *);
	void (*disable_box)(struct intel_uncore_box *);
	void (*enable_box)(struct intel_uncore_box *);
	void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
	void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
	u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
	int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
	struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
						   struct perf_event *);
	void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
};

struct intel_uncore_pmu {
	struct pmu pmu;
	char name[UNCORE_PMU_NAME_LEN];
	int pmu_idx;
	int func_id;
	bool registered;
	atomic_t activeboxes;
	struct intel_uncore_type *type;
	struct intel_uncore_box **boxes;
};

struct intel_uncore_extra_reg {
	raw_spinlock_t lock;
	u64 config, config1, config2;
	atomic_t ref;
};

struct intel_uncore_box {
	int pci_phys_id;
	int dieid;	/* Logical die ID */
	int n_active;	/* number of active events */
	int n_events;
	int cpu;	/* cpu to collect events */
	unsigned long flags;
	atomic_t refcnt;
	struct perf_event *events[UNCORE_PMC_IDX_MAX];
	struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
	struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
	unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	u64 tags[UNCORE_PMC_IDX_MAX];
	struct pci_dev *pci_dev;
	struct intel_uncore_pmu *pmu;
	u64 hrtimer_duration;	/* hrtimer timeout for this box */
	struct hrtimer hrtimer;
	struct list_head list;
	struct list_head active_list;
	void __iomem *io_addr;
	struct intel_uncore_extra_reg shared_regs[];	/* flexible array, must be last */
};

#define CFL_UNC_CBO_7_PERFEVTSEL0	0xf70
#define CFL_UNC_CBO_7_PER_CTR0		0xf76

#define UNCORE_BOX_FLAG_INITIATED	0
/* event config registers are 8-byte apart */
#define UNCORE_BOX_FLAG_CTL_OFFS8	1
/* CFL 8th CBOX MSRs */
#define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS	2

struct uncore_event_desc {
	struct kobj_attribute attr;
	const char *config;
};

struct freerunning_counters {
	unsigned int counter_base;
	unsigned int counter_offset;
	unsigned int box_offset;
	unsigned int num_counters;
	unsigned int bits;
};

struct pci2phy_map {
	struct list_head list;
	int segment;
	int pbus_to_physid[256];
};

struct pci2phy_map *__find_pci2phy_map(int segment);
int uncore_pcibus_to_physid(struct pci_bus *bus);

ssize_t uncore_event_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf);

#define INTEL_UNCORE_EVENT_DESC(_name, _config)			\
{								\
	.attr	= __ATTR(_name, 0444, uncore_event_show, NULL),	\
	.config	= _config,					\
}
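
/*
 * Usage sketch (the event string below is illustrative, not tied to any
 * particular uncore unit): a driver's event table is built from entries
 * such as
 *
 *	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
 *
 * which creates a read-only sysfs attribute named "clockticks" whose
 * show routine (uncore_event_show) prints the config string.
 */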

#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)			\
static ssize_t __uncore_##_var##_show(struct kobject *kobj,		\
				      struct kobj_attribute *attr,	\
				      char *page)			\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct kobj_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
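
/*
 * Example expansion (illustrative): DEFINE_UNCORE_FORMAT_ATTR(event,
 * event, "config:0-7") defines __uncore_event_show(), which prints
 * "config:0-7\n", and a kobj_attribute named format_attr_event that can
 * be placed in a format_group so perf tooling learns the field layout.
 */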

static inline bool uncore_pmc_fixed(int idx)
{
	return idx == UNCORE_PMC_IDX_FIXED;
}

static inline bool uncore_pmc_freerunning(int idx)
{
	return idx == UNCORE_PMC_IDX_FREERUNNING;
}

static inline
unsigned int uncore_mmio_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl +
	       box->pmu->type->mmio_offset * box->pmu->pmu_idx;
}
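
/*
 * Example (illustrative register values, not from a real unit): with
 * box_ctl == 0x100 and mmio_offset == 0x80, the box driven by the PMU
 * at pmu_idx 2 has its control register at 0x100 + 0x80 * 2 == 0x200.
 */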

static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl;
}

static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctl;
}

static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr;
}

static inline
unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags))
		return idx * 8 + box->pmu->type->event_ctl;

	return idx * 4 + box->pmu->type->event_ctl;
}

static inline
unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return idx * 8 + box->pmu->type->perf_ctr;
}

static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
{
	struct intel_uncore_pmu *pmu = box->pmu;
	return pmu->type->msr_offsets ?
		pmu->type->msr_offsets[pmu->pmu_idx] :
		pmu->type->msr_offset * pmu->pmu_idx;
}

static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->box_ctl)
		return 0;
	return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->fixed_ctl)
		return 0;
	return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
}

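/*
 * Freerunning counters have no event code of their own; they are encoded
 * as event 0xff (the same code the fixed counters use) plus a umask.
 * Fixed counters use umask values below 0x10, so freerunning umasks start
 * at UNCORE_FREERUNNING_UMASK_START: the high nibble of the umask selects
 * the freerunning counter type and the low nibble selects the index
 * within that type, as decoded by the two helpers below.
 */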
static inline unsigned int uncore_freerunning_idx(u64 config)
{
	return ((config >> 8) & 0xf);
}

#define UNCORE_FREERUNNING_UMASK_START		0x10

static inline unsigned int uncore_freerunning_type(u64 config)
{
	return ((((config >> 8) - UNCORE_FREERUNNING_UMASK_START) >> 4) & 0xf);
}
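
/*
 * Worked example (illustrative): an event encoded as event=0xff,umask=0x22
 * has (config >> 8) == 0x22, so uncore_freerunning_type() returns
 * ((0x22 - 0x10) >> 4) & 0xf == 1 and uncore_freerunning_idx() returns
 * 0x22 & 0xf == 2: the third counter of the second freerunning type.
 */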

static inline
unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
					struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);
	unsigned int idx = uncore_freerunning_idx(event->hw.config);
	struct intel_uncore_pmu *pmu = box->pmu;

	return pmu->type->freerunning[type].counter_base +
	       pmu->type->freerunning[type].counter_offset * idx +
	       pmu->type->freerunning[type].box_offset * pmu->pmu_idx;
}

static inline
unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
		return CFL_UNC_CBO_7_PERFEVTSEL0 +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
	} else {
		return box->pmu->type->event_ctl +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		       uncore_msr_box_offset(box);
	}
}

static inline
unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
		return CFL_UNC_CBO_7_PER_CTR0 +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
	} else {
		return box->pmu->type->perf_ctr +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		       uncore_msr_box_offset(box);
	}
}
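
/*
 * Layout note with an illustrative example: when pair_ctr_ctl is set,
 * each counter's control and data registers are laid out 2 MSRs apart
 * rather than 1. With made-up bases event_ctl == 0x400 and perf_ctr ==
 * 0x401, counter 2 would use control MSR 0x400 + 2 * 2 == 0x404 and
 * counter MSR 0x401 + 2 * 2 == 0x405 (ignoring any box offset).
 */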

static inline
unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_fixed_ctl(box);
	else
		return uncore_msr_fixed_ctl(box);
}

static inline
unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_fixed_ctr(box);
	else
		return uncore_msr_fixed_ctr(box);
}

static inline
unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_event_ctl(box, idx);
	else
		return uncore_msr_event_ctl(box, idx);
}

static inline
unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_perf_ctr(box, idx);
	else
		return uncore_msr_perf_ctr(box, idx);
}

static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->perf_ctr_bits;
}

static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr_bits;
}
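
/*
 * The counter width from uncore_perf_ctr_bits()/uncore_fixed_ctr_bits()
 * is what makes deltas between raw reads meaningful. A minimal sketch of
 * a wrap-safe delta (illustrative helper, not part of the uncore API;
 * assumes bits < 64 and at most one wrap between reads):
 */
static inline u64 uncore_example_counter_delta(u64 prev, u64 now, int bits)
{
	/* Mask to the hardware counter width so a single wrap is handled. */
	return (now - prev) & (((u64)1 << bits) - 1);
}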

static inline
unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
				     struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);

	return box->pmu->type->freerunning[type].bits;
}

static inline int uncore_num_freerunning(struct intel_uncore_box *box,
					 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);

	return box->pmu->type->freerunning[type].num_counters;
}

static inline int uncore_num_freerunning_types(struct intel_uncore_box *box,
					       struct perf_event *event)
{
	return box->pmu->type->num_freerunning_types;
}

static inline bool check_valid_freerunning_event(struct intel_uncore_box *box,
						 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);
	unsigned int idx = uncore_freerunning_idx(event->hw.config);

	return (type < uncore_num_freerunning_types(box, event)) &&
	       (idx < uncore_num_freerunning(box, event));
}

static inline int uncore_num_counters(struct intel_uncore_box *box)
{
	return box->pmu->type->num_counters;
}

static inline bool is_freerunning_event(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	return ((cfg & UNCORE_FIXED_EVENT) == UNCORE_FIXED_EVENT) &&
	       (((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START);
}
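
/*
 * Example configs (illustrative): 0x00ff (event=0xff, umask=0x00) is the
 * fixed-counter encoding, so is_freerunning_event() returns false, while
 * 0x10ff (event=0xff, umask=0x10) names the first counter of the first
 * freerunning type and returns true.
 */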

static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box,
					       struct perf_event *event)
{
	if (is_freerunning_event(event))
		return 0;

	return -EINVAL;
}

static inline void uncore_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	box->pmu->type->ops->disable_event(box, event);
}

static inline void uncore_enable_event(struct intel_uncore_box *box,
				       struct perf_event *event)
{
	box->pmu->type->ops->enable_event(box, event);
}

static inline u64 uncore_read_counter(struct intel_uncore_box *box,
				      struct perf_event *event)
{
	return box->pmu->type->ops->read_counter(box, event);
}

static inline void uncore_box_init(struct intel_uncore_box *box)
{
	if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->init_box)
			box->pmu->type->ops->init_box(box);
	}
}

static inline void uncore_box_exit(struct intel_uncore_box *box)
{
	if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->exit_box)
			box->pmu->type->ops->exit_box(box);
	}
}

/* A fake box, used only while validating event groups, has no real die. */
static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{
	return (box->dieid < 0);
}

static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}

static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
	return event->pmu_private;
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
void uncore_mmio_exit_box(struct intel_uncore_box *box);
u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
			     struct perf_event *event);
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_event_start(struct perf_event *event, int flags);
void uncore_pmu_event_stop(struct perf_event *event, int flags);
int uncore_pmu_event_add(struct perf_event *event, int flags);
void uncore_pmu_event_del(struct perf_event *event, int flags);
void uncore_pmu_event_read(struct perf_event *event);
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);

extern struct intel_uncore_type **uncore_msr_uncores;
extern struct intel_uncore_type **uncore_pci_uncores;
extern struct intel_uncore_type **uncore_mmio_uncores;
extern struct pci_driver *uncore_pci_driver;
extern raw_spinlock_t pci2phy_map_lock;
extern struct list_head pci2phy_map_head;
extern struct pci_extra_dev *uncore_extra_pci_dev;
extern struct event_constraint uncore_constraint_empty;

/* uncore_snb.c */
int snb_uncore_pci_init(void);
int ivb_uncore_pci_init(void);
int hsw_uncore_pci_init(void);
int bdw_uncore_pci_init(void);
int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
void icl_uncore_cpu_init(void);
int snb_pci2phy_map_init(int devid);

/* uncore_snbep.c */
int snbep_uncore_pci_init(void);
void snbep_uncore_cpu_init(void);
int ivbep_uncore_pci_init(void);
void ivbep_uncore_cpu_init(void);
int hswep_uncore_pci_init(void);
void hswep_uncore_cpu_init(void);
int bdx_uncore_pci_init(void);
void bdx_uncore_cpu_init(void);
int knl_uncore_pci_init(void);
void knl_uncore_cpu_init(void);
int skx_uncore_pci_init(void);
void skx_uncore_cpu_init(void);
int snr_uncore_pci_init(void);
void snr_uncore_cpu_init(void);
void snr_uncore_mmio_init(void);

/* uncore_nhmex.c */
void nhmex_uncore_cpu_init(void);