/*
 * Performance events x86 architecture header
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

#if 0
#undef wrmsrl
#define wrmsrl(msr, val)						\
do {									\
	unsigned int _msr = (msr);					\
	u64 _val = (val);						\
	trace_printk("wrmsrl(%x, %Lx)\n", (unsigned int)(_msr),		\
			(unsigned long long)(_val));			\
	native_write_msr((_msr), (u32)(_val), (u32)(_val >> 32));	\
} while (0)
#endif

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slot in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
	EXTRA_REG_NONE  = -1,	/* not used */

	EXTRA_REG_RSP_0 = 0,	/* offcore_response_0 */
	EXTRA_REG_RSP_1 = 1,	/* offcore_response_1 */
	EXTRA_REG_LBR   = 2,	/* lbr_select */
	EXTRA_REG_LDLAT = 3,	/* ld_lat_threshold */

	EXTRA_REG_MAX		/* number of entries needed */
};
53 
54 struct event_constraint {
55 	union {
56 		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
57 		u64		idxmsk64;
58 	};
59 	u64	code;
60 	u64	cmask;
61 	int	weight;
62 	int	overlap;
63 	int	flags;
64 };
65 /*
66  * struct hw_perf_event.flags flags
67  */
68 #define PERF_X86_EVENT_PEBS_LDLAT	0x0001 /* ld+ldlat data address sampling */
69 #define PERF_X86_EVENT_PEBS_ST		0x0002 /* st data address sampling */
70 #define PERF_X86_EVENT_PEBS_ST_HSW	0x0004 /* haswell style datala, store */
71 #define PERF_X86_EVENT_COMMITTED	0x0008 /* event passed commit_txn */
72 #define PERF_X86_EVENT_PEBS_LD_HSW	0x0010 /* haswell style datala, load */
73 #define PERF_X86_EVENT_PEBS_NA_HSW	0x0020 /* haswell style datala, unknown */
74 #define PERF_X86_EVENT_EXCL		0x0040 /* HT exclusivity on counter */
75 #define PERF_X86_EVENT_DYNAMIC		0x0080 /* dynamic alloc'd constraint */
76 #define PERF_X86_EVENT_RDPMC_ALLOWED	0x0100 /* grant rdpmc permission */
77 #define PERF_X86_EVENT_EXCL_ACCT	0x0200 /* accounted EXCL event */
78 
79 
80 struct amd_nb {
81 	int nb_id;  /* NorthBridge id */
82 	int refcnt; /* reference count */
83 	struct perf_event *owners[X86_PMC_IDX_MAX];
84 	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
85 };
86 
87 /* The maximal number of PEBS events: */
88 #define MAX_PEBS_EVENTS		8
89 
90 /*
91  * A debug store configuration.
92  *
93  * We only support architectures that use 64bit fields.
94  */
95 struct debug_store {
96 	u64	bts_buffer_base;
97 	u64	bts_index;
98 	u64	bts_absolute_maximum;
99 	u64	bts_interrupt_threshold;
100 	u64	pebs_buffer_base;
101 	u64	pebs_index;
102 	u64	pebs_absolute_maximum;
103 	u64	pebs_interrupt_threshold;
104 	u64	pebs_event_reset[MAX_PEBS_EVENTS];
105 };
106 
107 /*
108  * Per register state.
109  */
110 struct er_account {
111 	raw_spinlock_t		lock;	/* per-core: protect structure */
112 	u64                 config;	/* extra MSR config */
113 	u64                 reg;	/* extra MSR number */
114 	atomic_t            ref;	/* reference count */
115 };
116 
117 /*
118  * Per core/cpu state
119  *
120  * Used to coordinate shared registers between HT threads or
121  * among events on a single PMU.
122  */
123 struct intel_shared_regs {
124 	struct er_account       regs[EXTRA_REG_MAX];
125 	int                     refcnt;		/* per-core: #HT threads */
126 	unsigned                core_id;	/* per-core: core id */
127 };
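
/*
 * Illustrative only: a minimal sketch (not the real constraint code, the
 * helper name is made up) of how an event claims one of the shared
 * er_account slots above. A slot can be taken if it is unused or already
 * programmed with the same extra MSR value; the actual logic lives in the
 * Intel constraint handling code.
 */
#if 0
static bool example_claim_shared_reg(struct intel_shared_regs *sr, int idx,
				     u64 msr, u64 config)
{
	struct er_account *era = &sr->regs[idx];
	unsigned long flags;
	bool claimed = false;

	raw_spin_lock_irqsave(&era->lock, flags);
	if (!atomic_read(&era->ref) || era->config == config) {
		era->config = config;
		era->reg    = msr;
		atomic_inc(&era->ref);
		claimed = true;
	}
	raw_spin_unlock_irqrestore(&era->lock, flags);

	return claimed;
}
#endif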

enum intel_excl_state_type {
	INTEL_EXCL_UNUSED    = 0, /* counter is unused */
	INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
	INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
};

struct intel_excl_states {
	enum intel_excl_state_type init_state[X86_PMC_IDX_MAX];
	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
	bool sched_started; /* true if scheduling has started */
};

struct intel_excl_cntrs {
	raw_spinlock_t	lock;

	struct intel_excl_states states[2];

	union {
		u16	has_exclusive[2];
		u32	exclusive_present;
	};

	int		refcnt;		/* per-core: #HT threads */
	unsigned	core_id;	/* per-core: core id */
};

#define MAX_LBR_ENTRIES		16

enum {
	X86_PERF_KFREE_SHARED = 0,
	X86_PERF_KFREE_EXCL   = 1,
	X86_PERF_KFREE_MAX
};

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events; /* the # of events in the below arrays */
	int			n_added;  /* the # last events in the below arrays;
					     they've never been enabled yet */
	int			n_txn;    /* the # last events in the below arrays;
					     added in the current transaction */
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];

	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
	struct event_constraint	*event_constraint[X86_PMC_IDX_MAX];

	int			n_excl; /* the number of exclusive events */

	unsigned int		group_flag;
	int			is_fake;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	u64			pebs_enabled;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	void				*lbr_context;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
	struct er_account		*lbr_sel;
	u64				br_sel;

	/*
	 * Intel host/guest exclude bits
	 */
	u64				intel_ctrl_guest_mask;
	u64				intel_ctrl_host_mask;
	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];

	/*
	 * Intel checkpoint mask
	 */
	u64				intel_cp_status;

	/*
	 * manage shared (per-core, per-cpu) registers
	 * used on Intel NHM/WSM/SNB
	 */
	struct intel_shared_regs	*shared_regs;
	/*
	 * manage exclusive counter access between hyperthread
	 */
	struct event_constraint *constraint_list; /* in enable order */
	struct intel_excl_cntrs		*excl_cntrs;
	int excl_thread_id; /* 0 or 1 */

	/*
	 * AMD specific bits
	 */
	struct amd_nb			*amd_nb;
	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
	u64				perf_ctr_virt_mask;

	void				*kfree_on_online[X86_PERF_KFREE_MAX];
};

#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
	.overlap = (o),			\
	.flags = f,			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)

#define INTEL_EXCLEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
			   0, PERF_X86_EVENT_EXCL)
/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It may fail to schedule the events then. So we set the
 * overlap flag for such constraints to give the scheduler a hint which
 * events to select for counter rescheduling.
 *
 * Care must be taken as the rescheduling algorithm is O(n!) which
 * will increase scheduling cycles for an over-committed system
 * dramatically. The number of such EVENT_CONSTRAINT_OVERLAP() macros
 * and their counter masks must be kept to a minimum.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 *  The other filters are supported by fixed counters.
 *  The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
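
/*
 * Illustrative only: the per-model Intel constraint tables (see
 * perf_event_intel.c) typically pin the architectural events onto the
 * fixed counters with entries of this form. The (1ULL << (32+n)) counter
 * mask selects the n-th fixed counter, which sits above the general
 * purpose counters in idxmsk64.
 */
#if 0
static struct event_constraint example_fixed_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};
#endif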

/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

/* Like UEVENT_CONSTRAINT, but match flags too */
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

#define INTEL_EXCLUEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)

#define INTEL_PLD_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)

#define INTEL_PST_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)

/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n)	\
	EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)

/* Check flags and event code, and set the HSW store flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)
/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)


/*
 * We define the end marker as having a weight of -1
 * to enable blacklisting of events using a counter bitmask
 * of zero and thus a weight of zero.
 * The end marker has a weight that cannot possibly be
 * obtained from counting the bits in the bitmask.
 */
#define EVENT_CONSTRAINT_END { .weight = -1 }

/*
 * Check for end marker with weight == -1
 */
#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight != -1; (e)++)
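
/*
 * Illustrative only: a sketch (hypothetical helper name) of the usual
 * lookup loop over a constraint table terminated by EVENT_CONSTRAINT_END,
 * along the lines of what x86_get_event_constraints() does.
 */
#if 0
static struct event_constraint *
example_find_constraint(struct perf_event *event)
{
	struct event_constraint *c;

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &unconstrained;
}
#endif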

/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between the PMUs of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_events.
 */
struct extra_reg {
	unsigned int		event;
	unsigned int		msr;
	u64			config_mask;
	u64			valid_mask;
	int			idx;  /* per_xxx->regs[] reg index */
	bool			extra_msr_access;
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
	.event = (e),			\
	.msr = (ms),			\
	.config_mask = (m),		\
	.valid_mask = (vm),		\
	.idx = EXTRA_REG_##i,		\
	.extra_msr_access = true,	\
	}

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
			ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
	INTEL_UEVENT_EXTRA_REG(c, \
			       MSR_PEBS_LD_LAT_THRESHOLD, \
			       0xffff, \
			       LDLAT)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
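
/*
 * Illustrative only: a model's extra_regs table is built from the macros
 * above and terminated with EVENT_EXTRA_END, roughly like the Nehalem
 * table in perf_event_intel.c (values below are for illustration):
 */
#if 0
static struct extra_reg example_extra_regs[] __read_mostly = {
	/* OFFCORE_RESPONSE (event 0xb7, umask 0x01) needs MSR_OFFCORE_RSP_0 */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	/* load latency threshold MSR for the PEBS load latency event */
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};
#endif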

union perf_capabilities {
	struct {
		u64	lbr_format:6;
		u64	pebs_trap:1;
		u64	pebs_arch_reg:1;
		u64	pebs_format:4;
		u64	smm_freeze:1;
		/*
		 * PMU supports separate counter range for writing
		 * values > 32bit.
		 */
		u64	full_width_write:1;
	};
	u64	capabilities;
};

struct x86_pmu_quirk {
	struct x86_pmu_quirk *next;
	void (*func)(void);
};

union x86_pmu_config {
	struct {
		u64 event:8,
		    umask:8,
		    usr:1,
		    os:1,
		    edge:1,
		    pc:1,
		    interrupt:1,
		    __reserved1:1,
		    en:1,
		    inv:1,
		    cmask:8,
		    event2:4,
		    __reserved2:4,
		    go:1,
		    ho:1;
	} bits;
	u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
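
/*
 * Illustrative only: X86_CONFIG() assembles a raw event select value from
 * named bitfields. For instance, the PEBS alias code rewrites a cycles
 * event into "INST_RETIRED with inv=1, cmask=16", which the hardware
 * counts as total cycles:
 *
 *	u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
 */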

enum {
	x86_lbr_exclusive_lbr,
	x86_lbr_exclusive_bts,
	x86_lbr_exclusive_pt,
	x86_lbr_exclusive_max,
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	int		(*addr_offset)(int index, bool eventsel);
	int		(*rdpmc_index)(int index);
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	union {
			unsigned long events_maskl;
			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
	};
	int		events_mask_len;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 int idx,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);

	void		(*start_scheduling)(struct cpu_hw_events *cpuc);

	void		(*stop_scheduling)(struct cpu_hw_events *cpuc);

	struct event_constraint *event_constraints;
	struct x86_pmu_quirk *quirks;
	int		perfctr_second_write;
	bool		late_ack;
	unsigned	(*limit_period)(struct perf_event *event, unsigned l);

	/*
	 * sysfs attrs
	 */
	int		attr_rdpmc_broken;
	int		attr_rdpmc;
	struct attribute **format_attrs;
	struct attribute **event_attrs;

	ssize_t		(*events_sysfs_show)(char *page, u64 config);
	struct attribute **cpu_events;

	/*
	 * CPU Hotplug hooks
	 */
	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	void		(*check_microcode)(void);
	void		(*sched_task)(struct perf_event_context *ctx,
				      bool sched_in);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64			intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	unsigned int	bts		:1,
			bts_active	:1,
			pebs		:1,
			pebs_active	:1,
			pebs_broken	:1;
	int		pebs_record_size;
	void		(*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;
	void		(*pebs_aliases)(struct perf_event *event);
	int		max_pebs_events;

	/*
	 * Intel LBR
	 */
	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
	int		lbr_nr;			   /* hardware stack size */
	u64		lbr_sel_mask;		   /* LBR_SELECT valid bits */
	const int	*lbr_sel_map;		   /* lbr_select mappings */
	bool		lbr_double_abort;	   /* duplicated lbr aborts */

	/*
	 * Intel PT/LBR/BTS are exclusive
	 */
	atomic_t	lbr_exclusive[x86_lbr_exclusive_max];

	/*
	 * Extra registers for events
	 */
	struct extra_reg *extra_regs;
	unsigned int flags;

	/*
	 * Intel host/guest support (KVM)
	 */
	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
};

struct x86_perf_task_context {
	u64 lbr_from[MAX_LBR_ENTRIES];
	u64 lbr_to[MAX_LBR_ENTRIES];
	int lbr_callstack_users;
	int lbr_stack_state;
};

#define x86_add_quirk(func_)						\
do {									\
	static struct x86_pmu_quirk __quirk __initdata = {		\
		.func = func_,						\
	};								\
	__quirk.next = x86_pmu.quirks;					\
	x86_pmu.quirks = &__quirk;					\
} while (0)
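
/*
 * Quirks are registered from __init code and run once during PMU setup,
 * e.g. the Intel init path registers its model-specific fixups with calls
 * like (illustrative):
 *
 *	x86_add_quirk(intel_clovertown_quirk);
 */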

/*
 * x86_pmu flags
 */
#define PMU_FL_NO_HT_SHARING	0x1 /* no hyper-threading resource sharing */
#define PMU_FL_HAS_RSP_1	0x2 /* has 2 equivalent offcore_rsp regs   */
#define PMU_FL_EXCL_CNTRS	0x4 /* has exclusive counter requirements  */
#define PMU_FL_EXCL_ENABLED	0x8 /* exclusive counter active */

#define EVENT_VAR(_id)  event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)						\
static struct perf_pmu_events_attr EVENT_VAR(_id) = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= PERF_COUNT_HW_##_id,				\
	.event_str	= NULL,						\
};

#define EVENT_ATTR_STR(_name, v, str)					\
static struct perf_pmu_events_attr event_attr_##v = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= 0,						\
	.event_str	= str,						\
};
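
/*
 * Illustrative only: typical definitions built from the macros above.
 * The generic code defines the architectural events this way and the
 * model code uses the _STR form for raw event strings; EVENT_PTR() then
 * places them in a sysfs attribute array such as x86_pmu.cpu_events.
 */
#if 0
EVENT_ATTR(cpu-cycles, CPU_CYCLES);
EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
#endif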

extern struct x86_pmu x86_pmu __read_mostly;

static inline bool x86_pmu_has_lbr_callstack(void)
{
	return  x86_pmu.lbr_sel_map &&
		x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
}

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
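
/*
 * Illustrative only: a model init function usually copies its table into
 * hw_cache_event_ids and may patch individual entries, indexed with the
 * C() shorthand (raw_id below is a placeholder):
 *
 *	memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
 *	       sizeof(hw_cache_event_ids));
 *	hw_cache_event_ids[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = raw_id;
 */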

u64 x86_perf_event_update(struct perf_event *event);

static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + (x86_pmu.addr_offset ?
				   x86_pmu.addr_offset(index, true) : index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + (x86_pmu.addr_offset ?
				  x86_pmu.addr_offset(index, false) : index);
}

static inline int x86_pmu_rdpmc_index(int index)
{
	return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}

int x86_add_exclusive(unsigned int what);

void x86_del_exclusive(unsigned int what);

int x86_reserve_hardware(void);

void x86_release_hardware(void);

void hw_perf_lbr_event_destroy(struct perf_event *event);

int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  u64 enable_mask)
{
	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

	if (hwc->extra_reg.reg)
		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}

void x86_pmu_enable_all(int added);

int perf_assign_events(struct event_constraint **constraints, int n,
			int wmin, int wmax, int gpmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	return (long)ip < 0;
#endif
}

/*
 * Not all PMUs provide the right context information to place the reported IP
 * into full context. Specifically segment registers are typically not
 * supplied.
 *
 * Assuming the address is a linear address (it is for IBS), we fake the CS and
 * vm86 mode using the known zero-based code segment and 'fix up' the registers
 * to reflect this.
 *
 * Intel PEBS/LBR appear to typically provide the effective address, nothing
 * much we can do about that but pray and treat it like a linear address.
 */
static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
{
	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
	if (regs->flags & X86_VM_MASK)
		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
	regs->ip = ip;
}

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
ssize_t intel_event_sysfs_show(char *page, u64 config);

#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL

static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
{
	/* user explicitly requested branch sampling */
	if (has_branch_stack(event))
		return true;

	/* implicit branch sampling to correct PEBS skid */
	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1 &&
	    x86_pmu.intel_cap.pebs_format < 2)
		return true;

	return false;
}

static inline bool intel_pmu_has_bts(struct perf_event *event)
{
	if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	    !event->attr.freq && event->hw.sample_period == 1)
		return true;

	return false;
}

int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event);

struct intel_shared_regs *allocate_shared_regs(int cpu);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

extern struct event_constraint bts_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);

extern struct event_constraint intel_core2_pebs_event_constraints[];

extern struct event_constraint intel_atom_pebs_event_constraints[];

extern struct event_constraint intel_slm_pebs_event_constraints[];

extern struct event_constraint intel_nehalem_pebs_event_constraints[];

extern struct event_constraint intel_westmere_pebs_event_constraints[];

extern struct event_constraint intel_snb_pebs_event_constraints[];

extern struct event_constraint intel_ivb_pebs_event_constraints[];

extern struct event_constraint intel_hsw_pebs_event_constraints[];

struct event_constraint *intel_pebs_constraints(struct perf_event *event);

void intel_pmu_pebs_enable(struct perf_event *event);

void intel_pmu_pebs_disable(struct perf_event *event);

void intel_pmu_pebs_enable_all(void);

void intel_pmu_pebs_disable_all(void);

void intel_ds_init(void);

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_enable(struct perf_event *event);

void intel_pmu_lbr_disable(struct perf_event *event);

void intel_pmu_lbr_enable_all(bool pmi);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_init_core(void);

void intel_pmu_lbr_init_nhm(void);

void intel_pmu_lbr_init_atom(void);

void intel_pmu_lbr_init_snb(void);

void intel_pmu_lbr_init_hsw(void);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

void intel_pt_interrupt(void);

int intel_bts_interrupt(void);

void intel_bts_enable_local(void);

void intel_bts_disable_local(void);

int p4_pmu_init(void);

int p6_pmu_init(void);

int knc_pmu_init(void);

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);

static inline int is_ht_workaround_enabled(void)
{
	return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
}
#else /* CONFIG_CPU_SUP_INTEL */

static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
	return 0;
}

static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	return NULL;
}

static inline int is_ht_workaround_enabled(void)
{
	return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */