/*
 * Intel(R) Processor Trace PMU driver for perf
 * Copyright (c) 2013-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Intel PT is specified in the Intel Architecture Instruction Set Extensions
 * Programming Reference:
 * http://software.intel.com/en-us/intel-isa-extensions
 */

#undef DEBUG

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <asm/perf_event.h>
#include <asm/insn.h>
#include <asm/io.h>

#include "perf_event.h"
#include "intel_pt.h"

static DEFINE_PER_CPU(struct pt, pt_ctx);

static struct pt_pmu pt_pmu;

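/*
 * CPUID register indices, in x86 register-encoding order
 * (rAX = 0, rCX = 1, rDX = 2, rBX = 3); pt_caps[] below and the
 * pt_pmu.caps[] array use the same indexing.
 */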
enum cpuid_regs {
	CR_EAX = 0,
	CR_ECX,
	CR_EDX,
	CR_EBX
};

/*
 * Capabilities of Intel PT hardware, such as number of address bits or
 * supported output schemes, are cached and exported to userspace as the
 * "caps" attribute group of the intel_pt PMU device
 * (/sys/bus/event_source/devices/intel_pt/caps/) so that userspace can store
 * relevant bits together with intel_pt traces.
 *
 * These caps are necessary both for trace decoding (payloads_lip indicates
 * the address width encoded in IP-related packets) and for event
 * configuration (bitmasks of permitted values for certain bit fields).
 */
#define PT_CAP(_n, _l, _r, _m)						\
	[PT_CAP_ ## _n] = { .name = __stringify(_n), .leaf = _l,	\
			    .reg = _r, .mask = _m }

static struct pt_cap_desc {
	const char	*name;
	u32		leaf;
	u8		reg;
	u32		mask;
} pt_caps[] = {
	PT_CAP(max_subleaf,		0, CR_EAX, 0xffffffff),
	PT_CAP(cr3_filtering,		0, CR_EBX, BIT(0)),
	PT_CAP(psb_cyc,			0, CR_EBX, BIT(1)),
	PT_CAP(mtc,			0, CR_EBX, BIT(3)),
	PT_CAP(topa_output,		0, CR_ECX, BIT(0)),
	PT_CAP(topa_multiple_entries,	0, CR_ECX, BIT(1)),
	PT_CAP(single_range_output,	0, CR_ECX, BIT(2)),
	PT_CAP(payloads_lip,		0, CR_ECX, BIT(31)),
	PT_CAP(mtc_periods,		1, CR_EAX, 0xffff0000),
	PT_CAP(cycle_thresholds,	1, CR_EBX, 0xffff),
	PT_CAP(psb_periods,		1, CR_EBX, 0xffff0000),
};

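/*
 * Look up a capability in the cached CPUID data: pick the register word
 * cached for this capability's leaf, then mask out the relevant bits and
 * shift them down to bit 0.
 */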
static u32 pt_cap_get(enum pt_capabilities cap)
{
	struct pt_cap_desc *cd = &pt_caps[cap];
	u32 c = pt_pmu.caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg];
	unsigned int shift = __ffs(cd->mask);

	return (c & cd->mask) >> shift;
}

static ssize_t pt_cap_show(struct device *cdev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct dev_ext_attribute *ea =
		container_of(attr, struct dev_ext_attribute, attr);
	enum pt_capabilities cap = (long)ea->var;

	return snprintf(buf, PAGE_SIZE, "%x\n", pt_cap_get(cap));
}

static struct attribute_group pt_cap_group = {
	.name	= "caps",
};

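/*
 * These format attributes describe the layout of perf_event_attr::config:
 * the bit positions mirror the corresponding RTIT_CTL fields, so a valid
 * config value can be or'ed directly into the control MSR (see pt_config()).
 */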
PMU_FORMAT_ATTR(cyc,		"config:1"	);
PMU_FORMAT_ATTR(mtc,		"config:9"	);
PMU_FORMAT_ATTR(tsc,		"config:10"	);
PMU_FORMAT_ATTR(noretcomp,	"config:11"	);
PMU_FORMAT_ATTR(mtc_period,	"config:14-17"	);
PMU_FORMAT_ATTR(cyc_thresh,	"config:19-22"	);
PMU_FORMAT_ATTR(psb_period,	"config:24-27"	);

static struct attribute *pt_formats_attr[] = {
	&format_attr_cyc.attr,
	&format_attr_mtc.attr,
	&format_attr_tsc.attr,
	&format_attr_noretcomp.attr,
	&format_attr_mtc_period.attr,
	&format_attr_cyc_thresh.attr,
	&format_attr_psb_period.attr,
	NULL,
};

static struct attribute_group pt_format_group = {
	.name	= "format",
	.attrs	= pt_formats_attr,
};

static const struct attribute_group *pt_attr_groups[] = {
	&pt_cap_group,
	&pt_format_group,
	NULL,
};

static int __init pt_pmu_hw_init(void)
{
	struct dev_ext_attribute *de_attrs;
	struct attribute **attrs;
	size_t size;
	int ret;
	long i;

	attrs = NULL;

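	/* CPUID leaf 20 (0x14) enumerates Intel PT capabilities */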
	for (i = 0; i < PT_CPUID_LEAVES; i++) {
		cpuid_count(20, i,
			    &pt_pmu.caps[CR_EAX + i*PT_CPUID_REGS_NUM],
			    &pt_pmu.caps[CR_EBX + i*PT_CPUID_REGS_NUM],
			    &pt_pmu.caps[CR_ECX + i*PT_CPUID_REGS_NUM],
			    &pt_pmu.caps[CR_EDX + i*PT_CPUID_REGS_NUM]);
	}

	ret = -ENOMEM;
	size = sizeof(struct attribute *) * (ARRAY_SIZE(pt_caps)+1);
	attrs = kzalloc(size, GFP_KERNEL);
	if (!attrs)
		goto fail;

	size = sizeof(struct dev_ext_attribute) * (ARRAY_SIZE(pt_caps)+1);
	de_attrs = kzalloc(size, GFP_KERNEL);
	if (!de_attrs)
		goto fail;

	for (i = 0; i < ARRAY_SIZE(pt_caps); i++) {
		struct dev_ext_attribute *de_attr = de_attrs + i;

		de_attr->attr.attr.name = pt_caps[i].name;

		sysfs_attr_init(&de_attr->attr.attr);

		de_attr->attr.attr.mode		= S_IRUGO;
		de_attr->attr.show		= pt_cap_show;
		de_attr->var			= (void *)i;

		attrs[i] = &de_attr->attr.attr;
	}

	pt_cap_group.attrs = attrs;

	return 0;

fail:
	kfree(attrs);

	return ret;
}

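/*
 * RTIT_CTL bits that userspace is allowed to request via
 * perf_event_attr::config; everything else in the control MSR is set by
 * the driver itself (see pt_config()).
 */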
#define RTIT_CTL_CYC_PSB (RTIT_CTL_CYCLEACC	| \
			  RTIT_CTL_CYC_THRESH	| \
			  RTIT_CTL_PSB_FREQ)

#define RTIT_CTL_MTC	(RTIT_CTL_MTC_EN	| \
			 RTIT_CTL_MTC_RANGE)

#define PT_CONFIG_MASK (RTIT_CTL_TSC_EN		| \
			RTIT_CTL_DISRETC	| \
			RTIT_CTL_CYC_PSB	| \
			RTIT_CTL_MTC)

static bool pt_event_valid(struct perf_event *event)
{
	u64 config = event->attr.config;
	u64 allowed, requested;

	if ((config & PT_CONFIG_MASK) != config)
		return false;

	if (config & RTIT_CTL_CYC_PSB) {
		if (!pt_cap_get(PT_CAP_psb_cyc))
			return false;

		allowed = pt_cap_get(PT_CAP_psb_periods);
		requested = (config & RTIT_CTL_PSB_FREQ) >>
			RTIT_CTL_PSB_FREQ_OFFSET;
		if (requested && (!(allowed & BIT(requested))))
			return false;

		allowed = pt_cap_get(PT_CAP_cycle_thresholds);
		requested = (config & RTIT_CTL_CYC_THRESH) >>
			RTIT_CTL_CYC_THRESH_OFFSET;
		if (requested && (!(allowed & BIT(requested))))
			return false;
	}

	if (config & RTIT_CTL_MTC) {
		/*
		 * In the unlikely case that CPUID lists valid mtc periods,
		 * but not the mtc capability, drop out here.
		 *
		 * Spec says that setting mtc period bits while mtc bit in
		 * CPUID is 0 will #GP, so better safe than sorry.
		 */
		if (!pt_cap_get(PT_CAP_mtc))
			return false;

		allowed = pt_cap_get(PT_CAP_mtc_periods);
		if (!allowed)
			return false;

		requested = (config & RTIT_CTL_MTC_RANGE) >>
			RTIT_CTL_MTC_RANGE_OFFSET;

		if (!(allowed & BIT(requested)))
			return false;
	}

	return true;
}

/*
 * PT configuration helpers
 * These are all CPU-affine and operate on a local PT
 */

static void pt_config(struct perf_event *event)
{
	u64 reg;

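	/*
	 * On the first start of this event, clear RTIT_STATUS so that
	 * stale error/stop indications don't carry over into the new
	 * trace session.
	 */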
	if (!event->hw.itrace_started) {
		event->hw.itrace_started = 1;
		wrmsrl(MSR_IA32_RTIT_STATUS, 0);
	}

	reg = RTIT_CTL_TOPA | RTIT_CTL_BRANCH_EN | RTIT_CTL_TRACEEN;

	if (!event->attr.exclude_kernel)
		reg |= RTIT_CTL_OS;
	if (!event->attr.exclude_user)
		reg |= RTIT_CTL_USR;

	reg |= (event->attr.config & PT_CONFIG_MASK);

	wrmsrl(MSR_IA32_RTIT_CTL, reg);
}

static void pt_config_start(bool start)
{
	u64 ctl;

	rdmsrl(MSR_IA32_RTIT_CTL, ctl);
	if (start)
		ctl |= RTIT_CTL_TRACEEN;
	else
		ctl &= ~RTIT_CTL_TRACEEN;
	wrmsrl(MSR_IA32_RTIT_CTL, ctl);

	/*
	 * A wrmsr that disables trace generation serializes other PT
	 * registers and causes all data packets to be written to memory,
	 * but a fence is required for the data to become globally visible.
	 *
	 * The WMB below, separating the data store and the aux_head store,
	 * matches the consumer's RMB that separates the aux_head load and
	 * the data load.
	 */
	if (!start)
		wmb();
}

static void pt_config_buffer(void *buf, unsigned int topa_idx,
			     unsigned int output_off)
{
	u64 reg;

	wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, virt_to_phys(buf));

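	/*
	 * OUTPUT_MASK_PTRS layout with ToPA (matching pt_read_offset()):
	 * bits 6:0 are all ones, bits 31:7 hold the current ToPA entry
	 * index and bits 63:32 the offset within the current output region.
	 */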
	reg = 0x7f | ((u64)topa_idx << 7) | ((u64)output_off << 32);

	wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg);
}

/*
 * Keep ToPA table-related metadata on the same page as the actual table,
 * taking up a few words from the top
 */

#define TENTS_PER_PAGE (((PAGE_SIZE - 40) / sizeof(struct topa_entry)) - 1)

/**
 * struct topa - page-sized ToPA table with metadata at the top
 * @table:	actual ToPA table entries, as understood by PT hardware
 * @list:	linkage to struct pt_buffer's list of tables
 * @phys:	physical address of this page
 * @offset:	offset of the first entry in this table in the buffer
 * @size:	total size of all entries in this table
 * @last:	index of the last initialized entry in this table
 */
struct topa {
	struct topa_entry	table[TENTS_PER_PAGE];
	struct list_head	list;
	u64			phys;
	u64			offset;
	size_t			size;
	int			last;
};

/* make -1 stand for the last table entry */
#define TOPA_ENTRY(t, i) ((i) == -1 ? &(t)->table[(t)->last] : &(t)->table[(i)])

/**
 * topa_alloc() - allocate page-sized ToPA table
 * @cpu:	CPU on which to allocate.
 * @gfp:	Allocation flags.
 *
 * Return:	On success, the pointer to the ToPA table page; NULL otherwise.
 */
static struct topa *topa_alloc(int cpu, gfp_t gfp)
{
	int node = cpu_to_node(cpu);
	struct topa *topa;
	struct page *p;

	p = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
	if (!p)
		return NULL;

	topa = page_address(p);
	topa->last = 0;
	topa->phys = page_to_phys(p);

	/*
	 * In case of single-entry ToPA, always put the self-referencing END
	 * link as the 2nd entry in the table
	 */
	if (!pt_cap_get(PT_CAP_topa_multiple_entries)) {
		TOPA_ENTRY(topa, 1)->base = topa->phys >> TOPA_SHIFT;
		TOPA_ENTRY(topa, 1)->end = 1;
	}

	return topa;
}

/**
 * topa_free() - free a page-sized ToPA table
 * @topa:	Table to deallocate.
 */
static void topa_free(struct topa *topa)
{
	free_page((unsigned long)topa);
}

/**
 * topa_insert_table() - insert a ToPA table into a buffer
 * @buf:	 PT buffer that's being extended.
 * @topa:	 New topa table to be inserted.
 *
 * If it's the first table in this buffer, set up buffer's pointers
 * accordingly; otherwise, add an END=1 link entry pointing to @topa in the
 * current "last" table and adjust the last table pointer to @topa.
 */
static void topa_insert_table(struct pt_buffer *buf, struct topa *topa)
{
	struct topa *last = buf->last;

	list_add_tail(&topa->list, &buf->tables);

	if (!buf->first) {
		buf->first = buf->last = buf->cur = topa;
		return;
	}

	topa->offset = last->offset + last->size;
	buf->last = topa;

	if (!pt_cap_get(PT_CAP_topa_multiple_entries))
		return;

	BUG_ON(last->last != TENTS_PER_PAGE - 1);

	TOPA_ENTRY(last, -1)->base = topa->phys >> TOPA_SHIFT;
	TOPA_ENTRY(last, -1)->end = 1;
}

/**
 * topa_table_full() - check if a ToPA table is filled up
 * @topa:	ToPA table.
 */
static bool topa_table_full(struct topa *topa)
{
	/* single-entry ToPA is a special case */
	if (!pt_cap_get(PT_CAP_topa_multiple_entries))
		return !!topa->last;

	return topa->last == TENTS_PER_PAGE - 1;
}

/**
 * topa_insert_pages() - create a list of ToPA tables
 * @buf:	PT buffer being initialized.
 * @gfp:	Allocation flags.
 *
 * This initializes a list of ToPA tables with entries from
 * the data_pages provided by rb_alloc_aux().
 *
 * Return:	0 on success or error code.
 */
static int topa_insert_pages(struct pt_buffer *buf, gfp_t gfp)
{
	struct topa *topa = buf->last;
	int order = 0;
	struct page *p;

	p = virt_to_page(buf->data_pages[buf->nr_pages]);
	if (PagePrivate(p))
		order = page_private(p);

	if (topa_table_full(topa)) {
		topa = topa_alloc(buf->cpu, gfp);
		if (!topa)
			return -ENOMEM;

		topa_insert_table(buf, topa);
	}

	TOPA_ENTRY(topa, -1)->base = page_to_phys(p) >> TOPA_SHIFT;
	TOPA_ENTRY(topa, -1)->size = order;
	if (!buf->snapshot && !pt_cap_get(PT_CAP_topa_multiple_entries)) {
		TOPA_ENTRY(topa, -1)->intr = 1;
		TOPA_ENTRY(topa, -1)->stop = 1;
	}

	topa->last++;
	topa->size += sizes(order);

	buf->nr_pages += 1ul << order;

	return 0;
}

/**
 * pt_topa_dump() - print ToPA tables and their entries
 * @buf:	PT buffer.
 */
static void pt_topa_dump(struct pt_buffer *buf)
{
	struct topa *topa;

	list_for_each_entry(topa, &buf->tables, list) {
		int i;

		pr_debug("# table @%p (%016Lx), off %llx size %zx\n", topa->table,
			 topa->phys, topa->offset, topa->size);
		for (i = 0; i < TENTS_PER_PAGE; i++) {
			pr_debug("# entry @%p (%lx sz %u %c%c%c) raw=%16llx\n",
				 &topa->table[i],
				 (unsigned long)topa->table[i].base << TOPA_SHIFT,
				 sizes(topa->table[i].size),
				 topa->table[i].end ?  'E' : ' ',
				 topa->table[i].intr ? 'I' : ' ',
				 topa->table[i].stop ? 'S' : ' ',
				 *(u64 *)&topa->table[i]);
			if ((pt_cap_get(PT_CAP_topa_multiple_entries) &&
			     topa->table[i].stop) ||
			    topa->table[i].end)
				break;
		}
	}
}

/**
 * pt_buffer_advance() - advance to the next output region
 * @buf:	PT buffer.
 *
 * Advance the current pointers in the buffer to the next ToPA entry.
 */
static void pt_buffer_advance(struct pt_buffer *buf)
{
	buf->output_off = 0;
	buf->cur_idx++;

	if (buf->cur_idx == buf->cur->last) {
		if (buf->cur == buf->last)
			buf->cur = buf->first;
		else
			buf->cur = list_entry(buf->cur->list.next, struct topa,
					      list);
		buf->cur_idx = 0;
	}
}

/**
 * pt_update_head() - calculate current offsets and sizes
 * @pt:		Per-cpu pt context.
 *
 * Update buffer's current write pointer position and data size.
 */
static void pt_update_head(struct pt *pt)
{
	struct pt_buffer *buf = perf_get_aux(&pt->handle);
	u64 topa_idx, base, old;

	/*
	 * Start from this table's offset within the buffer, plus the
	 * write offset within the current output region.
	 */
	base = buf->cur->offset + buf->output_off;

	/* add the sizes of all regions preceding the current one */
	for (topa_idx = 0; topa_idx < buf->cur_idx; topa_idx++)
		base += sizes(buf->cur->table[topa_idx].size);

	if (buf->snapshot) {
		local_set(&buf->data_size, base);
	} else {
		old = (local64_xchg(&buf->head, base) &
		       ((buf->nr_pages << PAGE_SHIFT) - 1));
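		/* the write pointer wrapped the buffer; unwrap for the delta */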
		if (base < old)
			base += buf->nr_pages << PAGE_SHIFT;

		local_add(base - old, &buf->data_size);
	}
}

/**
 * pt_buffer_region() - obtain current output region's address
 * @buf:	PT buffer.
 */
static void *pt_buffer_region(struct pt_buffer *buf)
{
	return phys_to_virt(buf->cur->table[buf->cur_idx].base << TOPA_SHIFT);
}

/**
 * pt_buffer_region_size() - obtain current output region's size
 * @buf:	PT buffer.
 */
static size_t pt_buffer_region_size(struct pt_buffer *buf)
{
	return sizes(buf->cur->table[buf->cur_idx].size);
}

/**
 * pt_handle_status() - take care of possible status conditions
 * @pt:		Per-cpu pt context.
 */
static void pt_handle_status(struct pt *pt)
{
	struct pt_buffer *buf = perf_get_aux(&pt->handle);
	int advance = 0;
	u64 status;

	rdmsrl(MSR_IA32_RTIT_STATUS, status);

	if (status & RTIT_STATUS_ERROR) {
		pr_err_ratelimited("ToPA ERROR encountered, trying to recover\n");
		pt_topa_dump(buf);
		status &= ~RTIT_STATUS_ERROR;
	}

	if (status & RTIT_STATUS_STOPPED) {
		status &= ~RTIT_STATUS_STOPPED;

		/*
		 * On systems that only do single-entry ToPA, hitting STOP
		 * means we are already losing data; need to let the decoder
		 * know.
		 */
		if (!pt_cap_get(PT_CAP_topa_multiple_entries) ||
		    buf->output_off == sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) {
			local_inc(&buf->lost);
			advance++;
		}
	}

	/*
	 * Also, on single-entry ToPA implementations, the interrupt will
	 * arrive before the output reaches its region's boundary.
	 */
	if (!pt_cap_get(PT_CAP_topa_multiple_entries) && !buf->snapshot &&
	    pt_buffer_region_size(buf) - buf->output_off <= TOPA_PMI_MARGIN) {
		void *head = pt_buffer_region(buf);

		/* everything within this margin needs to be zeroed out */
		memset(head + buf->output_off, 0,
		       pt_buffer_region_size(buf) -
		       buf->output_off);
		advance++;
	}

	if (advance)
		pt_buffer_advance(buf);

	wrmsrl(MSR_IA32_RTIT_STATUS, status);
}

/**
 * pt_read_offset() - translate registers into buffer pointers
 * @buf:	PT buffer.
 *
 * Set buffer's output pointers from MSR values.
 */
static void pt_read_offset(struct pt_buffer *buf)
{
	u64 offset, base_topa;

	rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, base_topa);
	buf->cur = phys_to_virt(base_topa);

	rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, offset);
	/* offset within current output region */
	buf->output_off = offset >> 32;
	/* index of current output region within this table */
	buf->cur_idx = (offset & 0xffffff80) >> 7;
}

/**
 * pt_topa_next_entry() - obtain index of the first page in the next ToPA entry
 * @buf:	PT buffer.
 * @pg:		Page offset in the buffer.
 *
 * When advancing to the next output region (ToPA entry), given a page offset
 * into the buffer, we need to find the offset of the first page in the next
 * region.
 */
static unsigned int pt_topa_next_entry(struct pt_buffer *buf, unsigned int pg)
{
	struct topa_entry *te = buf->topa_index[pg];

	/* one region */
	if (buf->first == buf->last && buf->first->last == 1)
		return pg;

	do {
		pg++;
		pg &= buf->nr_pages - 1;
	} while (buf->topa_index[pg] == te);

	return pg;
}

/**
 * pt_buffer_reset_markers() - place interrupt and stop bits in the buffer
 * @buf:	PT buffer.
 * @handle:	Current output handle.
 *
 * Place INT and STOP marks to prevent overwriting old data that the consumer
 * hasn't yet collected, and to wake up the consumer after a certain fraction
 * of the buffer has filled up. Only needed and sensible for non-snapshot
 * counters.
 *
 * This obviously relies on buf::head to figure out buffer markers, so it has
 * to be called after pt_buffer_reset_offsets() and before the hardware tracing
 * is enabled.
 */
static int pt_buffer_reset_markers(struct pt_buffer *buf,
				   struct perf_output_handle *handle)
{
	unsigned long head = local64_read(&buf->head);
	unsigned long idx, npages, wakeup;

	/* can't stop in the middle of an output region */
	if (buf->output_off + handle->size + 1 <
	    sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size))
		return -EINVAL;

	/* single-entry ToPA is handled by marking all regions STOP=1 INT=1 */
	if (!pt_cap_get(PT_CAP_topa_multiple_entries))
		return 0;

	/* clear STOP and INT from current entry */
	buf->topa_index[buf->stop_pos]->stop = 0;
	buf->topa_index[buf->stop_pos]->intr = 0;
	buf->topa_index[buf->intr_pos]->intr = 0;

	/* how many pages till the STOP marker */
	npages = handle->size >> PAGE_SHIFT;

	/* if it's on a page boundary, fill up one more page */
	if (!offset_in_page(head + handle->size + 1))
		npages++;

	idx = (head >> PAGE_SHIFT) + npages;
	idx &= buf->nr_pages - 1;
	buf->stop_pos = idx;

	wakeup = handle->wakeup >> PAGE_SHIFT;

	/* in the worst case, wake up the consumer one page before hard stop */
	idx = (head >> PAGE_SHIFT) + npages - 1;
	if (idx > wakeup)
		idx = wakeup;

	idx &= buf->nr_pages - 1;
	buf->intr_pos = idx;

	buf->topa_index[buf->stop_pos]->stop = 1;
	buf->topa_index[buf->stop_pos]->intr = 1;
	buf->topa_index[buf->intr_pos]->intr = 1;

	return 0;
}

/**
 * pt_buffer_setup_topa_index() - build topa_index[] table of regions
 * @buf:	PT buffer.
 *
 * topa_index[] references output regions indexed by offset into the
 * buffer for purposes of quick reverse lookup.
 */
static void pt_buffer_setup_topa_index(struct pt_buffer *buf)
{
	struct topa *cur = buf->first, *prev = buf->last;
	struct topa_entry *te_cur = TOPA_ENTRY(cur, 0),
		*te_prev = TOPA_ENTRY(prev, prev->last - 1);
	int pg = 0, idx = 0;

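	/*
	 * Note: topa_index[pg] is set to the entry that *precedes* the
	 * region containing page @pg (te_prev starts out at the very last
	 * entry). INT/STOP in a ToPA entry take effect when that entry's
	 * region fills up, so pt_buffer_reset_markers() sets them on the
	 * preceding entry to trigger right before the marked page would
	 * be overwritten.
	 */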
	while (pg < buf->nr_pages) {
		int tidx;

		/* pages within one topa entry */
		for (tidx = 0; tidx < 1 << te_cur->size; tidx++, pg++)
			buf->topa_index[pg] = te_prev;

		te_prev = te_cur;

		if (idx == cur->last - 1) {
			/* advance to next topa table */
			idx = 0;
			cur = list_entry(cur->list.next, struct topa, list);
		} else {
			idx++;
		}
		te_cur = TOPA_ENTRY(cur, idx);
	}
}

/**
 * pt_buffer_reset_offsets() - adjust buffer's write pointers from aux_head
 * @buf:	PT buffer.
 * @head:	Write pointer (aux_head) from AUX buffer.
 *
 * Find the ToPA table and entry corresponding to given @head and set buffer's
 * "current" pointers accordingly. This is done after we have obtained the
 * current aux_head position from a successful call to perf_aux_output_begin()
 * to make sure the hardware is writing to the right place.
 *
 * This function modifies buf::{cur,cur_idx,output_off}, which will be
 * programmed into the PT MSRs when tracing is enabled, as well as buf::head
 * and buf::data_size, which are used to determine the INT and STOP markers'
 * locations by a subsequent call to pt_buffer_reset_markers().
 */
static void pt_buffer_reset_offsets(struct pt_buffer *buf, unsigned long head)
{
	int pg;

	if (buf->snapshot)
		head &= (buf->nr_pages << PAGE_SHIFT) - 1;

	pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1);
	pg = pt_topa_next_entry(buf, pg);

	buf->cur = (struct topa *)((unsigned long)buf->topa_index[pg] & PAGE_MASK);
	buf->cur_idx = ((unsigned long)buf->topa_index[pg] -
			(unsigned long)buf->cur) / sizeof(struct topa_entry);
	buf->output_off = head & (sizes(buf->cur->table[buf->cur_idx].size) - 1);

	local64_set(&buf->head, head);
	local_set(&buf->data_size, 0);
}

/**
 * pt_buffer_fini_topa() - deallocate ToPA structure of a buffer
 * @buf:	PT buffer.
 */
static void pt_buffer_fini_topa(struct pt_buffer *buf)
{
	struct topa *topa, *iter;

	list_for_each_entry_safe(topa, iter, &buf->tables, list) {
		/*
		 * right now, this is in free_aux() path only, so
		 * no need to unlink this table from the list
		 */
		topa_free(topa);
	}
}

/**
 * pt_buffer_init_topa() - initialize ToPA table for pt buffer
 * @buf:	PT buffer.
 * @nr_pages:	Number of pages in the buffer.
 * @gfp:	Allocation flags.
 */
static int pt_buffer_init_topa(struct pt_buffer *buf, unsigned long nr_pages,
			       gfp_t gfp)
{
	struct topa *topa;
	int err;

	topa = topa_alloc(buf->cpu, gfp);
	if (!topa)
		return -ENOMEM;

	topa_insert_table(buf, topa);

	while (buf->nr_pages < nr_pages) {
		err = topa_insert_pages(buf, gfp);
		if (err) {
			pt_buffer_fini_topa(buf);
			return -ENOMEM;
		}
	}

	pt_buffer_setup_topa_index(buf);

	/* link last table to the first one, unless we're double buffering */
	if (pt_cap_get(PT_CAP_topa_multiple_entries)) {
		TOPA_ENTRY(buf->last, -1)->base = buf->first->phys >> TOPA_SHIFT;
		TOPA_ENTRY(buf->last, -1)->end = 1;
	}

	pt_topa_dump(buf);
	return 0;
}

/**
 * pt_buffer_setup_aux() - set up topa tables for a PT buffer
 * @cpu:	CPU on which to allocate, -1 means current.
 * @pages:	Array of pointers to buffer pages passed from perf core.
 * @nr_pages:	Number of pages in the buffer.
 * @snapshot:	If this is a snapshot/overwrite counter.
 *
 * This is a pmu::setup_aux callback that sets up ToPA tables and all the
 * bookkeeping for an AUX buffer.
 *
 * Return:	Our private PT buffer structure.
 */
static void *
pt_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool snapshot)
{
	struct pt_buffer *buf;
	int node, ret;

	if (!nr_pages)
		return NULL;

	if (cpu == -1)
		cpu = raw_smp_processor_id();
	node = cpu_to_node(cpu);

	buf = kzalloc_node(offsetof(struct pt_buffer, topa_index[nr_pages]),
			   GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->cpu = cpu;
	buf->snapshot = snapshot;
	buf->data_pages = pages;

	INIT_LIST_HEAD(&buf->tables);

	ret = pt_buffer_init_topa(buf, nr_pages, GFP_KERNEL);
	if (ret) {
		kfree(buf);
		return NULL;
	}

	return buf;
}

/**
 * pt_buffer_free_aux() - perf AUX deallocation path callback
 * @data:	PT buffer.
 */
static void pt_buffer_free_aux(void *data)
{
	struct pt_buffer *buf = data;

	pt_buffer_fini_topa(buf);
	kfree(buf);
}

/**
 * pt_buffer_is_full() - check if the buffer is full
 * @buf:	PT buffer.
 * @pt:		Per-cpu pt handle.
 *
 * If the user hasn't read data from the output region that aux_head
 * points to, the buffer is considered full: the user needs to read at
 * least this region and update aux_tail to point past it.
 */
static bool pt_buffer_is_full(struct pt_buffer *buf, struct pt *pt)
{
	if (buf->snapshot)
		return false;

	if (local_read(&buf->data_size) >= pt->handle.size)
		return true;

	return false;
}

/**
 * intel_pt_interrupt() - PT PMI handler
 */
void intel_pt_interrupt(void)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct pt_buffer *buf;
	struct perf_event *event = pt->handle.event;

	/*
	 * There may be a dangling PT bit in the interrupt status register
	 * after PT has been disabled by pt_event_stop(). Make sure we don't
	 * do anything (particularly, re-enable) for this event here.
	 */
	if (!ACCESS_ONCE(pt->handle_nmi))
		return;

	pt_config_start(false);

	if (!event)
		return;

	buf = perf_get_aux(&pt->handle);
	if (!buf)
		return;

	pt_read_offset(buf);

	pt_handle_status(pt);

	pt_update_head(pt);

	perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0),
			    local_xchg(&buf->lost, 0));

	if (!event->hw.state) {
		int ret;

		buf = perf_aux_output_begin(&pt->handle, event);
		if (!buf) {
			event->hw.state = PERF_HES_STOPPED;
			return;
		}

		pt_buffer_reset_offsets(buf, pt->handle.head);
		/* snapshot counters don't use PMI, so it's safe */
		ret = pt_buffer_reset_markers(buf, &pt->handle);
		if (ret) {
			perf_aux_output_end(&pt->handle, 0, true);
			return;
		}

		pt_config_buffer(buf->cur->table, buf->cur_idx,
				 buf->output_off);
		pt_config(event);
	}
}

/*
 * PMU callbacks
 */

static void pt_event_start(struct perf_event *event, int mode)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct pt_buffer *buf = perf_get_aux(&pt->handle);

	if (!buf || pt_buffer_is_full(buf, pt)) {
		event->hw.state = PERF_HES_STOPPED;
		return;
	}

	ACCESS_ONCE(pt->handle_nmi) = 1;
	event->hw.state = 0;

	pt_config_buffer(buf->cur->table, buf->cur_idx,
			 buf->output_off);
	pt_config(event);
}

static void pt_event_stop(struct perf_event *event, int mode)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);

	/*
	 * Protect against the PMI racing with the wrmsr that disables
	 * tracing; see the comment in intel_pt_interrupt().
	 */
	ACCESS_ONCE(pt->handle_nmi) = 0;
	pt_config_start(false);

	if (event->hw.state == PERF_HES_STOPPED)
		return;

	event->hw.state = PERF_HES_STOPPED;

	if (mode & PERF_EF_UPDATE) {
		struct pt_buffer *buf = perf_get_aux(&pt->handle);

		if (!buf)
			return;

		if (WARN_ON_ONCE(pt->handle.event != event))
			return;

		pt_read_offset(buf);

		pt_handle_status(pt);

		pt_update_head(pt);
	}
}

static void pt_event_del(struct perf_event *event, int mode)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct pt_buffer *buf;

	pt_event_stop(event, PERF_EF_UPDATE);

	buf = perf_get_aux(&pt->handle);

	if (buf) {
		if (buf->snapshot)
			pt->handle.head =
				local_xchg(&buf->data_size,
					   buf->nr_pages << PAGE_SHIFT);
		perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0),
				    local_xchg(&buf->lost, 0));
	}
}

static int pt_event_add(struct perf_event *event, int mode)
{
	struct pt_buffer *buf;
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct hw_perf_event *hwc = &event->hw;
	int ret = -EBUSY;

	if (pt->handle.event)
		goto fail;

	buf = perf_aux_output_begin(&pt->handle, event);
	ret = -EINVAL;
	if (!buf)
		goto fail_stop;

	pt_buffer_reset_offsets(buf, pt->handle.head);
	if (!buf->snapshot) {
		ret = pt_buffer_reset_markers(buf, &pt->handle);
		if (ret)
			goto fail_end_stop;
	}

	if (mode & PERF_EF_START) {
		pt_event_start(event, 0);
		ret = -EBUSY;
		if (hwc->state == PERF_HES_STOPPED)
			goto fail_end_stop;
	} else {
		hwc->state = PERF_HES_STOPPED;
	}

	return 0;

fail_end_stop:
	perf_aux_output_end(&pt->handle, 0, true);
fail_stop:
	hwc->state = PERF_HES_STOPPED;
fail:
	return ret;
}

static void pt_event_read(struct perf_event *event)
{
}

static void pt_event_destroy(struct perf_event *event)
{
	x86_del_exclusive(x86_lbr_exclusive_pt);
}

static int pt_event_init(struct perf_event *event)
{
	if (event->attr.type != pt_pmu.pmu.type)
		return -ENOENT;

	if (!pt_event_valid(event))
		return -EINVAL;

	if (x86_add_exclusive(x86_lbr_exclusive_pt))
		return -EBUSY;

	event->destroy = pt_event_destroy;

	return 0;
}

static __init int pt_init(void)
{
	int ret, cpu, prior_warn = 0;

	BUILD_BUG_ON(sizeof(struct topa) > PAGE_SIZE);

	if (!test_cpu_cap(&boot_cpu_data, X86_FEATURE_INTEL_PT))
		return -ENODEV;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		u64 ctl;

		ret = rdmsrl_safe_on_cpu(cpu, MSR_IA32_RTIT_CTL, &ctl);
		if (!ret && (ctl & RTIT_CTL_TRACEEN))
			prior_warn++;
	}
	put_online_cpus();

	if (prior_warn) {
		x86_add_exclusive(x86_lbr_exclusive_pt);
		pr_warn("PT is enabled at boot time, doing nothing\n");

		return -EBUSY;
	}

	ret = pt_pmu_hw_init();
	if (ret)
		return ret;

	if (!pt_cap_get(PT_CAP_topa_output)) {
		pr_warn("ToPA output is not supported on this CPU\n");
		return -ENODEV;
	}

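	/*
	 * Without multi-entry ToPA support the trace buffer has to be one
	 * contiguous high-order allocation (no scatter-gather), and the
	 * perf core compensates with software double-buffering of the
	 * AUX area.
	 */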
	if (!pt_cap_get(PT_CAP_topa_multiple_entries))
		pt_pmu.pmu.capabilities =
			PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_AUX_SW_DOUBLEBUF;

	pt_pmu.pmu.capabilities	|= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE;
	pt_pmu.pmu.attr_groups	= pt_attr_groups;
	pt_pmu.pmu.task_ctx_nr	= perf_sw_context;
	pt_pmu.pmu.event_init	= pt_event_init;
	pt_pmu.pmu.add		= pt_event_add;
	pt_pmu.pmu.del		= pt_event_del;
	pt_pmu.pmu.start	= pt_event_start;
	pt_pmu.pmu.stop		= pt_event_stop;
	pt_pmu.pmu.read		= pt_event_read;
	pt_pmu.pmu.setup_aux	= pt_buffer_setup_aux;
	pt_pmu.pmu.free_aux	= pt_buffer_free_aux;
	ret = perf_pmu_register(&pt_pmu.pmu, "intel_pt", -1);

	return ret;
}
arch_initcall(pt_init);