x86_pmu           315 arch/x86/events/amd/core.c 	if (!(x86_pmu.flags & PMU_FL_PAIR))
x86_pmu           389 arch/x86/events/amd/core.c 	for (i = 0; i < x86_pmu.num_counters; i++) {
x86_pmu           456 arch/x86/events/amd/core.c 	for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
x86_pmu           499 arch/x86/events/amd/core.c 	for (i = 0; i < x86_pmu.num_counters; i++) {
x86_pmu           512 arch/x86/events/amd/core.c 	if (!x86_pmu.amd_nb_constraints)
x86_pmu           531 arch/x86/events/amd/core.c 	if (!x86_pmu.amd_nb_constraints)
x86_pmu           557 arch/x86/events/amd/core.c 	if (!x86_pmu.amd_nb_constraints)
x86_pmu           593 arch/x86/events/amd/core.c 		if (counter & (1ULL << (x86_pmu.cntval_bits - 1)))
x86_pmu           622 arch/x86/events/amd/core.c 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
x86_pmu           901 arch/x86/events/amd/core.c static __initconst const struct x86_pmu amd_pmu = {
x86_pmu           950 arch/x86/events/amd/core.c 	x86_pmu.eventsel	= MSR_F15H_PERF_CTL;
x86_pmu           951 arch/x86/events/amd/core.c 	x86_pmu.perfctr		= MSR_F15H_PERF_CTR;
x86_pmu           952 arch/x86/events/amd/core.c 	x86_pmu.num_counters	= AMD64_NUM_COUNTERS_CORE;
x86_pmu           957 arch/x86/events/amd/core.c 	x86_pmu.amd_nb_constraints = 0;
x86_pmu           961 arch/x86/events/amd/core.c 		x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
x86_pmu           971 arch/x86/events/amd/core.c 		for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
x86_pmu           976 arch/x86/events/amd/core.c 				    x86_pmu.num_counters / 2, 0,
x86_pmu           979 arch/x86/events/amd/core.c 		x86_pmu.get_event_constraints = amd_get_event_constraints_f17h;
x86_pmu           980 arch/x86/events/amd/core.c 		x86_pmu.flags |= PMU_FL_PAIR;
x86_pmu           995 arch/x86/events/amd/core.c 	x86_pmu = amd_pmu;
x86_pmu          1006 arch/x86/events/amd/core.c 		x86_pmu.amd_nb_constraints = 0;
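
The amd/core.c hits above show the driver's template-then-patch idiom: amd_pmu_init() copies the read-only amd_pmu template into the writable global (line 995) and then overrides individual fields per family (lines 950-1006). Below is a minimal standalone sketch of the same pattern; struct pmu_ops, amd_template and the field names are illustrative stand-ins, not the kernel's types.

#include <stdio.h>

#define PMU_FL_PAIR 0x1

/* illustrative stand-in for struct x86_pmu: data plus callbacks */
struct pmu_ops {
	const char	*name;
	int		num_counters;
	unsigned int	flags;
	int		(*get_event_constraints)(int event);
};

static int generic_constraints(int event) { (void)event; return 0; }
static int f17h_constraints(int event)    { return event & 1; }

/* read-only template, analogous to the __initconst amd_pmu */
static const struct pmu_ops amd_template = {
	.name			= "amd",
	.num_counters		= 4,
	.get_event_constraints	= generic_constraints,
};

static struct pmu_ops pmu;	/* the one writable global, like x86_pmu */

static void pmu_init(int family)
{
	pmu = amd_template;		/* cf. "x86_pmu = amd_pmu" (line 995) */
	if (family >= 0x17) {		/* then patch family-specific quirks */
		pmu.num_counters = 6;
		pmu.flags |= PMU_FL_PAIR;
		pmu.get_event_constraints = f17h_constraints;
	}
}

int main(void)
{
	pmu_init(0x17);
	printf("%s: %d counters, paired=%d\n", pmu.name, pmu.num_counters,
	       !!(pmu.flags & PMU_FL_PAIR));
	return 0;
}
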
x86_pmu            46 arch/x86/events/core.c struct x86_pmu x86_pmu __read_mostly;
x86_pmu            71 arch/x86/events/core.c 	int shift = 64 - x86_pmu.cntval_bits;
x86_pmu           121 arch/x86/events/core.c 	if (!x86_pmu.extra_regs)
x86_pmu           124 arch/x86/events/core.c 	for (er = x86_pmu.extra_regs; er->msr; er++) {
x86_pmu           151 arch/x86/events/core.c 	for (i = 0; i < x86_pmu.num_counters; i++) {
x86_pmu           156 arch/x86/events/core.c 	for (i = 0; i < x86_pmu.num_counters; i++) {
x86_pmu           167 arch/x86/events/core.c 	i = x86_pmu.num_counters;
x86_pmu           180 arch/x86/events/core.c 	for (i = 0; i < x86_pmu.num_counters; i++) {
x86_pmu           204 arch/x86/events/core.c 	for (i = 0; i < x86_pmu.num_counters; i++) {
x86_pmu           218 arch/x86/events/core.c 	if (x86_pmu.num_counters_fixed) {
x86_pmu           223 arch/x86/events/core.c 		for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
x86_pmu           296 arch/x86/events/core.c 	return x86_pmu.handle_irq != NULL;
x86_pmu           377 arch/x86/events/core.c 	if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
x86_pmu           380 arch/x86/events/core.c 	if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
x86_pmu           382 arch/x86/events/core.c 		for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
x86_pmu           383 arch/x86/events/core.c 			if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
x86_pmu           386 arch/x86/events/core.c 		atomic_inc(&x86_pmu.lbr_exclusive[what]);
x86_pmu           406 arch/x86/events/core.c 	if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
x86_pmu           409 arch/x86/events/core.c 	atomic_dec(&x86_pmu.lbr_exclusive[what]);
x86_pmu           419 arch/x86/events/core.c 		hwc->sample_period = x86_pmu.max_period;
x86_pmu           430 arch/x86/events/core.c 	if (attr->config >= x86_pmu.max_events)
x86_pmu           433 arch/x86/events/core.c 	attr->config = array_index_nospec((unsigned long)attr->config, x86_pmu.max_events);
x86_pmu           438 arch/x86/events/core.c 	config = x86_pmu.event_map(attr->config);
x86_pmu           486 arch/x86/events/core.c 	if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
x86_pmu           490 arch/x86/events/core.c 		if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2)
x86_pmu           493 arch/x86/events/core.c 		if (x86_pmu.pebs_prec_dist)
x86_pmu           515 arch/x86/events/core.c 	if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format < 2) {
x86_pmu           562 arch/x86/events/core.c 	if (event->attr.sample_period && x86_pmu.limit_period) {
x86_pmu           563 arch/x86/events/core.c 		if (x86_pmu.limit_period(event, event->attr.sample_period) >
x86_pmu           611 arch/x86/events/core.c 	return x86_pmu.hw_config(event);
x86_pmu           619 arch/x86/events/core.c 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
x86_pmu           659 arch/x86/events/core.c 	x86_pmu.disable_all();
x86_pmu           667 arch/x86/events/core.c 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
x86_pmu           892 arch/x86/events/core.c 	if (x86_pmu.start_scheduling)
x86_pmu           893 arch/x86/events/core.c 		x86_pmu.start_scheduling(cpuc);
x86_pmu           910 arch/x86/events/core.c 			c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
x86_pmu           944 arch/x86/events/core.c 		int gpmax = x86_pmu.num_counters;
x86_pmu           977 arch/x86/events/core.c 			if (x86_pmu.commit_scheduling)
x86_pmu           978 arch/x86/events/core.c 				x86_pmu.commit_scheduling(cpuc, i, assign[i]);
x86_pmu           987 arch/x86/events/core.c 			if (x86_pmu.put_event_constraints)
x86_pmu           988 arch/x86/events/core.c 				x86_pmu.put_event_constraints(cpuc, e);
x86_pmu           994 arch/x86/events/core.c 	if (x86_pmu.stop_scheduling)
x86_pmu           995 arch/x86/events/core.c 		x86_pmu.stop_scheduling(cpuc);
x86_pmu          1009 arch/x86/events/core.c 	max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;
x86_pmu          1182 arch/x86/events/core.c 	x86_pmu.enable_all(added);
x86_pmu          1223 arch/x86/events/core.c 	if (left > x86_pmu.max_period)
x86_pmu          1224 arch/x86/events/core.c 		left = x86_pmu.max_period;
x86_pmu          1226 arch/x86/events/core.c 	if (x86_pmu.limit_period)
x86_pmu          1227 arch/x86/events/core.c 		left = x86_pmu.limit_period(event, left);
x86_pmu          1237 arch/x86/events/core.c 	wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
x86_pmu          1244 arch/x86/events/core.c 	if (x86_pmu.perfctr_second_write) {
x86_pmu          1246 arch/x86/events/core.c 			(u64)(-left) & x86_pmu.cntval_mask);
x86_pmu          1296 arch/x86/events/core.c 	ret = x86_pmu.schedule_events(cpuc, n, assign);
x86_pmu          1314 arch/x86/events/core.c 	if (x86_pmu.add) {
x86_pmu          1319 arch/x86/events/core.c 		x86_pmu.add(event);
x86_pmu          1348 arch/x86/events/core.c 	x86_pmu.enable(event);
x86_pmu          1360 arch/x86/events/core.c 	if (!x86_pmu.num_counters)
x86_pmu          1368 arch/x86/events/core.c 	if (x86_pmu.version >= 2) {
x86_pmu          1379 arch/x86/events/core.c 		if (x86_pmu.pebs_constraints) {
x86_pmu          1383 arch/x86/events/core.c 		if (x86_pmu.lbr_nr) {
x86_pmu          1390 arch/x86/events/core.c 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
x86_pmu          1403 arch/x86/events/core.c 	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
x86_pmu          1418 arch/x86/events/core.c 		x86_pmu.disable(event);
x86_pmu          1468 arch/x86/events/core.c 	if (x86_pmu.put_event_constraints)
x86_pmu          1469 arch/x86/events/core.c 		x86_pmu.put_event_constraints(cpuc, event);
x86_pmu          1482 arch/x86/events/core.c 	if (x86_pmu.del) {
x86_pmu          1487 arch/x86/events/core.c 		x86_pmu.del(event);
x86_pmu          1511 arch/x86/events/core.c 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
x86_pmu          1518 arch/x86/events/core.c 		if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
x86_pmu          1542 arch/x86/events/core.c 	if (!x86_pmu.apic || !x86_pmu_initialized())
x86_pmu          1566 arch/x86/events/core.c 	ret = x86_pmu.handle_irq(regs);
x86_pmu          1585 arch/x86/events/core.c 	if (x86_pmu.cpu_prepare)
x86_pmu          1586 arch/x86/events/core.c 		return x86_pmu.cpu_prepare(cpu);
x86_pmu          1592 arch/x86/events/core.c 	if (x86_pmu.cpu_dead)
x86_pmu          1593 arch/x86/events/core.c 		x86_pmu.cpu_dead(cpu);
x86_pmu          1611 arch/x86/events/core.c 	if (x86_pmu.cpu_starting)
x86_pmu          1612 arch/x86/events/core.c 		x86_pmu.cpu_starting(cpu);
x86_pmu          1618 arch/x86/events/core.c 	if (x86_pmu.cpu_dying)
x86_pmu          1619 arch/x86/events/core.c 		x86_pmu.cpu_dying(cpu);
x86_pmu          1628 arch/x86/events/core.c 	x86_pmu.apic = 0;
x86_pmu          1653 arch/x86/events/core.c 	if (pmu_attr->id < x86_pmu.max_events)
x86_pmu          1654 arch/x86/events/core.c 		config = x86_pmu.event_map(pmu_attr->id);
x86_pmu          1660 arch/x86/events/core.c 	return x86_pmu.events_sysfs_show(page, config);
x86_pmu          1723 arch/x86/events/core.c 	if (idx >= x86_pmu.max_events)
x86_pmu          1728 arch/x86/events/core.c 	return pmu_attr->event_str || x86_pmu.event_map(idx) ? attr->mode : 0;
x86_pmu          1795 arch/x86/events/core.c 		x86_pmu.name = "HYGON";
x86_pmu          1811 arch/x86/events/core.c 	pr_cont("%s PMU driver.\n", x86_pmu.name);
x86_pmu          1813 arch/x86/events/core.c 	x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
x86_pmu          1815 arch/x86/events/core.c 	for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
x86_pmu          1818 arch/x86/events/core.c 	if (!x86_pmu.intel_ctrl)
x86_pmu          1819 arch/x86/events/core.c 		x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
x86_pmu          1825 arch/x86/events/core.c 		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
x86_pmu          1826 arch/x86/events/core.c 				   0, x86_pmu.num_counters, 0, 0);
x86_pmu          1828 arch/x86/events/core.c 	x86_pmu_format_group.attrs = x86_pmu.format_attrs;
x86_pmu          1830 arch/x86/events/core.c 	if (!x86_pmu.events_sysfs_show)
x86_pmu          1833 arch/x86/events/core.c 	pmu.attr_update = x86_pmu.attr_update;
x86_pmu          1835 arch/x86/events/core.c 	pr_info("... version:                %d\n",     x86_pmu.version);
x86_pmu          1836 arch/x86/events/core.c 	pr_info("... bit width:              %d\n",     x86_pmu.cntval_bits);
x86_pmu          1837 arch/x86/events/core.c 	pr_info("... generic registers:      %d\n",     x86_pmu.num_counters);
x86_pmu          1838 arch/x86/events/core.c 	pr_info("... value mask:             %016Lx\n", x86_pmu.cntval_mask);
x86_pmu          1839 arch/x86/events/core.c 	pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
x86_pmu          1840 arch/x86/events/core.c 	pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_counters_fixed);
x86_pmu          1841 arch/x86/events/core.c 	pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);
x86_pmu          1881 arch/x86/events/core.c 	if (x86_pmu.read)
x86_pmu          1882 arch/x86/events/core.c 		return x86_pmu.read(event);
x86_pmu          1960 arch/x86/events/core.c 	ret = x86_pmu.schedule_events(cpuc, n, assign);
x86_pmu          2020 arch/x86/events/core.c 	c = x86_pmu.get_event_constraints(fake_cpuc, 0, event);
x86_pmu          2025 arch/x86/events/core.c 	if (x86_pmu.put_event_constraints)
x86_pmu          2026 arch/x86/events/core.c 		x86_pmu.put_event_constraints(fake_cpuc, event);
x86_pmu          2069 arch/x86/events/core.c 	ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
x86_pmu          2113 arch/x86/events/core.c 	if (READ_ONCE(x86_pmu.attr_rdpmc) &&
x86_pmu          2163 arch/x86/events/core.c 	if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) {
x86_pmu          2175 arch/x86/events/core.c 	return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc);
x86_pmu          2192 arch/x86/events/core.c 	if (x86_pmu.attr_rdpmc_broken)
x86_pmu          2195 arch/x86/events/core.c 	if ((val == 2) != (x86_pmu.attr_rdpmc == 2)) {
x86_pmu          2208 arch/x86/events/core.c 	x86_pmu.attr_rdpmc = val;
x86_pmu          2253 arch/x86/events/core.c 	if (x86_pmu.sched_task)
x86_pmu          2254 arch/x86/events/core.c 		x86_pmu.sched_task(ctx, sched_in);
x86_pmu          2259 arch/x86/events/core.c 	if (x86_pmu.check_microcode)
x86_pmu          2260 arch/x86/events/core.c 		x86_pmu.check_microcode();
x86_pmu          2265 arch/x86/events/core.c 	if (x86_pmu.check_period && x86_pmu.check_period(event, value))
x86_pmu          2268 arch/x86/events/core.c 	if (value && x86_pmu.limit_period) {
x86_pmu          2269 arch/x86/events/core.c 		if (x86_pmu.limit_period(event, value) > value)
x86_pmu          2281 arch/x86/events/core.c 	if (x86_pmu.aux_output_match)
x86_pmu          2282 arch/x86/events/core.c 		return x86_pmu.aux_output_match(event);
x86_pmu          2326 arch/x86/events/core.c 	userpg->pmc_width = x86_pmu.cntval_bits;
x86_pmu          2594 arch/x86/events/core.c 	cap->version		= x86_pmu.version;
x86_pmu          2595 arch/x86/events/core.c 	cap->num_counters_gp	= x86_pmu.num_counters;
x86_pmu          2596 arch/x86/events/core.c 	cap->num_counters_fixed	= x86_pmu.num_counters_fixed;
x86_pmu          2597 arch/x86/events/core.c 	cap->bit_width_gp	= x86_pmu.cntval_bits;
x86_pmu          2598 arch/x86/events/core.c 	cap->bit_width_fixed	= x86_pmu.cntval_bits;
x86_pmu          2599 arch/x86/events/core.c 	cap->events_mask	= (unsigned int)x86_pmu.events_maskl;
x86_pmu          2600 arch/x86/events/core.c 	cap->events_mask_len	= x86_pmu.events_mask_len;
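
Lines 377-409 of core.c implement a reservation scheme for mutually exclusive LBR users (LBR, BTS, PT): a class may grab the facility only if every other class's refcount is zero, with atomic_inc_not_zero() as the uncontended fast path. The sketch below models that with C11 atomics and hypothetical names; the kernel additionally serializes the slow path with a mutex and special-cases lbr_pt_coexist (lines 377 and 406), both elided here.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { EXCL_LBR, EXCL_BTS, EXCL_PT, EXCL_MAX };	/* hypothetical classes */

static atomic_int excl[EXCL_MAX];

static bool excl_get(int what)
{
	int i;
	int cur = atomic_load(&excl[what]);

	/* fast path, cf. atomic_inc_not_zero(): class already active */
	while (cur > 0)
		if (atomic_compare_exchange_weak(&excl[what], &cur, cur + 1))
			return true;

	/* slow path: grant only if every other class is idle */
	for (i = 0; i < EXCL_MAX; i++)
		if (i != what && atomic_load(&excl[i]))
			return false;
	atomic_fetch_add(&excl[what], 1);
	return true;
}

static void excl_put(int what)
{
	atomic_fetch_sub(&excl[what], 1);
}

int main(void)
{
	printf("lbr granted: %d\n", excl_get(EXCL_LBR));	/* 1 */
	printf("bts granted: %d\n", excl_get(EXCL_BTS));	/* 0, lbr held */
	excl_put(EXCL_LBR);
	return 0;
}
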
x86_pmu           582 arch/x86/events/intel/bts.c 	if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts)
x86_pmu          1974 arch/x86/events/intel/core.c 			x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
x86_pmu          2214 arch/x86/events/intel/core.c 	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
x86_pmu          2220 arch/x86/events/intel/core.c 	if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {
x86_pmu          2298 arch/x86/events/intel/core.c 	if (!x86_pmu.num_counters)
x86_pmu          2305 arch/x86/events/intel/core.c 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
x86_pmu          2309 arch/x86/events/intel/core.c 	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
x86_pmu          2316 arch/x86/events/intel/core.c 	if (x86_pmu.version >= 2) {
x86_pmu          2322 arch/x86/events/intel/core.c 	if (x86_pmu.lbr_nr) {
x86_pmu          2368 arch/x86/events/intel/core.c 	if (x86_pmu.flags & PMU_FL_PEBS_ALL)
x86_pmu          2378 arch/x86/events/intel/core.c 		x86_pmu.drain_pebs(regs);
x86_pmu          2379 arch/x86/events/intel/core.c 		status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
x86_pmu          2536 arch/x86/events/intel/core.c 	if (!x86_pmu.late_ack)
x86_pmu          2584 arch/x86/events/intel/core.c 	if (x86_pmu.late_ack)
x86_pmu          2602 arch/x86/events/intel/core.c 	if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
x86_pmu          2611 arch/x86/events/intel/core.c 	if (config & ~x86_pmu.extra_regs[alt_idx].valid_mask)
x86_pmu          2623 arch/x86/events/intel/core.c 		event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event;
x86_pmu          2627 arch/x86/events/intel/core.c 		event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event;
x86_pmu          2770 arch/x86/events/intel/core.c 	if (x86_pmu.event_constraints) {
x86_pmu          2771 arch/x86/events/intel/core.c 		for_each_event_constraint(c, x86_pmu.event_constraints) {
x86_pmu          3212 arch/x86/events/intel/core.c 	unsigned long flags = x86_pmu.large_pebs_flags;
x86_pmu          3229 arch/x86/events/intel/core.c 		if (!x86_pmu.bts_active)
x86_pmu          3278 arch/x86/events/intel/core.c 		if (x86_pmu.pebs_aliases)
x86_pmu          3279 arch/x86/events/intel/core.c 			x86_pmu.pebs_aliases(event);
x86_pmu          3315 arch/x86/events/intel/core.c 	if (x86_pmu.version < 3)
x86_pmu          3328 arch/x86/events/intel/core.c 	if (x86_pmu.guest_get_msrs)
x86_pmu          3329 arch/x86/events/intel/core.c 		return x86_pmu.guest_get_msrs(nr);
x86_pmu          3341 arch/x86/events/intel/core.c 	arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
x86_pmu          3342 arch/x86/events/intel/core.c 	arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
x86_pmu          3343 arch/x86/events/intel/core.c 	if (x86_pmu.flags & PMU_FL_PEBS_ALL)
x86_pmu          3349 arch/x86/events/intel/core.c 	if (x86_pmu.pebs && x86_pmu.pebs_no_isolation) {
x86_pmu          3373 arch/x86/events/intel/core.c 	for (idx = 0; idx < x86_pmu.num_counters; idx++)  {
x86_pmu          3391 arch/x86/events/intel/core.c 	*nr = x86_pmu.num_counters;
x86_pmu          3406 arch/x86/events/intel/core.c 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
x86_pmu          3650 arch/x86/events/intel/core.c 	cpuc->pebs_record_size = x86_pmu.pebs_record_size;
x86_pmu          3652 arch/x86/events/intel/core.c 	if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
x86_pmu          3658 arch/x86/events/intel/core.c 	if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
x86_pmu          3666 arch/x86/events/intel/core.c 	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
x86_pmu          3720 arch/x86/events/intel/core.c 	if (x86_pmu.flags & PMU_FL_TFA) {
x86_pmu          3726 arch/x86/events/intel/core.c 	if (x86_pmu.version > 1)
x86_pmu          3727 arch/x86/events/intel/core.c 		flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
x86_pmu          3729 arch/x86/events/intel/core.c 	if (x86_pmu.counter_freezing)
x86_pmu          3735 arch/x86/events/intel/core.c 	if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
x86_pmu          3750 arch/x86/events/intel/core.c 	if (x86_pmu.lbr_sel_map)
x86_pmu          3753 arch/x86/events/intel/core.c 	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
x86_pmu          3792 arch/x86/events/intel/core.c 	if (x86_pmu.counter_freezing)
x86_pmu          3829 arch/x86/events/intel/core.c 	if (!x86_pmu.intel_cap.pebs_output_pt_available)
x86_pmu          3876 arch/x86/events/intel/core.c static __initconst const struct x86_pmu core_pmu = {
x86_pmu          3919 arch/x86/events/intel/core.c static __initconst const struct x86_pmu intel_pmu = {
x86_pmu          3985 arch/x86/events/intel/core.c 	x86_pmu.pebs = 0;
x86_pmu          3986 arch/x86/events/intel/core.c 	x86_pmu.pebs_constraints = NULL;
x86_pmu          4020 arch/x86/events/intel/core.c 	x86_pmu.pebs_no_isolation = !x86_cpu_has_min_microcode_rev(isolation_ucodes);
x86_pmu          4025 arch/x86/events/intel/core.c 	WARN_ON_ONCE(x86_pmu.check_microcode);
x86_pmu          4026 arch/x86/events/intel/core.c 	x86_pmu.check_microcode = intel_check_pebs_isolation;
x86_pmu          4044 arch/x86/events/intel/core.c 	if (intel_snb_pebs_broken() == x86_pmu.pebs_broken)
x86_pmu          4050 arch/x86/events/intel/core.c 	if (x86_pmu.pebs_broken) {
x86_pmu          4052 arch/x86/events/intel/core.c 		x86_pmu.pebs_broken = 0;
x86_pmu          4055 arch/x86/events/intel/core.c 		x86_pmu.pebs_broken = 1;
x86_pmu          4061 arch/x86/events/intel/core.c 	unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr;
x86_pmu          4063 arch/x86/events/intel/core.c 	return x86_pmu.lbr_from <= msr && msr < lbr_from_nr;
x86_pmu          4121 arch/x86/events/intel/core.c 	x86_pmu.check_microcode = intel_snb_check_microcode;
x86_pmu          4142 arch/x86/events/intel/core.c 	for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
x86_pmu          4153 arch/x86/events/intel/core.c 	ebx.full = x86_pmu.events_maskl;
x86_pmu          4163 arch/x86/events/intel/core.c 		x86_pmu.events_maskl = ebx.full;
x86_pmu          4196 arch/x86/events/intel/core.c 		x86_pmu.counter_freezing = false;
x86_pmu          4197 arch/x86/events/intel/core.c 		x86_pmu.handle_irq = intel_pmu_handle_irq;
x86_pmu          4215 arch/x86/events/intel/core.c 	x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
x86_pmu          4217 arch/x86/events/intel/core.c 	x86_pmu.start_scheduling = intel_start_scheduling;
x86_pmu          4218 arch/x86/events/intel/core.c 	x86_pmu.commit_scheduling = intel_commit_scheduling;
x86_pmu          4219 arch/x86/events/intel/core.c 	x86_pmu.stop_scheduling = intel_stop_scheduling;
x86_pmu          4305 arch/x86/events/intel/core.c 	return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi);
x86_pmu          4326 arch/x86/events/intel/core.c 	if (x86_pmu.attr_freeze_on_smi == val)
x86_pmu          4329 arch/x86/events/intel/core.c 	x86_pmu.attr_freeze_on_smi = val;
x86_pmu          4390 arch/x86/events/intel/core.c 	return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
x86_pmu          4435 arch/x86/events/intel/core.c 	return x86_pmu.pebs ? attr->mode : 0;
x86_pmu          4441 arch/x86/events/intel/core.c 	return x86_pmu.lbr_nr ? attr->mode : 0;
x86_pmu          4447 arch/x86/events/intel/core.c 	return x86_pmu.version >= 2 ? attr->mode : 0;
x86_pmu          4454 arch/x86/events/intel/core.c 		return x86_pmu.flags & PMU_FL_TFA ? attr->mode : 0;
x86_pmu          4552 arch/x86/events/intel/core.c 		x86_pmu = core_pmu;
x86_pmu          4554 arch/x86/events/intel/core.c 		x86_pmu = intel_pmu;
x86_pmu          4556 arch/x86/events/intel/core.c 	x86_pmu.version			= version;
x86_pmu          4557 arch/x86/events/intel/core.c 	x86_pmu.num_counters		= eax.split.num_counters;
x86_pmu          4558 arch/x86/events/intel/core.c 	x86_pmu.cntval_bits		= eax.split.bit_width;
x86_pmu          4559 arch/x86/events/intel/core.c 	x86_pmu.cntval_mask		= (1ULL << eax.split.bit_width) - 1;
x86_pmu          4561 arch/x86/events/intel/core.c 	x86_pmu.events_maskl		= ebx.full;
x86_pmu          4562 arch/x86/events/intel/core.c 	x86_pmu.events_mask_len		= eax.split.mask_length;
x86_pmu          4564 arch/x86/events/intel/core.c 	x86_pmu.max_pebs_events		= min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
x86_pmu          4573 arch/x86/events/intel/core.c 		x86_pmu.num_counters_fixed =
x86_pmu          4578 arch/x86/events/intel/core.c 		x86_pmu.counter_freezing = !disable_counter_freezing;
x86_pmu          4584 arch/x86/events/intel/core.c 		x86_pmu.intel_cap.capabilities = capabilities;
x86_pmu          4612 arch/x86/events/intel/core.c 		x86_pmu.event_constraints = intel_core2_event_constraints;
x86_pmu          4613 arch/x86/events/intel/core.c 		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
x86_pmu          4628 arch/x86/events/intel/core.c 		x86_pmu.event_constraints = intel_nehalem_event_constraints;
x86_pmu          4629 arch/x86/events/intel/core.c 		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
x86_pmu          4630 arch/x86/events/intel/core.c 		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
x86_pmu          4631 arch/x86/events/intel/core.c 		x86_pmu.extra_regs = intel_nehalem_extra_regs;
x86_pmu          4632 arch/x86/events/intel/core.c 		x86_pmu.limit_period = nhm_limit_period;
x86_pmu          4645 arch/x86/events/intel/core.c 		x86_pmu.pebs_no_tlb = 1;
x86_pmu          4662 arch/x86/events/intel/core.c 		x86_pmu.event_constraints = intel_gen_event_constraints;
x86_pmu          4663 arch/x86/events/intel/core.c 		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
x86_pmu          4664 arch/x86/events/intel/core.c 		x86_pmu.pebs_aliases = intel_pebs_aliases_core2;
x86_pmu          4681 arch/x86/events/intel/core.c 		x86_pmu.event_constraints = intel_slm_event_constraints;
x86_pmu          4682 arch/x86/events/intel/core.c 		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
x86_pmu          4683 arch/x86/events/intel/core.c 		x86_pmu.extra_regs = intel_slm_extra_regs;
x86_pmu          4684 arch/x86/events/intel/core.c 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
x86_pmu          4701 arch/x86/events/intel/core.c 		x86_pmu.event_constraints = intel_slm_event_constraints;
x86_pmu          4702 arch/x86/events/intel/core.c 		x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints;
x86_pmu          4703 arch/x86/events/intel/core.c 		x86_pmu.extra_regs = intel_glm_extra_regs;
x86_pmu          4709 arch/x86/events/intel/core.c 		x86_pmu.pebs_aliases = NULL;
x86_pmu          4710 arch/x86/events/intel/core.c 		x86_pmu.pebs_prec_dist = true;
x86_pmu          4711 arch/x86/events/intel/core.c 		x86_pmu.lbr_pt_coexist = true;
x86_pmu          4712 arch/x86/events/intel/core.c 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
x86_pmu          4728 arch/x86/events/intel/core.c 		x86_pmu.event_constraints = intel_slm_event_constraints;
x86_pmu          4729 arch/x86/events/intel/core.c 		x86_pmu.extra_regs = intel_glm_extra_regs;
x86_pmu          4734 arch/x86/events/intel/core.c 		x86_pmu.pebs_aliases = NULL;
x86_pmu          4735 arch/x86/events/intel/core.c 		x86_pmu.pebs_prec_dist = true;
x86_pmu          4736 arch/x86/events/intel/core.c 		x86_pmu.lbr_pt_coexist = true;
x86_pmu          4737 arch/x86/events/intel/core.c 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
x86_pmu          4738 arch/x86/events/intel/core.c 		x86_pmu.flags |= PMU_FL_PEBS_ALL;
x86_pmu          4739 arch/x86/events/intel/core.c 		x86_pmu.get_event_constraints = glp_get_event_constraints;
x86_pmu          4750 arch/x86/events/intel/core.c 		x86_pmu.late_ack = true;
x86_pmu          4759 arch/x86/events/intel/core.c 		x86_pmu.event_constraints = intel_slm_event_constraints;
x86_pmu          4760 arch/x86/events/intel/core.c 		x86_pmu.extra_regs = intel_tnt_extra_regs;
x86_pmu          4765 arch/x86/events/intel/core.c 		x86_pmu.pebs_aliases = NULL;
x86_pmu          4766 arch/x86/events/intel/core.c 		x86_pmu.pebs_prec_dist = true;
x86_pmu          4767 arch/x86/events/intel/core.c 		x86_pmu.lbr_pt_coexist = true;
x86_pmu          4768 arch/x86/events/intel/core.c 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
x86_pmu          4769 arch/x86/events/intel/core.c 		x86_pmu.get_event_constraints = tnt_get_event_constraints;
x86_pmu          4785 arch/x86/events/intel/core.c 		x86_pmu.event_constraints = intel_westmere_event_constraints;
x86_pmu          4786 arch/x86/events/intel/core.c 		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
x86_pmu          4787 arch/x86/events/intel/core.c 		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
x86_pmu          4788 arch/x86/events/intel/core.c 		x86_pmu.extra_regs = intel_westmere_extra_regs;
x86_pmu          4789 arch/x86/events/intel/core.c 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
x86_pmu          4817 arch/x86/events/intel/core.c 		x86_pmu.event_constraints = intel_snb_event_constraints;
x86_pmu          4818 arch/x86/events/intel/core.c 		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
x86_pmu          4819 arch/x86/events/intel/core.c 		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
x86_pmu          4821 arch/x86/events/intel/core.c 			x86_pmu.extra_regs = intel_snbep_extra_regs;
x86_pmu          4823 arch/x86/events/intel/core.c 			x86_pmu.extra_regs = intel_snb_extra_regs;
x86_pmu          4827 arch/x86/events/intel/core.c 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
x86_pmu          4828 arch/x86/events/intel/core.c 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
x86_pmu          4859 arch/x86/events/intel/core.c 		x86_pmu.event_constraints = intel_ivb_event_constraints;
x86_pmu          4860 arch/x86/events/intel/core.c 		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
x86_pmu          4861 arch/x86/events/intel/core.c 		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
x86_pmu          4862 arch/x86/events/intel/core.c 		x86_pmu.pebs_prec_dist = true;
x86_pmu          4864 arch/x86/events/intel/core.c 			x86_pmu.extra_regs = intel_snbep_extra_regs;
x86_pmu          4866 arch/x86/events/intel/core.c 			x86_pmu.extra_regs = intel_snb_extra_regs;
x86_pmu          4868 arch/x86/events/intel/core.c 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
x86_pmu          4869 arch/x86/events/intel/core.c 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
x86_pmu          4891 arch/x86/events/intel/core.c 		x86_pmu.late_ack = true;
x86_pmu          4897 arch/x86/events/intel/core.c 		x86_pmu.event_constraints = intel_hsw_event_constraints;
x86_pmu          4898 arch/x86/events/intel/core.c 		x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
x86_pmu          4899 arch/x86/events/intel/core.c 		x86_pmu.extra_regs = intel_snbep_extra_regs;
x86_pmu          4900 arch/x86/events/intel/core.c 		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
x86_pmu          4901 arch/x86/events/intel/core.c 		x86_pmu.pebs_prec_dist = true;
x86_pmu          4903 arch/x86/events/intel/core.c 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
x86_pmu          4904 arch/x86/events/intel/core.c 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
x86_pmu          4906 arch/x86/events/intel/core.c 		x86_pmu.hw_config = hsw_hw_config;
x86_pmu          4907 arch/x86/events/intel/core.c 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
x86_pmu          4908 arch/x86/events/intel/core.c 		x86_pmu.lbr_double_abort = true;
x86_pmu          4923 arch/x86/events/intel/core.c 		x86_pmu.late_ack = true;
x86_pmu          4939 arch/x86/events/intel/core.c 		x86_pmu.event_constraints = intel_bdw_event_constraints;
x86_pmu          4940 arch/x86/events/intel/core.c 		x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints;
x86_pmu          4941 arch/x86/events/intel/core.c 		x86_pmu.extra_regs = intel_snbep_extra_regs;
x86_pmu          4942 arch/x86/events/intel/core.c 		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
x86_pmu          4943 arch/x86/events/intel/core.c 		x86_pmu.pebs_prec_dist = true;
x86_pmu          4945 arch/x86/events/intel/core.c 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
x86_pmu          4946 arch/x86/events/intel/core.c 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
x86_pmu          4948 arch/x86/events/intel/core.c 		x86_pmu.hw_config = hsw_hw_config;
x86_pmu          4949 arch/x86/events/intel/core.c 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
x86_pmu          4950 arch/x86/events/intel/core.c 		x86_pmu.limit_period = bdw_limit_period;
x86_pmu          4968 arch/x86/events/intel/core.c 		x86_pmu.event_constraints = intel_slm_event_constraints;
x86_pmu          4969 arch/x86/events/intel/core.c 		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
x86_pmu          4970 arch/x86/events/intel/core.c 		x86_pmu.extra_regs = intel_knl_extra_regs;
x86_pmu          4973 arch/x86/events/intel/core.c 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
x86_pmu          4974 arch/x86/events/intel/core.c 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
x86_pmu          4990 arch/x86/events/intel/core.c 		x86_pmu.late_ack = true;
x86_pmu          5001 arch/x86/events/intel/core.c 		x86_pmu.event_constraints = intel_skl_event_constraints;
x86_pmu          5002 arch/x86/events/intel/core.c 		x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
x86_pmu          5003 arch/x86/events/intel/core.c 		x86_pmu.extra_regs = intel_skl_extra_regs;
x86_pmu          5004 arch/x86/events/intel/core.c 		x86_pmu.pebs_aliases = intel_pebs_aliases_skl;
x86_pmu          5005 arch/x86/events/intel/core.c 		x86_pmu.pebs_prec_dist = true;
x86_pmu          5007 arch/x86/events/intel/core.c 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
x86_pmu          5008 arch/x86/events/intel/core.c 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
x86_pmu          5010 arch/x86/events/intel/core.c 		x86_pmu.hw_config = hsw_hw_config;
x86_pmu          5011 arch/x86/events/intel/core.c 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
x86_pmu          5021 arch/x86/events/intel/core.c 			x86_pmu.flags |= PMU_FL_TFA;
x86_pmu          5022 arch/x86/events/intel/core.c 			x86_pmu.get_event_constraints = tfa_get_event_constraints;
x86_pmu          5023 arch/x86/events/intel/core.c 			x86_pmu.enable_all = intel_tfa_pmu_enable_all;
x86_pmu          5024 arch/x86/events/intel/core.c 			x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
x86_pmu          5039 arch/x86/events/intel/core.c 		x86_pmu.late_ack = true;
x86_pmu          5045 arch/x86/events/intel/core.c 		x86_pmu.event_constraints = intel_icl_event_constraints;
x86_pmu          5046 arch/x86/events/intel/core.c 		x86_pmu.pebs_constraints = intel_icl_pebs_event_constraints;
x86_pmu          5047 arch/x86/events/intel/core.c 		x86_pmu.extra_regs = intel_icl_extra_regs;
x86_pmu          5048 arch/x86/events/intel/core.c 		x86_pmu.pebs_aliases = NULL;
x86_pmu          5049 arch/x86/events/intel/core.c 		x86_pmu.pebs_prec_dist = true;
x86_pmu          5050 arch/x86/events/intel/core.c 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
x86_pmu          5051 arch/x86/events/intel/core.c 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
x86_pmu          5053 arch/x86/events/intel/core.c 		x86_pmu.hw_config = hsw_hw_config;
x86_pmu          5054 arch/x86/events/intel/core.c 		x86_pmu.get_event_constraints = icl_get_event_constraints;
x86_pmu          5060 arch/x86/events/intel/core.c 		x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xca, .umask=0x02);
x86_pmu          5061 arch/x86/events/intel/core.c 		x86_pmu.lbr_pt_coexist = true;
x86_pmu          5068 arch/x86/events/intel/core.c 		switch (x86_pmu.version) {
x86_pmu          5070 arch/x86/events/intel/core.c 			x86_pmu.event_constraints = intel_v1_event_constraints;
x86_pmu          5078 arch/x86/events/intel/core.c 			x86_pmu.event_constraints = intel_gen_event_constraints;
x86_pmu          5094 arch/x86/events/intel/core.c 	x86_pmu.attr_update = attr_update;
x86_pmu          5096 arch/x86/events/intel/core.c 	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
x86_pmu          5098 arch/x86/events/intel/core.c 		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
x86_pmu          5099 arch/x86/events/intel/core.c 		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
x86_pmu          5101 arch/x86/events/intel/core.c 	x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1;
x86_pmu          5103 arch/x86/events/intel/core.c 	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
x86_pmu          5105 arch/x86/events/intel/core.c 		     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
x86_pmu          5106 arch/x86/events/intel/core.c 		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
x86_pmu          5109 arch/x86/events/intel/core.c 	x86_pmu.intel_ctrl |=
x86_pmu          5110 arch/x86/events/intel/core.c 		((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
x86_pmu          5112 arch/x86/events/intel/core.c 	if (x86_pmu.event_constraints) {
x86_pmu          5117 arch/x86/events/intel/core.c 		for_each_event_constraint(c, x86_pmu.event_constraints) {
x86_pmu          5120 arch/x86/events/intel/core.c 				c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
x86_pmu          5123 arch/x86/events/intel/core.c 				~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
x86_pmu          5134 arch/x86/events/intel/core.c 	if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
x86_pmu          5135 arch/x86/events/intel/core.c 		x86_pmu.lbr_nr = 0;
x86_pmu          5136 arch/x86/events/intel/core.c 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
x86_pmu          5137 arch/x86/events/intel/core.c 		if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
x86_pmu          5138 arch/x86/events/intel/core.c 		      check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
x86_pmu          5139 arch/x86/events/intel/core.c 			x86_pmu.lbr_nr = 0;
x86_pmu          5142 arch/x86/events/intel/core.c 	if (x86_pmu.lbr_nr)
x86_pmu          5143 arch/x86/events/intel/core.c 		pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
x86_pmu          5150 arch/x86/events/intel/core.c 	if (x86_pmu.extra_regs) {
x86_pmu          5151 arch/x86/events/intel/core.c 		for (er = x86_pmu.extra_regs; er->msr; er++) {
x86_pmu          5155 arch/x86/events/intel/core.c 				x86_pmu.lbr_sel_map = NULL;
x86_pmu          5160 arch/x86/events/intel/core.c 	if (x86_pmu.intel_cap.full_width_write) {
x86_pmu          5161 arch/x86/events/intel/core.c 		x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
x86_pmu          5162 arch/x86/events/intel/core.c 		x86_pmu.perfctr = MSR_IA32_PMC0;
x86_pmu          5170 arch/x86/events/intel/core.c 	if (x86_pmu.counter_freezing)
x86_pmu          5171 arch/x86/events/intel/core.c 		x86_pmu.handle_irq = intel_pmu_handle_irq_v4;
x86_pmu          5188 arch/x86/events/intel/core.c 	if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
x86_pmu          5200 arch/x86/events/intel/core.c 	x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);
x86_pmu          5202 arch/x86/events/intel/core.c 	x86_pmu.start_scheduling = NULL;
x86_pmu          5203 arch/x86/events/intel/core.c 	x86_pmu.commit_scheduling = NULL;
x86_pmu          5204 arch/x86/events/intel/core.c 	x86_pmu.stop_scheduling = NULL;
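
Near the end of the intel/core.c group, lines 5096-5110 clamp the CPUID-reported counter counts and assemble x86_pmu.intel_ctrl, the global-control enable mask: one bit per general counter from bit 0, one bit per fixed counter from INTEL_PMC_IDX_FIXED upward. A compilable sketch of that mask construction follows; the MAX_* and IDX_FIXED constants are stand-ins for the kernel's INTEL_PMC_* values.

#include <stdint.h>
#include <stdio.h>

#define MAX_GENERIC	8	/* stand-in for INTEL_PMC_MAX_GENERIC */
#define MAX_FIXED	4	/* stand-in for INTEL_PMC_MAX_FIXED */
#define IDX_FIXED	32	/* stand-in for INTEL_PMC_IDX_FIXED */

static uint64_t build_intel_ctrl(int num_counters, int num_fixed)
{
	uint64_t ctrl;

	if (num_counters > MAX_GENERIC)	/* clamp what CPUID reported */
		num_counters = MAX_GENERIC;
	if (num_fixed > MAX_FIXED)
		num_fixed = MAX_FIXED;

	ctrl  = (1ULL << num_counters) - 1;		/* general: bit 0.. */
	ctrl |= ((1ULL << num_fixed) - 1) << IDX_FIXED;	/* fixed: bit 32.. */
	return ctrl;
}

int main(void)
{
	/* 4 general + 3 fixed counters -> 0x70000000f */
	printf("intel_ctrl = %#llx\n",
	       (unsigned long long)build_intel_ctrl(4, 3));
	return 0;
}
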
x86_pmu           180 arch/x86/events/intel/ds.c 	if (x86_pmu.pebs_no_tlb) {
x86_pmu           338 arch/x86/events/intel/ds.c 	size_t bsiz = x86_pmu.pebs_buffer_size;
x86_pmu           342 arch/x86/events/intel/ds.c 	if (!x86_pmu.pebs)
x86_pmu           353 arch/x86/events/intel/ds.c 	if (x86_pmu.intel_cap.pebs_format < 2) {
x86_pmu           367 arch/x86/events/intel/ds.c 	max = x86_pmu.pebs_record_size * (bsiz / x86_pmu.pebs_record_size);
x86_pmu           377 arch/x86/events/intel/ds.c 	if (!x86_pmu.pebs)
x86_pmu           385 arch/x86/events/intel/ds.c 	ds_clear_cea(cea, x86_pmu.pebs_buffer_size);
x86_pmu           386 arch/x86/events/intel/ds.c 	dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size);
x86_pmu           397 arch/x86/events/intel/ds.c 	if (!x86_pmu.bts)
x86_pmu           424 arch/x86/events/intel/ds.c 	if (!x86_pmu.bts)
x86_pmu           452 arch/x86/events/intel/ds.c 	if (!x86_pmu.bts && !x86_pmu.pebs)
x86_pmu           478 arch/x86/events/intel/ds.c 	x86_pmu.bts_active = 0;
x86_pmu           479 arch/x86/events/intel/ds.c 	x86_pmu.pebs_active = 0;
x86_pmu           481 arch/x86/events/intel/ds.c 	if (!x86_pmu.bts && !x86_pmu.pebs)
x86_pmu           484 arch/x86/events/intel/ds.c 	if (!x86_pmu.bts)
x86_pmu           487 arch/x86/events/intel/ds.c 	if (!x86_pmu.pebs)
x86_pmu           520 arch/x86/events/intel/ds.c 		if (x86_pmu.bts && !bts_err)
x86_pmu           521 arch/x86/events/intel/ds.c 			x86_pmu.bts_active = 1;
x86_pmu           523 arch/x86/events/intel/ds.c 		if (x86_pmu.pebs && !pebs_err)
x86_pmu           524 arch/x86/events/intel/ds.c 			x86_pmu.pebs_active = 1;
x86_pmu           600 arch/x86/events/intel/ds.c 	if (!x86_pmu.bts_active)
x86_pmu           674 arch/x86/events/intel/ds.c 	x86_pmu.drain_pebs(&regs);
x86_pmu           879 arch/x86/events/intel/ds.c 	if (x86_pmu.pebs_constraints) {
x86_pmu           880 arch/x86/events/intel/ds.c 		for_each_event_constraint(c, x86_pmu.pebs_constraints) {
x86_pmu           892 arch/x86/events/intel/ds.c 	if (x86_pmu.flags & PMU_FL_PEBS_ALL)
x86_pmu           928 arch/x86/events/intel/ds.c 	if (x86_pmu.flags & PMU_FL_PEBS_ALL)
x86_pmu           929 arch/x86/events/intel/ds.c 		reserved = x86_pmu.max_pebs_events + x86_pmu.num_counters_fixed;
x86_pmu           931 arch/x86/events/intel/ds.c 		reserved = x86_pmu.max_pebs_events;
x86_pmu           956 arch/x86/events/intel/ds.c 		sz += x86_pmu.lbr_nr * sizeof(struct pebs_lbr_entry);
x86_pmu           990 arch/x86/events/intel/ds.c 		      x86_pmu.rtm_abort_event);
x86_pmu          1005 arch/x86/events/intel/ds.c 			((x86_pmu.lbr_nr-1) << PEBS_DATACFG_LBR_SHIFT);
x86_pmu          1036 arch/x86/events/intel/ds.c 	if (x86_pmu.intel_cap.pebs_baseline && add) {
x86_pmu          1112 arch/x86/events/intel/ds.c 	if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && (x86_pmu.version < 5))
x86_pmu          1117 arch/x86/events/intel/ds.c 	if (x86_pmu.intel_cap.pebs_baseline) {
x86_pmu          1135 arch/x86/events/intel/ds.c 			(u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
x86_pmu          1170 arch/x86/events/intel/ds.c 	    (x86_pmu.version < 5))
x86_pmu          1212 arch/x86/events/intel/ds.c 	if (!x86_pmu.intel_cap.pebs_trap)
x86_pmu          1314 arch/x86/events/intel/ds.c 	if (x86_pmu.intel_cap.pebs_format < 4)
x86_pmu          1429 arch/x86/events/intel/ds.c 		if (x86_pmu.intel_cap.pebs_format >= 2) {
x86_pmu          1454 arch/x86/events/intel/ds.c 	    x86_pmu.intel_cap.pebs_format >= 1)
x86_pmu          1457 arch/x86/events/intel/ds.c 	if (x86_pmu.intel_cap.pebs_format >= 2) {
x86_pmu          1473 arch/x86/events/intel/ds.c 	if (x86_pmu.intel_cap.pebs_format >= 3 &&
x86_pmu          1626 arch/x86/events/intel/ds.c 	if (x86_pmu.intel_cap.pebs_format < 1)
x86_pmu          1637 arch/x86/events/intel/ds.c 			if (x86_pmu.intel_cap.pebs_format >= 3)
x86_pmu          1669 arch/x86/events/intel/ds.c 	int shift = 64 - x86_pmu.cntval_bits;
x86_pmu          1780 arch/x86/events/intel/ds.c 	if (!x86_pmu.pebs_active)
x86_pmu          1840 arch/x86/events/intel/ds.c 	if (!x86_pmu.pebs_active)
x86_pmu          1848 arch/x86/events/intel/ds.c 	mask = (1ULL << x86_pmu.max_pebs_events) - 1;
x86_pmu          1849 arch/x86/events/intel/ds.c 	size = x86_pmu.max_pebs_events;
x86_pmu          1850 arch/x86/events/intel/ds.c 	if (x86_pmu.flags & PMU_FL_PEBS_ALL) {
x86_pmu          1851 arch/x86/events/intel/ds.c 		mask |= ((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED;
x86_pmu          1852 arch/x86/events/intel/ds.c 		size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;
x86_pmu          1860 arch/x86/events/intel/ds.c 	for (at = base; at < top; at += x86_pmu.pebs_record_size) {
x86_pmu          1868 arch/x86/events/intel/ds.c 		if (x86_pmu.intel_cap.pebs_format >= 3) {
x86_pmu          1888 arch/x86/events/intel/ds.c 					x86_pmu.max_pebs_events);
x86_pmu          1889 arch/x86/events/intel/ds.c 		if (bit >= x86_pmu.max_pebs_events)
x86_pmu          1953 arch/x86/events/intel/ds.c 	if (!x86_pmu.pebs_active)
x86_pmu          1961 arch/x86/events/intel/ds.c 	mask = ((1ULL << x86_pmu.max_pebs_events) - 1) |
x86_pmu          1962 arch/x86/events/intel/ds.c 	       (((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED);
x86_pmu          1963 arch/x86/events/intel/ds.c 	size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;
x86_pmu          2009 arch/x86/events/intel/ds.c 	x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
x86_pmu          2010 arch/x86/events/intel/ds.c 	x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
x86_pmu          2011 arch/x86/events/intel/ds.c 	x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
x86_pmu          2012 arch/x86/events/intel/ds.c 	if (x86_pmu.version <= 4)
x86_pmu          2013 arch/x86/events/intel/ds.c 		x86_pmu.pebs_no_isolation = 1;
x86_pmu          2015 arch/x86/events/intel/ds.c 	if (x86_pmu.pebs) {
x86_pmu          2016 arch/x86/events/intel/ds.c 		char pebs_type = x86_pmu.intel_cap.pebs_trap ?  '+' : '-';
x86_pmu          2018 arch/x86/events/intel/ds.c 		int format = x86_pmu.intel_cap.pebs_format;
x86_pmu          2021 arch/x86/events/intel/ds.c 			x86_pmu.intel_cap.pebs_baseline = 0;
x86_pmu          2026 arch/x86/events/intel/ds.c 			x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
x86_pmu          2034 arch/x86/events/intel/ds.c 			x86_pmu.pebs_buffer_size = PAGE_SIZE;
x86_pmu          2035 arch/x86/events/intel/ds.c 			x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
x86_pmu          2040 arch/x86/events/intel/ds.c 			x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
x86_pmu          2041 arch/x86/events/intel/ds.c 			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
x86_pmu          2046 arch/x86/events/intel/ds.c 			x86_pmu.pebs_record_size = sizeof(struct pebs_record_hsw);
x86_pmu          2047 arch/x86/events/intel/ds.c 			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
x86_pmu          2052 arch/x86/events/intel/ds.c 			x86_pmu.pebs_record_size =
x86_pmu          2054 arch/x86/events/intel/ds.c 			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
x86_pmu          2055 arch/x86/events/intel/ds.c 			x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME;
x86_pmu          2059 arch/x86/events/intel/ds.c 			x86_pmu.drain_pebs = intel_pmu_drain_pebs_icl;
x86_pmu          2060 arch/x86/events/intel/ds.c 			x86_pmu.pebs_record_size = sizeof(struct pebs_basic);
x86_pmu          2061 arch/x86/events/intel/ds.c 			if (x86_pmu.intel_cap.pebs_baseline) {
x86_pmu          2062 arch/x86/events/intel/ds.c 				x86_pmu.large_pebs_flags |=
x86_pmu          2065 arch/x86/events/intel/ds.c 				x86_pmu.flags |= PMU_FL_PEBS_ALL;
x86_pmu          2070 arch/x86/events/intel/ds.c 				x86_pmu.large_pebs_flags &=
x86_pmu          2080 arch/x86/events/intel/ds.c 			if (x86_pmu.intel_cap.pebs_output_pt_available) {
x86_pmu          2089 arch/x86/events/intel/ds.c 			x86_pmu.pebs = 0;
x86_pmu          2098 arch/x86/events/intel/ds.c 	if (!x86_pmu.bts && !x86_pmu.pebs)
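
The ds.c hits from line 2009 onward come from intel_ds_init(), which reads the PEBS format out of the capability MSR and selects a record size plus drain routine per format, disabling PEBS outright for formats it does not recognize (line 2089). A reduced sketch of that dispatch is below; the record layouts and function names are abbreviations, not the kernel's real definitions.

#include <stddef.h>
#include <stdio.h>

/* abbreviated record layouts; the real ones carry many more fields */
struct pebs_record_core { unsigned long long flags, ip, ax, bx; };
struct pebs_record_nhm  { struct pebs_record_core base;
			  unsigned long long status, dla, dse, lat; };

static size_t pebs_record_size;
static void (*drain_pebs)(void);

static void drain_core(void) { puts("draining core-format records"); }
static void drain_nhm(void)  { puts("draining nhm+-format records"); }

/* dispatch on the hardware-reported format, cf. ds.c lines 2015-2089 */
static int pebs_init(int format)
{
	switch (format) {
	case 0:
		pebs_record_size = sizeof(struct pebs_record_core);
		drain_pebs = drain_core;
		return 0;
	case 1:
		pebs_record_size = sizeof(struct pebs_record_nhm);
		drain_pebs = drain_nhm;
		return 0;
	default:
		return -1;	/* unknown format: leave PEBS disabled */
	}
}

int main(void)
{
	if (pebs_init(1) == 0) {
		printf("record size %zu\n", pebs_record_size);
		drain_pebs();
	}
	return 0;
}
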
x86_pmu           290 arch/x86/events/intel/knc.c static const struct x86_pmu knc_pmu __initconst = {
x86_pmu           316 arch/x86/events/intel/knc.c 	x86_pmu = knc_pmu;
x86_pmu           162 arch/x86/events/intel/lbr.c 	if (pmi && x86_pmu.version >= 4)
x86_pmu           170 arch/x86/events/intel/lbr.c 		lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask;
x86_pmu           201 arch/x86/events/intel/lbr.c 	for (i = 0; i < x86_pmu.lbr_nr; i++)
x86_pmu           202 arch/x86/events/intel/lbr.c 		wrmsrl(x86_pmu.lbr_from + i, 0);
x86_pmu           209 arch/x86/events/intel/lbr.c 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
x86_pmu           210 arch/x86/events/intel/lbr.c 		wrmsrl(x86_pmu.lbr_from + i, 0);
x86_pmu           211 arch/x86/events/intel/lbr.c 		wrmsrl(x86_pmu.lbr_to   + i, 0);
x86_pmu           212 arch/x86/events/intel/lbr.c 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
x86_pmu           221 arch/x86/events/intel/lbr.c 	if (!x86_pmu.lbr_nr)
x86_pmu           224 arch/x86/events/intel/lbr.c 	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
x86_pmu           240 arch/x86/events/intel/lbr.c 	rdmsrl(x86_pmu.lbr_tos, tos);
x86_pmu           269 arch/x86/events/intel/lbr.c 	int lbr_format = x86_pmu.intel_cap.lbr_format;
x86_pmu           314 arch/x86/events/intel/lbr.c 	wrmsrl(x86_pmu.lbr_from + idx, val);
x86_pmu           319 arch/x86/events/intel/lbr.c 	wrmsrl(x86_pmu.lbr_to + idx, val);
x86_pmu           326 arch/x86/events/intel/lbr.c 	rdmsrl(x86_pmu.lbr_from + idx, val);
x86_pmu           335 arch/x86/events/intel/lbr.c 	rdmsrl(x86_pmu.lbr_to + idx, val);
x86_pmu           366 arch/x86/events/intel/lbr.c 	mask = x86_pmu.lbr_nr - 1;
x86_pmu           372 arch/x86/events/intel/lbr.c 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
x86_pmu           376 arch/x86/events/intel/lbr.c 	for (; i < x86_pmu.lbr_nr; i++) {
x86_pmu           380 arch/x86/events/intel/lbr.c 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
x86_pmu           384 arch/x86/events/intel/lbr.c 	wrmsrl(x86_pmu.lbr_tos, tos);
x86_pmu           400 arch/x86/events/intel/lbr.c 	mask = x86_pmu.lbr_nr - 1;
x86_pmu           402 arch/x86/events/intel/lbr.c 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
x86_pmu           409 arch/x86/events/intel/lbr.c 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
x86_pmu           462 arch/x86/events/intel/lbr.c 	if (!x86_pmu.lbr_nr)
x86_pmu           491 arch/x86/events/intel/lbr.c 	if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
x86_pmu           503 arch/x86/events/intel/lbr.c 	if (!x86_pmu.lbr_nr)
x86_pmu           512 arch/x86/events/intel/lbr.c 	if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
x86_pmu           538 arch/x86/events/intel/lbr.c 	unsigned long mask = x86_pmu.lbr_nr - 1;
x86_pmu           542 arch/x86/events/intel/lbr.c 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
x86_pmu           552 arch/x86/events/intel/lbr.c 		rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);
x86_pmu           575 arch/x86/events/intel/lbr.c 	unsigned long mask = x86_pmu.lbr_nr - 1;
x86_pmu           576 arch/x86/events/intel/lbr.c 	int lbr_format = x86_pmu.intel_cap.lbr_format;
x86_pmu           580 arch/x86/events/intel/lbr.c 	int num = x86_pmu.lbr_nr;
x86_pmu           645 arch/x86/events/intel/lbr.c 		if (abort && x86_pmu.lbr_double_abort && out > 0)
x86_pmu           675 arch/x86/events/intel/lbr.c 	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
x86_pmu           767 arch/x86/events/intel/lbr.c 		v = x86_pmu.lbr_sel_map[i];
x86_pmu           785 arch/x86/events/intel/lbr.c 	reg->config = mask ^ (x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK);
x86_pmu           789 arch/x86/events/intel/lbr.c 	    (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO))
x86_pmu           802 arch/x86/events/intel/lbr.c 	if (!x86_pmu.lbr_nr)
x86_pmu           815 arch/x86/events/intel/lbr.c 	if (x86_pmu.lbr_sel_map)
x86_pmu          1099 arch/x86/events/intel/lbr.c 	cpuc->lbr_stack.nr = x86_pmu.lbr_nr;
x86_pmu          1100 arch/x86/events/intel/lbr.c 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
x86_pmu          1172 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_nr     = 4;
x86_pmu          1173 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_tos    = MSR_LBR_TOS;
x86_pmu          1174 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
x86_pmu          1175 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_to     = MSR_LBR_CORE_TO;
x86_pmu          1186 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_nr     = 16;
x86_pmu          1187 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_tos    = MSR_LBR_TOS;
x86_pmu          1188 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
x86_pmu          1189 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_to     = MSR_LBR_NHM_TO;
x86_pmu          1191 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
x86_pmu          1192 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;
x86_pmu          1206 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_nr	 = 16;
x86_pmu          1207 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
x86_pmu          1208 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
x86_pmu          1209 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;
x86_pmu          1211 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
x86_pmu          1212 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;
x86_pmu          1225 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_nr	 = 16;
x86_pmu          1226 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
x86_pmu          1227 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
x86_pmu          1228 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;
x86_pmu          1230 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
x86_pmu          1231 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;
x86_pmu          1240 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_nr	 = 32;
x86_pmu          1241 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
x86_pmu          1242 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
x86_pmu          1243 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;
x86_pmu          1245 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
x86_pmu          1246 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;
x86_pmu          1270 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_nr	   = 8;
x86_pmu          1271 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_tos    = MSR_LBR_TOS;
x86_pmu          1272 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
x86_pmu          1273 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_to     = MSR_LBR_CORE_TO;
x86_pmu          1284 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_nr	   = 8;
x86_pmu          1285 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_tos    = MSR_LBR_TOS;
x86_pmu          1286 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
x86_pmu          1287 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_to     = MSR_LBR_CORE_TO;
x86_pmu          1289 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
x86_pmu          1290 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;
x86_pmu          1302 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_nr	   = 8;
x86_pmu          1303 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_tos    = MSR_LBR_TOS;
x86_pmu          1304 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
x86_pmu          1305 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_to     = MSR_LBR_NHM_TO;
x86_pmu          1307 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
x86_pmu          1308 arch/x86/events/intel/lbr.c 	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;
x86_pmu          1311 arch/x86/events/intel/lbr.c 	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP)
x86_pmu          1312 arch/x86/events/intel/lbr.c 		x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS;
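
The lbr.c reads and writes above (e.g. lines 366-384 and 538-552) treat the LBR stack as a power-of-two ring: MSR_LBR_TOS (line 240) gives the newest entry, and older entries sit at (tos - i) & (lbr_nr - 1). A sketch of that walk, with an in-memory array standing in for the from/to MSR pairs:

#include <stdio.h>

#define LBR_NR 8	/* power of two, so "& (LBR_NR - 1)" wraps the index */

static unsigned long long lbr_from[LBR_NR];	/* stand-in for the MSRs */

/* newest-first walk of the ring, cf. intel_pmu_lbr_read_32/64() */
static void read_lbr_stack(unsigned int tos)
{
	unsigned long mask = LBR_NR - 1;	/* cf. lbr.c lines 366/538 */
	int i;

	for (i = 0; i < LBR_NR; i++) {
		unsigned long idx = (tos - i) & mask;
		printf("entry %d: from=%#llx\n", i, lbr_from[idx]);
	}
}

int main(void)
{
	int i;

	for (i = 0; i < LBR_NR; i++)
		lbr_from[i] = 0x400000 + 0x10 * i;
	read_lbr_stack(5);	/* pretend MSR_LBR_TOS read back 5 */
	return 0;
}
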
x86_pmu           921 arch/x86/events/intel/p4.c 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
x86_pmu           990 arch/x86/events/intel/p4.c 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
x86_pmu          1009 arch/x86/events/intel/p4.c 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
x86_pmu          1028 arch/x86/events/intel/p4.c 		if (!overflow && (val & (1ULL << (x86_pmu.cntval_bits - 1))))
x86_pmu          1301 arch/x86/events/intel/p4.c static __initconst const struct x86_pmu p4_pmu = {
x86_pmu          1359 arch/x86/events/intel/p4.c 	x86_pmu = p4_pmu;
x86_pmu          1370 arch/x86/events/intel/p4.c 	for (i = 0; i < x86_pmu.num_counters; i++) {
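
Several hits across core.c, amd/core.c, ds.c and p4.c share one width trick: counters are only cntval_bits wide, so core.c line 71 (and ds.c line 1669) sign-extend a raw readout by shifting left then arithmetic-shifting right, while core.c line 1518, amd/core.c line 593 and p4.c line 1028 detect overflow by testing the top valid bit. A compilable sketch of both, assuming 48-bit counters:

#include <stdint.h>
#include <stdio.h>

#define CNTVAL_BITS 48	/* x86_pmu.cntval_bits on recent cores */

/* sign-extend a CNTVAL_BITS-wide raw counter readout, cf. core.c line 71 */
static int64_t sign_extend_counter(uint64_t raw)
{
	int shift = 64 - CNTVAL_BITS;

	/* arithmetic right shift of a signed value, as the kernel relies on */
	return ((int64_t)(raw << shift)) >> shift;
}

/* overflow test on the top valid bit, cf. core.c 1518 / p4.c 1028 */
static int counter_overflowed(uint64_t val)
{
	return !!(val & (1ULL << (CNTVAL_BITS - 1)));
}

int main(void)
{
	uint64_t raw = (1ULL << CNTVAL_BITS) - 5;	/* -5 in 48 bits */

	printf("extended: %lld, overflowed: %d\n",
	       (long long)sign_extend_counter(raw), counter_overflowed(raw));
	return 0;
}
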
x86_pmu           201 arch/x86/events/intel/p6.c static __initconst const struct x86_pmu p6_pmu = {
x86_pmu           242 arch/x86/events/intel/p6.c 		x86_pmu.attr_rdpmc_broken = 1;
x86_pmu           243 arch/x86/events/intel/p6.c 		x86_pmu.attr_rdpmc = 0;
x86_pmu           249 arch/x86/events/intel/p6.c 	x86_pmu = p6_pmu;
x86_pmu           726 arch/x86/events/perf_event.h 	__quirk.next = x86_pmu.quirks;					\
x86_pmu           727 arch/x86/events/perf_event.h 	x86_pmu.quirks = &__quirk;					\
x86_pmu           767 arch/x86/events/perf_event.h extern struct x86_pmu x86_pmu __read_mostly;
x86_pmu           771 arch/x86/events/perf_event.h 	return  x86_pmu.lbr_sel_map &&
x86_pmu           772 arch/x86/events/perf_event.h 		x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
x86_pmu           802 arch/x86/events/perf_event.h 	return x86_pmu.eventsel + (x86_pmu.addr_offset ?
x86_pmu           803 arch/x86/events/perf_event.h 				   x86_pmu.addr_offset(index, true) : index);
x86_pmu           808 arch/x86/events/perf_event.h 	return x86_pmu.perfctr + (x86_pmu.addr_offset ?
x86_pmu           809 arch/x86/events/perf_event.h 				  x86_pmu.addr_offset(index, false) : index);
x86_pmu           814 arch/x86/events/perf_event.h 	return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
x86_pmu           934 arch/x86/events/perf_event.h 	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
x86_pmu          1075 arch/x86/events/perf_event.h 	return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
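
Lines 726-727 of perf_event.h are the body of the x86_add_quirk() macro: boot-time quirks are pushed onto a singly linked list headed at x86_pmu.quirks, which core.c line 1815 later walks. A sketch of that push-front list with hypothetical names:

#include <stdio.h>

struct quirk {
	void		(*func)(void);
	struct quirk	*next;
};

static struct quirk *quirks;	/* list head, cf. x86_pmu.quirks */

/* push-front, the two statements quoted from perf_event.h 726-727 */
static void quirk_push(struct quirk *q)
{
	q->next = quirks;
	quirks = q;
}

static void fix_a(void) { puts("applying quirk A"); }
static void fix_b(void) { puts("applying quirk B"); }

static struct quirk qa = { .func = fix_a };
static struct quirk qb = { .func = fix_b };

int main(void)
{
	struct quirk *q;

	quirk_push(&qa);
	quirk_push(&qb);
	for (q = quirks; q; q = q->next)	/* cf. core.c line 1815 */
		q->func();
	return 0;
}
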
x86_pmu           273 arch/x86/kvm/vmx/pmu_intel.c 	struct x86_pmu_capability x86_pmu;
x86_pmu           295 arch/x86/kvm/vmx/pmu_intel.c 	perf_get_x86_pmu_capability(&x86_pmu);
x86_pmu           298 arch/x86/kvm/vmx/pmu_intel.c 					 x86_pmu.num_counters_gp);
x86_pmu           308 arch/x86/kvm/vmx/pmu_intel.c 			      x86_pmu.num_counters_fixed);
x86_pmu          5199 arch/x86/kvm/x86.c 	struct x86_pmu_capability x86_pmu;
x86_pmu          5206 arch/x86/kvm/x86.c 	perf_get_x86_pmu_capability(&x86_pmu);
x86_pmu          5254 arch/x86/kvm/x86.c 			    min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
x86_pmu          5259 arch/x86/kvm/x86.c 			    min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
x86_pmu           501 arch/x86/xen/pmu.c 	if (x86_pmu.handle_irq(&regs))
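
The kvm and xen hits are consumers rather than implementers: core.c lines 2594-2600 copy the counter geometry into a struct x86_pmu_capability, which kvm reads through perf_get_x86_pmu_capability() before sizing the guest vPMU, while xen/pmu.c line 501 simply forwards its interrupt into x86_pmu.handle_irq. A sketch of that export boundary with abbreviated fields:

#include <stdio.h>

/* abbreviated stand-in for struct x86_pmu_capability */
struct pmu_cap {
	int version;
	int num_counters_gp;
	int num_counters_fixed;
	int bit_width_gp;
};

/* the driver's private state, a small subset of struct x86_pmu */
static struct {
	int version, num_counters, num_counters_fixed, cntval_bits;
} pmu = { 4, 4, 3, 48 };

/* snapshot export, cf. perf_get_x86_pmu_capability() (core.c 2594-2600) */
static void get_pmu_capability(struct pmu_cap *cap)
{
	cap->version            = pmu.version;
	cap->num_counters_gp    = pmu.num_counters;
	cap->num_counters_fixed = pmu.num_counters_fixed;
	cap->bit_width_gp       = pmu.cntval_bits;
}

int main(void)
{
	struct pmu_cap cap;

	get_pmu_capability(&cap);	/* what kvm does before sizing vPMU */
	printf("v%d PMU: %d general + %d fixed counters, %d bits\n",
	       cap.version, cap.num_counters_gp, cap.num_counters_fixed,
	       cap.bit_width_gp);
	return 0;
}
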