Lines matching refs: x86_pmu

42 struct x86_pmu x86_pmu __read_mostly;
67 int shift = 64 - x86_pmu.cntval_bits; in x86_perf_event_update()
117 if (!x86_pmu.extra_regs) in x86_pmu_extra_regs()
120 for (er = x86_pmu.extra_regs; er->msr; er++) { in x86_pmu_extra_regs()
147 for (i = 0; i < x86_pmu.num_counters; i++) { in reserve_pmc_hardware()
152 for (i = 0; i < x86_pmu.num_counters; i++) { in reserve_pmc_hardware()
163 i = x86_pmu.num_counters; in reserve_pmc_hardware()
176 for (i = 0; i < x86_pmu.num_counters; i++) { in release_pmc_hardware()
200 for (i = 0; i < x86_pmu.num_counters; i++) { in check_hw_exists()
214 if (x86_pmu.num_counters_fixed) { in check_hw_exists()
219 for (i = 0; i < x86_pmu.num_counters_fixed; i++) { in check_hw_exists()
288 return x86_pmu.handle_irq != NULL; in x86_pmu_initialized()
362 if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) { in x86_add_exclusive()
364 for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) { in x86_add_exclusive()
365 if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i])) in x86_add_exclusive()
368 atomic_inc(&x86_pmu.lbr_exclusive[what]); in x86_add_exclusive()
382 atomic_dec(&x86_pmu.lbr_exclusive[what]); in x86_del_exclusive()
393 hwc->sample_period = x86_pmu.max_period; in x86_setup_perfctr()
404 if (attr->config >= x86_pmu.max_events) in x86_setup_perfctr()
410 config = x86_pmu.event_map(attr->config); in x86_setup_perfctr()
424 if (!x86_pmu.bts_active) in x86_setup_perfctr()
479 if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) { in x86_pmu_hw_config()
483 if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2) in x86_pmu_hw_config()
494 if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format < 2) { in x86_pmu_hw_config()
541 if (event->attr.sample_period && x86_pmu.limit_period) { in x86_pmu_hw_config()
542 if (x86_pmu.limit_period(event, event->attr.sample_period) > in x86_pmu_hw_config()
575 return x86_pmu.hw_config(event); in __x86_pmu_event_init()
583 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in x86_pmu_disable_all()
623 x86_pmu.disable_all(); in x86_pmu_disable()
631 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in x86_pmu_enable_all()
841 if (x86_pmu.start_scheduling) in x86_schedule_events()
842 x86_pmu.start_scheduling(cpuc); in x86_schedule_events()
846 c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]); in x86_schedule_events()
879 int gpmax = x86_pmu.num_counters; in x86_schedule_events()
913 if (x86_pmu.commit_scheduling) in x86_schedule_events()
914 x86_pmu.commit_scheduling(cpuc, i, assign[i]); in x86_schedule_events()
929 if (x86_pmu.put_event_constraints) in x86_schedule_events()
930 x86_pmu.put_event_constraints(cpuc, e); in x86_schedule_events()
934 if (x86_pmu.stop_scheduling) in x86_schedule_events()
935 x86_pmu.stop_scheduling(cpuc); in x86_schedule_events()
949 max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed; in collect_events()
1080 x86_pmu.enable_all(added); in x86_pmu_enable()
1121 if (left > x86_pmu.max_period) in x86_perf_event_set_period()
1122 left = x86_pmu.max_period; in x86_perf_event_set_period()
1124 if (x86_pmu.limit_period) in x86_perf_event_set_period()
1125 left = x86_pmu.limit_period(event, left); in x86_perf_event_set_period()
1137 wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); in x86_perf_event_set_period()
1145 if (x86_pmu.perfctr_second_write) { in x86_perf_event_set_period()
1147 (u64)(-left) & x86_pmu.cntval_mask); in x86_perf_event_set_period()
1194 ret = x86_pmu.schedule_events(cpuc, n, assign); in x86_pmu_add()
1238 x86_pmu.enable(event); in x86_pmu_start()
1250 if (!x86_pmu.num_counters) in perf_event_print_debug()
1258 if (x86_pmu.version >= 2) { in perf_event_print_debug()
1269 if (x86_pmu.pebs_constraints) { in perf_event_print_debug()
1273 if (x86_pmu.lbr_nr) { in perf_event_print_debug()
1280 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in perf_event_print_debug()
1293 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { in perf_event_print_debug()
1308 x86_pmu.disable(event); in x86_pmu_stop()
1362 if (x86_pmu.put_event_constraints) in x86_pmu_del()
1363 x86_pmu.put_event_constraints(cpuc, event); in x86_pmu_del()
1395 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in x86_pmu_handle_irq()
1410 if (val & (1ULL << (x86_pmu.cntval_bits - 1))) in x86_pmu_handle_irq()
1434 if (!x86_pmu.apic || !x86_pmu_initialized()) in perf_events_lapic_init()
1458 ret = x86_pmu.handle_irq(regs); in perf_event_nmi_handler()
1481 if (x86_pmu.cpu_prepare) in x86_pmu_notifier()
1482 ret = x86_pmu.cpu_prepare(cpu); in x86_pmu_notifier()
1486 if (x86_pmu.cpu_starting) in x86_pmu_notifier()
1487 x86_pmu.cpu_starting(cpu); in x86_pmu_notifier()
1498 if (x86_pmu.cpu_dying) in x86_pmu_notifier()
1499 x86_pmu.cpu_dying(cpu); in x86_pmu_notifier()
1504 if (x86_pmu.cpu_dead) in x86_pmu_notifier()
1505 x86_pmu.cpu_dead(cpu); in x86_pmu_notifier()
1520 x86_pmu.apic = 0; in pmu_check_apic()
1555 if (x86_pmu.event_map(i)) in filter_events()
1597 u64 config = x86_pmu.event_map(pmu_attr->id); in events_sysfs_show()
1603 return x86_pmu.events_sysfs_show(page, config); in events_sysfs_show()
1705 pr_cont("%s PMU driver.\n", x86_pmu.name); in init_hw_perf_events()
1707 x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */ in init_hw_perf_events()
1709 for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next) in init_hw_perf_events()
1712 if (!x86_pmu.intel_ctrl) in init_hw_perf_events()
1713 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1; in init_hw_perf_events()
1719 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1, in init_hw_perf_events()
1720 0, x86_pmu.num_counters, 0, 0); in init_hw_perf_events()
1722 x86_pmu_format_group.attrs = x86_pmu.format_attrs; in init_hw_perf_events()
1724 if (x86_pmu.event_attrs) in init_hw_perf_events()
1725 x86_pmu_events_group.attrs = x86_pmu.event_attrs; in init_hw_perf_events()
1727 if (!x86_pmu.events_sysfs_show) in init_hw_perf_events()
1732 if (x86_pmu.cpu_events) { in init_hw_perf_events()
1735 tmp = merge_attr(x86_pmu_events_group.attrs, x86_pmu.cpu_events); in init_hw_perf_events()
1740 pr_info("... version: %d\n", x86_pmu.version); in init_hw_perf_events()
1741 pr_info("... bit width: %d\n", x86_pmu.cntval_bits); in init_hw_perf_events()
1742 pr_info("... generic registers: %d\n", x86_pmu.num_counters); in init_hw_perf_events()
1743 pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask); in init_hw_perf_events()
1744 pr_info("... max period: %016Lx\n", x86_pmu.max_period); in init_hw_perf_events()
1745 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed); in init_hw_perf_events()
1746 pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl); in init_hw_perf_events()
1834 ret = x86_pmu.schedule_events(cpuc, n, assign); in x86_pmu_commit_txn()
1872 if (x86_pmu.extra_regs) { in allocate_fake_cpuc()
1897 c = x86_pmu.get_event_constraints(fake_cpuc, -1, event); in validate_event()
1902 if (x86_pmu.put_event_constraints) in validate_event()
1903 x86_pmu.put_event_constraints(fake_cpuc, event); in validate_event()
1947 ret = x86_pmu.schedule_events(fake_cpuc, n, NULL); in validate_group()
1991 if (ACCESS_ONCE(x86_pmu.attr_rdpmc)) in x86_pmu_event_init()
2031 if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) { in x86_pmu_event_idx()
2043 return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc); in get_attr_rdpmc()
2060 if (x86_pmu.attr_rdpmc_broken) in set_attr_rdpmc()
2063 if ((val == 2) != (x86_pmu.attr_rdpmc == 2)) { in set_attr_rdpmc()
2076 x86_pmu.attr_rdpmc = val; in set_attr_rdpmc()
2101 if (x86_pmu.sched_task) in x86_pmu_sched_task()
2102 x86_pmu.sched_task(ctx, sched_in); in x86_pmu_sched_task()
2107 if (x86_pmu.check_microcode) in perf_check_microcode()
2108 x86_pmu.check_microcode(); in perf_check_microcode()
2147 userpg->pmc_width = x86_pmu.cntval_bits; in arch_perf_update_userpage()
2405 cap->version = x86_pmu.version; in perf_get_x86_pmu_capability()
2406 cap->num_counters_gp = x86_pmu.num_counters; in perf_get_x86_pmu_capability()
2407 cap->num_counters_fixed = x86_pmu.num_counters_fixed; in perf_get_x86_pmu_capability()
2408 cap->bit_width_gp = x86_pmu.cntval_bits; in perf_get_x86_pmu_capability()
2409 cap->bit_width_fixed = x86_pmu.cntval_bits; in perf_get_x86_pmu_capability()
2410 cap->events_mask = (unsigned int)x86_pmu.events_maskl; in perf_get_x86_pmu_capability()
2411 cap->events_mask_len = x86_pmu.events_mask_len; in perf_get_x86_pmu_capability()
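
The listing above shows the two ways this file consumes the global x86_pmu descriptor (defined at line 42): capability fields such as num_counters and cntval_bits are read directly, while optional callbacks (cpu_prepare, sched_task, put_event_constraints, and so on) are NULL-checked before every invocation; x86_pmu_initialized() at line 288 simply tests that the mandatory handle_irq hook was filled in. Below is a minimal, self-contained sketch of that dispatch pattern; the names (pmu_desc, demo_handle_irq, pmu_sched_task) are illustrative only and not kernel identifiers.

#include <stdio.h>

struct pmu_desc {
	const char *name;
	int num_counters;                    /* capability field, read directly */
	int (*handle_irq)(void *regs);       /* mandatory hook */
	void (*sched_task)(void *ctx, int sched_in); /* optional hook */
};

static int demo_handle_irq(void *regs)
{
	(void)regs;
	printf("irq handled\n");
	return 1;
}

/* One global descriptor, filled in by vendor-specific init code. */
static struct pmu_desc pmu = {
	.name = "demo",
	.num_counters = 4,
	.handle_irq = demo_handle_irq,
	/* .sched_task deliberately left NULL: the hook is optional */
};

/* Mirrors x86_pmu_initialized(): "initialized" == mandatory hook set. */
static int pmu_initialized(void)
{
	return pmu.handle_irq != NULL;
}

static void pmu_sched_task(void *ctx, int sched_in)
{
	/* Optional hooks are guarded, like the kernel's
	 * "if (x86_pmu.sched_task) x86_pmu.sched_task(ctx, sched_in);" */
	if (pmu.sched_task)
		pmu.sched_task(ctx, sched_in);
}

int main(void)
{
	if (pmu_initialized())
		pmu.handle_irq(NULL);
	pmu_sched_task(NULL, 1);   /* safe no-op: hook is NULL */
	return 0;
}

Leaving the optional hooks NULL, rather than pointing them at empty stubs, lets vendor init code leave most of the struct untouched, at the cost of one branch per call site.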
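
The period-programming entries (lines 1121-1147, in x86_perf_event_set_period()) merit one more note: the hardware counters count upward and interrupt on overflow, so after clamping the requested period to max_period (and optionally through the limit_period hook), the kernel writes the negated period truncated by cntval_mask; the overflow test at line 1410 checks the counter's top bit for the same reason. The following is a hedged arithmetic sketch under assumed values (48-bit counters and an illustrative max_period), not the kernel code itself.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int cntval_bits = 48;                      /* assumed counter width */
	uint64_t cntval_mask = (1ULL << cntval_bits) - 1;
	uint64_t max_period = (1ULL << 31) - 1;    /* illustrative clamp */

	int64_t left = 100000;                     /* events until overflow */
	if ((uint64_t)left > max_period)           /* clamp, as at line 1121 */
		left = (int64_t)max_period;

	/* What would be written to the counter MSR (cf. line 1137): */
	uint64_t programmed = (uint64_t)(-left) & cntval_mask;
	printf("programmed = %#llx\n", (unsigned long long)programmed);

	/* The counter overflows after exactly "left" increments: */
	uint64_t after = (programmed + (uint64_t)left) & cntval_mask;
	printf("after %lld events: %#llx (wrapped to 0)\n",
	       (long long)left, (unsigned long long)after);
	return 0;
}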