Lines Matching refs:event

64 u64 x86_perf_event_update(struct perf_event *event)  in x86_perf_event_update()  argument
66 struct hw_perf_event *hwc = &event->hw; in x86_perf_event_update()
101 local64_add(delta, &event->count); in x86_perf_event_update()
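
The local64_add(delta, &event->count) reference above is the tail of the counter read-back in x86_perf_event_update(). Below is a self-contained sketch of the wraparound-safe delta computation that feeds it, assuming a hypothetical cntval_bits counter width and raw values read elsewhere (the real code reads them via rdpmc and publishes prev_count with local64_cmpxchg):

#include <stdint.h>

/*
 * Sketch only: compute the signed difference between two raw PMC samples
 * in a way that survives the counter wrapping between the two reads.
 * cntval_bits is the hardware counter width (a stand-in for
 * x86_pmu.cntval_bits).
 */
static int64_t pmc_delta(uint64_t prev_raw, uint64_t new_raw, int cntval_bits)
{
	int shift = 64 - cntval_bits;
	int64_t delta;

	/*
	 * Shift both samples so the counter's top bit sits in bit 63, then
	 * subtract and arithmetic-shift back down; a counter that wrapped
	 * between the reads still yields the correct positive delta.
	 */
	delta = (int64_t)((new_raw << shift) - (prev_raw << shift));
	delta >>= shift;

	return delta;
}
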
110 static int x86_pmu_extra_regs(u64 config, struct perf_event *event) in x86_pmu_extra_regs() argument
115 reg = &event->hw.extra_reg; in x86_pmu_extra_regs()
121 if (er->event != (config & er->config_mask)) in x86_pmu_extra_regs()
123 if (event->attr.config1 & ~er->valid_mask) in x86_pmu_extra_regs()
130 reg->config = event->attr.config1; in x86_pmu_extra_regs()
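
The x86_pmu_extra_regs() references above walk a per-PMU table of extra-register descriptions, matching the event's config and validating attr.config1 before it is programmed into the extra MSR. A minimal sketch of that match, with a simplified descriptor type (the struct name and layout here are assumptions, not the kernel's struct extra_reg):

#include <stdint.h>

struct extra_reg_desc {
	uint64_t event;        /* event code this entry covers            */
	uint64_t config_mask;  /* bits of config compared against .event  */
	uint64_t valid_mask;   /* attr.config1 bits the extra MSR accepts */
};

/* Returns 0 on a valid match, -1 if config1 has reserved bits set,
 * 1 if no entry applies (the event needs no extra register). */
static int match_extra_reg(const struct extra_reg_desc *tbl, int n,
			   uint64_t config, uint64_t config1)
{
	for (int i = 0; i < n; i++) {
		if (tbl[i].event != (config & tbl[i].config_mask))
			continue;
		if (config1 & ~tbl[i].valid_mask)
			return -1;
		return 0;
	}
	return 1;
}
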
271 static void hw_perf_event_destroy(struct perf_event *event) in hw_perf_event_destroy() argument
276 void hw_perf_lbr_event_destroy(struct perf_event *event) in hw_perf_lbr_event_destroy() argument
278 hw_perf_event_destroy(event); in hw_perf_lbr_event_destroy()
290 set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event) in set_ext_hw_attr() argument
292 struct perf_event_attr *attr = &event->attr; in set_ext_hw_attr()
320 return x86_pmu_extra_regs(val, event); in set_ext_hw_attr()
382 int x86_setup_perfctr(struct perf_event *event) in x86_setup_perfctr() argument
384 struct perf_event_attr *attr = &event->attr; in x86_setup_perfctr()
385 struct hw_perf_event *hwc = &event->hw; in x86_setup_perfctr()
388 if (!is_sampling_event(event)) { in x86_setup_perfctr()
395 return x86_pmu_extra_regs(event->attr.config, event); in x86_setup_perfctr()
398 return set_ext_hw_attr(hwc, event); in x86_setup_perfctr()
431 event->destroy = hw_perf_lbr_event_destroy; in x86_setup_perfctr()
445 static inline int precise_br_compat(struct perf_event *event) in precise_br_compat() argument
447 u64 m = event->attr.branch_sample_type; in precise_br_compat()
456 if (!event->attr.exclude_user) in precise_br_compat()
459 if (!event->attr.exclude_kernel) in precise_br_compat()
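
precise_br_compat() decides whether a branch-sampling request can stand in for precise-IP sampling: the requested filter must capture all branch types and its privilege bits must match the event's exclude_user/exclude_kernel settings exactly. A hedged sketch with stand-in flag values (the real constants are PERF_SAMPLE_BRANCH_ANY/USER/KERNEL):

#include <stdint.h>
#include <stdbool.h>

#define BR_ANY    (1u << 0)   /* stand-in for PERF_SAMPLE_BRANCH_ANY    */
#define BR_USER   (1u << 1)   /* stand-in for PERF_SAMPLE_BRANCH_USER   */
#define BR_KERNEL (1u << 2)   /* stand-in for PERF_SAMPLE_BRANCH_KERNEL */

static bool precise_br_compatible(uint64_t branch_sample_type,
				  bool exclude_user, bool exclude_kernel)
{
	uint64_t m = branch_sample_type;
	uint64_t b = 0;

	if (!(m & BR_ANY))          /* must capture all branch types */
		return false;

	m &= BR_USER | BR_KERNEL;   /* only the privilege filter matters below */

	if (!exclude_user)
		b |= BR_USER;
	if (!exclude_kernel)
		b |= BR_KERNEL;

	/* the branch filter must match the event's own privilege filter */
	return m == b;
}
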
469 int x86_pmu_hw_config(struct perf_event *event) in x86_pmu_hw_config() argument
471 if (event->attr.precise_ip) { in x86_pmu_hw_config()
483 if (event->attr.precise_ip > precise) in x86_pmu_hw_config()
490 if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format < 2) { in x86_pmu_hw_config()
491 u64 *br_type = &event->attr.branch_sample_type; in x86_pmu_hw_config()
493 if (has_branch_stack(event)) { in x86_pmu_hw_config()
494 if (!precise_br_compat(event)) in x86_pmu_hw_config()
509 if (!event->attr.exclude_user) in x86_pmu_hw_config()
512 if (!event->attr.exclude_kernel) in x86_pmu_hw_config()
517 if (event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK) in x86_pmu_hw_config()
518 event->attach_state |= PERF_ATTACH_TASK_DATA; in x86_pmu_hw_config()
524 event->hw.config = ARCH_PERFMON_EVENTSEL_INT; in x86_pmu_hw_config()
529 if (!event->attr.exclude_user) in x86_pmu_hw_config()
530 event->hw.config |= ARCH_PERFMON_EVENTSEL_USR; in x86_pmu_hw_config()
531 if (!event->attr.exclude_kernel) in x86_pmu_hw_config()
532 event->hw.config |= ARCH_PERFMON_EVENTSEL_OS; in x86_pmu_hw_config()
534 if (event->attr.type == PERF_TYPE_RAW) in x86_pmu_hw_config()
535 event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK; in x86_pmu_hw_config()
537 if (event->attr.sample_period && x86_pmu.limit_period) { in x86_pmu_hw_config()
538 if (x86_pmu.limit_period(event, event->attr.sample_period) > in x86_pmu_hw_config()
539 event->attr.sample_period) in x86_pmu_hw_config()
543 return x86_setup_perfctr(event); in x86_pmu_hw_config()
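
Taken together, the x86_pmu_hw_config() lines above assemble the event-select value: always request an overflow interrupt, add the USR/OS bits according to the exclude flags, and merge the caller's raw encoding for PERF_TYPE_RAW events. A simplified sketch follows; the bit positions are the architectural EVENTSEL bits, but the raw mask is only a placeholder for X86_RAW_EVENT_MASK:

#include <stdint.h>

#define EVENTSEL_USR (1ull << 16)      /* count in user mode                */
#define EVENTSEL_OS  (1ull << 17)      /* count in kernel (ring-0) mode     */
#define EVENTSEL_INT (1ull << 20)      /* raise an interrupt on overflow    */
#define RAW_EVENT_MASK 0xffffffffull   /* placeholder, not X86_RAW_EVENT_MASK */

static uint64_t build_eventsel(uint64_t raw_config, int is_raw_type,
			       int exclude_user, int exclude_kernel)
{
	uint64_t config = EVENTSEL_INT;   /* always request overflow interrupts */

	if (!exclude_user)
		config |= EVENTSEL_USR;
	if (!exclude_kernel)
		config |= EVENTSEL_OS;

	/* for PERF_TYPE_RAW, merge the caller-supplied event encoding */
	if (is_raw_type)
		config |= raw_config & RAW_EVENT_MASK;

	return config;
}
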
549 static int __x86_pmu_event_init(struct perf_event *event) in __x86_pmu_event_init() argument
560 event->destroy = hw_perf_event_destroy; in __x86_pmu_event_init()
562 event->hw.idx = -1; in __x86_pmu_event_init()
563 event->hw.last_cpu = -1; in __x86_pmu_event_init()
564 event->hw.last_tag = ~0ULL; in __x86_pmu_event_init()
567 event->hw.extra_reg.idx = EXTRA_REG_NONE; in __x86_pmu_event_init()
568 event->hw.branch_reg.idx = EXTRA_REG_NONE; in __x86_pmu_event_init()
570 return x86_pmu.hw_config(event); in __x86_pmu_event_init()
625 static inline int is_x86_event(struct perf_event *event) in is_x86_event() argument
627 return event->pmu == &pmu; in is_x86_event()
639 int event; /* event index */ member
678 sched->state.event = idx; /* start with min weight */ in perf_sched_init()
718 if (sched->state.event >= sched->max_events) in __perf_sched_find_counter()
721 c = sched->constraints[sched->state.event]; in __perf_sched_find_counter()
776 sched->state.event++; in perf_sched_next_event()
777 if (sched->state.event >= sched->max_events) { in perf_sched_next_event()
779 sched->state.event = 0; in perf_sched_next_event()
784 c = sched->constraints[sched->state.event]; in perf_sched_next_event()
806 assign[sched.state.event] = sched.state.counter; in perf_assign_events()
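
The sched->state.event references above drive the counter scheduler, which walks events and tries each on a counter permitted by its constraint before recording the choice in assign[]. A greedy sketch of that core idea, using a plain bitmask per event as a simplified stand-in for struct event_constraint (the real scheduler also sorts by constraint weight and can restart):

#include <stdint.h>

/*
 * constraints[i] is a bitmask of the counters event i may use.
 * Returns 0 and fills assign[] on success, -1 if some event cannot
 * be placed on any free counter.
 */
static int assign_counters(const uint64_t *constraints, int n_events,
			   int n_counters, int *assign)
{
	uint64_t used = 0;

	for (int e = 0; e < n_events; e++) {
		int placed = -1;

		for (int c = 0; c < n_counters; c++) {
			if ((constraints[e] & (1ull << c)) && !(used & (1ull << c))) {
				placed = c;
				break;
			}
		}
		if (placed < 0)
			return -1;
		used |= 1ull << placed;
		assign[e] = placed;
	}
	return 0;
}
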
931 struct perf_event *event; in collect_events() local
948 list_for_each_entry(event, &leader->sibling_list, group_entry) { in collect_events()
949 if (!is_x86_event(event) || in collect_events()
950 event->state <= PERF_EVENT_STATE_OFF) in collect_events()
956 cpuc->event_list[n] = event; in collect_events()
962 static inline void x86_assign_hw_event(struct perf_event *event, in x86_assign_hw_event() argument
965 struct hw_perf_event *hwc = &event->hw; in x86_assign_hw_event()
994 static void x86_pmu_start(struct perf_event *event, int flags);
999 struct perf_event *event; in x86_pmu_enable() local
1018 event = cpuc->event_list[i]; in x86_pmu_enable()
1019 hwc = &event->hw; in x86_pmu_enable()
1038 x86_pmu_stop(event, PERF_EF_UPDATE); in x86_pmu_enable()
1045 event = cpuc->event_list[i]; in x86_pmu_enable()
1046 hwc = &event->hw; in x86_pmu_enable()
1049 x86_assign_hw_event(event, cpuc, i); in x86_pmu_enable()
1056 x86_pmu_start(event, PERF_EF_RELOAD); in x86_pmu_enable()
1074 int x86_perf_event_set_period(struct perf_event *event) in x86_perf_event_set_period() argument
1076 struct hw_perf_event *hwc = &event->hw; in x86_perf_event_set_period()
1110 left = x86_pmu.limit_period(event, left); in x86_perf_event_set_period()
1132 perf_event_update_userpage(event); in x86_perf_event_set_period()
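
x86_perf_event_set_period() arms the counter so it overflows after the remaining sample period, optionally clamped by limit_period. A simplified sketch of the value that ends up in the counter, assuming only the maximum-period clamp and ignoring the negative/zero period_left fix-ups the kernel also performs:

#include <stdint.h>

/*
 * Sketch: counters count upward and overflow at 2^cntval_bits, so the
 * register is loaded with the two's-complement of the remaining period,
 * truncated to the counter width (cntval_bits is a stand-in for
 * x86_pmu.cntval_bits; the actual write goes through wrmsrl).
 */
static uint64_t period_to_pmc(int64_t left, int64_t max_period, int cntval_bits)
{
	uint64_t cntval_mask = (1ull << cntval_bits) - 1;

	if (left > max_period)       /* hardware cannot count further than this */
		left = max_period;

	return (uint64_t)(-left) & cntval_mask;
}
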
1137 void x86_pmu_enable_event(struct perf_event *event) in x86_pmu_enable_event() argument
1140 __x86_pmu_enable_event(&event->hw, in x86_pmu_enable_event()
1150 static int x86_pmu_add(struct perf_event *event, int flags) in x86_pmu_add() argument
1157 hwc = &event->hw; in x86_pmu_add()
1160 ret = n = collect_events(cpuc, event, false); in x86_pmu_add()
1199 static void x86_pmu_start(struct perf_event *event, int flags) in x86_pmu_start() argument
1202 int idx = event->hw.idx; in x86_pmu_start()
1204 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) in x86_pmu_start()
1211 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); in x86_pmu_start()
1212 x86_perf_event_set_period(event); in x86_pmu_start()
1215 event->hw.state = 0; in x86_pmu_start()
1217 cpuc->events[idx] = event; in x86_pmu_start()
1220 x86_pmu.enable(event); in x86_pmu_start()
1221 perf_event_update_userpage(event); in x86_pmu_start()
1284 void x86_pmu_stop(struct perf_event *event, int flags) in x86_pmu_stop() argument
1287 struct hw_perf_event *hwc = &event->hw; in x86_pmu_stop()
1290 x86_pmu.disable(event); in x86_pmu_stop()
1301 x86_perf_event_update(event); in x86_pmu_stop()
1306 static void x86_pmu_del(struct perf_event *event, int flags) in x86_pmu_del() argument
1314 event->hw.flags &= ~PERF_X86_EVENT_COMMITTED; in x86_pmu_del()
1330 x86_pmu_stop(event, PERF_EF_UPDATE); in x86_pmu_del()
1333 if (event == cpuc->event_list[i]) in x86_pmu_del()
1345 x86_pmu.put_event_constraints(cpuc, event); in x86_pmu_del()
1354 perf_event_update_userpage(event); in x86_pmu_del()
1361 struct perf_event *event; in x86_pmu_handle_irq() local
1389 event = cpuc->events[idx]; in x86_pmu_handle_irq()
1391 val = x86_perf_event_update(event); in x86_pmu_handle_irq()
1399 perf_sample_data_init(&data, 0, event->hw.last_period); in x86_pmu_handle_irq()
1401 if (!x86_perf_event_set_period(event)) in x86_pmu_handle_irq()
1404 if (perf_event_overflow(event, &data, regs)) in x86_pmu_handle_irq()
1405 x86_pmu_stop(event, 0); in x86_pmu_handle_irq()
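
The x86_pmu_handle_irq() lines above form the per-counter overflow loop: read back each active event, re-arm its period, and hand a sample to the generic overflow path, stopping the event if that path requests throttling. A structural sketch with stand-in callbacks for the kernel helpers referenced above:

#include <stdbool.h>

struct ovf_event;                       /* opaque stand-in for perf_event */

struct ovf_ops {
	bool (*overflowed)(struct ovf_event *ev);   /* did this counter wrap?   */
	int  (*set_period)(struct ovf_event *ev);   /* re-arm, 0 = stop counting */
	int  (*deliver)(struct ovf_event *ev);      /* nonzero = throttle        */
	void (*stop)(struct ovf_event *ev);
};

static int handle_overflow_irq(struct ovf_event **active, int n,
			       const struct ovf_ops *ops)
{
	int handled = 0;

	for (int idx = 0; idx < n; idx++) {
		struct ovf_event *ev = active[idx];

		if (!ev || !ops->overflowed(ev))
			continue;
		handled++;

		if (!ops->set_period(ev))    /* nothing left to count */
			continue;
		if (ops->deliver(ev))        /* generic code asked to throttle */
			ops->stop(ev);
	}
	return handled;
}
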
1616 ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event) in x86_event_sysfs_show() argument
1630 ret = sprintf(page, "event=0x%02llx", event); in x86_event_sysfs_show()
1733 static inline void x86_pmu_read(struct perf_event *event) in x86_pmu_read() argument
1735 x86_perf_event_update(event); in x86_pmu_read()
1838 static int validate_event(struct perf_event *event) in validate_event() argument
1848 c = x86_pmu.get_event_constraints(fake_cpuc, -1, event); in validate_event()
1854 x86_pmu.put_event_constraints(fake_cpuc, event); in validate_event()
1872 static int validate_group(struct perf_event *event) in validate_group() argument
1874 struct perf_event *leader = event->group_leader; in validate_group()
1892 n = collect_events(fake_cpuc, event, false); in validate_group()
1905 static int x86_pmu_event_init(struct perf_event *event) in x86_pmu_event_init() argument
1910 switch (event->attr.type) { in x86_pmu_event_init()
1920 err = __x86_pmu_event_init(event); in x86_pmu_event_init()
1927 tmp = event->pmu; in x86_pmu_event_init()
1928 event->pmu = &pmu; in x86_pmu_event_init()
1930 if (event->group_leader != event) in x86_pmu_event_init()
1931 err = validate_group(event); in x86_pmu_event_init()
1933 err = validate_event(event); in x86_pmu_event_init()
1935 event->pmu = tmp; in x86_pmu_event_init()
1938 if (event->destroy) in x86_pmu_event_init()
1939 event->destroy(event); in x86_pmu_event_init()
1943 event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED; in x86_pmu_event_init()
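
x86_pmu_event_init() finishes with a validation pass: after hardware setup the event temporarily adopts this PMU, is checked alone or together with its group siblings against a fake cpuc, and its destroy callback undoes the setup if validation fails. A control-flow sketch with hypothetical stand-in callbacks:

struct vevent;                        /* opaque stand-in for perf_event */

struct init_ops {
	int  (*hw_init)(struct vevent *ev);
	int  (*validate_event)(struct vevent *ev);
	int  (*validate_group)(struct vevent *ev);
	int  (*is_group_leader)(struct vevent *ev);
	void (*destroy)(struct vevent *ev);
};

static int pmu_event_init_sketch(struct vevent *ev, const struct init_ops *ops)
{
	int err = ops->hw_init(ev);

	if (err)
		return err;

	/* a lone leader is validated by itself; members with their group */
	err = ops->is_group_leader(ev) ? ops->validate_event(ev)
				       : ops->validate_group(ev);
	if (err && ops->destroy)
		ops->destroy(ev);      /* undo hw_init side effects */

	return err;
}
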
1954 static void x86_pmu_event_mapped(struct perf_event *event) in x86_pmu_event_mapped() argument
1956 if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) in x86_pmu_event_mapped()
1963 static void x86_pmu_event_unmapped(struct perf_event *event) in x86_pmu_event_unmapped() argument
1968 if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) in x86_pmu_event_unmapped()
1975 static int x86_pmu_event_idx(struct perf_event *event) in x86_pmu_event_idx() argument
1977 int idx = event->hw.idx; in x86_pmu_event_idx()
1979 if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) in x86_pmu_event_idx()
2089 void arch_perf_update_userpage(struct perf_event *event, in arch_perf_update_userpage() argument
2097 !!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED); in arch_perf_update_userpage()
2118 if (event->clock == &local_clock) { in arch_perf_update_userpage()
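
The closing references update the mmap'ed user page: cap_user_rdpmc mirrors the per-event PERF_X86_EVENT_RDPMC_ALLOWED flag, and time-conversion data is exported only when the event uses the local clock. A tiny sketch of the capability part, with a cut-down stand-in for struct perf_event_mmap_page:

#include <stdbool.h>
#include <stdint.h>

struct user_page_sketch {
	uint16_t cap_user_rdpmc;   /* user space may read the counter via rdpmc */
	uint16_t pmc_width;        /* valid bits in the raw counter value       */
};

static void fill_user_page(struct user_page_sketch *pg,
			   bool rdpmc_allowed, int cntval_bits)
{
	pg->cap_user_rdpmc = rdpmc_allowed ? 1 : 0;  /* mirrors the flag test above */
	pg->pmc_width = (uint16_t)cntval_bits;       /* counter width for sign extension */
}
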