Lines matching refs: event

64 u64 x86_perf_event_update(struct perf_event *event)  in x86_perf_event_update()  argument
66 struct hw_perf_event *hwc = &event->hw; in x86_perf_event_update()
101 local64_add(delta, &event->count); in x86_perf_event_update()
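The lines above come from the counter read-out path. As a rough, self-contained illustration of the wrap-safe delta arithmetic that feeds local64_add(delta, &event->count), the sketch below assumes a hypothetical 48-bit counter width (standing in for the driver's cntval_bits) and skips the local64_cmpxchg() retry loop the real function uses:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mimics the shift/sign-extend trick used to turn two
 * raw reads of an N-bit hardware counter into a 64-bit delta.  The 48-bit
 * width is an assumption, not the value any particular PMU reports. */
#define CNTVAL_BITS 48
#define SHIFT (64 - CNTVAL_BITS)

static int64_t counter_delta(uint64_t prev_raw, uint64_t curr_raw)
{
    /* Left-align both raw values, subtract, then shift back down with an
     * arithmetic shift: a counter that wrapped at most once still yields
     * the correct positive delta. */
    int64_t delta = (int64_t)(curr_raw << SHIFT) - (int64_t)(prev_raw << SHIFT);
    return delta >> SHIFT;
}

int main(void)
{
    uint64_t prev = (1ULL << CNTVAL_BITS) - 10;  /* just below the wrap point */
    uint64_t curr = 5;                           /* counter wrapped around */

    printf("delta = %lld\n", (long long)counter_delta(prev, curr));  /* 15 */
    return 0;
}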
110 static int x86_pmu_extra_regs(u64 config, struct perf_event *event) in x86_pmu_extra_regs() argument
115 reg = &event->hw.extra_reg; in x86_pmu_extra_regs()
121 if (er->event != (config & er->config_mask)) in x86_pmu_extra_regs()
123 if (event->attr.config1 & ~er->valid_mask) in x86_pmu_extra_regs()
130 reg->config = event->attr.config1; in x86_pmu_extra_regs()
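x86_pmu_extra_regs() walks a per-PMU table and attaches an auxiliary MSR when the raw event code needs one, rejecting config1 values with reserved bits set. A minimal userspace imitation of that table walk is sketched below; the entry layout and the single row in it are made up for illustration and are not the real extra_regs tables:

#include <stdint.h>
#include <stdio.h>

struct extra_reg_entry {
    uint64_t event;        /* event code this extra MSR applies to */
    uint64_t config_mask;  /* bits of config that select the event */
    uint64_t valid_mask;   /* bits of config1 the MSR accepts */
    int      idx;          /* which extra register */
};

static const struct extra_reg_entry table[] = {
    { 0x01b7, 0xffffULL, 0xffffffffULL, 0 },   /* made-up example row */
    { 0 }                                      /* end marker */
};

static int pick_extra_reg(uint64_t config, uint64_t config1, int *idx_out)
{
    const struct extra_reg_entry *er;

    for (er = table; er->event; er++) {
        if (er->event != (config & er->config_mask))
            continue;
        if (config1 & ~er->valid_mask)
            return -1;          /* reserved bits set: reject the event */
        *idx_out = er->idx;
        return 0;
    }
    return 0;                   /* no extra register needed */
}

int main(void)
{
    int idx = -1;

    if (pick_extra_reg(0x01b7, 0x1, &idx) == 0)
        printf("extra reg idx = %d\n", idx);
    return 0;
}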
272 static void hw_perf_event_destroy(struct perf_event *event) in hw_perf_event_destroy() argument
278 void hw_perf_lbr_event_destroy(struct perf_event *event) in hw_perf_lbr_event_destroy() argument
280 hw_perf_event_destroy(event); in hw_perf_lbr_event_destroy()
292 set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event) in set_ext_hw_attr() argument
294 struct perf_event_attr *attr = &event->attr; in set_ext_hw_attr()
322 return x86_pmu_extra_regs(val, event); in set_ext_hw_attr()
386 int x86_setup_perfctr(struct perf_event *event) in x86_setup_perfctr() argument
388 struct perf_event_attr *attr = &event->attr; in x86_setup_perfctr()
389 struct hw_perf_event *hwc = &event->hw; in x86_setup_perfctr()
392 if (!is_sampling_event(event)) { in x86_setup_perfctr()
399 return x86_pmu_extra_regs(event->attr.config, event); in x86_setup_perfctr()
402 return set_ext_hw_attr(hwc, event); in x86_setup_perfctr()
435 event->destroy = hw_perf_lbr_event_destroy; in x86_setup_perfctr()
449 static inline int precise_br_compat(struct perf_event *event) in precise_br_compat() argument
451 u64 m = event->attr.branch_sample_type; in precise_br_compat()
460 if (!event->attr.exclude_user) in precise_br_compat()
463 if (!event->attr.exclude_kernel) in precise_br_compat()
473 int x86_pmu_hw_config(struct perf_event *event) in x86_pmu_hw_config() argument
475 if (event->attr.precise_ip) { in x86_pmu_hw_config()
487 if (event->attr.precise_ip > precise) in x86_pmu_hw_config()
494 if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format < 2) { in x86_pmu_hw_config()
495 u64 *br_type = &event->attr.branch_sample_type; in x86_pmu_hw_config()
497 if (has_branch_stack(event)) { in x86_pmu_hw_config()
498 if (!precise_br_compat(event)) in x86_pmu_hw_config()
513 if (!event->attr.exclude_user) in x86_pmu_hw_config()
516 if (!event->attr.exclude_kernel) in x86_pmu_hw_config()
521 if (event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK) in x86_pmu_hw_config()
522 event->attach_state |= PERF_ATTACH_TASK_DATA; in x86_pmu_hw_config()
528 event->hw.config = ARCH_PERFMON_EVENTSEL_INT; in x86_pmu_hw_config()
533 if (!event->attr.exclude_user) in x86_pmu_hw_config()
534 event->hw.config |= ARCH_PERFMON_EVENTSEL_USR; in x86_pmu_hw_config()
535 if (!event->attr.exclude_kernel) in x86_pmu_hw_config()
536 event->hw.config |= ARCH_PERFMON_EVENTSEL_OS; in x86_pmu_hw_config()
538 if (event->attr.type == PERF_TYPE_RAW) in x86_pmu_hw_config()
539 event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK; in x86_pmu_hw_config()
541 if (event->attr.sample_period && x86_pmu.limit_period) { in x86_pmu_hw_config()
542 if (x86_pmu.limit_period(event, event->attr.sample_period) > in x86_pmu_hw_config()
543 event->attr.sample_period) in x86_pmu_hw_config()
547 return x86_setup_perfctr(event); in x86_pmu_hw_config()
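The x86_pmu_hw_config() lines show how the base ARCH_PERFMON_EVENTSEL bits are accumulated from the attr->exclude_* flags before the raw config is merged in. A stripped-down stand-in is sketched below: the attr struct is fake, but the bit positions match the architectural EVENTSEL definitions:

#include <stdint.h>
#include <stdio.h>

#define EVENTSEL_USR  (1ULL << 16)
#define EVENTSEL_OS   (1ULL << 17)
#define EVENTSEL_INT  (1ULL << 20)

struct fake_attr {
    int exclude_user;
    int exclude_kernel;
};

static uint64_t base_config(const struct fake_attr *attr)
{
    uint64_t config = EVENTSEL_INT;      /* always request a PMI on overflow */

    if (!attr->exclude_user)
        config |= EVENTSEL_USR;          /* count in ring 3 */
    if (!attr->exclude_kernel)
        config |= EVENTSEL_OS;           /* count in ring 0 */
    return config;
}

int main(void)
{
    struct fake_attr attr = { .exclude_user = 0, .exclude_kernel = 1 };

    printf("config = 0x%llx\n", (unsigned long long)base_config(&attr));
    return 0;
}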
553 static int __x86_pmu_event_init(struct perf_event *event) in __x86_pmu_event_init() argument
565 event->destroy = hw_perf_event_destroy; in __x86_pmu_event_init()
567 event->hw.idx = -1; in __x86_pmu_event_init()
568 event->hw.last_cpu = -1; in __x86_pmu_event_init()
569 event->hw.last_tag = ~0ULL; in __x86_pmu_event_init()
572 event->hw.extra_reg.idx = EXTRA_REG_NONE; in __x86_pmu_event_init()
573 event->hw.branch_reg.idx = EXTRA_REG_NONE; in __x86_pmu_event_init()
575 return x86_pmu.hw_config(event); in __x86_pmu_event_init()
643 static inline int is_x86_event(struct perf_event *event) in is_x86_event() argument
645 return event->pmu == &pmu; in is_x86_event()
657 int event; /* event index */ member
696 sched->state.event = idx; /* start with min weight */ in perf_sched_init()
736 if (sched->state.event >= sched->max_events) in __perf_sched_find_counter()
739 c = sched->constraints[sched->state.event]; in __perf_sched_find_counter()
794 sched->state.event++; in perf_sched_next_event()
795 if (sched->state.event >= sched->max_events) { in perf_sched_next_event()
797 sched->state.event = 0; in perf_sched_next_event()
802 c = sched->constraints[sched->state.event]; in perf_sched_next_event()
824 assign[sched.state.event] = sched.state.counter; in perf_assign_events()
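perf_assign_events() drives a small state machine (struct perf_sched) that maps events onto counters under their constraint masks. The sketch below captures only the greedy core, visiting events in a fixed order and taking the first free counter each constraint allows; the real scheduler also orders events by constraint weight and backtracks on failure, which is omitted here:

#include <stdint.h>
#include <stdio.h>

#define NUM_COUNTERS 4

static int assign_events(const uint64_t *cmask, int nr_events, int *assign)
{
    uint64_t used = 0;
    int e, c;

    for (e = 0; e < nr_events; e++) {
        assign[e] = -1;
        for (c = 0; c < NUM_COUNTERS; c++) {
            if ((cmask[e] & (1ULL << c)) && !(used & (1ULL << c))) {
                used |= 1ULL << c;
                assign[e] = c;
                break;
            }
        }
        if (assign[e] < 0)
            return -1;          /* no counter available: schedule fails */
    }
    return 0;
}

int main(void)
{
    uint64_t cmask[] = { 0x1, 0xf };    /* event 0 is pinned to counter 0 */
    int assign[2];

    if (!assign_events(cmask, 2, assign))
        printf("event0 -> %d, event1 -> %d\n", assign[0], assign[1]);
    return 0;
}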
946 struct perf_event *event; in collect_events() local
963 list_for_each_entry(event, &leader->sibling_list, group_entry) { in collect_events()
964 if (!is_x86_event(event) || in collect_events()
965 event->state <= PERF_EVENT_STATE_OFF) in collect_events()
971 cpuc->event_list[n] = event; in collect_events()
977 static inline void x86_assign_hw_event(struct perf_event *event, in x86_assign_hw_event() argument
980 struct hw_perf_event *hwc = &event->hw; in x86_assign_hw_event()
1009 static void x86_pmu_start(struct perf_event *event, int flags);
1014 struct perf_event *event; in x86_pmu_enable() local
1033 event = cpuc->event_list[i]; in x86_pmu_enable()
1034 hwc = &event->hw; in x86_pmu_enable()
1053 x86_pmu_stop(event, PERF_EF_UPDATE); in x86_pmu_enable()
1060 event = cpuc->event_list[i]; in x86_pmu_enable()
1061 hwc = &event->hw; in x86_pmu_enable()
1064 x86_assign_hw_event(event, cpuc, i); in x86_pmu_enable()
1071 x86_pmu_start(event, PERF_EF_RELOAD); in x86_pmu_enable()
1089 int x86_perf_event_set_period(struct perf_event *event) in x86_perf_event_set_period() argument
1091 struct hw_perf_event *hwc = &event->hw; in x86_perf_event_set_period()
1125 left = x86_pmu.limit_period(event, left); in x86_perf_event_set_period()
1150 perf_event_update_userpage(event); in x86_perf_event_set_period()
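x86_perf_event_set_period() arms the counter so that it overflows after the remaining sample period. The core arithmetic, programming the two's complement of the remaining count masked to the counter width, can be shown in isolation; the 48-bit mask below is an assumed stand-in for the driver's cntval_mask:

#include <stdint.h>
#include <stdio.h>

#define CNTVAL_MASK ((1ULL << 48) - 1)

static uint64_t program_value(int64_t left)
{
    /* The counter counts up, so writing -left makes it overflow (and raise
     * a PMI) after exactly "left" increments. */
    return (uint64_t)(-left) & CNTVAL_MASK;
}

int main(void)
{
    int64_t left = 100000;              /* events until the next sample */

    printf("write 0x%llx to the counter\n",
           (unsigned long long)program_value(left));
    return 0;
}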
1155 void x86_pmu_enable_event(struct perf_event *event) in x86_pmu_enable_event() argument
1158 __x86_pmu_enable_event(&event->hw, in x86_pmu_enable_event()
1168 static int x86_pmu_add(struct perf_event *event, int flags) in x86_pmu_add() argument
1175 hwc = &event->hw; in x86_pmu_add()
1178 ret = n = collect_events(cpuc, event, false); in x86_pmu_add()
1217 static void x86_pmu_start(struct perf_event *event, int flags) in x86_pmu_start() argument
1220 int idx = event->hw.idx; in x86_pmu_start()
1222 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) in x86_pmu_start()
1229 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); in x86_pmu_start()
1230 x86_perf_event_set_period(event); in x86_pmu_start()
1233 event->hw.state = 0; in x86_pmu_start()
1235 cpuc->events[idx] = event; in x86_pmu_start()
1238 x86_pmu.enable(event); in x86_pmu_start()
1239 perf_event_update_userpage(event); in x86_pmu_start()
1302 void x86_pmu_stop(struct perf_event *event, int flags) in x86_pmu_stop() argument
1305 struct hw_perf_event *hwc = &event->hw; in x86_pmu_stop()
1308 x86_pmu.disable(event); in x86_pmu_stop()
1319 x86_perf_event_update(event); in x86_pmu_stop()
1324 static void x86_pmu_del(struct perf_event *event, int flags) in x86_pmu_del() argument
1332 event->hw.flags &= ~PERF_X86_EVENT_COMMITTED; in x86_pmu_del()
1348 x86_pmu_stop(event, PERF_EF_UPDATE); in x86_pmu_del()
1351 if (event == cpuc->event_list[i]) in x86_pmu_del()
1363 x86_pmu.put_event_constraints(cpuc, event); in x86_pmu_del()
1372 perf_event_update_userpage(event); in x86_pmu_del()
1379 struct perf_event *event; in x86_pmu_handle_irq() local
1407 event = cpuc->events[idx]; in x86_pmu_handle_irq()
1409 val = x86_perf_event_update(event); in x86_pmu_handle_irq()
1417 perf_sample_data_init(&data, 0, event->hw.last_period); in x86_pmu_handle_irq()
1419 if (!x86_perf_event_set_period(event)) in x86_pmu_handle_irq()
1422 if (perf_event_overflow(event, &data, regs)) in x86_pmu_handle_irq()
1423 x86_pmu_stop(event, 0); in x86_pmu_handle_irq()
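In the PMI handler the freshly updated counter value is used to decide whether a given counter actually overflowed. Because set_period programmed -left, the raw value keeps its top counter bit set until the counter wraps; the helper below reproduces that test under an assumed 48-bit counter width:

#include <stdint.h>
#include <stdio.h>

#define CNTVAL_BITS 48

static int counter_overflowed(uint64_t raw)
{
    /* Top bit still set: the counter is still climbing toward overflow.
     * Top bit clear: it wrapped, so this event needs a sample. */
    return !(raw & (1ULL << (CNTVAL_BITS - 1)));
}

int main(void)
{
    printf("%d\n", counter_overflowed(0x800000000123ULL)); /* 0: still counting */
    printf("%d\n", counter_overflowed(0x000000000042ULL)); /* 1: overflowed */
    return 0;
}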
1638 ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event) in x86_event_sysfs_show() argument
1652 ret = sprintf(page, "event=0x%02llx", event); in x86_event_sysfs_show()
1755 static inline void x86_pmu_read(struct perf_event *event) in x86_pmu_read() argument
1757 x86_perf_event_update(event); in x86_pmu_read()
1887 static int validate_event(struct perf_event *event) in validate_event() argument
1897 c = x86_pmu.get_event_constraints(fake_cpuc, -1, event); in validate_event()
1903 x86_pmu.put_event_constraints(fake_cpuc, event); in validate_event()
1921 static int validate_group(struct perf_event *event) in validate_group() argument
1923 struct perf_event *leader = event->group_leader; in validate_group()
1941 n = collect_events(fake_cpuc, event, false); in validate_group()
1954 static int x86_pmu_event_init(struct perf_event *event) in x86_pmu_event_init() argument
1959 switch (event->attr.type) { in x86_pmu_event_init()
1969 err = __x86_pmu_event_init(event); in x86_pmu_event_init()
1976 tmp = event->pmu; in x86_pmu_event_init()
1977 event->pmu = &pmu; in x86_pmu_event_init()
1979 if (event->group_leader != event) in x86_pmu_event_init()
1980 err = validate_group(event); in x86_pmu_event_init()
1982 err = validate_event(event); in x86_pmu_event_init()
1984 event->pmu = tmp; in x86_pmu_event_init()
1987 if (event->destroy) in x86_pmu_event_init()
1988 event->destroy(event); in x86_pmu_event_init()
1992 event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED; in x86_pmu_event_init()
2003 static void x86_pmu_event_mapped(struct perf_event *event) in x86_pmu_event_mapped() argument
2005 if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) in x86_pmu_event_mapped()
2012 static void x86_pmu_event_unmapped(struct perf_event *event) in x86_pmu_event_unmapped() argument
2017 if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) in x86_pmu_event_unmapped()
2024 static int x86_pmu_event_idx(struct perf_event *event) in x86_pmu_event_idx() argument
2026 int idx = event->hw.idx; in x86_pmu_event_idx()
2028 if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) in x86_pmu_event_idx()
2138 void arch_perf_update_userpage(struct perf_event *event, in arch_perf_update_userpage() argument
2146 !!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED); in arch_perf_update_userpage()
2167 if (event->clock == &local_clock) { in arch_perf_update_userpage()
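arch_perf_update_userpage() is what makes the mmap'ed perf_event_mmap_page usable for self-monitoring with RDPMC once PERF_X86_EVENT_RDPMC_ALLOWED is set. For context, a hedged sketch of the matching userspace read loop (the pattern documented in include/uapi/linux/perf_event.h) is shown below; it assumes the page has already been mmap'ed for an event on the current thread, and it omits error handling, time scaling, and the pmc_width sign-extension:

#include <stdint.h>
#include <linux/perf_event.h>

static inline uint64_t rdpmc_raw(uint32_t counter)
{
    uint32_t lo, hi;

    __asm__ volatile("rdpmc" : "=a"(lo), "=d"(hi) : "c"(counter));
    return ((uint64_t)hi << 32) | lo;
}

static uint64_t read_self_count(volatile struct perf_event_mmap_page *pc)
{
    uint64_t count;
    uint32_t seq, idx;

    do {
        seq = pc->lock;                 /* seqlock-style retry loop */
        __sync_synchronize();

        idx   = pc->index;              /* hardware counter index + 1 */
        count = pc->offset;             /* kernel-maintained base value */
        if (pc->cap_user_rdpmc && idx)
            count += rdpmc_raw(idx - 1);

        __sync_synchronize();
    } while (pc->lock != seq);

    return count;
}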