Lines matching refs: event

36 	struct perf_event *event[MAX_HWEVENTS];  member
116 static bool is_ebb_event(struct perf_event *event) { return false; } in is_ebb_event() argument
117 static int ebb_event_check(struct perf_event *event) { return 0; } in ebb_event_check() argument
118 static void ebb_event_add(struct perf_event *event) { } in ebb_event_add() argument
125 static inline void power_pmu_bhrb_enable(struct perf_event *event) {} in power_pmu_bhrb_enable() argument
126 static inline void power_pmu_bhrb_disable(struct perf_event *event) {} in power_pmu_bhrb_disable() argument
349 static void power_pmu_bhrb_enable(struct perf_event *event) in power_pmu_bhrb_enable() argument
357 if (event->ctx->task && cpuhw->bhrb_context != event->ctx) { in power_pmu_bhrb_enable()
359 cpuhw->bhrb_context = event->ctx; in power_pmu_bhrb_enable()
362 perf_sched_cb_inc(event->ctx->pmu); in power_pmu_bhrb_enable()
365 static void power_pmu_bhrb_disable(struct perf_event *event) in power_pmu_bhrb_disable() argument
374 perf_sched_cb_dec(event->ctx->pmu); in power_pmu_bhrb_disable()
500 static bool is_ebb_event(struct perf_event *event) in is_ebb_event() argument
508 ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1); in is_ebb_event()
511 static int ebb_event_check(struct perf_event *event) in ebb_event_check() argument
513 struct perf_event *leader = event->group_leader; in ebb_event_check()
516 if (is_ebb_event(leader) != is_ebb_event(event)) in ebb_event_check()
519 if (is_ebb_event(event)) { in ebb_event_check()
520 if (!(event->attach_state & PERF_ATTACH_TASK)) in ebb_event_check()
526 if (event->attr.freq || in ebb_event_check()
527 event->attr.inherit || in ebb_event_check()
528 event->attr.sample_type || in ebb_event_check()
529 event->attr.sample_period || in ebb_event_check()
530 event->attr.enable_on_exec) in ebb_event_check()
537 static void ebb_event_add(struct perf_event *event) in ebb_event_add() argument
539 if (!is_ebb_event(event) || current->thread.used_ebb) in ebb_event_add()
945 struct perf_event *event; in check_excludes() local
965 event = ctrs[i]; in check_excludes()
967 eu = event->attr.exclude_user; in check_excludes()
968 ek = event->attr.exclude_kernel; in check_excludes()
969 eh = event->attr.exclude_hv; in check_excludes()
971 } else if (event->attr.exclude_user != eu || in check_excludes()
972 event->attr.exclude_kernel != ek || in check_excludes()
973 event->attr.exclude_hv != eh) { in check_excludes()
1005 static void power_pmu_read(struct perf_event *event) in power_pmu_read() argument
1009 if (event->hw.state & PERF_HES_STOPPED) in power_pmu_read()
1012 if (!event->hw.idx) in power_pmu_read()
1015 if (is_ebb_event(event)) { in power_pmu_read()
1016 val = read_pmc(event->hw.idx); in power_pmu_read()
1017 local64_set(&event->hw.prev_count, val); in power_pmu_read()
1027 prev = local64_read(&event->hw.prev_count); in power_pmu_read()
1029 val = read_pmc(event->hw.idx); in power_pmu_read()
1033 } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); in power_pmu_read()
1035 local64_add(delta, &event->count); in power_pmu_read()
1047 prev = local64_read(&event->hw.period_left); in power_pmu_read()
1051 } while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev); in power_pmu_read()
1068 struct perf_event *event; in freeze_limited_counters() local
1073 event = cpuhw->limited_counter[i]; in freeze_limited_counters()
1074 if (!event->hw.idx) in freeze_limited_counters()
1076 val = (event->hw.idx == 5) ? pmc5 : pmc6; in freeze_limited_counters()
1077 prev = local64_read(&event->hw.prev_count); in freeze_limited_counters()
1078 event->hw.idx = 0; in freeze_limited_counters()
1081 local64_add(delta, &event->count); in freeze_limited_counters()
1088 struct perf_event *event; in thaw_limited_counters() local
1093 event = cpuhw->limited_counter[i]; in thaw_limited_counters()
1094 event->hw.idx = cpuhw->limited_hwidx[i]; in thaw_limited_counters()
1095 val = (event->hw.idx == 5) ? pmc5 : pmc6; in thaw_limited_counters()
1096 prev = local64_read(&event->hw.prev_count); in thaw_limited_counters()
1098 local64_set(&event->hw.prev_count, val); in thaw_limited_counters()
1099 perf_event_update_userpage(event); in thaw_limited_counters()
1213 struct perf_event *event; in power_pmu_enable() local
1244 ebb = is_ebb_event(cpuhw->event[0]); in power_pmu_enable()
1264 cpuhw->mmcr, cpuhw->event)) { in power_pmu_enable()
1276 event = cpuhw->event[0]; in power_pmu_enable()
1277 if (event->attr.exclude_user) in power_pmu_enable()
1279 if (event->attr.exclude_kernel) in power_pmu_enable()
1281 if (event->attr.exclude_hv) in power_pmu_enable()
1303 event = cpuhw->event[i]; in power_pmu_enable()
1304 if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) { in power_pmu_enable()
1305 power_pmu_read(event); in power_pmu_enable()
1306 write_pmc(event->hw.idx, 0); in power_pmu_enable()
1307 event->hw.idx = 0; in power_pmu_enable()
1316 event = cpuhw->event[i]; in power_pmu_enable()
1317 if (event->hw.idx) in power_pmu_enable()
1321 cpuhw->limited_counter[n_lim] = event; in power_pmu_enable()
1328 val = local64_read(&event->hw.prev_count); in power_pmu_enable()
1331 if (event->hw.sample_period) { in power_pmu_enable()
1332 left = local64_read(&event->hw.period_left); in power_pmu_enable()
1336 local64_set(&event->hw.prev_count, val); in power_pmu_enable()
1339 event->hw.idx = idx; in power_pmu_enable()
1340 if (event->hw.state & PERF_HES_STOPPED) in power_pmu_enable()
1344 perf_event_update_userpage(event); in power_pmu_enable()
1378 struct perf_event *event; in collect_events() local
1387 list_for_each_entry(event, &group->sibling_list, group_entry) { in collect_events()
1388 if (!is_software_event(event) && in collect_events()
1389 event->state != PERF_EVENT_STATE_OFF) { in collect_events()
1392 ctrs[n] = event; in collect_events()
1393 flags[n] = event->hw.event_base; in collect_events()
1394 events[n++] = event->hw.config; in collect_events()
1406 static int power_pmu_add(struct perf_event *event, int ef_flags) in power_pmu_add() argument
1414 perf_pmu_disable(event->pmu); in power_pmu_add()
1424 cpuhw->event[n0] = event; in power_pmu_add()
1425 cpuhw->events[n0] = event->hw.config; in power_pmu_add()
1426 cpuhw->flags[n0] = event->hw.event_base; in power_pmu_add()
1435 event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE; in power_pmu_add()
1437 event->hw.state = 0; in power_pmu_add()
1447 if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1)) in power_pmu_add()
1451 event->hw.config = cpuhw->events[n0]; in power_pmu_add()
1454 ebb_event_add(event); in power_pmu_add()
1461 if (has_branch_stack(event)) { in power_pmu_add()
1462 power_pmu_bhrb_enable(event); in power_pmu_add()
1464 event->attr.branch_sample_type); in power_pmu_add()
1467 perf_pmu_enable(event->pmu); in power_pmu_add()
1475 static void power_pmu_del(struct perf_event *event, int ef_flags) in power_pmu_del() argument
1482 perf_pmu_disable(event->pmu); in power_pmu_del()
1484 power_pmu_read(event); in power_pmu_del()
1488 if (event == cpuhw->event[i]) { in power_pmu_del()
1490 cpuhw->event[i-1] = cpuhw->event[i]; in power_pmu_del()
1495 ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr); in power_pmu_del()
1496 if (event->hw.idx) { in power_pmu_del()
1497 write_pmc(event->hw.idx, 0); in power_pmu_del()
1498 event->hw.idx = 0; in power_pmu_del()
1500 perf_event_update_userpage(event); in power_pmu_del()
1505 if (event == cpuhw->limited_counter[i]) in power_pmu_del()
1519 if (has_branch_stack(event)) in power_pmu_del()
1520 power_pmu_bhrb_disable(event); in power_pmu_del()
1522 perf_pmu_enable(event->pmu); in power_pmu_del()
1531 static void power_pmu_start(struct perf_event *event, int ef_flags) in power_pmu_start() argument
1537 if (!event->hw.idx || !event->hw.sample_period) in power_pmu_start()
1540 if (!(event->hw.state & PERF_HES_STOPPED)) in power_pmu_start()
1544 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); in power_pmu_start()
1547 perf_pmu_disable(event->pmu); in power_pmu_start()
1549 event->hw.state = 0; in power_pmu_start()
1550 left = local64_read(&event->hw.period_left); in power_pmu_start()
1556 write_pmc(event->hw.idx, val); in power_pmu_start()
1558 perf_event_update_userpage(event); in power_pmu_start()
1559 perf_pmu_enable(event->pmu); in power_pmu_start()
1563 static void power_pmu_stop(struct perf_event *event, int ef_flags) in power_pmu_stop() argument
1567 if (!event->hw.idx || !event->hw.sample_period) in power_pmu_stop()
1570 if (event->hw.state & PERF_HES_STOPPED) in power_pmu_stop()
1574 perf_pmu_disable(event->pmu); in power_pmu_stop()
1576 power_pmu_read(event); in power_pmu_stop()
1577 event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; in power_pmu_stop()
1578 write_pmc(event->hw.idx, 0); in power_pmu_stop()
1580 perf_event_update_userpage(event); in power_pmu_stop()
1581 perf_pmu_enable(event->pmu); in power_pmu_stop()
1650 if (check_excludes(cpuhw->event, cpuhw->flags, 0, n)) in power_pmu_commit_txn()
1657 cpuhw->event[i]->hw.config = cpuhw->events[i]; in power_pmu_commit_txn()
1671 static int can_go_on_limited_pmc(struct perf_event *event, u64 ev, in can_go_on_limited_pmc() argument
1677 if (event->attr.exclude_user in can_go_on_limited_pmc()
1678 || event->attr.exclude_kernel in can_go_on_limited_pmc()
1679 || event->attr.exclude_hv in can_go_on_limited_pmc()
1680 || event->attr.sample_period) in can_go_on_limited_pmc()
1724 static void hw_perf_event_destroy(struct perf_event *event) in hw_perf_event_destroy() argument
1764 static int power_pmu_event_init(struct perf_event *event) in power_pmu_event_init() argument
1778 if (has_branch_stack(event)) { in power_pmu_event_init()
1784 switch (event->attr.type) { in power_pmu_event_init()
1786 ev = event->attr.config; in power_pmu_event_init()
1792 err = hw_perf_cache_event(event->attr.config, &ev); in power_pmu_event_init()
1797 ev = event->attr.config; in power_pmu_event_init()
1803 event->hw.config_base = ev; in power_pmu_event_init()
1804 event->hw.idx = 0; in power_pmu_event_init()
1812 event->attr.exclude_hv = 0; in power_pmu_event_init()
1821 if (event->attach_state & PERF_ATTACH_TASK) in power_pmu_event_init()
1829 if (can_go_on_limited_pmc(event, ev, flags)) { in power_pmu_event_init()
1844 err = ebb_event_check(event); in power_pmu_event_init()
1854 if (event->group_leader != event) { in power_pmu_event_init()
1855 n = collect_events(event->group_leader, ppmu->n_counter - 1, in power_pmu_event_init()
1861 ctrs[n] = event; in power_pmu_event_init()
1869 if (has_branch_stack(event)) { in power_pmu_event_init()
1871 event->attr.branch_sample_type); in power_pmu_event_init()
1883 event->hw.config = events[n]; in power_pmu_event_init()
1884 event->hw.event_base = cflags[n]; in power_pmu_event_init()
1885 event->hw.last_period = event->hw.sample_period; in power_pmu_event_init()
1886 local64_set(&event->hw.period_left, event->hw.last_period); in power_pmu_event_init()
1892 if (is_ebb_event(event)) in power_pmu_event_init()
1893 local64_set(&event->hw.prev_count, 0); in power_pmu_event_init()
1911 event->destroy = hw_perf_event_destroy; in power_pmu_event_init()
1916 static int power_pmu_event_idx(struct perf_event *event) in power_pmu_event_idx() argument
1918 return event->hw.idx; in power_pmu_event_idx()
1952 static void record_and_restart(struct perf_event *event, unsigned long val, in record_and_restart() argument
1955 u64 period = event->hw.sample_period; in record_and_restart()
1959 if (event->hw.state & PERF_HES_STOPPED) { in record_and_restart()
1960 write_pmc(event->hw.idx, 0); in record_and_restart()
1965 prev = local64_read(&event->hw.prev_count); in record_and_restart()
1967 local64_add(delta, &event->count); in record_and_restart()
1974 left = local64_read(&event->hw.period_left) - delta; in record_and_restart()
1983 event->hw.last_period = event->hw.sample_period; in record_and_restart()
1989 write_pmc(event->hw.idx, val); in record_and_restart()
1990 local64_set(&event->hw.prev_count, val); in record_and_restart()
1991 local64_set(&event->hw.period_left, left); in record_and_restart()
1992 perf_event_update_userpage(event); in record_and_restart()
2000 perf_sample_data_init(&data, ~0ULL, event->hw.last_period); in record_and_restart()
2002 if (event->attr.sample_type & PERF_SAMPLE_ADDR) in record_and_restart()
2005 if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) { in record_and_restart()
2012 if (perf_event_overflow(event, &data, regs)) in record_and_restart()
2013 power_pmu_stop(event, 0); in record_and_restart()
2081 struct perf_event *event; in perf_event_interrupt() local
2117 event = cpuhw->event[j]; in perf_event_interrupt()
2118 if (event->hw.idx == (i + 1)) { in perf_event_interrupt()
2120 record_and_restart(event, val[i], regs); in perf_event_interrupt()
2131 event = cpuhw->event[i]; in perf_event_interrupt()
2132 if (!event->hw.idx || is_limited_pmc(event->hw.idx)) in perf_event_interrupt()
2134 if (pmc_overflow_power7(val[event->hw.idx - 1])) { in perf_event_interrupt()
2137 record_and_restart(event, in perf_event_interrupt()
2138 val[event->hw.idx - 1], in perf_event_interrupt()
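
The references inside power_pmu_read() (source lines 1027-1035 above) show the lock-free update pattern this driver uses for counters: re-read the hardware PMC until a compare-and-swap of hw.prev_count succeeds, then add the delta to event->count. Below is a minimal user-space sketch of that pattern only, with C11 atomics standing in for the kernel's local64_* helpers and a stub in place of read_pmc(); the names read_pmc_stub, hw_counter and counter_read are illustrative, not kernel APIs.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the two local64_t fields referenced in power_pmu_read(). */
struct hw_counter {
	_Atomic uint64_t prev_count;	/* last raw PMC value accounted for */
	_Atomic uint64_t count;		/* accumulated event count          */
};

/* Hypothetical stub; a real read would come from the PMC register. */
static uint64_t read_pmc_stub(void)
{
	static uint64_t fake = 100;
	return fake += 7;		/* counter keeps advancing */
}

/*
 * Sketch of the retry loop at lines 1027-1035: reload prev_count and the
 * PMC until the compare-and-swap succeeds, so a concurrent update cannot
 * cause the same delta to be accounted twice.
 */
static void counter_read(struct hw_counter *hw)
{
	uint64_t prev, val, delta;

	do {
		prev = atomic_load(&hw->prev_count);
		val = read_pmc_stub();
		delta = (val - prev) & 0xffffffffUL;	/* 32-bit PMC wrap */
	} while (!atomic_compare_exchange_strong(&hw->prev_count, &prev, val));

	atomic_fetch_add(&hw->count, delta);
}

int main(void)
{
	struct hw_counter hw = { .prev_count = 100, .count = 0 };

	counter_read(&hw);
	counter_read(&hw);
	printf("accumulated count: %llu\n",
	       (unsigned long long)atomic_load(&hw.count));
	return 0;
}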