Lines matching refs:event in kernel/events/core.c (each entry gives the source line number, the matching line, and the enclosing function; the trailing "argument", "local", and "member" tags note how the identifier is used on that line).

131 static bool is_kernel_event(struct perf_event *event)  in is_kernel_event()  argument
133 return event->owner == EVENT_OWNER_KERNEL; in is_kernel_event()
321 static u64 perf_event_time(struct perf_event *event);
335 static inline u64 perf_event_clock(struct perf_event *event) in perf_event_clock() argument
337 return event->clock(); in perf_event_clock()
365 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
367 struct perf_event_context *ctx = event->ctx; in perf_cgroup_match()
371 if (!event->cgrp) in perf_cgroup_match()
385 event->cgrp->css.cgroup); in perf_cgroup_match()
388 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
390 css_put(&event->cgrp->css); in perf_detach_cgroup()
391 event->cgrp = NULL; in perf_detach_cgroup()
394 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
396 return event->cgrp != NULL; in is_cgroup_event()
399 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
403 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_event_time()
427 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
435 if (!is_cgroup_event(event)) in update_cgrp_time_from_event()
438 cgrp = perf_cgroup_from_task(current, event->ctx); in update_cgrp_time_from_event()
442 if (cgrp == event->cgrp) in update_cgrp_time_from_event()
443 __update_cgrp_time(event->cgrp); in update_cgrp_time_from_event()
598 static inline int perf_cgroup_connect(int fd, struct perf_event *event, in perf_cgroup_connect() argument
618 event->cgrp = cgrp; in perf_cgroup_connect()
626 perf_detach_cgroup(event); in perf_cgroup_connect()
635 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) in perf_cgroup_set_shadow_time() argument
638 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_set_shadow_time()
639 event->shadow_ctx_time = now - t->timestamp; in perf_cgroup_set_shadow_time()
643 perf_cgroup_defer_enabled(struct perf_event *event) in perf_cgroup_defer_enabled() argument
651 if (is_cgroup_event(event) && !perf_cgroup_match(event)) in perf_cgroup_defer_enabled()
652 event->cgrp_defer_enabled = 1; in perf_cgroup_defer_enabled()
656 perf_cgroup_mark_enabled(struct perf_event *event, in perf_cgroup_mark_enabled() argument
660 u64 tstamp = perf_event_time(event); in perf_cgroup_mark_enabled()
662 if (!event->cgrp_defer_enabled) in perf_cgroup_mark_enabled()
665 event->cgrp_defer_enabled = 0; in perf_cgroup_mark_enabled()
667 event->tstamp_enabled = tstamp - event->total_time_enabled; in perf_cgroup_mark_enabled()
668 list_for_each_entry(sub, &event->sibling_list, group_entry) { in perf_cgroup_mark_enabled()
678 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
683 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
686 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
691 static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event) in perf_cgroup_event_cgrp_time() argument
696 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
714 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event, in perf_cgroup_connect() argument
733 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) in perf_cgroup_set_shadow_time() argument
737 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
743 perf_cgroup_defer_enabled(struct perf_event *event) in perf_cgroup_defer_enabled() argument
748 perf_cgroup_mark_enabled(struct perf_event *event, in perf_cgroup_mark_enabled() argument
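
The CONFIG_CGROUP_PERF helpers above (and their stubs) back cgroup-scoped events, which user space requests by passing a cgroup directory fd in the pid slot of perf_event_open(). A minimal sketch of that path, assuming a v1 perf_event cgroup mounted at the path shown (the path and the counter choice are illustrative):

    #include <fcntl.h>
    #include <linux/perf_event.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        struct perf_event_attr attr;
        int cgroup_fd, event_fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;

        /* fd of the cgroup directory, passed in place of a pid */
        cgroup_fd = open("/sys/fs/cgroup/perf_event/mygroup", O_RDONLY);
        if (cgroup_fd < 0) {
            perror("open cgroup");
            return 1;
        }

        /* cpu must be >= 0 for cgroup events; the pid slot carries the cgroup fd */
        event_fd = syscall(__NR_perf_event_open, &attr, cgroup_fd, 0, -1,
                           PERF_FLAG_PID_CGROUP);
        if (event_fd < 0) {
            perror("perf_event_open");
            return 1;
        }

        close(event_fd);
        close(cgroup_fd);
        return 0;
    }
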
958 perf_event_ctx_lock_nested(struct perf_event *event, int nesting) in perf_event_ctx_lock_nested() argument
964 ctx = ACCESS_ONCE(event->ctx); in perf_event_ctx_lock_nested()
972 if (event->ctx != ctx) { in perf_event_ctx_lock_nested()
982 perf_event_ctx_lock(struct perf_event *event) in perf_event_ctx_lock() argument
984 return perf_event_ctx_lock_nested(event, 0); in perf_event_ctx_lock()
987 static void perf_event_ctx_unlock(struct perf_event *event, in perf_event_ctx_unlock() argument
1013 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) in perf_event_pid() argument
1018 if (event->parent) in perf_event_pid()
1019 event = event->parent; in perf_event_pid()
1021 return task_tgid_nr_ns(p, event->ns); in perf_event_pid()
1024 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) in perf_event_tid() argument
1029 if (event->parent) in perf_event_tid()
1030 event = event->parent; in perf_event_tid()
1032 return task_pid_nr_ns(p, event->ns); in perf_event_tid()
1039 static u64 primary_event_id(struct perf_event *event) in primary_event_id() argument
1041 u64 id = event->id; in primary_event_id()
1043 if (event->parent) in primary_event_id()
1044 id = event->parent->id; in primary_event_id()
1141 static u64 perf_event_time(struct perf_event *event) in perf_event_time() argument
1143 struct perf_event_context *ctx = event->ctx; in perf_event_time()
1145 if (is_cgroup_event(event)) in perf_event_time()
1146 return perf_cgroup_event_time(event); in perf_event_time()
1155 static void update_event_times(struct perf_event *event) in update_event_times() argument
1157 struct perf_event_context *ctx = event->ctx; in update_event_times()
1160 if (event->state < PERF_EVENT_STATE_INACTIVE || in update_event_times()
1161 event->group_leader->state < PERF_EVENT_STATE_INACTIVE) in update_event_times()
1173 if (is_cgroup_event(event)) in update_event_times()
1174 run_end = perf_cgroup_event_time(event); in update_event_times()
1178 run_end = event->tstamp_stopped; in update_event_times()
1180 event->total_time_enabled = run_end - event->tstamp_enabled; in update_event_times()
1182 if (event->state == PERF_EVENT_STATE_INACTIVE) in update_event_times()
1183 run_end = event->tstamp_stopped; in update_event_times()
1185 run_end = perf_event_time(event); in update_event_times()
1187 event->total_time_running = run_end - event->tstamp_running; in update_event_times()
1196 struct perf_event *event; in update_group_times() local
1199 list_for_each_entry(event, &leader->sibling_list, group_entry) in update_group_times()
1200 update_event_times(event); in update_group_times()
1204 ctx_group_list(struct perf_event *event, struct perf_event_context *ctx) in ctx_group_list() argument
1206 if (event->attr.pinned) in ctx_group_list()
1217 list_add_event(struct perf_event *event, struct perf_event_context *ctx) in list_add_event() argument
1219 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); in list_add_event()
1220 event->attach_state |= PERF_ATTACH_CONTEXT; in list_add_event()
1227 if (event->group_leader == event) { in list_add_event()
1230 if (is_software_event(event)) in list_add_event()
1231 event->group_flags |= PERF_GROUP_SOFTWARE; in list_add_event()
1233 list = ctx_group_list(event, ctx); in list_add_event()
1234 list_add_tail(&event->group_entry, list); in list_add_event()
1237 if (is_cgroup_event(event)) in list_add_event()
1240 list_add_rcu(&event->event_entry, &ctx->event_list); in list_add_event()
1242 if (event->attr.inherit_stat) in list_add_event()
1251 static inline void perf_event__state_init(struct perf_event *event) in perf_event__state_init() argument
1253 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : in perf_event__state_init()
1257 static void __perf_event_read_size(struct perf_event *event, int nr_siblings) in __perf_event_read_size() argument
1263 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) in __perf_event_read_size()
1266 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) in __perf_event_read_size()
1269 if (event->attr.read_format & PERF_FORMAT_ID) in __perf_event_read_size()
1272 if (event->attr.read_format & PERF_FORMAT_GROUP) { in __perf_event_read_size()
1278 event->read_size = size; in __perf_event_read_size()
1281 static void __perf_event_header_size(struct perf_event *event, u64 sample_type) in __perf_event_header_size() argument
1299 size += event->read_size; in __perf_event_header_size()
1307 event->header_size = size; in __perf_event_header_size()
1314 static void perf_event__header_size(struct perf_event *event) in perf_event__header_size() argument
1316 __perf_event_read_size(event, in perf_event__header_size()
1317 event->group_leader->nr_siblings); in perf_event__header_size()
1318 __perf_event_header_size(event, event->attr.sample_type); in perf_event__header_size()
1321 static void perf_event__id_header_size(struct perf_event *event) in perf_event__id_header_size() argument
1324 u64 sample_type = event->attr.sample_type; in perf_event__id_header_size()
1345 event->id_header_size = size; in perf_event__id_header_size()
1348 static bool perf_event_validate_size(struct perf_event *event) in perf_event_validate_size() argument
1354 __perf_event_read_size(event, event->group_leader->nr_siblings + 1); in perf_event_validate_size()
1355 __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ); in perf_event_validate_size()
1356 perf_event__id_header_size(event); in perf_event_validate_size()
1362 if (event->read_size + event->header_size + in perf_event_validate_size()
1363 event->id_header_size + sizeof(struct perf_event_header) >= 16*1024) in perf_event_validate_size()
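
__perf_event_read_size() above sizes the buffer that read() returns according to attr.read_format. A sketch of the resulting layout for a group read with both time fields and IDs enabled; the struct and field names are illustrative, following the layout documented in perf_event_open(2):

    #include <stdint.h>

    struct read_format_group {
        uint64_t nr;               /* number of events in the group  */
        uint64_t time_enabled;     /* PERF_FORMAT_TOTAL_TIME_ENABLED */
        uint64_t time_running;     /* PERF_FORMAT_TOTAL_TIME_RUNNING */
        struct {
            uint64_t value;        /* counter value                  */
            uint64_t id;           /* PERF_FORMAT_ID                 */
        } values[];                /* one entry per group member     */
    };
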
1369 static void perf_group_attach(struct perf_event *event) in perf_group_attach() argument
1371 struct perf_event *group_leader = event->group_leader, *pos; in perf_group_attach()
1376 if (event->attach_state & PERF_ATTACH_GROUP) in perf_group_attach()
1379 event->attach_state |= PERF_ATTACH_GROUP; in perf_group_attach()
1381 if (group_leader == event) in perf_group_attach()
1384 WARN_ON_ONCE(group_leader->ctx != event->ctx); in perf_group_attach()
1387 !is_software_event(event)) in perf_group_attach()
1390 list_add_tail(&event->group_entry, &group_leader->sibling_list); in perf_group_attach()
1404 list_del_event(struct perf_event *event, struct perf_event_context *ctx) in list_del_event() argument
1408 WARN_ON_ONCE(event->ctx != ctx); in list_del_event()
1414 if (!(event->attach_state & PERF_ATTACH_CONTEXT)) in list_del_event()
1417 event->attach_state &= ~PERF_ATTACH_CONTEXT; in list_del_event()
1419 if (is_cgroup_event(event)) { in list_del_event()
1432 if (event->attr.inherit_stat) in list_del_event()
1435 list_del_rcu(&event->event_entry); in list_del_event()
1437 if (event->group_leader == event) in list_del_event()
1438 list_del_init(&event->group_entry); in list_del_event()
1440 update_group_times(event); in list_del_event()
1449 if (event->state > PERF_EVENT_STATE_OFF) in list_del_event()
1450 event->state = PERF_EVENT_STATE_OFF; in list_del_event()
1455 static void perf_group_detach(struct perf_event *event) in perf_group_detach() argument
1463 if (!(event->attach_state & PERF_ATTACH_GROUP)) in perf_group_detach()
1466 event->attach_state &= ~PERF_ATTACH_GROUP; in perf_group_detach()
1471 if (event->group_leader != event) { in perf_group_detach()
1472 list_del_init(&event->group_entry); in perf_group_detach()
1473 event->group_leader->nr_siblings--; in perf_group_detach()
1477 if (!list_empty(&event->group_entry)) in perf_group_detach()
1478 list = &event->group_entry; in perf_group_detach()
1485 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) { in perf_group_detach()
1491 sibling->group_flags = event->group_flags; in perf_group_detach()
1493 WARN_ON_ONCE(sibling->ctx != event->ctx); in perf_group_detach()
1497 perf_event__header_size(event->group_leader); in perf_group_detach()
1499 list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry) in perf_group_detach()
1506 static bool is_orphaned_event(struct perf_event *event) in is_orphaned_event() argument
1508 return event && !is_kernel_event(event) && !event->owner; in is_orphaned_event()
1515 static bool is_orphaned_child(struct perf_event *event) in is_orphaned_child() argument
1517 return is_orphaned_event(event->parent); in is_orphaned_child()
1542 static inline int pmu_filter_match(struct perf_event *event) in pmu_filter_match() argument
1544 struct pmu *pmu = event->pmu; in pmu_filter_match()
1545 return pmu->filter_match ? pmu->filter_match(event) : 1; in pmu_filter_match()
1549 event_filter_match(struct perf_event *event) in event_filter_match() argument
1551 return (event->cpu == -1 || event->cpu == smp_processor_id()) in event_filter_match()
1552 && perf_cgroup_match(event) && pmu_filter_match(event); in event_filter_match()
1556 event_sched_out(struct perf_event *event, in event_sched_out() argument
1560 u64 tstamp = perf_event_time(event); in event_sched_out()
1563 WARN_ON_ONCE(event->ctx != ctx); in event_sched_out()
1572 if (event->state == PERF_EVENT_STATE_INACTIVE in event_sched_out()
1573 && !event_filter_match(event)) { in event_sched_out()
1574 delta = tstamp - event->tstamp_stopped; in event_sched_out()
1575 event->tstamp_running += delta; in event_sched_out()
1576 event->tstamp_stopped = tstamp; in event_sched_out()
1579 if (event->state != PERF_EVENT_STATE_ACTIVE) in event_sched_out()
1582 perf_pmu_disable(event->pmu); in event_sched_out()
1584 event->tstamp_stopped = tstamp; in event_sched_out()
1585 event->pmu->del(event, 0); in event_sched_out()
1586 event->oncpu = -1; in event_sched_out()
1587 event->state = PERF_EVENT_STATE_INACTIVE; in event_sched_out()
1588 if (event->pending_disable) { in event_sched_out()
1589 event->pending_disable = 0; in event_sched_out()
1590 event->state = PERF_EVENT_STATE_OFF; in event_sched_out()
1593 if (!is_software_event(event)) in event_sched_out()
1597 if (event->attr.freq && event->attr.sample_freq) in event_sched_out()
1599 if (event->attr.exclusive || !cpuctx->active_oncpu) in event_sched_out()
1602 if (is_orphaned_child(event)) in event_sched_out()
1605 perf_pmu_enable(event->pmu); in event_sched_out()
1613 struct perf_event *event; in group_sched_out() local
1621 list_for_each_entry(event, &group_event->sibling_list, group_entry) in group_sched_out()
1622 event_sched_out(event, cpuctx, ctx); in group_sched_out()
1629 struct perf_event *event; member
1642 struct perf_event *event = re->event; in __perf_remove_from_context() local
1643 struct perf_event_context *ctx = event->ctx; in __perf_remove_from_context()
1647 event_sched_out(event, cpuctx, ctx); in __perf_remove_from_context()
1649 perf_group_detach(event); in __perf_remove_from_context()
1650 list_del_event(event, ctx); in __perf_remove_from_context()
1674 static void perf_remove_from_context(struct perf_event *event, bool detach_group) in perf_remove_from_context() argument
1676 struct perf_event_context *ctx = event->ctx; in perf_remove_from_context()
1679 .event = event, in perf_remove_from_context()
1692 cpu_function_call(event->cpu, __perf_remove_from_context, &re); in perf_remove_from_context()
1720 perf_group_detach(event); in perf_remove_from_context()
1721 list_del_event(event, ctx); in perf_remove_from_context()
1730 struct perf_event *event = info; in __perf_event_disable() local
1731 struct perf_event_context *ctx = event->ctx; in __perf_event_disable()
1750 if (event->state >= PERF_EVENT_STATE_INACTIVE) { in __perf_event_disable()
1752 update_cgrp_time_from_event(event); in __perf_event_disable()
1753 update_group_times(event); in __perf_event_disable()
1754 if (event == event->group_leader) in __perf_event_disable()
1755 group_sched_out(event, cpuctx, ctx); in __perf_event_disable()
1757 event_sched_out(event, cpuctx, ctx); in __perf_event_disable()
1758 event->state = PERF_EVENT_STATE_OFF; in __perf_event_disable()
1779 static void _perf_event_disable(struct perf_event *event) in _perf_event_disable() argument
1781 struct perf_event_context *ctx = event->ctx; in _perf_event_disable()
1788 cpu_function_call(event->cpu, __perf_event_disable, event); in _perf_event_disable()
1793 if (!task_function_call(task, __perf_event_disable, event)) in _perf_event_disable()
1800 if (event->state == PERF_EVENT_STATE_ACTIVE) { in _perf_event_disable()
1814 if (event->state == PERF_EVENT_STATE_INACTIVE) { in _perf_event_disable()
1815 update_group_times(event); in _perf_event_disable()
1816 event->state = PERF_EVENT_STATE_OFF; in _perf_event_disable()
1825 void perf_event_disable(struct perf_event *event) in perf_event_disable() argument
1829 ctx = perf_event_ctx_lock(event); in perf_event_disable()
1830 _perf_event_disable(event); in perf_event_disable()
1831 perf_event_ctx_unlock(event, ctx); in perf_event_disable()
1835 static void perf_set_shadow_time(struct perf_event *event, in perf_set_shadow_time() argument
1864 if (is_cgroup_event(event)) in perf_set_shadow_time()
1865 perf_cgroup_set_shadow_time(event, tstamp); in perf_set_shadow_time()
1867 event->shadow_ctx_time = tstamp - ctx->timestamp; in perf_set_shadow_time()
1872 static void perf_log_throttle(struct perf_event *event, int enable);
1873 static void perf_log_itrace_start(struct perf_event *event);
1876 event_sched_in(struct perf_event *event, in event_sched_in() argument
1880 u64 tstamp = perf_event_time(event); in event_sched_in()
1885 if (event->state <= PERF_EVENT_STATE_OFF) in event_sched_in()
1888 event->state = PERF_EVENT_STATE_ACTIVE; in event_sched_in()
1889 event->oncpu = smp_processor_id(); in event_sched_in()
1896 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { in event_sched_in()
1897 perf_log_throttle(event, 1); in event_sched_in()
1898 event->hw.interrupts = 0; in event_sched_in()
1906 perf_pmu_disable(event->pmu); in event_sched_in()
1908 perf_set_shadow_time(event, ctx, tstamp); in event_sched_in()
1910 perf_log_itrace_start(event); in event_sched_in()
1912 if (event->pmu->add(event, PERF_EF_START)) { in event_sched_in()
1913 event->state = PERF_EVENT_STATE_INACTIVE; in event_sched_in()
1914 event->oncpu = -1; in event_sched_in()
1919 event->tstamp_running += tstamp - event->tstamp_stopped; in event_sched_in()
1921 if (!is_software_event(event)) in event_sched_in()
1925 if (event->attr.freq && event->attr.sample_freq) in event_sched_in()
1928 if (event->attr.exclusive) in event_sched_in()
1931 if (is_orphaned_child(event)) in event_sched_in()
1935 perf_pmu_enable(event->pmu); in event_sched_in()
1945 struct perf_event *event, *partial_group = NULL; in group_sched_in() local
1964 list_for_each_entry(event, &group_event->sibling_list, group_entry) { in group_sched_in()
1965 if (event_sched_in(event, cpuctx, ctx)) { in group_sched_in()
1966 partial_group = event; in group_sched_in()
1989 list_for_each_entry(event, &group_event->sibling_list, group_entry) { in group_sched_in()
1990 if (event == partial_group) in group_sched_in()
1994 event->tstamp_running += now - event->tstamp_stopped; in group_sched_in()
1995 event->tstamp_stopped = now; in group_sched_in()
1997 event_sched_out(event, cpuctx, ctx); in group_sched_in()
2012 static int group_can_go_on(struct perf_event *event, in group_can_go_on() argument
2019 if (event->group_flags & PERF_GROUP_SOFTWARE) in group_can_go_on()
2031 if (event->attr.exclusive && cpuctx->active_oncpu) in group_can_go_on()
2040 static void add_event_to_ctx(struct perf_event *event, in add_event_to_ctx() argument
2043 u64 tstamp = perf_event_time(event); in add_event_to_ctx()
2045 list_add_event(event, ctx); in add_event_to_ctx()
2046 perf_group_attach(event); in add_event_to_ctx()
2047 event->tstamp_enabled = tstamp; in add_event_to_ctx()
2048 event->tstamp_running = tstamp; in add_event_to_ctx()
2049 event->tstamp_stopped = tstamp; in add_event_to_ctx()
2078 struct perf_event *event = info; in __perf_install_in_context() local
2079 struct perf_event_context *ctx = event->ctx; in __perf_install_in_context()
2117 update_cgrp_time_from_event(event); in __perf_install_in_context()
2119 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2144 struct perf_event *event, in perf_install_in_context() argument
2151 event->ctx = ctx; in perf_install_in_context()
2152 if (event->cpu != -1) in perf_install_in_context()
2153 event->cpu = cpu; in perf_install_in_context()
2160 cpu_function_call(cpu, __perf_install_in_context, event); in perf_install_in_context()
2165 if (!task_function_call(task, __perf_install_in_context, event)) in perf_install_in_context()
2187 add_event_to_ctx(event, ctx); in perf_install_in_context()
2199 static void __perf_event_mark_enabled(struct perf_event *event) in __perf_event_mark_enabled() argument
2202 u64 tstamp = perf_event_time(event); in __perf_event_mark_enabled()
2204 event->state = PERF_EVENT_STATE_INACTIVE; in __perf_event_mark_enabled()
2205 event->tstamp_enabled = tstamp - event->total_time_enabled; in __perf_event_mark_enabled()
2206 list_for_each_entry(sub, &event->sibling_list, group_entry) { in __perf_event_mark_enabled()
2217 struct perf_event *event = info; in __perf_event_enable() local
2218 struct perf_event_context *ctx = event->ctx; in __perf_event_enable()
2219 struct perf_event *leader = event->group_leader; in __perf_event_enable()
2238 if (event->state >= PERF_EVENT_STATE_INACTIVE) in __perf_event_enable()
2246 __perf_event_mark_enabled(event); in __perf_event_enable()
2248 if (!event_filter_match(event)) { in __perf_event_enable()
2249 if (is_cgroup_event(event)) in __perf_event_enable()
2250 perf_cgroup_defer_enabled(event); in __perf_event_enable()
2258 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) in __perf_event_enable()
2261 if (!group_can_go_on(event, cpuctx, 1)) { in __perf_event_enable()
2264 if (event == leader) in __perf_event_enable()
2265 err = group_sched_in(event, cpuctx, ctx); in __perf_event_enable()
2267 err = event_sched_in(event, cpuctx, ctx); in __perf_event_enable()
2275 if (leader != event) { in __perf_event_enable()
2300 static void _perf_event_enable(struct perf_event *event) in _perf_event_enable() argument
2302 struct perf_event_context *ctx = event->ctx; in _perf_event_enable()
2309 cpu_function_call(event->cpu, __perf_event_enable, event); in _perf_event_enable()
2314 if (event->state >= PERF_EVENT_STATE_INACTIVE) in _perf_event_enable()
2324 if (event->state == PERF_EVENT_STATE_ERROR) in _perf_event_enable()
2325 event->state = PERF_EVENT_STATE_OFF; in _perf_event_enable()
2329 __perf_event_mark_enabled(event); in _perf_event_enable()
2335 if (!task_function_call(task, __perf_event_enable, event)) in _perf_event_enable()
2344 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) { in _perf_event_enable()
2360 void perf_event_enable(struct perf_event *event) in perf_event_enable() argument
2364 ctx = perf_event_ctx_lock(event); in perf_event_enable()
2365 _perf_event_enable(event); in perf_event_enable()
2366 perf_event_ctx_unlock(event, ctx); in perf_event_enable()
2370 static int _perf_event_refresh(struct perf_event *event, int refresh) in _perf_event_refresh() argument
2375 if (event->attr.inherit || !is_sampling_event(event)) in _perf_event_refresh()
2378 atomic_add(refresh, &event->event_limit); in _perf_event_refresh()
2379 _perf_event_enable(event); in _perf_event_refresh()
2387 int perf_event_refresh(struct perf_event *event, int refresh) in perf_event_refresh() argument
2392 ctx = perf_event_ctx_lock(event); in perf_event_refresh()
2393 ret = _perf_event_refresh(event, refresh); in perf_event_refresh()
2394 perf_event_ctx_unlock(event, ctx); in perf_event_refresh()
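
_perf_event_enable(), _perf_event_disable() and _perf_event_refresh() are reached from user space through ioctls on the event fd. A minimal sketch, assuming fd came from a successful perf_event_open():

    #include <linux/perf_event.h>
    #include <sys/ioctl.h>

    static void toggle_event(int fd)
    {
        ioctl(fd, PERF_EVENT_IOC_RESET, 0);    /* zero the counter          */
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);   /* -> __perf_event_enable()  */
        /* ... workload ... */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);  /* -> __perf_event_disable() */

        /* re-enable and allow 3 more overflows before auto-disable */
        ioctl(fd, PERF_EVENT_IOC_REFRESH, 3);
    }
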
2404 struct perf_event *event; in ctx_sched_out() local
2418 list_for_each_entry(event, &ctx->pinned_groups, group_entry) in ctx_sched_out()
2419 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
2423 list_for_each_entry(event, &ctx->flexible_groups, group_entry) in ctx_sched_out()
2424 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
2467 static void __perf_event_sync_stat(struct perf_event *event, in __perf_event_sync_stat() argument
2472 if (!event->attr.inherit_stat) in __perf_event_sync_stat()
2482 switch (event->state) { in __perf_event_sync_stat()
2484 event->pmu->read(event); in __perf_event_sync_stat()
2488 update_event_times(event); in __perf_event_sync_stat()
2500 value = local64_xchg(&event->count, value); in __perf_event_sync_stat()
2503 swap(event->total_time_enabled, next_event->total_time_enabled); in __perf_event_sync_stat()
2504 swap(event->total_time_running, next_event->total_time_running); in __perf_event_sync_stat()
2509 perf_event_update_userpage(event); in __perf_event_sync_stat()
2516 struct perf_event *event, *next_event; in perf_event_sync_stat() local
2523 event = list_first_entry(&ctx->event_list, in perf_event_sync_stat()
2529 while (&event->event_entry != &ctx->event_list && in perf_event_sync_stat()
2532 __perf_event_sync_stat(event, next_event); in perf_event_sync_stat()
2534 event = list_next_entry(event, event_entry); in perf_event_sync_stat()
2726 struct perf_event *event; in ctx_pinned_sched_in() local
2728 list_for_each_entry(event, &ctx->pinned_groups, group_entry) { in ctx_pinned_sched_in()
2729 if (event->state <= PERF_EVENT_STATE_OFF) in ctx_pinned_sched_in()
2731 if (!event_filter_match(event)) in ctx_pinned_sched_in()
2735 if (is_cgroup_event(event)) in ctx_pinned_sched_in()
2736 perf_cgroup_mark_enabled(event, ctx); in ctx_pinned_sched_in()
2738 if (group_can_go_on(event, cpuctx, 1)) in ctx_pinned_sched_in()
2739 group_sched_in(event, cpuctx, ctx); in ctx_pinned_sched_in()
2745 if (event->state == PERF_EVENT_STATE_INACTIVE) { in ctx_pinned_sched_in()
2746 update_group_times(event); in ctx_pinned_sched_in()
2747 event->state = PERF_EVENT_STATE_ERROR; in ctx_pinned_sched_in()
2756 struct perf_event *event; in ctx_flexible_sched_in() local
2759 list_for_each_entry(event, &ctx->flexible_groups, group_entry) { in ctx_flexible_sched_in()
2761 if (event->state <= PERF_EVENT_STATE_OFF) in ctx_flexible_sched_in()
2767 if (!event_filter_match(event)) in ctx_flexible_sched_in()
2771 if (is_cgroup_event(event)) in ctx_flexible_sched_in()
2772 perf_cgroup_mark_enabled(event, ctx); in ctx_flexible_sched_in()
2774 if (group_can_go_on(event, cpuctx, can_add_hw)) { in ctx_flexible_sched_in()
2775 if (group_sched_in(event, cpuctx, ctx)) in ctx_flexible_sched_in()
2884 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) in perf_calculate_period() argument
2886 u64 frequency = event->attr.sample_freq; in perf_calculate_period()
2960 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) in perf_adjust_period() argument
2962 struct hw_perf_event *hwc = &event->hw; in perf_adjust_period()
2966 period = perf_calculate_period(event, nsec, count); in perf_adjust_period()
2980 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_period()
2985 event->pmu->start(event, PERF_EF_RELOAD); in perf_adjust_period()
2997 struct perf_event *event; in perf_adjust_freq_unthr_context() local
3013 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_adjust_freq_unthr_context()
3014 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_adjust_freq_unthr_context()
3017 if (!event_filter_match(event)) in perf_adjust_freq_unthr_context()
3020 perf_pmu_disable(event->pmu); in perf_adjust_freq_unthr_context()
3022 hwc = &event->hw; in perf_adjust_freq_unthr_context()
3026 perf_log_throttle(event, 1); in perf_adjust_freq_unthr_context()
3027 event->pmu->start(event, 0); in perf_adjust_freq_unthr_context()
3030 if (!event->attr.freq || !event->attr.sample_freq) in perf_adjust_freq_unthr_context()
3036 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_freq_unthr_context()
3038 now = local64_read(&event->count); in perf_adjust_freq_unthr_context()
3050 perf_adjust_period(event, period, delta, false); in perf_adjust_freq_unthr_context()
3052 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); in perf_adjust_freq_unthr_context()
3054 perf_pmu_enable(event->pmu); in perf_adjust_freq_unthr_context()
3139 static int event_enable_on_exec(struct perf_event *event, in event_enable_on_exec() argument
3142 if (!event->attr.enable_on_exec) in event_enable_on_exec()
3145 event->attr.enable_on_exec = 0; in event_enable_on_exec()
3146 if (event->state >= PERF_EVENT_STATE_INACTIVE) in event_enable_on_exec()
3149 __perf_event_mark_enabled(event); in event_enable_on_exec()
3161 struct perf_event *event; in perf_event_enable_on_exec() local
3183 list_for_each_entry(event, &ctx->event_list, event_entry) { in perf_event_enable_on_exec()
3184 ret = event_enable_on_exec(event, ctx); in perf_event_enable_on_exec()
3219 struct perf_event *event; member
3230 struct perf_event *sub, *event = data->event; in __perf_event_read() local
3231 struct perf_event_context *ctx = event->ctx; in __perf_event_read()
3233 struct pmu *pmu = event->pmu; in __perf_event_read()
3248 update_cgrp_time_from_event(event); in __perf_event_read()
3251 update_event_times(event); in __perf_event_read()
3252 if (event->state != PERF_EVENT_STATE_ACTIVE) in __perf_event_read()
3256 pmu->read(event); in __perf_event_read()
3263 pmu->read(event); in __perf_event_read()
3265 list_for_each_entry(sub, &event->sibling_list, group_entry) { in __perf_event_read()
3282 static inline u64 perf_event_count(struct perf_event *event) in perf_event_count() argument
3284 if (event->pmu->count) in perf_event_count()
3285 return event->pmu->count(event); in perf_event_count()
3287 return __perf_event_count(event); in perf_event_count()
3298 u64 perf_event_read_local(struct perf_event *event) in perf_event_read_local() argument
3310 WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) && in perf_event_read_local()
3311 event->hw.target != current); in perf_event_read_local()
3314 WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) && in perf_event_read_local()
3315 event->cpu != smp_processor_id()); in perf_event_read_local()
3321 WARN_ON_ONCE(event->attr.inherit); in perf_event_read_local()
3327 WARN_ON_ONCE(event->pmu->count); in perf_event_read_local()
3334 if (event->oncpu == smp_processor_id()) in perf_event_read_local()
3335 event->pmu->read(event); in perf_event_read_local()
3337 val = local64_read(&event->count); in perf_event_read_local()
3343 static int perf_event_read(struct perf_event *event, bool group) in perf_event_read() argument
3351 if (event->state == PERF_EVENT_STATE_ACTIVE) { in perf_event_read()
3353 .event = event, in perf_event_read()
3357 smp_call_function_single(event->oncpu, in perf_event_read()
3360 } else if (event->state == PERF_EVENT_STATE_INACTIVE) { in perf_event_read()
3361 struct perf_event_context *ctx = event->ctx; in perf_event_read()
3372 update_cgrp_time_from_event(event); in perf_event_read()
3375 update_group_times(event); in perf_event_read()
3377 update_event_times(event); in perf_event_read()
3443 struct perf_event *event) in find_get_context() argument
3450 int cpu = event->cpu; in find_get_context()
3478 if (event->attach_state & PERF_ATTACH_TASK_DATA) { in find_get_context()
3545 static void perf_event_free_filter(struct perf_event *event);
3546 static void perf_event_free_bpf_prog(struct perf_event *event);
3550 struct perf_event *event; in free_event_rcu() local
3552 event = container_of(head, struct perf_event, rcu_head); in free_event_rcu()
3553 if (event->ns) in free_event_rcu()
3554 put_pid_ns(event->ns); in free_event_rcu()
3555 perf_event_free_filter(event); in free_event_rcu()
3556 kfree(event); in free_event_rcu()
3559 static void ring_buffer_attach(struct perf_event *event,
3562 static void unaccount_event_cpu(struct perf_event *event, int cpu) in unaccount_event_cpu() argument
3564 if (event->parent) in unaccount_event_cpu()
3567 if (is_cgroup_event(event)) in unaccount_event_cpu()
3571 static void unaccount_event(struct perf_event *event) in unaccount_event() argument
3573 if (event->parent) in unaccount_event()
3576 if (event->attach_state & PERF_ATTACH_TASK) in unaccount_event()
3578 if (event->attr.mmap || event->attr.mmap_data) in unaccount_event()
3580 if (event->attr.comm) in unaccount_event()
3582 if (event->attr.task) in unaccount_event()
3584 if (event->attr.freq) in unaccount_event()
3586 if (event->attr.context_switch) { in unaccount_event()
3590 if (is_cgroup_event(event)) in unaccount_event()
3592 if (has_branch_stack(event)) in unaccount_event()
3595 unaccount_event_cpu(event, event->cpu); in unaccount_event()
3610 static int exclusive_event_init(struct perf_event *event) in exclusive_event_init() argument
3612 struct pmu *pmu = event->pmu; in exclusive_event_init()
3630 if (event->attach_state & PERF_ATTACH_TASK) { in exclusive_event_init()
3641 static void exclusive_event_destroy(struct perf_event *event) in exclusive_event_destroy() argument
3643 struct pmu *pmu = event->pmu; in exclusive_event_destroy()
3649 if (event->attach_state & PERF_ATTACH_TASK) in exclusive_event_destroy()
3666 static bool exclusive_event_installable(struct perf_event *event, in exclusive_event_installable() argument
3670 struct pmu *pmu = event->pmu; in exclusive_event_installable()
3676 if (exclusive_event_match(iter_event, event)) in exclusive_event_installable()
3683 static void __free_event(struct perf_event *event) in __free_event() argument
3685 if (!event->parent) { in __free_event()
3686 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) in __free_event()
3690 perf_event_free_bpf_prog(event); in __free_event()
3692 if (event->destroy) in __free_event()
3693 event->destroy(event); in __free_event()
3695 if (event->ctx) in __free_event()
3696 put_ctx(event->ctx); in __free_event()
3698 if (event->pmu) { in __free_event()
3699 exclusive_event_destroy(event); in __free_event()
3700 module_put(event->pmu->module); in __free_event()
3703 call_rcu(&event->rcu_head, free_event_rcu); in __free_event()
3706 static void _free_event(struct perf_event *event) in _free_event() argument
3708 irq_work_sync(&event->pending); in _free_event()
3710 unaccount_event(event); in _free_event()
3712 if (event->rb) { in _free_event()
3719 mutex_lock(&event->mmap_mutex); in _free_event()
3720 ring_buffer_attach(event, NULL); in _free_event()
3721 mutex_unlock(&event->mmap_mutex); in _free_event()
3724 if (is_cgroup_event(event)) in _free_event()
3725 perf_detach_cgroup(event); in _free_event()
3727 __free_event(event); in _free_event()
3734 static void free_event(struct perf_event *event) in free_event() argument
3736 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, in free_event()
3738 atomic_long_read(&event->refcount), event)) { in free_event()
3743 _free_event(event); in free_event()
3749 static void perf_remove_from_owner(struct perf_event *event) in perf_remove_from_owner() argument
3754 owner = ACCESS_ONCE(event->owner); in perf_remove_from_owner()
3789 if (event->owner) in perf_remove_from_owner()
3790 list_del_init(&event->owner_entry); in perf_remove_from_owner()
3796 static void put_event(struct perf_event *event) in put_event() argument
3800 if (!atomic_long_dec_and_test(&event->refcount)) in put_event()
3803 if (!is_kernel_event(event)) in put_event()
3804 perf_remove_from_owner(event); in put_event()
3818 ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING); in put_event()
3820 perf_remove_from_context(event, true); in put_event()
3821 perf_event_ctx_unlock(event, ctx); in put_event()
3823 _free_event(event); in put_event()
3826 int perf_event_release_kernel(struct perf_event *event) in perf_event_release_kernel() argument
3828 put_event(event); in perf_event_release_kernel()
3848 struct perf_event *event, *tmp; in orphans_remove_work() local
3854 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) { in orphans_remove_work()
3855 struct perf_event *parent_event = event->parent; in orphans_remove_work()
3857 if (!is_orphaned_child(event)) in orphans_remove_work()
3860 perf_remove_from_context(event, true); in orphans_remove_work()
3863 list_del_init(&event->child_list); in orphans_remove_work()
3866 free_event(event); in orphans_remove_work()
3878 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in perf_event_read_value() argument
3886 mutex_lock(&event->child_mutex); in perf_event_read_value()
3888 (void)perf_event_read(event, false); in perf_event_read_value()
3889 total += perf_event_count(event); in perf_event_read_value()
3891 *enabled += event->total_time_enabled + in perf_event_read_value()
3892 atomic64_read(&event->child_total_time_enabled); in perf_event_read_value()
3893 *running += event->total_time_running + in perf_event_read_value()
3894 atomic64_read(&event->child_total_time_running); in perf_event_read_value()
3896 list_for_each_entry(child, &event->child_list, child_list) { in perf_event_read_value()
3902 mutex_unlock(&event->child_mutex); in perf_event_read_value()
3950 static int perf_read_group(struct perf_event *event, in perf_read_group() argument
3953 struct perf_event *leader = event->group_leader, *child; in perf_read_group()
3960 values = kzalloc(event->read_size, GFP_KERNEL); in perf_read_group()
3984 ret = event->read_size; in perf_read_group()
3985 if (copy_to_user(buf, values, event->read_size)) in perf_read_group()
3996 static int perf_read_one(struct perf_event *event, in perf_read_one() argument
4003 values[n++] = perf_event_read_value(event, &enabled, &running); in perf_read_one()
4009 values[n++] = primary_event_id(event); in perf_read_one()
4017 static bool is_event_hup(struct perf_event *event) in is_event_hup() argument
4021 if (event->state != PERF_EVENT_STATE_EXIT) in is_event_hup()
4024 mutex_lock(&event->child_mutex); in is_event_hup()
4025 no_children = list_empty(&event->child_list); in is_event_hup()
4026 mutex_unlock(&event->child_mutex); in is_event_hup()
4034 __perf_read(struct perf_event *event, char __user *buf, size_t count) in __perf_read() argument
4036 u64 read_format = event->attr.read_format; in __perf_read()
4044 if (event->state == PERF_EVENT_STATE_ERROR) in __perf_read()
4047 if (count < event->read_size) in __perf_read()
4050 WARN_ON_ONCE(event->ctx->parent_ctx); in __perf_read()
4052 ret = perf_read_group(event, read_format, buf); in __perf_read()
4054 ret = perf_read_one(event, read_format, buf); in __perf_read()
4062 struct perf_event *event = file->private_data; in perf_read() local
4066 ctx = perf_event_ctx_lock(event); in perf_read()
4067 ret = __perf_read(event, buf, count); in perf_read()
4068 perf_event_ctx_unlock(event, ctx); in perf_read()
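
__perf_read()/perf_read_one() format what read() delivers on the event fd. A sketch of consuming that for a single (non-group) event opened with both TOTAL_TIME flags in read_format; the struct and function names here are illustrative:

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    struct single_read {
        uint64_t value;
        uint64_t time_enabled;    /* PERF_FORMAT_TOTAL_TIME_ENABLED */
        uint64_t time_running;    /* PERF_FORMAT_TOTAL_TIME_RUNNING */
    };

    static void print_count(int fd)
    {
        struct single_read rf;

        if (read(fd, &rf, sizeof(rf)) == sizeof(rf))
            printf("count=%llu enabled=%llu running=%llu\n",
                   (unsigned long long)rf.value,
                   (unsigned long long)rf.time_enabled,
                   (unsigned long long)rf.time_running);
    }
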
4075 struct perf_event *event = file->private_data; in perf_poll() local
4079 poll_wait(file, &event->waitq, wait); in perf_poll()
4081 if (is_event_hup(event)) in perf_poll()
4088 mutex_lock(&event->mmap_mutex); in perf_poll()
4089 rb = event->rb; in perf_poll()
4092 mutex_unlock(&event->mmap_mutex); in perf_poll()
4096 static void _perf_event_reset(struct perf_event *event) in _perf_event_reset() argument
4098 (void)perf_event_read(event, false); in _perf_event_reset()
4099 local64_set(&event->count, 0); in _perf_event_reset()
4100 perf_event_update_userpage(event); in _perf_event_reset()
4109 static void perf_event_for_each_child(struct perf_event *event, in perf_event_for_each_child() argument
4114 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_event_for_each_child()
4116 mutex_lock(&event->child_mutex); in perf_event_for_each_child()
4117 func(event); in perf_event_for_each_child()
4118 list_for_each_entry(child, &event->child_list, child_list) in perf_event_for_each_child()
4120 mutex_unlock(&event->child_mutex); in perf_event_for_each_child()
4123 static void perf_event_for_each(struct perf_event *event, in perf_event_for_each() argument
4126 struct perf_event_context *ctx = event->ctx; in perf_event_for_each()
4131 event = event->group_leader; in perf_event_for_each()
4133 perf_event_for_each_child(event, func); in perf_event_for_each()
4134 list_for_each_entry(sibling, &event->sibling_list, group_entry) in perf_event_for_each()
4139 struct perf_event *event; member
4146 struct perf_event *event = pe->event; in __perf_event_period() local
4147 struct perf_event_context *ctx = event->ctx; in __perf_event_period()
4152 if (event->attr.freq) { in __perf_event_period()
4153 event->attr.sample_freq = value; in __perf_event_period()
4155 event->attr.sample_period = value; in __perf_event_period()
4156 event->hw.sample_period = value; in __perf_event_period()
4159 active = (event->state == PERF_EVENT_STATE_ACTIVE); in __perf_event_period()
4162 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_period()
4165 local64_set(&event->hw.period_left, 0); in __perf_event_period()
4168 event->pmu->start(event, PERF_EF_RELOAD); in __perf_event_period()
4176 static int perf_event_period(struct perf_event *event, u64 __user *arg) in perf_event_period() argument
4178 struct period_event pe = { .event = event, }; in perf_event_period()
4179 struct perf_event_context *ctx = event->ctx; in perf_event_period()
4183 if (!is_sampling_event(event)) in perf_event_period()
4192 if (event->attr.freq && value > sysctl_perf_event_sample_rate) in perf_event_period()
4199 cpu_function_call(event->cpu, __perf_event_period, &pe); in perf_event_period()
4214 if (event->attr.freq) { in perf_event_period()
4215 event->attr.sample_freq = value; in perf_event_period()
4217 event->attr.sample_period = value; in perf_event_period()
4218 event->hw.sample_period = value; in perf_event_period()
4221 local64_set(&event->hw.period_left, 0); in perf_event_period()
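
perf_event_period() is driven by the PERF_EVENT_IOC_PERIOD ioctl, which passes a pointer to the new u64 period (or frequency, when attr.freq is set). A minimal sketch, with fd assumed to be a sampling event fd:

    #include <linux/perf_event.h>
    #include <stdint.h>
    #include <sys/ioctl.h>

    static int set_period(int fd, uint64_t period)
    {
        /* the ioctl takes a pointer to the new u64 period/frequency */
        return ioctl(fd, PERF_EVENT_IOC_PERIOD, &period);
    }
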
4243 static int perf_event_set_output(struct perf_event *event,
4245 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
4246 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
4248 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) in _perf_ioctl() argument
4265 return _perf_event_refresh(event, arg); in _perf_ioctl()
4268 return perf_event_period(event, (u64 __user *)arg); in _perf_ioctl()
4272 u64 id = primary_event_id(event); in _perf_ioctl()
4289 ret = perf_event_set_output(event, output_event); in _perf_ioctl()
4292 ret = perf_event_set_output(event, NULL); in _perf_ioctl()
4298 return perf_event_set_filter(event, (void __user *)arg); in _perf_ioctl()
4301 return perf_event_set_bpf_prog(event, arg); in _perf_ioctl()
4308 perf_event_for_each(event, func); in _perf_ioctl()
4310 perf_event_for_each_child(event, func); in _perf_ioctl()
4317 struct perf_event *event = file->private_data; in perf_ioctl() local
4321 ctx = perf_event_ctx_lock(event); in perf_ioctl()
4322 ret = _perf_ioctl(event, cmd, arg); in perf_ioctl()
4323 perf_event_ctx_unlock(event, ctx); in perf_ioctl()
4351 struct perf_event *event; in perf_event_task_enable() local
4354 list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_enable()
4355 ctx = perf_event_ctx_lock(event); in perf_event_task_enable()
4356 perf_event_for_each_child(event, _perf_event_enable); in perf_event_task_enable()
4357 perf_event_ctx_unlock(event, ctx); in perf_event_task_enable()
4367 struct perf_event *event; in perf_event_task_disable() local
4370 list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_disable()
4371 ctx = perf_event_ctx_lock(event); in perf_event_task_disable()
4372 perf_event_for_each_child(event, _perf_event_disable); in perf_event_task_disable()
4373 perf_event_ctx_unlock(event, ctx); in perf_event_task_disable()
4380 static int perf_event_index(struct perf_event *event) in perf_event_index() argument
4382 if (event->hw.state & PERF_HES_STOPPED) in perf_event_index()
4385 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_event_index()
4388 return event->pmu->event_idx(event); in perf_event_index()
4391 static void calc_timer_values(struct perf_event *event, in calc_timer_values() argument
4399 ctx_time = event->shadow_ctx_time + *now; in calc_timer_values()
4400 *enabled = ctx_time - event->tstamp_enabled; in calc_timer_values()
4401 *running = ctx_time - event->tstamp_running; in calc_timer_values()
4404 static void perf_event_init_userpage(struct perf_event *event) in perf_event_init_userpage() argument
4410 rb = rcu_dereference(event->rb); in perf_event_init_userpage()
4427 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) in arch_perf_update_userpage() argument
4436 void perf_event_update_userpage(struct perf_event *event) in perf_event_update_userpage() argument
4443 rb = rcu_dereference(event->rb); in perf_event_update_userpage()
4456 calc_timer_values(event, &now, &enabled, &running); in perf_event_update_userpage()
4466 userpg->index = perf_event_index(event); in perf_event_update_userpage()
4467 userpg->offset = perf_event_count(event); in perf_event_update_userpage()
4469 userpg->offset -= local64_read(&event->hw.prev_count); in perf_event_update_userpage()
4472 atomic64_read(&event->child_total_time_enabled); in perf_event_update_userpage()
4475 atomic64_read(&event->child_total_time_running); in perf_event_update_userpage()
4477 arch_perf_update_userpage(event, userpg, now); in perf_event_update_userpage()
4488 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_fault() local
4499 rb = rcu_dereference(event->rb); in perf_mmap_fault()
4521 static void ring_buffer_attach(struct perf_event *event, in ring_buffer_attach() argument
4527 if (event->rb) { in ring_buffer_attach()
4532 WARN_ON_ONCE(event->rcu_pending); in ring_buffer_attach()
4534 old_rb = event->rb; in ring_buffer_attach()
4536 list_del_rcu(&event->rb_entry); in ring_buffer_attach()
4539 event->rcu_batches = get_state_synchronize_rcu(); in ring_buffer_attach()
4540 event->rcu_pending = 1; in ring_buffer_attach()
4544 if (event->rcu_pending) { in ring_buffer_attach()
4545 cond_synchronize_rcu(event->rcu_batches); in ring_buffer_attach()
4546 event->rcu_pending = 0; in ring_buffer_attach()
4550 list_add_rcu(&event->rb_entry, &rb->event_list); in ring_buffer_attach()
4554 rcu_assign_pointer(event->rb, rb); in ring_buffer_attach()
4563 wake_up_all(&event->waitq); in ring_buffer_attach()
4567 static void ring_buffer_wakeup(struct perf_event *event) in ring_buffer_wakeup() argument
4572 rb = rcu_dereference(event->rb); in ring_buffer_wakeup()
4574 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) in ring_buffer_wakeup()
4575 wake_up_all(&event->waitq); in ring_buffer_wakeup()
4580 struct ring_buffer *ring_buffer_get(struct perf_event *event) in ring_buffer_get() argument
4585 rb = rcu_dereference(event->rb); in ring_buffer_get()
4607 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_open() local
4609 atomic_inc(&event->mmap_count); in perf_mmap_open()
4610 atomic_inc(&event->rb->mmap_count); in perf_mmap_open()
4613 atomic_inc(&event->rb->aux_mmap_count); in perf_mmap_open()
4615 if (event->pmu->event_mapped) in perf_mmap_open()
4616 event->pmu->event_mapped(event); in perf_mmap_open()
4629 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_close() local
4631 struct ring_buffer *rb = ring_buffer_get(event); in perf_mmap_close()
4636 if (event->pmu->event_unmapped) in perf_mmap_close()
4637 event->pmu->event_unmapped(event); in perf_mmap_close()
4645 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) { in perf_mmap_close()
4650 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
4655 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) in perf_mmap_close()
4658 ring_buffer_attach(event, NULL); in perf_mmap_close()
4659 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
4672 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { in perf_mmap_close()
4673 if (!atomic_long_inc_not_zero(&event->refcount)) { in perf_mmap_close()
4682 mutex_lock(&event->mmap_mutex); in perf_mmap_close()
4693 if (event->rb == rb) in perf_mmap_close()
4694 ring_buffer_attach(event, NULL); in perf_mmap_close()
4696 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
4697 put_event(event); in perf_mmap_close()
4733 struct perf_event *event = file->private_data; in perf_mmap() local
4748 if (event->cpu == -1 && event->attr.inherit) in perf_mmap()
4766 if (!event->rb) in perf_mmap()
4771 mutex_lock(&event->mmap_mutex); in perf_mmap()
4774 rb = event->rb; in perf_mmap()
4826 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_mmap()
4828 mutex_lock(&event->mmap_mutex); in perf_mmap()
4829 if (event->rb) { in perf_mmap()
4830 if (event->rb->nr_pages != nr_pages) { in perf_mmap()
4835 if (!atomic_inc_not_zero(&event->rb->mmap_count)) { in perf_mmap()
4841 mutex_unlock(&event->mmap_mutex); in perf_mmap()
4873 WARN_ON(!rb && event->rb); in perf_mmap()
4880 event->attr.watermark ? event->attr.wakeup_watermark : 0, in perf_mmap()
4881 event->cpu, flags); in perf_mmap()
4892 ring_buffer_attach(event, rb); in perf_mmap()
4894 perf_event_init_userpage(event); in perf_mmap()
4895 perf_event_update_userpage(event); in perf_mmap()
4897 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, in perf_mmap()
4898 event->attr.aux_watermark, flags); in perf_mmap()
4908 atomic_inc(&event->mmap_count); in perf_mmap()
4913 mutex_unlock(&event->mmap_mutex); in perf_mmap()
4922 if (event->pmu->event_mapped) in perf_mmap()
4923 event->pmu->event_mapped(event); in perf_mmap()
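
perf_mmap() backs the user-space mmap() of one metadata page plus a power-of-two number of data pages. A sketch of the mapping side, assuming fd is a perf event fd; the 8-page data buffer is an arbitrary choice:

    #include <linux/perf_event.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static struct perf_event_mmap_page *map_ring(int fd)
    {
        long page_size = sysconf(_SC_PAGESIZE);
        size_t len = (1 + 8) * page_size;    /* 1 control page + 8 data pages */
        void *base;

        base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (base == MAP_FAILED) {
            perror("mmap");
            return NULL;
        }
        return base;    /* first page is struct perf_event_mmap_page */
    }
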
4931 struct perf_event *event = filp->private_data; in perf_fasync() local
4935 retval = fasync_helper(fd, filp, on, &event->fasync); in perf_fasync()
4962 static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) in perf_event_fasync() argument
4965 if (event->parent) in perf_event_fasync()
4966 event = event->parent; in perf_event_fasync()
4967 return &event->fasync; in perf_event_fasync()
4970 void perf_event_wakeup(struct perf_event *event) in perf_event_wakeup() argument
4972 ring_buffer_wakeup(event); in perf_event_wakeup()
4974 if (event->pending_kill) { in perf_event_wakeup()
4975 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); in perf_event_wakeup()
4976 event->pending_kill = 0; in perf_event_wakeup()
4982 struct perf_event *event = container_of(entry, in perf_pending_event() local
4992 if (event->pending_disable) { in perf_pending_event()
4993 event->pending_disable = 0; in perf_pending_event()
4994 __perf_event_disable(event); in perf_pending_event()
4997 if (event->pending_wakeup) { in perf_pending_event()
4998 event->pending_wakeup = 0; in perf_pending_event()
4999 perf_event_wakeup(event); in perf_pending_event()
5162 struct perf_event *event) in __perf_event_header__init_id() argument
5164 u64 sample_type = event->attr.sample_type; in __perf_event_header__init_id()
5167 header->size += event->id_header_size; in __perf_event_header__init_id()
5171 data->tid_entry.pid = perf_event_pid(event, current); in __perf_event_header__init_id()
5172 data->tid_entry.tid = perf_event_tid(event, current); in __perf_event_header__init_id()
5176 data->time = perf_event_clock(event); in __perf_event_header__init_id()
5179 data->id = primary_event_id(event); in __perf_event_header__init_id()
5182 data->stream_id = event->id; in __perf_event_header__init_id()
5192 struct perf_event *event) in perf_event_header__init_id() argument
5194 if (event->attr.sample_id_all) in perf_event_header__init_id()
5195 __perf_event_header__init_id(header, data, event); in perf_event_header__init_id()
5222 void perf_event__output_id_sample(struct perf_event *event, in perf_event__output_id_sample() argument
5226 if (event->attr.sample_id_all) in perf_event__output_id_sample()
5231 struct perf_event *event, in perf_output_read_one() argument
5234 u64 read_format = event->attr.read_format; in perf_output_read_one()
5238 values[n++] = perf_event_count(event); in perf_output_read_one()
5241 atomic64_read(&event->child_total_time_enabled); in perf_output_read_one()
5245 atomic64_read(&event->child_total_time_running); in perf_output_read_one()
5248 values[n++] = primary_event_id(event); in perf_output_read_one()
5257 struct perf_event *event, in perf_output_read_group() argument
5260 struct perf_event *leader = event->group_leader, *sub; in perf_output_read_group()
5261 u64 read_format = event->attr.read_format; in perf_output_read_group()
5273 if (leader != event) in perf_output_read_group()
5285 if ((sub != event) && in perf_output_read_group()
5301 struct perf_event *event) in perf_output_read() argument
5304 u64 read_format = event->attr.read_format; in perf_output_read()
5316 calc_timer_values(event, &now, &enabled, &running); in perf_output_read()
5318 if (event->attr.read_format & PERF_FORMAT_GROUP) in perf_output_read()
5319 perf_output_read_group(handle, event, enabled, running); in perf_output_read()
5321 perf_output_read_one(handle, event, enabled, running); in perf_output_read()
5327 struct perf_event *event) in perf_output_sample() argument
5361 perf_output_read(handle, event); in perf_output_sample()
5430 u64 mask = event->attr.sample_regs_user; in perf_output_sample()
5461 u64 mask = event->attr.sample_regs_intr; in perf_output_sample()
5469 if (!event->attr.watermark) { in perf_output_sample()
5470 int wakeup_events = event->attr.wakeup_events; in perf_output_sample()
5486 struct perf_event *event, in perf_prepare_sample() argument
5489 u64 sample_type = event->attr.sample_type; in perf_prepare_sample()
5492 header->size = sizeof(*header) + event->header_size; in perf_prepare_sample()
5497 __perf_event_header__init_id(header, data, event); in perf_prepare_sample()
5505 data->callchain = perf_callchain(event, regs); in perf_prepare_sample()
5542 u64 mask = event->attr.sample_regs_user; in perf_prepare_sample()
5556 u16 stack_size = event->attr.sample_stack_user; in perf_prepare_sample()
5581 u64 mask = event->attr.sample_regs_intr; in perf_prepare_sample()
5590 void perf_event_output(struct perf_event *event, in perf_event_output() argument
5600 perf_prepare_sample(&header, data, event, regs); in perf_event_output()
5602 if (perf_output_begin(&handle, event, header.size)) in perf_event_output()
5605 perf_output_sample(&handle, &header, data, event); in perf_event_output()
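
perf_prepare_sample()/perf_output_sample() emit whatever attr.sample_type requests. A sketch of an attr setup that exercises the callchain and user-register paths above; the register mask value is only a placeholder, since the valid bits are architecture-specific:

    #include <linux/perf_event.h>
    #include <string.h>

    static void init_sampling_attr(struct perf_event_attr *attr)
    {
        memset(attr, 0, sizeof(*attr));
        attr->size = sizeof(*attr);
        attr->type = PERF_TYPE_HARDWARE;
        attr->config = PERF_COUNT_HW_CPU_CYCLES;
        attr->sample_period = 100000;
        attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
                            PERF_SAMPLE_CALLCHAIN | PERF_SAMPLE_REGS_USER;
        attr->sample_regs_user = 0x1;    /* placeholder register mask */
        attr->wakeup_events = 1;         /* see the !attr.watermark branch above */
    }
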
5625 perf_event_read_event(struct perf_event *event, in perf_event_read_event() argument
5634 .size = sizeof(read_event) + event->read_size, in perf_event_read_event()
5636 .pid = perf_event_pid(event, task), in perf_event_read_event()
5637 .tid = perf_event_tid(event, task), in perf_event_read_event()
5641 perf_event_header__init_id(&read_event.header, &sample, event); in perf_event_read_event()
5642 ret = perf_output_begin(&handle, event, read_event.header.size); in perf_event_read_event()
5647 perf_output_read(&handle, event); in perf_event_read_event()
5648 perf_event__output_id_sample(event, &handle, &sample); in perf_event_read_event()
5653 typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
5660 struct perf_event *event; in perf_event_aux_ctx() local
5662 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_event_aux_ctx()
5663 if (event->state < PERF_EVENT_STATE_INACTIVE) in perf_event_aux_ctx()
5665 if (!event_filter_match(event)) in perf_event_aux_ctx()
5667 output(event, data); in perf_event_aux_ctx()
5741 static int perf_event_task_match(struct perf_event *event) in perf_event_task_match() argument
5743 return event->attr.comm || event->attr.mmap || in perf_event_task_match()
5744 event->attr.mmap2 || event->attr.mmap_data || in perf_event_task_match()
5745 event->attr.task; in perf_event_task_match()
5748 static void perf_event_task_output(struct perf_event *event, in perf_event_task_output() argument
5757 if (!perf_event_task_match(event)) in perf_event_task_output()
5760 perf_event_header__init_id(&task_event->event_id.header, &sample, event); in perf_event_task_output()
5762 ret = perf_output_begin(&handle, event, in perf_event_task_output()
5767 task_event->event_id.pid = perf_event_pid(event, task); in perf_event_task_output()
5768 task_event->event_id.ppid = perf_event_pid(event, current); in perf_event_task_output()
5770 task_event->event_id.tid = perf_event_tid(event, task); in perf_event_task_output()
5771 task_event->event_id.ptid = perf_event_tid(event, current); in perf_event_task_output()
5773 task_event->event_id.time = perf_event_clock(event); in perf_event_task_output()
5777 perf_event__output_id_sample(event, &handle, &sample); in perf_event_task_output()
5839 static int perf_event_comm_match(struct perf_event *event) in perf_event_comm_match() argument
5841 return event->attr.comm; in perf_event_comm_match()
5844 static void perf_event_comm_output(struct perf_event *event, in perf_event_comm_output() argument
5853 if (!perf_event_comm_match(event)) in perf_event_comm_output()
5856 perf_event_header__init_id(&comm_event->event_id.header, &sample, event); in perf_event_comm_output()
5857 ret = perf_output_begin(&handle, event, in perf_event_comm_output()
5863 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); in perf_event_comm_output()
5864 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); in perf_event_comm_output()
5870 perf_event__output_id_sample(event, &handle, &sample); in perf_event_comm_output()
5946 static int perf_event_mmap_match(struct perf_event *event, in perf_event_mmap_match() argument
5953 return (!executable && event->attr.mmap_data) || in perf_event_mmap_match()
5954 (executable && (event->attr.mmap || event->attr.mmap2)); in perf_event_mmap_match()
5957 static void perf_event_mmap_output(struct perf_event *event, in perf_event_mmap_output() argument
5966 if (!perf_event_mmap_match(event, data)) in perf_event_mmap_output()
5969 if (event->attr.mmap2) { in perf_event_mmap_output()
5979 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); in perf_event_mmap_output()
5980 ret = perf_output_begin(&handle, event, in perf_event_mmap_output()
5985 mmap_event->event_id.pid = perf_event_pid(event, current); in perf_event_mmap_output()
5986 mmap_event->event_id.tid = perf_event_tid(event, current); in perf_event_mmap_output()
5990 if (event->attr.mmap2) { in perf_event_mmap_output()
6002 perf_event__output_id_sample(event, &handle, &sample); in perf_event_mmap_output()
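
The task, comm and mmap match helpers above gate which events receive the corresponding side-band records. As a hedged illustration (the surrounding setup is invented; only the attr bitfield names are the real UAPI fields), an event that asks for all of these records could be configured like so:

/* Sketch: request task, comm and mmap side-band records on an event.
 * The type/config choice is illustrative; PERF_COUNT_SW_DUMMY is the
 * usual "side-band only, no counting" placeholder. */
#include <linux/perf_event.h>

struct perf_event_attr sideband_attr = {
	.size      = sizeof(sideband_attr),
	.type      = PERF_TYPE_SOFTWARE,
	.config    = PERF_COUNT_SW_DUMMY,
	.task      = 1,		/* fork/exit records */
	.comm      = 1,		/* comm (exec/rename) records */
	.mmap      = 1,		/* executable mmap records */
	.mmap_data = 1,		/* data mmap records */
	.mmap2     = 1,		/* extended mmap records */
};
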
6163 void perf_event_aux_event(struct perf_event *event, unsigned long head, in perf_event_aux_event() argument
6185 perf_event_header__init_id(&rec.header, &sample, event); in perf_event_aux_event()
6186 ret = perf_output_begin(&handle, event, rec.header.size); in perf_event_aux_event()
6192 perf_event__output_id_sample(event, &handle, &sample); in perf_event_aux_event()
6200 void perf_log_lost_samples(struct perf_event *event, u64 lost) in perf_log_lost_samples() argument
6218 perf_event_header__init_id(&lost_samples_event.header, &sample, event); in perf_log_lost_samples()
6220 ret = perf_output_begin(&handle, event, in perf_log_lost_samples()
6226 perf_event__output_id_sample(event, &handle, &sample); in perf_log_lost_samples()
6245 static int perf_event_switch_match(struct perf_event *event) in perf_event_switch_match() argument
6247 return event->attr.context_switch; in perf_event_switch_match()
6250 static void perf_event_switch_output(struct perf_event *event, void *data) in perf_event_switch_output() argument
6257 if (!perf_event_switch_match(event)) in perf_event_switch_output()
6261 if (event->ctx->task) { in perf_event_switch_output()
6268 perf_event_pid(event, se->next_prev); in perf_event_switch_output()
6270 perf_event_tid(event, se->next_prev); in perf_event_switch_output()
6273 perf_event_header__init_id(&se->event_id.header, &sample, event); in perf_event_switch_output()
6275 ret = perf_output_begin(&handle, event, se->event_id.header.size); in perf_event_switch_output()
6279 if (event->ctx->task) in perf_event_switch_output()
6284 perf_event__output_id_sample(event, &handle, &sample); in perf_event_switch_output()
6319 static void perf_log_throttle(struct perf_event *event, int enable) in perf_log_throttle() argument
6336 .time = perf_event_clock(event), in perf_log_throttle()
6337 .id = primary_event_id(event), in perf_log_throttle()
6338 .stream_id = event->id, in perf_log_throttle()
6344 perf_event_header__init_id(&throttle_event.header, &sample, event); in perf_log_throttle()
6346 ret = perf_output_begin(&handle, event, in perf_log_throttle()
6352 perf_event__output_id_sample(event, &handle, &sample); in perf_log_throttle()
6356 static void perf_log_itrace_start(struct perf_event *event) in perf_log_itrace_start() argument
6367 if (event->parent) in perf_log_itrace_start()
6368 event = event->parent; in perf_log_itrace_start()
6370 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || in perf_log_itrace_start()
6371 event->hw.itrace_started) in perf_log_itrace_start()
6377 rec.pid = perf_event_pid(event, current); in perf_log_itrace_start()
6378 rec.tid = perf_event_tid(event, current); in perf_log_itrace_start()
6380 perf_event_header__init_id(&rec.header, &sample, event); in perf_log_itrace_start()
6381 ret = perf_output_begin(&handle, event, rec.header.size); in perf_log_itrace_start()
6387 perf_event__output_id_sample(event, &handle, &sample); in perf_log_itrace_start()
6396 static int __perf_event_overflow(struct perf_event *event, in __perf_event_overflow() argument
6400 int events = atomic_read(&event->event_limit); in __perf_event_overflow()
6401 struct hw_perf_event *hwc = &event->hw; in __perf_event_overflow()
6409 if (unlikely(!is_sampling_event(event))) in __perf_event_overflow()
6422 perf_log_throttle(event, 0); in __perf_event_overflow()
6428 if (event->attr.freq) { in __perf_event_overflow()
6435 perf_adjust_period(event, delta, hwc->last_period, true); in __perf_event_overflow()
6443 event->pending_kill = POLL_IN; in __perf_event_overflow()
6444 if (events && atomic_dec_and_test(&event->event_limit)) { in __perf_event_overflow()
6446 event->pending_kill = POLL_HUP; in __perf_event_overflow()
6447 event->pending_disable = 1; in __perf_event_overflow()
6448 irq_work_queue(&event->pending); in __perf_event_overflow()
6451 if (event->overflow_handler) in __perf_event_overflow()
6452 event->overflow_handler(event, data, regs); in __perf_event_overflow()
6454 perf_event_output(event, data, regs); in __perf_event_overflow()
6456 if (*perf_event_fasync(event) && event->pending_kill) { in __perf_event_overflow()
6457 event->pending_wakeup = 1; in __perf_event_overflow()
6458 irq_work_queue(&event->pending); in __perf_event_overflow()
6464 int perf_event_overflow(struct perf_event *event, in perf_event_overflow() argument
6468 return __perf_event_overflow(event, 1, data, regs); in perf_event_overflow()
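
As the __perf_event_overflow() lines above show, an event's overflow_handler is invoked when set and perf_event_output() is the fallback. A minimal sketch of a callback matching perf_overflow_handler_t follows; the counter and the names are assumptions for illustration, not code from this file.

/* Sketch: overflow callback with the perf_overflow_handler_t signature.
 * It only tallies overflows; a real handler could also inspect 'data'
 * and 'regs' before (or instead of) emitting a sample. */
#include <linux/atomic.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>

static atomic64_t demo_overflows;

static void demo_overflow_handler(struct perf_event *event,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	atomic64_inc(&demo_overflows);
}
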
6493 u64 perf_swevent_set_period(struct perf_event *event) in perf_swevent_set_period() argument
6495 struct hw_perf_event *hwc = &event->hw; in perf_swevent_set_period()
6516 static void perf_swevent_overflow(struct perf_event *event, u64 overflow, in perf_swevent_overflow() argument
6520 struct hw_perf_event *hwc = &event->hw; in perf_swevent_overflow()
6524 overflow = perf_swevent_set_period(event); in perf_swevent_overflow()
6530 if (__perf_event_overflow(event, throttle, in perf_swevent_overflow()
6542 static void perf_swevent_event(struct perf_event *event, u64 nr, in perf_swevent_event() argument
6546 struct hw_perf_event *hwc = &event->hw; in perf_swevent_event()
6548 local64_add(nr, &event->count); in perf_swevent_event()
6553 if (!is_sampling_event(event)) in perf_swevent_event()
6556 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { in perf_swevent_event()
6558 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
6560 data->period = event->hw.last_period; in perf_swevent_event()
6562 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) in perf_swevent_event()
6563 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
6568 perf_swevent_overflow(event, 0, data, regs); in perf_swevent_event()
6571 static int perf_exclude_event(struct perf_event *event, in perf_exclude_event() argument
6574 if (event->hw.state & PERF_HES_STOPPED) in perf_exclude_event()
6578 if (event->attr.exclude_user && user_mode(regs)) in perf_exclude_event()
6581 if (event->attr.exclude_kernel && !user_mode(regs)) in perf_exclude_event()
6588 static int perf_swevent_match(struct perf_event *event, in perf_swevent_match() argument
6594 if (event->attr.type != type) in perf_swevent_match()
6597 if (event->attr.config != event_id) in perf_swevent_match()
6600 if (perf_exclude_event(event, regs)) in perf_swevent_match()
6636 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) in find_swevent_head() argument
6639 u32 event_id = event->attr.config; in find_swevent_head()
6640 u64 type = event->attr.type; in find_swevent_head()
6648 lockdep_is_held(&event->ctx->lock)); in find_swevent_head()
6661 struct perf_event *event; in do_perf_sw_event() local
6669 hlist_for_each_entry_rcu(event, head, hlist_entry) { in do_perf_sw_event()
6670 if (perf_swevent_match(event, type, event_id, data, regs)) in do_perf_sw_event()
6671 perf_swevent_event(event, nr, data, regs); in do_perf_sw_event()
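
do_perf_sw_event() above is reached from the in-kernel software event helpers. A hedged sketch of feeding it (the wrapper function is invented; perf_sw_event() itself is the standard helper, used e.g. by the page-fault path):

/* Sketch: emit one software event from kernel context. perf_sw_event()
 * is the usual entry point that, for enabled event ids, ends up in
 * do_perf_sw_event(); the event id and addr value are illustrative. */
#include <linux/perf_event.h>
#include <linux/ptrace.h>

static void demo_count_fault(struct pt_regs *regs, unsigned long address)
{
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
}
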
6721 static void perf_swevent_read(struct perf_event *event) in perf_swevent_read() argument
6725 static int perf_swevent_add(struct perf_event *event, int flags) in perf_swevent_add() argument
6728 struct hw_perf_event *hwc = &event->hw; in perf_swevent_add()
6731 if (is_sampling_event(event)) { in perf_swevent_add()
6733 perf_swevent_set_period(event); in perf_swevent_add()
6738 head = find_swevent_head(swhash, event); in perf_swevent_add()
6742 hlist_add_head_rcu(&event->hlist_entry, head); in perf_swevent_add()
6743 perf_event_update_userpage(event); in perf_swevent_add()
6748 static void perf_swevent_del(struct perf_event *event, int flags) in perf_swevent_del() argument
6750 hlist_del_rcu(&event->hlist_entry); in perf_swevent_del()
6753 static void perf_swevent_start(struct perf_event *event, int flags) in perf_swevent_start() argument
6755 event->hw.state = 0; in perf_swevent_start()
6758 static void perf_swevent_stop(struct perf_event *event, int flags) in perf_swevent_stop() argument
6760 event->hw.state = PERF_HES_STOPPED; in perf_swevent_stop()
6782 static void swevent_hlist_put_cpu(struct perf_event *event, int cpu) in swevent_hlist_put_cpu() argument
6794 static void swevent_hlist_put(struct perf_event *event) in swevent_hlist_put() argument
6799 swevent_hlist_put_cpu(event, cpu); in swevent_hlist_put()
6802 static int swevent_hlist_get_cpu(struct perf_event *event, int cpu) in swevent_hlist_get_cpu() argument
6825 static int swevent_hlist_get(struct perf_event *event) in swevent_hlist_get() argument
6832 err = swevent_hlist_get_cpu(event, cpu); in swevent_hlist_get()
6845 swevent_hlist_put_cpu(event, cpu); in swevent_hlist_get()
6854 static void sw_perf_event_destroy(struct perf_event *event) in sw_perf_event_destroy() argument
6856 u64 event_id = event->attr.config; in sw_perf_event_destroy()
6858 WARN_ON(event->parent); in sw_perf_event_destroy()
6861 swevent_hlist_put(event); in sw_perf_event_destroy()
6864 static int perf_swevent_init(struct perf_event *event) in perf_swevent_init() argument
6866 u64 event_id = event->attr.config; in perf_swevent_init()
6868 if (event->attr.type != PERF_TYPE_SOFTWARE) in perf_swevent_init()
6874 if (has_branch_stack(event)) in perf_swevent_init()
6889 if (!event->parent) { in perf_swevent_init()
6892 err = swevent_hlist_get(event); in perf_swevent_init()
6897 event->destroy = sw_perf_event_destroy; in perf_swevent_init()
6918 static int perf_tp_filter_match(struct perf_event *event, in perf_tp_filter_match() argument
6924 if (event->parent) in perf_tp_filter_match()
6925 event = event->parent; in perf_tp_filter_match()
6927 if (likely(!event->filter) || filter_match_preds(event->filter, record)) in perf_tp_filter_match()
6932 static int perf_tp_event_match(struct perf_event *event, in perf_tp_event_match() argument
6936 if (event->hw.state & PERF_HES_STOPPED) in perf_tp_event_match()
6941 if (event->attr.exclude_kernel) in perf_tp_event_match()
6944 if (!perf_tp_filter_match(event, data)) in perf_tp_event_match()
6955 struct perf_event *event; in perf_tp_event() local
6965 hlist_for_each_entry_rcu(event, head, hlist_entry) { in perf_tp_event()
6966 if (perf_tp_event_match(event, &data, regs)) in perf_tp_event()
6967 perf_swevent_event(event, count, &data, regs); in perf_tp_event()
6983 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_tp_event()
6984 if (event->attr.type != PERF_TYPE_TRACEPOINT) in perf_tp_event()
6986 if (event->attr.config != entry->type) in perf_tp_event()
6988 if (perf_tp_event_match(event, &data, regs)) in perf_tp_event()
6989 perf_swevent_event(event, count, &data, regs); in perf_tp_event()
6999 static void tp_perf_event_destroy(struct perf_event *event) in tp_perf_event_destroy() argument
7001 perf_trace_destroy(event); in tp_perf_event_destroy()
7004 static int perf_tp_event_init(struct perf_event *event) in perf_tp_event_init() argument
7008 if (event->attr.type != PERF_TYPE_TRACEPOINT) in perf_tp_event_init()
7014 if (has_branch_stack(event)) in perf_tp_event_init()
7017 err = perf_trace_init(event); in perf_tp_event_init()
7021 event->destroy = tp_perf_event_destroy; in perf_tp_event_init()
7042 static int perf_event_set_filter(struct perf_event *event, void __user *arg) in perf_event_set_filter() argument
7047 if (event->attr.type != PERF_TYPE_TRACEPOINT) in perf_event_set_filter()
7054 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); in perf_event_set_filter()
7060 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
7062 ftrace_profile_free_filter(event); in perf_event_free_filter()
7065 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) in perf_event_set_bpf_prog() argument
7069 if (event->attr.type != PERF_TYPE_TRACEPOINT) in perf_event_set_bpf_prog()
7072 if (event->tp_event->prog) in perf_event_set_bpf_prog()
7075 if (!(event->tp_event->flags & TRACE_EVENT_FL_UKPROBE)) in perf_event_set_bpf_prog()
7089 event->tp_event->prog = prog; in perf_event_set_bpf_prog()
7094 static void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
7098 if (!event->tp_event) in perf_event_free_bpf_prog()
7101 prog = event->tp_event->prog; in perf_event_free_bpf_prog()
7103 event->tp_event->prog = NULL; in perf_event_free_bpf_prog()
7114 static int perf_event_set_filter(struct perf_event *event, void __user *arg) in perf_event_set_filter() argument
7119 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
7123 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) in perf_event_set_bpf_prog() argument
7128 static void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
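
perf_event_set_filter() and perf_event_set_bpf_prog() above back the PERF_EVENT_IOC_SET_FILTER and PERF_EVENT_IOC_SET_BPF ioctls on tracepoint events (the latter takes a BPF program fd). A hedged user-space sketch; the fd source and the filter expression are assumptions:

/* Sketch: attach an ftrace-style filter string to a tracepoint event fd.
 * 'fd' must come from perf_event_open() with attr.type == PERF_TYPE_TRACEPOINT;
 * the filter expression is only an example. */
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static int set_tp_filter(int fd)
{
	return ioctl(fd, PERF_EVENT_IOC_SET_FILTER, "common_pid != 0");
}
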
7155 struct perf_event *event; in perf_swevent_hrtimer() local
7158 event = container_of(hrtimer, struct perf_event, hw.hrtimer); in perf_swevent_hrtimer()
7160 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_swevent_hrtimer()
7163 event->pmu->read(event); in perf_swevent_hrtimer()
7165 perf_sample_data_init(&data, 0, event->hw.last_period); in perf_swevent_hrtimer()
7168 if (regs && !perf_exclude_event(event, regs)) { in perf_swevent_hrtimer()
7169 if (!(event->attr.exclude_idle && is_idle_task(current))) in perf_swevent_hrtimer()
7170 if (__perf_event_overflow(event, 1, &data, regs)) in perf_swevent_hrtimer()
7174 period = max_t(u64, 10000, event->hw.sample_period); in perf_swevent_hrtimer()
7180 static void perf_swevent_start_hrtimer(struct perf_event *event) in perf_swevent_start_hrtimer() argument
7182 struct hw_perf_event *hwc = &event->hw; in perf_swevent_start_hrtimer()
7185 if (!is_sampling_event(event)) in perf_swevent_start_hrtimer()
7201 static void perf_swevent_cancel_hrtimer(struct perf_event *event) in perf_swevent_cancel_hrtimer() argument
7203 struct hw_perf_event *hwc = &event->hw; in perf_swevent_cancel_hrtimer()
7205 if (is_sampling_event(event)) { in perf_swevent_cancel_hrtimer()
7213 static void perf_swevent_init_hrtimer(struct perf_event *event) in perf_swevent_init_hrtimer() argument
7215 struct hw_perf_event *hwc = &event->hw; in perf_swevent_init_hrtimer()
7217 if (!is_sampling_event(event)) in perf_swevent_init_hrtimer()
7227 if (event->attr.freq) { in perf_swevent_init_hrtimer()
7228 long freq = event->attr.sample_freq; in perf_swevent_init_hrtimer()
7230 event->attr.sample_period = NSEC_PER_SEC / freq; in perf_swevent_init_hrtimer()
7231 hwc->sample_period = event->attr.sample_period; in perf_swevent_init_hrtimer()
7234 event->attr.freq = 0; in perf_swevent_init_hrtimer()
7242 static void cpu_clock_event_update(struct perf_event *event) in cpu_clock_event_update() argument
7248 prev = local64_xchg(&event->hw.prev_count, now); in cpu_clock_event_update()
7249 local64_add(now - prev, &event->count); in cpu_clock_event_update()
7252 static void cpu_clock_event_start(struct perf_event *event, int flags) in cpu_clock_event_start() argument
7254 local64_set(&event->hw.prev_count, local_clock()); in cpu_clock_event_start()
7255 perf_swevent_start_hrtimer(event); in cpu_clock_event_start()
7258 static void cpu_clock_event_stop(struct perf_event *event, int flags) in cpu_clock_event_stop() argument
7260 perf_swevent_cancel_hrtimer(event); in cpu_clock_event_stop()
7261 cpu_clock_event_update(event); in cpu_clock_event_stop()
7264 static int cpu_clock_event_add(struct perf_event *event, int flags) in cpu_clock_event_add() argument
7267 cpu_clock_event_start(event, flags); in cpu_clock_event_add()
7268 perf_event_update_userpage(event); in cpu_clock_event_add()
7273 static void cpu_clock_event_del(struct perf_event *event, int flags) in cpu_clock_event_del() argument
7275 cpu_clock_event_stop(event, flags); in cpu_clock_event_del()
7278 static void cpu_clock_event_read(struct perf_event *event) in cpu_clock_event_read() argument
7280 cpu_clock_event_update(event); in cpu_clock_event_read()
7283 static int cpu_clock_event_init(struct perf_event *event) in cpu_clock_event_init() argument
7285 if (event->attr.type != PERF_TYPE_SOFTWARE) in cpu_clock_event_init()
7288 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) in cpu_clock_event_init()
7294 if (has_branch_stack(event)) in cpu_clock_event_init()
7297 perf_swevent_init_hrtimer(event); in cpu_clock_event_init()
7319 static void task_clock_event_update(struct perf_event *event, u64 now) in task_clock_event_update() argument
7324 prev = local64_xchg(&event->hw.prev_count, now); in task_clock_event_update()
7326 local64_add(delta, &event->count); in task_clock_event_update()
7329 static void task_clock_event_start(struct perf_event *event, int flags) in task_clock_event_start() argument
7331 local64_set(&event->hw.prev_count, event->ctx->time); in task_clock_event_start()
7332 perf_swevent_start_hrtimer(event); in task_clock_event_start()
7335 static void task_clock_event_stop(struct perf_event *event, int flags) in task_clock_event_stop() argument
7337 perf_swevent_cancel_hrtimer(event); in task_clock_event_stop()
7338 task_clock_event_update(event, event->ctx->time); in task_clock_event_stop()
7341 static int task_clock_event_add(struct perf_event *event, int flags) in task_clock_event_add() argument
7344 task_clock_event_start(event, flags); in task_clock_event_add()
7345 perf_event_update_userpage(event); in task_clock_event_add()
7350 static void task_clock_event_del(struct perf_event *event, int flags) in task_clock_event_del() argument
7352 task_clock_event_stop(event, PERF_EF_UPDATE); in task_clock_event_del()
7355 static void task_clock_event_read(struct perf_event *event) in task_clock_event_read() argument
7358 u64 delta = now - event->ctx->timestamp; in task_clock_event_read()
7359 u64 time = event->ctx->time + delta; in task_clock_event_read()
7361 task_clock_event_update(event, time); in task_clock_event_read()
7364 static int task_clock_event_init(struct perf_event *event) in task_clock_event_init() argument
7366 if (event->attr.type != PERF_TYPE_SOFTWARE) in task_clock_event_init()
7369 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) in task_clock_event_init()
7375 if (has_branch_stack(event)) in task_clock_event_init()
7378 perf_swevent_init_hrtimer(event); in task_clock_event_init()
7446 static int perf_event_idx_default(struct perf_event *event) in perf_event_idx_default() argument
7739 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) in perf_try_init_event() argument
7747 if (event->group_leader != event) { in perf_try_init_event()
7752 ctx = perf_event_ctx_lock_nested(event->group_leader, in perf_try_init_event()
7757 event->pmu = pmu; in perf_try_init_event()
7758 ret = pmu->event_init(event); in perf_try_init_event()
7761 perf_event_ctx_unlock(event->group_leader, ctx); in perf_try_init_event()
7769 static struct pmu *perf_init_event(struct perf_event *event) in perf_init_event() argument
7778 pmu = idr_find(&pmu_idr, event->attr.type); in perf_init_event()
7781 ret = perf_try_init_event(pmu, event); in perf_init_event()
7788 ret = perf_try_init_event(pmu, event); in perf_init_event()
7804 static void account_event_cpu(struct perf_event *event, int cpu) in account_event_cpu() argument
7806 if (event->parent) in account_event_cpu()
7809 if (is_cgroup_event(event)) in account_event_cpu()
7813 static void account_event(struct perf_event *event) in account_event() argument
7815 if (event->parent) in account_event()
7818 if (event->attach_state & PERF_ATTACH_TASK) in account_event()
7820 if (event->attr.mmap || event->attr.mmap_data) in account_event()
7822 if (event->attr.comm) in account_event()
7824 if (event->attr.task) in account_event()
7826 if (event->attr.freq) { in account_event()
7830 if (event->attr.context_switch) { in account_event()
7834 if (has_branch_stack(event)) in account_event()
7836 if (is_cgroup_event(event)) in account_event()
7839 account_event_cpu(event, event->cpu); in account_event()
7854 struct perf_event *event; in perf_event_alloc() local
7863 event = kzalloc(sizeof(*event), GFP_KERNEL); in perf_event_alloc()
7864 if (!event) in perf_event_alloc()
7872 group_leader = event; in perf_event_alloc()
7874 mutex_init(&event->child_mutex); in perf_event_alloc()
7875 INIT_LIST_HEAD(&event->child_list); in perf_event_alloc()
7877 INIT_LIST_HEAD(&event->group_entry); in perf_event_alloc()
7878 INIT_LIST_HEAD(&event->event_entry); in perf_event_alloc()
7879 INIT_LIST_HEAD(&event->sibling_list); in perf_event_alloc()
7880 INIT_LIST_HEAD(&event->rb_entry); in perf_event_alloc()
7881 INIT_LIST_HEAD(&event->active_entry); in perf_event_alloc()
7882 INIT_HLIST_NODE(&event->hlist_entry); in perf_event_alloc()
7885 init_waitqueue_head(&event->waitq); in perf_event_alloc()
7886 init_irq_work(&event->pending, perf_pending_event); in perf_event_alloc()
7888 mutex_init(&event->mmap_mutex); in perf_event_alloc()
7890 atomic_long_set(&event->refcount, 1); in perf_event_alloc()
7891 event->cpu = cpu; in perf_event_alloc()
7892 event->attr = *attr; in perf_event_alloc()
7893 event->group_leader = group_leader; in perf_event_alloc()
7894 event->pmu = NULL; in perf_event_alloc()
7895 event->oncpu = -1; in perf_event_alloc()
7897 event->parent = parent_event; in perf_event_alloc()
7899 event->ns = get_pid_ns(task_active_pid_ns(current)); in perf_event_alloc()
7900 event->id = atomic64_inc_return(&perf_event_id); in perf_event_alloc()
7902 event->state = PERF_EVENT_STATE_INACTIVE; in perf_event_alloc()
7905 event->attach_state = PERF_ATTACH_TASK; in perf_event_alloc()
7911 event->hw.target = task; in perf_event_alloc()
7914 event->clock = &local_clock; in perf_event_alloc()
7916 event->clock = parent_event->clock; in perf_event_alloc()
7923 event->overflow_handler = overflow_handler; in perf_event_alloc()
7924 event->overflow_handler_context = context; in perf_event_alloc()
7926 perf_event__state_init(event); in perf_event_alloc()
7930 hwc = &event->hw; in perf_event_alloc()
7944 if (!has_branch_stack(event)) in perf_event_alloc()
7945 event->attr.branch_sample_type = 0; in perf_event_alloc()
7948 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); in perf_event_alloc()
7953 pmu = perf_init_event(event); in perf_event_alloc()
7961 err = exclusive_event_init(event); in perf_event_alloc()
7965 if (!event->parent) { in perf_event_alloc()
7966 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { in perf_event_alloc()
7974 account_event(event); in perf_event_alloc()
7976 return event; in perf_event_alloc()
7979 exclusive_event_destroy(event); in perf_event_alloc()
7982 if (event->destroy) in perf_event_alloc()
7983 event->destroy(event); in perf_event_alloc()
7986 if (is_cgroup_event(event)) in perf_event_alloc()
7987 perf_detach_cgroup(event); in perf_event_alloc()
7988 if (event->ns) in perf_event_alloc()
7989 put_pid_ns(event->ns); in perf_event_alloc()
7990 kfree(event); in perf_event_alloc()
8126 perf_event_set_output(struct perf_event *event, struct perf_event *output_event) in perf_event_set_output() argument
8135 if (event == output_event) in perf_event_set_output()
8141 if (output_event->cpu != event->cpu) in perf_event_set_output()
8147 if (output_event->cpu == -1 && output_event->ctx != event->ctx) in perf_event_set_output()
8153 if (output_event->clock != event->clock) in perf_event_set_output()
8159 if (has_aux(event) && has_aux(output_event) && in perf_event_set_output()
8160 event->pmu != output_event->pmu) in perf_event_set_output()
8164 mutex_lock(&event->mmap_mutex); in perf_event_set_output()
8166 if (atomic_read(&event->mmap_count)) in perf_event_set_output()
8176 ring_buffer_attach(event, rb); in perf_event_set_output()
8180 mutex_unlock(&event->mmap_mutex); in perf_event_set_output()
8195 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) in perf_event_set_clock() argument
8201 event->clock = &ktime_get_mono_fast_ns; in perf_event_set_clock()
8206 event->clock = &ktime_get_raw_fast_ns; in perf_event_set_clock()
8211 event->clock = &ktime_get_real_ns; in perf_event_set_clock()
8215 event->clock = &ktime_get_boot_ns; in perf_event_set_clock()
8219 event->clock = &ktime_get_tai_ns; in perf_event_set_clock()
8226 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) in perf_event_set_clock()
8245 struct perf_event *event, *sibling; in SYSCALL_DEFINE5() local
8343 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, in SYSCALL_DEFINE5()
8345 if (IS_ERR(event)) { in SYSCALL_DEFINE5()
8346 err = PTR_ERR(event); in SYSCALL_DEFINE5()
8350 if (is_sampling_event(event)) { in SYSCALL_DEFINE5()
8351 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { in SYSCALL_DEFINE5()
8361 pmu = event->pmu; in SYSCALL_DEFINE5()
8364 err = perf_event_set_clock(event, attr.clockid); in SYSCALL_DEFINE5()
8370 (is_software_event(event) != is_software_event(group_leader))) { in SYSCALL_DEFINE5()
8371 if (is_software_event(event)) { in SYSCALL_DEFINE5()
8395 ctx = find_get_context(pmu, task, event); in SYSCALL_DEFINE5()
8420 if (group_leader->clock != event->clock) in SYSCALL_DEFINE5()
8440 if (group_leader->cpu != event->cpu) in SYSCALL_DEFINE5()
8455 err = perf_event_set_output(event, output_event); in SYSCALL_DEFINE5()
8460 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, in SYSCALL_DEFINE5()
8474 if (!perf_event_validate_size(event)) { in SYSCALL_DEFINE5()
8483 if (!exclusive_event_installable(event, ctx)) { in SYSCALL_DEFINE5()
8557 perf_event__header_size(event); in SYSCALL_DEFINE5()
8558 perf_event__id_header_size(event); in SYSCALL_DEFINE5()
8560 perf_install_in_context(ctx, event, event->cpu); in SYSCALL_DEFINE5()
8574 event->owner = current; in SYSCALL_DEFINE5()
8577 list_add_tail(&event->owner_entry, &current->perf_event_list); in SYSCALL_DEFINE5()
8605 free_event(event); in SYSCALL_DEFINE5()
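
The SYSCALL_DEFINE5 body above is perf_event_open(): it allocates the event, resolves its context, installs it and hands back a file descriptor. A minimal, self-contained user-space sketch of the counting path (glibc has no wrapper, so syscall(2) is used directly; the choice of a task-clock counter and the lack of error handling are illustrative):

/* Sketch: count task-clock time for the calling thread. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.disabled = 1;
	attr.exclude_kernel = 1;
	/* Optionally pick the event clock handled by perf_event_set_clock():
	 * attr.use_clockid = 1; attr.clockid = CLOCK_MONOTONIC_RAW; */

	/* pid = 0 (this task), cpu = -1 (any CPU), no group, no flags */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("task-clock: %llu ns\n", (unsigned long long)count);

	close(fd);
	return 0;
}
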
8635 struct perf_event *event; in perf_event_create_kernel_counter() local
8642 event = perf_event_alloc(attr, cpu, task, NULL, NULL, in perf_event_create_kernel_counter()
8644 if (IS_ERR(event)) { in perf_event_create_kernel_counter()
8645 err = PTR_ERR(event); in perf_event_create_kernel_counter()
8650 event->owner = EVENT_OWNER_KERNEL; in perf_event_create_kernel_counter()
8652 ctx = find_get_context(event->pmu, task, event); in perf_event_create_kernel_counter()
8660 if (!exclusive_event_installable(event, ctx)) { in perf_event_create_kernel_counter()
8668 perf_install_in_context(ctx, event, cpu); in perf_event_create_kernel_counter()
8672 return event; in perf_event_create_kernel_counter()
8675 free_event(event); in perf_event_create_kernel_counter()
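
perf_event_create_kernel_counter() above is the in-kernel counterpart of perf_event_open(). A hedged sketch of creating a per-CPU sampling counter that delivers overflows to the demo handler from the earlier sketch; the attr values, names and error handling are assumptions:

/* Sketch: in-kernel CPU-clock sampler on one CPU, with overflows routed
 * to demo_overflow_handler(). The period is nanoseconds on the hrtimer
 * path (~1ms here) and is illustrative. */
#include <linux/err.h>
#include <linux/perf_event.h>

static struct perf_event *demo_counter;

static int demo_counter_start(int cpu)
{
	struct perf_event_attr attr = {
		.size		= sizeof(attr),
		.type		= PERF_TYPE_SOFTWARE,
		.config		= PERF_COUNT_SW_CPU_CLOCK,
		.sample_period	= 1000000,
	};

	demo_counter = perf_event_create_kernel_counter(&attr, cpu, NULL,
							demo_overflow_handler,
							NULL);
	if (IS_ERR(demo_counter))
		return PTR_ERR(demo_counter);
	return 0;
}
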
8685 struct perf_event *event, *tmp; in perf_pmu_migrate_context() local
8696 list_for_each_entry_safe(event, tmp, &src_ctx->event_list, in perf_pmu_migrate_context()
8698 perf_remove_from_context(event, false); in perf_pmu_migrate_context()
8699 unaccount_event_cpu(event, src_cpu); in perf_pmu_migrate_context()
8701 list_add(&event->migrate_entry, &events); in perf_pmu_migrate_context()
8717 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { in perf_pmu_migrate_context()
8718 if (event->group_leader == event) in perf_pmu_migrate_context()
8721 list_del(&event->migrate_entry); in perf_pmu_migrate_context()
8722 if (event->state >= PERF_EVENT_STATE_OFF) in perf_pmu_migrate_context()
8723 event->state = PERF_EVENT_STATE_INACTIVE; in perf_pmu_migrate_context()
8724 account_event_cpu(event, dst_cpu); in perf_pmu_migrate_context()
8725 perf_install_in_context(dst_ctx, event, dst_cpu); in perf_pmu_migrate_context()
8733 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { in perf_pmu_migrate_context()
8734 list_del(&event->migrate_entry); in perf_pmu_migrate_context()
8735 if (event->state >= PERF_EVENT_STATE_OFF) in perf_pmu_migrate_context()
8736 event->state = PERF_EVENT_STATE_INACTIVE; in perf_pmu_migrate_context()
8737 account_event_cpu(event, dst_cpu); in perf_pmu_migrate_context()
8738 perf_install_in_context(dst_ctx, event, dst_cpu); in perf_pmu_migrate_context()
8894 struct perf_event *event, *tmp; in perf_event_exit_task() local
8898 list_for_each_entry_safe(event, tmp, &child->perf_event_list, in perf_event_exit_task()
8900 list_del_init(&event->owner_entry); in perf_event_exit_task()
8908 event->owner = NULL; in perf_event_exit_task()
8924 static void perf_free_event(struct perf_event *event, in perf_free_event() argument
8927 struct perf_event *parent = event->parent; in perf_free_event()
8933 list_del_init(&event->child_list); in perf_free_event()
8939 perf_group_detach(event); in perf_free_event()
8940 list_del_event(event, ctx); in perf_free_event()
8942 free_event(event); in perf_free_event()
8955 struct perf_event *event, *tmp; in perf_event_free_task() local
8965 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, in perf_event_free_task()
8967 perf_free_event(event, ctx); in perf_event_free_task()
8969 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, in perf_event_free_task()
8971 perf_free_event(event, ctx); in perf_event_free_task()
8995 struct perf_event *event; in perf_event_get() local
9001 event = f.file->private_data; in perf_event_get()
9002 atomic_long_inc(&event->refcount); in perf_event_get()
9005 return event; in perf_event_get()
9008 const struct perf_event_attr *perf_event_attrs(struct perf_event *event) in perf_event_attrs() argument
9010 if (!event) in perf_event_attrs()
9013 return &event->attr; in perf_event_attrs()
9129 inherit_task_group(struct perf_event *event, struct task_struct *parent, in inherit_task_group() argument
9137 if (!event->attr.inherit) { in inherit_task_group()
9158 ret = inherit_group(event, parent, parent_ctx, in inherit_task_group()
9174 struct perf_event *event; in perf_event_init_context() local
9208 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) { in perf_event_init_context()
9209 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
9224 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) { in perf_event_init_context()
9225 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
9320 list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry) in __perf_event_exit_context()