Lines Matching refs:event

129 static bool is_kernel_event(struct perf_event *event) in is_kernel_event() argument
131 return event->owner == EVENT_OWNER_KERNEL; in is_kernel_event()
318 static u64 perf_event_time(struct perf_event *event);
332 static inline u64 perf_event_clock(struct perf_event *event) in perf_event_clock() argument
334 return event->clock(); in perf_event_clock()
362 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
364 struct perf_event_context *ctx = event->ctx; in perf_cgroup_match()
368 if (!event->cgrp) in perf_cgroup_match()
382 event->cgrp->css.cgroup); in perf_cgroup_match()
385 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
387 css_put(&event->cgrp->css); in perf_detach_cgroup()
388 event->cgrp = NULL; in perf_detach_cgroup()
391 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
393 return event->cgrp != NULL; in is_cgroup_event()
396 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
400 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_event_time()
424 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
432 if (!is_cgroup_event(event)) in update_cgrp_time_from_event()
439 if (cgrp == event->cgrp) in update_cgrp_time_from_event()
440 __update_cgrp_time(event->cgrp); in update_cgrp_time_from_event()
586 static inline int perf_cgroup_connect(int fd, struct perf_event *event, in perf_cgroup_connect() argument
606 event->cgrp = cgrp; in perf_cgroup_connect()
614 perf_detach_cgroup(event); in perf_cgroup_connect()
623 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) in perf_cgroup_set_shadow_time() argument
626 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_set_shadow_time()
627 event->shadow_ctx_time = now - t->timestamp; in perf_cgroup_set_shadow_time()
631 perf_cgroup_defer_enabled(struct perf_event *event) in perf_cgroup_defer_enabled() argument
639 if (is_cgroup_event(event) && !perf_cgroup_match(event)) in perf_cgroup_defer_enabled()
640 event->cgrp_defer_enabled = 1; in perf_cgroup_defer_enabled()
644 perf_cgroup_mark_enabled(struct perf_event *event, in perf_cgroup_mark_enabled() argument
648 u64 tstamp = perf_event_time(event); in perf_cgroup_mark_enabled()
650 if (!event->cgrp_defer_enabled) in perf_cgroup_mark_enabled()
653 event->cgrp_defer_enabled = 0; in perf_cgroup_mark_enabled()
655 event->tstamp_enabled = tstamp - event->total_time_enabled; in perf_cgroup_mark_enabled()
656 list_for_each_entry(sub, &event->sibling_list, group_entry) { in perf_cgroup_mark_enabled()
666 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
671 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
674 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
679 static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event) in perf_cgroup_event_cgrp_time() argument
684 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
702 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event, in perf_cgroup_connect() argument
721 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) in perf_cgroup_set_shadow_time() argument
725 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
731 perf_cgroup_defer_enabled(struct perf_event *event) in perf_cgroup_defer_enabled() argument
736 perf_cgroup_mark_enabled(struct perf_event *event, in perf_cgroup_mark_enabled() argument
971 perf_event_ctx_lock_nested(struct perf_event *event, int nesting) in perf_event_ctx_lock_nested() argument
977 ctx = ACCESS_ONCE(event->ctx); in perf_event_ctx_lock_nested()
985 if (event->ctx != ctx) { in perf_event_ctx_lock_nested()
995 perf_event_ctx_lock(struct perf_event *event) in perf_event_ctx_lock() argument
997 return perf_event_ctx_lock_nested(event, 0); in perf_event_ctx_lock()
1000 static void perf_event_ctx_unlock(struct perf_event *event, in perf_event_ctx_unlock() argument
1026 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) in perf_event_pid() argument
1031 if (event->parent) in perf_event_pid()
1032 event = event->parent; in perf_event_pid()
1034 return task_tgid_nr_ns(p, event->ns); in perf_event_pid()
1037 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) in perf_event_tid() argument
1042 if (event->parent) in perf_event_tid()
1043 event = event->parent; in perf_event_tid()
1045 return task_pid_nr_ns(p, event->ns); in perf_event_tid()
1052 static u64 primary_event_id(struct perf_event *event) in primary_event_id() argument
1054 u64 id = event->id; in primary_event_id()
1056 if (event->parent) in primary_event_id()
1057 id = event->parent->id; in primary_event_id()
1153 static u64 perf_event_time(struct perf_event *event) in perf_event_time() argument
1155 struct perf_event_context *ctx = event->ctx; in perf_event_time()
1157 if (is_cgroup_event(event)) in perf_event_time()
1158 return perf_cgroup_event_time(event); in perf_event_time()
1167 static void update_event_times(struct perf_event *event) in update_event_times() argument
1169 struct perf_event_context *ctx = event->ctx; in update_event_times()
1172 if (event->state < PERF_EVENT_STATE_INACTIVE || in update_event_times()
1173 event->group_leader->state < PERF_EVENT_STATE_INACTIVE) in update_event_times()
1185 if (is_cgroup_event(event)) in update_event_times()
1186 run_end = perf_cgroup_event_time(event); in update_event_times()
1190 run_end = event->tstamp_stopped; in update_event_times()
1192 event->total_time_enabled = run_end - event->tstamp_enabled; in update_event_times()
1194 if (event->state == PERF_EVENT_STATE_INACTIVE) in update_event_times()
1195 run_end = event->tstamp_stopped; in update_event_times()
1197 run_end = perf_event_time(event); in update_event_times()
1199 event->total_time_running = run_end - event->tstamp_running; in update_event_times()
1208 struct perf_event *event; in update_group_times() local
1211 list_for_each_entry(event, &leader->sibling_list, group_entry) in update_group_times()
1212 update_event_times(event); in update_group_times()
1216 ctx_group_list(struct perf_event *event, struct perf_event_context *ctx) in ctx_group_list() argument
1218 if (event->attr.pinned) in ctx_group_list()
1229 list_add_event(struct perf_event *event, struct perf_event_context *ctx) in list_add_event() argument
1231 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); in list_add_event()
1232 event->attach_state |= PERF_ATTACH_CONTEXT; in list_add_event()
1239 if (event->group_leader == event) { in list_add_event()
1242 if (is_software_event(event)) in list_add_event()
1243 event->group_flags |= PERF_GROUP_SOFTWARE; in list_add_event()
1245 list = ctx_group_list(event, ctx); in list_add_event()
1246 list_add_tail(&event->group_entry, list); in list_add_event()
1249 if (is_cgroup_event(event)) in list_add_event()
1252 list_add_rcu(&event->event_entry, &ctx->event_list); in list_add_event()
1254 if (event->attr.inherit_stat) in list_add_event()
1263 static inline void perf_event__state_init(struct perf_event *event) in perf_event__state_init() argument
1265 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : in perf_event__state_init()
1273 static void perf_event__read_size(struct perf_event *event) in perf_event__read_size() argument
1279 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) in perf_event__read_size()
1282 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) in perf_event__read_size()
1285 if (event->attr.read_format & PERF_FORMAT_ID) in perf_event__read_size()
1288 if (event->attr.read_format & PERF_FORMAT_GROUP) { in perf_event__read_size()
1289 nr += event->group_leader->nr_siblings; in perf_event__read_size()
1294 event->read_size = size; in perf_event__read_size()
1297 static void perf_event__header_size(struct perf_event *event) in perf_event__header_size() argument
1300 u64 sample_type = event->attr.sample_type; in perf_event__header_size()
1303 perf_event__read_size(event); in perf_event__header_size()
1318 size += event->read_size; in perf_event__header_size()
1326 event->header_size = size; in perf_event__header_size()
1329 static void perf_event__id_header_size(struct perf_event *event) in perf_event__id_header_size() argument
1332 u64 sample_type = event->attr.sample_type; in perf_event__id_header_size()
1353 event->id_header_size = size; in perf_event__id_header_size()
1356 static void perf_group_attach(struct perf_event *event) in perf_group_attach() argument
1358 struct perf_event *group_leader = event->group_leader, *pos; in perf_group_attach()
1363 if (event->attach_state & PERF_ATTACH_GROUP) in perf_group_attach()
1366 event->attach_state |= PERF_ATTACH_GROUP; in perf_group_attach()
1368 if (group_leader == event) in perf_group_attach()
1371 WARN_ON_ONCE(group_leader->ctx != event->ctx); in perf_group_attach()
1374 !is_software_event(event)) in perf_group_attach()
1377 list_add_tail(&event->group_entry, &group_leader->sibling_list); in perf_group_attach()
1391 list_del_event(struct perf_event *event, struct perf_event_context *ctx) in list_del_event() argument
1395 WARN_ON_ONCE(event->ctx != ctx); in list_del_event()
1401 if (!(event->attach_state & PERF_ATTACH_CONTEXT)) in list_del_event()
1404 event->attach_state &= ~PERF_ATTACH_CONTEXT; in list_del_event()
1406 if (is_cgroup_event(event)) { in list_del_event()
1419 if (event->attr.inherit_stat) in list_del_event()
1422 list_del_rcu(&event->event_entry); in list_del_event()
1424 if (event->group_leader == event) in list_del_event()
1425 list_del_init(&event->group_entry); in list_del_event()
1427 update_group_times(event); in list_del_event()
1436 if (event->state > PERF_EVENT_STATE_OFF) in list_del_event()
1437 event->state = PERF_EVENT_STATE_OFF; in list_del_event()
1442 static void perf_group_detach(struct perf_event *event) in perf_group_detach() argument
1450 if (!(event->attach_state & PERF_ATTACH_GROUP)) in perf_group_detach()
1453 event->attach_state &= ~PERF_ATTACH_GROUP; in perf_group_detach()
1458 if (event->group_leader != event) { in perf_group_detach()
1459 list_del_init(&event->group_entry); in perf_group_detach()
1460 event->group_leader->nr_siblings--; in perf_group_detach()
1464 if (!list_empty(&event->group_entry)) in perf_group_detach()
1465 list = &event->group_entry; in perf_group_detach()
1472 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) { in perf_group_detach()
1478 sibling->group_flags = event->group_flags; in perf_group_detach()
1480 WARN_ON_ONCE(sibling->ctx != event->ctx); in perf_group_detach()
1484 perf_event__header_size(event->group_leader); in perf_group_detach()
1486 list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry) in perf_group_detach()
1493 static bool is_orphaned_event(struct perf_event *event) in is_orphaned_event() argument
1495 return event && !is_kernel_event(event) && !event->owner; in is_orphaned_event()
1502 static bool is_orphaned_child(struct perf_event *event) in is_orphaned_child() argument
1504 return is_orphaned_event(event->parent); in is_orphaned_child()
1530 event_filter_match(struct perf_event *event) in event_filter_match() argument
1532 return (event->cpu == -1 || event->cpu == smp_processor_id()) in event_filter_match()
1533 && perf_cgroup_match(event); in event_filter_match()
1537 event_sched_out(struct perf_event *event, in event_sched_out() argument
1541 u64 tstamp = perf_event_time(event); in event_sched_out()
1544 WARN_ON_ONCE(event->ctx != ctx); in event_sched_out()
1553 if (event->state == PERF_EVENT_STATE_INACTIVE in event_sched_out()
1554 && !event_filter_match(event)) { in event_sched_out()
1555 delta = tstamp - event->tstamp_stopped; in event_sched_out()
1556 event->tstamp_running += delta; in event_sched_out()
1557 event->tstamp_stopped = tstamp; in event_sched_out()
1560 if (event->state != PERF_EVENT_STATE_ACTIVE) in event_sched_out()
1563 perf_pmu_disable(event->pmu); in event_sched_out()
1565 event->tstamp_stopped = tstamp; in event_sched_out()
1566 event->pmu->del(event, 0); in event_sched_out()
1567 event->oncpu = -1; in event_sched_out()
1568 event->state = PERF_EVENT_STATE_INACTIVE; in event_sched_out()
1569 if (event->pending_disable) { in event_sched_out()
1570 event->pending_disable = 0; in event_sched_out()
1571 event->state = PERF_EVENT_STATE_OFF; in event_sched_out()
1574 if (!is_software_event(event)) in event_sched_out()
1578 if (event->attr.freq && event->attr.sample_freq) in event_sched_out()
1580 if (event->attr.exclusive || !cpuctx->active_oncpu) in event_sched_out()
1583 if (is_orphaned_child(event)) in event_sched_out()
1586 perf_pmu_enable(event->pmu); in event_sched_out()
1594 struct perf_event *event; in group_sched_out() local
1602 list_for_each_entry(event, &group_event->sibling_list, group_entry) in group_sched_out()
1603 event_sched_out(event, cpuctx, ctx); in group_sched_out()
1610 struct perf_event *event; member
1623 struct perf_event *event = re->event; in __perf_remove_from_context() local
1624 struct perf_event_context *ctx = event->ctx; in __perf_remove_from_context()
1628 event_sched_out(event, cpuctx, ctx); in __perf_remove_from_context()
1630 perf_group_detach(event); in __perf_remove_from_context()
1631 list_del_event(event, ctx); in __perf_remove_from_context()
1655 static void perf_remove_from_context(struct perf_event *event, bool detach_group) in perf_remove_from_context() argument
1657 struct perf_event_context *ctx = event->ctx; in perf_remove_from_context()
1660 .event = event, in perf_remove_from_context()
1673 cpu_function_call(event->cpu, __perf_remove_from_context, &re); in perf_remove_from_context()
1701 perf_group_detach(event); in perf_remove_from_context()
1702 list_del_event(event, ctx); in perf_remove_from_context()
1711 struct perf_event *event = info; in __perf_event_disable() local
1712 struct perf_event_context *ctx = event->ctx; in __perf_event_disable()
1731 if (event->state >= PERF_EVENT_STATE_INACTIVE) { in __perf_event_disable()
1733 update_cgrp_time_from_event(event); in __perf_event_disable()
1734 update_group_times(event); in __perf_event_disable()
1735 if (event == event->group_leader) in __perf_event_disable()
1736 group_sched_out(event, cpuctx, ctx); in __perf_event_disable()
1738 event_sched_out(event, cpuctx, ctx); in __perf_event_disable()
1739 event->state = PERF_EVENT_STATE_OFF; in __perf_event_disable()
1760 static void _perf_event_disable(struct perf_event *event) in _perf_event_disable() argument
1762 struct perf_event_context *ctx = event->ctx; in _perf_event_disable()
1769 cpu_function_call(event->cpu, __perf_event_disable, event); in _perf_event_disable()
1774 if (!task_function_call(task, __perf_event_disable, event)) in _perf_event_disable()
1781 if (event->state == PERF_EVENT_STATE_ACTIVE) { in _perf_event_disable()
1795 if (event->state == PERF_EVENT_STATE_INACTIVE) { in _perf_event_disable()
1796 update_group_times(event); in _perf_event_disable()
1797 event->state = PERF_EVENT_STATE_OFF; in _perf_event_disable()
1806 void perf_event_disable(struct perf_event *event) in perf_event_disable() argument
1810 ctx = perf_event_ctx_lock(event); in perf_event_disable()
1811 _perf_event_disable(event); in perf_event_disable()
1812 perf_event_ctx_unlock(event, ctx); in perf_event_disable()
1816 static void perf_set_shadow_time(struct perf_event *event, in perf_set_shadow_time() argument
1845 if (is_cgroup_event(event)) in perf_set_shadow_time()
1846 perf_cgroup_set_shadow_time(event, tstamp); in perf_set_shadow_time()
1848 event->shadow_ctx_time = tstamp - ctx->timestamp; in perf_set_shadow_time()
1853 static void perf_log_throttle(struct perf_event *event, int enable);
1854 static void perf_log_itrace_start(struct perf_event *event);
1857 event_sched_in(struct perf_event *event, in event_sched_in() argument
1861 u64 tstamp = perf_event_time(event); in event_sched_in()
1866 if (event->state <= PERF_EVENT_STATE_OFF) in event_sched_in()
1869 event->state = PERF_EVENT_STATE_ACTIVE; in event_sched_in()
1870 event->oncpu = smp_processor_id(); in event_sched_in()
1877 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { in event_sched_in()
1878 perf_log_throttle(event, 1); in event_sched_in()
1879 event->hw.interrupts = 0; in event_sched_in()
1887 perf_pmu_disable(event->pmu); in event_sched_in()
1889 perf_set_shadow_time(event, ctx, tstamp); in event_sched_in()
1891 perf_log_itrace_start(event); in event_sched_in()
1893 if (event->pmu->add(event, PERF_EF_START)) { in event_sched_in()
1894 event->state = PERF_EVENT_STATE_INACTIVE; in event_sched_in()
1895 event->oncpu = -1; in event_sched_in()
1900 event->tstamp_running += tstamp - event->tstamp_stopped; in event_sched_in()
1902 if (!is_software_event(event)) in event_sched_in()
1906 if (event->attr.freq && event->attr.sample_freq) in event_sched_in()
1909 if (event->attr.exclusive) in event_sched_in()
1912 if (is_orphaned_child(event)) in event_sched_in()
1916 perf_pmu_enable(event->pmu); in event_sched_in()
1926 struct perf_event *event, *partial_group = NULL; in group_sched_in() local
1945 list_for_each_entry(event, &group_event->sibling_list, group_entry) { in group_sched_in()
1946 if (event_sched_in(event, cpuctx, ctx)) { in group_sched_in()
1947 partial_group = event; in group_sched_in()
1970 list_for_each_entry(event, &group_event->sibling_list, group_entry) { in group_sched_in()
1971 if (event == partial_group) in group_sched_in()
1975 event->tstamp_running += now - event->tstamp_stopped; in group_sched_in()
1976 event->tstamp_stopped = now; in group_sched_in()
1978 event_sched_out(event, cpuctx, ctx); in group_sched_in()
1993 static int group_can_go_on(struct perf_event *event, in group_can_go_on() argument
2000 if (event->group_flags & PERF_GROUP_SOFTWARE) in group_can_go_on()
2012 if (event->attr.exclusive && cpuctx->active_oncpu) in group_can_go_on()
2021 static void add_event_to_ctx(struct perf_event *event, in add_event_to_ctx() argument
2024 u64 tstamp = perf_event_time(event); in add_event_to_ctx()
2026 list_add_event(event, ctx); in add_event_to_ctx()
2027 perf_group_attach(event); in add_event_to_ctx()
2028 event->tstamp_enabled = tstamp; in add_event_to_ctx()
2029 event->tstamp_running = tstamp; in add_event_to_ctx()
2030 event->tstamp_stopped = tstamp; in add_event_to_ctx()
2059 struct perf_event *event = info; in __perf_install_in_context() local
2060 struct perf_event_context *ctx = event->ctx; in __perf_install_in_context()
2098 update_cgrp_time_from_event(event); in __perf_install_in_context()
2100 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2125 struct perf_event *event, in perf_install_in_context() argument
2132 event->ctx = ctx; in perf_install_in_context()
2133 if (event->cpu != -1) in perf_install_in_context()
2134 event->cpu = cpu; in perf_install_in_context()
2141 cpu_function_call(cpu, __perf_install_in_context, event); in perf_install_in_context()
2146 if (!task_function_call(task, __perf_install_in_context, event)) in perf_install_in_context()
2168 add_event_to_ctx(event, ctx); in perf_install_in_context()
2180 static void __perf_event_mark_enabled(struct perf_event *event) in __perf_event_mark_enabled() argument
2183 u64 tstamp = perf_event_time(event); in __perf_event_mark_enabled()
2185 event->state = PERF_EVENT_STATE_INACTIVE; in __perf_event_mark_enabled()
2186 event->tstamp_enabled = tstamp - event->total_time_enabled; in __perf_event_mark_enabled()
2187 list_for_each_entry(sub, &event->sibling_list, group_entry) { in __perf_event_mark_enabled()
2198 struct perf_event *event = info; in __perf_event_enable() local
2199 struct perf_event_context *ctx = event->ctx; in __perf_event_enable()
2200 struct perf_event *leader = event->group_leader; in __perf_event_enable()
2219 if (event->state >= PERF_EVENT_STATE_INACTIVE) in __perf_event_enable()
2227 __perf_event_mark_enabled(event); in __perf_event_enable()
2229 if (!event_filter_match(event)) { in __perf_event_enable()
2230 if (is_cgroup_event(event)) in __perf_event_enable()
2231 perf_cgroup_defer_enabled(event); in __perf_event_enable()
2239 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) in __perf_event_enable()
2242 if (!group_can_go_on(event, cpuctx, 1)) { in __perf_event_enable()
2245 if (event == leader) in __perf_event_enable()
2246 err = group_sched_in(event, cpuctx, ctx); in __perf_event_enable()
2248 err = event_sched_in(event, cpuctx, ctx); in __perf_event_enable()
2256 if (leader != event) { in __perf_event_enable()
2281 static void _perf_event_enable(struct perf_event *event) in _perf_event_enable() argument
2283 struct perf_event_context *ctx = event->ctx; in _perf_event_enable()
2290 cpu_function_call(event->cpu, __perf_event_enable, event); in _perf_event_enable()
2295 if (event->state >= PERF_EVENT_STATE_INACTIVE) in _perf_event_enable()
2305 if (event->state == PERF_EVENT_STATE_ERROR) in _perf_event_enable()
2306 event->state = PERF_EVENT_STATE_OFF; in _perf_event_enable()
2310 __perf_event_mark_enabled(event); in _perf_event_enable()
2316 if (!task_function_call(task, __perf_event_enable, event)) in _perf_event_enable()
2325 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) { in _perf_event_enable()
2341 void perf_event_enable(struct perf_event *event) in perf_event_enable() argument
2345 ctx = perf_event_ctx_lock(event); in perf_event_enable()
2346 _perf_event_enable(event); in perf_event_enable()
2347 perf_event_ctx_unlock(event, ctx); in perf_event_enable()
2351 static int _perf_event_refresh(struct perf_event *event, int refresh) in _perf_event_refresh() argument
2356 if (event->attr.inherit || !is_sampling_event(event)) in _perf_event_refresh()
2359 atomic_add(refresh, &event->event_limit); in _perf_event_refresh()
2360 _perf_event_enable(event); in _perf_event_refresh()
2368 int perf_event_refresh(struct perf_event *event, int refresh) in perf_event_refresh() argument
2373 ctx = perf_event_ctx_lock(event); in perf_event_refresh()
2374 ret = _perf_event_refresh(event, refresh); in perf_event_refresh()
2375 perf_event_ctx_unlock(event, ctx); in perf_event_refresh()
2385 struct perf_event *event; in ctx_sched_out() local
2399 list_for_each_entry(event, &ctx->pinned_groups, group_entry) in ctx_sched_out()
2400 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
2404 list_for_each_entry(event, &ctx->flexible_groups, group_entry) in ctx_sched_out()
2405 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
2448 static void __perf_event_sync_stat(struct perf_event *event, in __perf_event_sync_stat() argument
2453 if (!event->attr.inherit_stat) in __perf_event_sync_stat()
2463 switch (event->state) { in __perf_event_sync_stat()
2465 event->pmu->read(event); in __perf_event_sync_stat()
2469 update_event_times(event); in __perf_event_sync_stat()
2481 value = local64_xchg(&event->count, value); in __perf_event_sync_stat()
2484 swap(event->total_time_enabled, next_event->total_time_enabled); in __perf_event_sync_stat()
2485 swap(event->total_time_running, next_event->total_time_running); in __perf_event_sync_stat()
2490 perf_event_update_userpage(event); in __perf_event_sync_stat()
2497 struct perf_event *event, *next_event; in perf_event_sync_stat() local
2504 event = list_first_entry(&ctx->event_list, in perf_event_sync_stat()
2510 while (&event->event_entry != &ctx->event_list && in perf_event_sync_stat()
2513 __perf_event_sync_stat(event, next_event); in perf_event_sync_stat()
2515 event = list_next_entry(event, event_entry); in perf_event_sync_stat()
2701 struct perf_event *event; in ctx_pinned_sched_in() local
2703 list_for_each_entry(event, &ctx->pinned_groups, group_entry) { in ctx_pinned_sched_in()
2704 if (event->state <= PERF_EVENT_STATE_OFF) in ctx_pinned_sched_in()
2706 if (!event_filter_match(event)) in ctx_pinned_sched_in()
2710 if (is_cgroup_event(event)) in ctx_pinned_sched_in()
2711 perf_cgroup_mark_enabled(event, ctx); in ctx_pinned_sched_in()
2713 if (group_can_go_on(event, cpuctx, 1)) in ctx_pinned_sched_in()
2714 group_sched_in(event, cpuctx, ctx); in ctx_pinned_sched_in()
2720 if (event->state == PERF_EVENT_STATE_INACTIVE) { in ctx_pinned_sched_in()
2721 update_group_times(event); in ctx_pinned_sched_in()
2722 event->state = PERF_EVENT_STATE_ERROR; in ctx_pinned_sched_in()
2731 struct perf_event *event; in ctx_flexible_sched_in() local
2734 list_for_each_entry(event, &ctx->flexible_groups, group_entry) { in ctx_flexible_sched_in()
2736 if (event->state <= PERF_EVENT_STATE_OFF) in ctx_flexible_sched_in()
2742 if (!event_filter_match(event)) in ctx_flexible_sched_in()
2746 if (is_cgroup_event(event)) in ctx_flexible_sched_in()
2747 perf_cgroup_mark_enabled(event, ctx); in ctx_flexible_sched_in()
2749 if (group_can_go_on(event, cpuctx, can_add_hw)) { in ctx_flexible_sched_in()
2750 if (group_sched_in(event, cpuctx, ctx)) in ctx_flexible_sched_in()
2856 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) in perf_calculate_period() argument
2858 u64 frequency = event->attr.sample_freq; in perf_calculate_period()
2932 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) in perf_adjust_period() argument
2934 struct hw_perf_event *hwc = &event->hw; in perf_adjust_period()
2938 period = perf_calculate_period(event, nsec, count); in perf_adjust_period()
2952 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_period()
2957 event->pmu->start(event, PERF_EF_RELOAD); in perf_adjust_period()
2969 struct perf_event *event; in perf_adjust_freq_unthr_context() local
2985 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_adjust_freq_unthr_context()
2986 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_adjust_freq_unthr_context()
2989 if (!event_filter_match(event)) in perf_adjust_freq_unthr_context()
2992 perf_pmu_disable(event->pmu); in perf_adjust_freq_unthr_context()
2994 hwc = &event->hw; in perf_adjust_freq_unthr_context()
2998 perf_log_throttle(event, 1); in perf_adjust_freq_unthr_context()
2999 event->pmu->start(event, 0); in perf_adjust_freq_unthr_context()
3002 if (!event->attr.freq || !event->attr.sample_freq) in perf_adjust_freq_unthr_context()
3008 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_freq_unthr_context()
3010 now = local64_read(&event->count); in perf_adjust_freq_unthr_context()
3022 perf_adjust_period(event, period, delta, false); in perf_adjust_freq_unthr_context()
3024 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); in perf_adjust_freq_unthr_context()
3026 perf_pmu_enable(event->pmu); in perf_adjust_freq_unthr_context()
3111 static int event_enable_on_exec(struct perf_event *event, in event_enable_on_exec() argument
3114 if (!event->attr.enable_on_exec) in event_enable_on_exec()
3117 event->attr.enable_on_exec = 0; in event_enable_on_exec()
3118 if (event->state >= PERF_EVENT_STATE_INACTIVE) in event_enable_on_exec()
3121 __perf_event_mark_enabled(event); in event_enable_on_exec()
3133 struct perf_event *event; in perf_event_enable_on_exec() local
3154 list_for_each_entry(event, &ctx->event_list, event_entry) { in perf_event_enable_on_exec()
3155 ret = event_enable_on_exec(event, ctx); in perf_event_enable_on_exec()
3200 struct perf_event *event = info; in __perf_event_read() local
3201 struct perf_event_context *ctx = event->ctx; in __perf_event_read()
3217 update_cgrp_time_from_event(event); in __perf_event_read()
3219 update_event_times(event); in __perf_event_read()
3220 if (event->state == PERF_EVENT_STATE_ACTIVE) in __perf_event_read()
3221 event->pmu->read(event); in __perf_event_read()
3225 static inline u64 perf_event_count(struct perf_event *event) in perf_event_count() argument
3227 if (event->pmu->count) in perf_event_count()
3228 return event->pmu->count(event); in perf_event_count()
3230 return __perf_event_count(event); in perf_event_count()
3233 static u64 perf_event_read(struct perf_event *event) in perf_event_read() argument
3239 if (event->state == PERF_EVENT_STATE_ACTIVE) { in perf_event_read()
3240 smp_call_function_single(event->oncpu, in perf_event_read()
3241 __perf_event_read, event, 1); in perf_event_read()
3242 } else if (event->state == PERF_EVENT_STATE_INACTIVE) { in perf_event_read()
3243 struct perf_event_context *ctx = event->ctx; in perf_event_read()
3254 update_cgrp_time_from_event(event); in perf_event_read()
3256 update_event_times(event); in perf_event_read()
3260 return perf_event_count(event); in perf_event_read()
3332 struct perf_event *event) in find_get_context() argument
3339 int cpu = event->cpu; in find_get_context()
3367 if (event->attach_state & PERF_ATTACH_TASK_DATA) { in find_get_context()
3434 static void perf_event_free_filter(struct perf_event *event);
3435 static void perf_event_free_bpf_prog(struct perf_event *event);
3439 struct perf_event *event; in free_event_rcu() local
3441 event = container_of(head, struct perf_event, rcu_head); in free_event_rcu()
3442 if (event->ns) in free_event_rcu()
3443 put_pid_ns(event->ns); in free_event_rcu()
3444 perf_event_free_filter(event); in free_event_rcu()
3445 kfree(event); in free_event_rcu()
3448 static void ring_buffer_attach(struct perf_event *event,
3451 static void unaccount_event_cpu(struct perf_event *event, int cpu) in unaccount_event_cpu() argument
3453 if (event->parent) in unaccount_event_cpu()
3456 if (is_cgroup_event(event)) in unaccount_event_cpu()
3460 static void unaccount_event(struct perf_event *event) in unaccount_event() argument
3462 if (event->parent) in unaccount_event()
3465 if (event->attach_state & PERF_ATTACH_TASK) in unaccount_event()
3467 if (event->attr.mmap || event->attr.mmap_data) in unaccount_event()
3469 if (event->attr.comm) in unaccount_event()
3471 if (event->attr.task) in unaccount_event()
3473 if (event->attr.freq) in unaccount_event()
3475 if (is_cgroup_event(event)) in unaccount_event()
3477 if (has_branch_stack(event)) in unaccount_event()
3480 unaccount_event_cpu(event, event->cpu); in unaccount_event()
3495 static int exclusive_event_init(struct perf_event *event) in exclusive_event_init() argument
3497 struct pmu *pmu = event->pmu; in exclusive_event_init()
3515 if (event->attach_state & PERF_ATTACH_TASK) { in exclusive_event_init()
3526 static void exclusive_event_destroy(struct perf_event *event) in exclusive_event_destroy() argument
3528 struct pmu *pmu = event->pmu; in exclusive_event_destroy()
3534 if (event->attach_state & PERF_ATTACH_TASK) in exclusive_event_destroy()
3551 static bool exclusive_event_installable(struct perf_event *event, in exclusive_event_installable() argument
3555 struct pmu *pmu = event->pmu; in exclusive_event_installable()
3561 if (exclusive_event_match(iter_event, event)) in exclusive_event_installable()
3568 static void __free_event(struct perf_event *event) in __free_event() argument
3570 if (!event->parent) { in __free_event()
3571 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) in __free_event()
3575 perf_event_free_bpf_prog(event); in __free_event()
3577 if (event->destroy) in __free_event()
3578 event->destroy(event); in __free_event()
3580 if (event->ctx) in __free_event()
3581 put_ctx(event->ctx); in __free_event()
3583 if (event->pmu) { in __free_event()
3584 exclusive_event_destroy(event); in __free_event()
3585 module_put(event->pmu->module); in __free_event()
3588 call_rcu(&event->rcu_head, free_event_rcu); in __free_event()
3591 static void _free_event(struct perf_event *event) in _free_event() argument
3593 irq_work_sync(&event->pending); in _free_event()
3595 unaccount_event(event); in _free_event()
3597 if (event->rb) { in _free_event()
3604 mutex_lock(&event->mmap_mutex); in _free_event()
3605 ring_buffer_attach(event, NULL); in _free_event()
3606 mutex_unlock(&event->mmap_mutex); in _free_event()
3609 if (is_cgroup_event(event)) in _free_event()
3610 perf_detach_cgroup(event); in _free_event()
3612 __free_event(event); in _free_event()
3619 static void free_event(struct perf_event *event) in free_event() argument
3621 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, in free_event()
3623 atomic_long_read(&event->refcount), event)) { in free_event()
3628 _free_event(event); in free_event()
3634 static void perf_remove_from_owner(struct perf_event *event) in perf_remove_from_owner() argument
3639 owner = ACCESS_ONCE(event->owner); in perf_remove_from_owner()
3674 if (event->owner) in perf_remove_from_owner()
3675 list_del_init(&event->owner_entry); in perf_remove_from_owner()
3681 static void put_event(struct perf_event *event) in put_event() argument
3685 if (!atomic_long_dec_and_test(&event->refcount)) in put_event()
3688 if (!is_kernel_event(event)) in put_event()
3689 perf_remove_from_owner(event); in put_event()
3703 ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING); in put_event()
3705 perf_remove_from_context(event, true); in put_event()
3706 perf_event_ctx_unlock(event, ctx); in put_event()
3708 _free_event(event); in put_event()
3711 int perf_event_release_kernel(struct perf_event *event) in perf_event_release_kernel() argument
3713 put_event(event); in perf_event_release_kernel()
3733 struct perf_event *event, *tmp; in orphans_remove_work() local
3739 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) { in orphans_remove_work()
3740 struct perf_event *parent_event = event->parent; in orphans_remove_work()
3742 if (!is_orphaned_child(event)) in orphans_remove_work()
3745 perf_remove_from_context(event, true); in orphans_remove_work()
3748 list_del_init(&event->child_list); in orphans_remove_work()
3751 free_event(event); in orphans_remove_work()
3763 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in perf_event_read_value() argument
3771 mutex_lock(&event->child_mutex); in perf_event_read_value()
3772 total += perf_event_read(event); in perf_event_read_value()
3773 *enabled += event->total_time_enabled + in perf_event_read_value()
3774 atomic64_read(&event->child_total_time_enabled); in perf_event_read_value()
3775 *running += event->total_time_running + in perf_event_read_value()
3776 atomic64_read(&event->child_total_time_running); in perf_event_read_value()
3778 list_for_each_entry(child, &event->child_list, child_list) { in perf_event_read_value()
3783 mutex_unlock(&event->child_mutex); in perf_event_read_value()
3789 static int perf_event_read_group(struct perf_event *event, in perf_event_read_group() argument
3792 struct perf_event *leader = event->group_leader, *sub; in perf_event_read_group()
3837 static int perf_event_read_one(struct perf_event *event, in perf_event_read_one() argument
3844 values[n++] = perf_event_read_value(event, &enabled, &running); in perf_event_read_one()
3850 values[n++] = primary_event_id(event); in perf_event_read_one()
3858 static bool is_event_hup(struct perf_event *event) in is_event_hup() argument
3862 if (event->state != PERF_EVENT_STATE_EXIT) in is_event_hup()
3865 mutex_lock(&event->child_mutex); in is_event_hup()
3866 no_children = list_empty(&event->child_list); in is_event_hup()
3867 mutex_unlock(&event->child_mutex); in is_event_hup()
3875 perf_read_hw(struct perf_event *event, char __user *buf, size_t count) in perf_read_hw() argument
3877 u64 read_format = event->attr.read_format; in perf_read_hw()
3885 if (event->state == PERF_EVENT_STATE_ERROR) in perf_read_hw()
3888 if (count < event->read_size) in perf_read_hw()
3891 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_read_hw()
3893 ret = perf_event_read_group(event, read_format, buf); in perf_read_hw()
3895 ret = perf_event_read_one(event, read_format, buf); in perf_read_hw()
3903 struct perf_event *event = file->private_data; in perf_read() local
3907 ctx = perf_event_ctx_lock(event); in perf_read()
3908 ret = perf_read_hw(event, buf, count); in perf_read()
3909 perf_event_ctx_unlock(event, ctx); in perf_read()
3916 struct perf_event *event = file->private_data; in perf_poll() local
3920 poll_wait(file, &event->waitq, wait); in perf_poll()
3922 if (is_event_hup(event)) in perf_poll()
3929 mutex_lock(&event->mmap_mutex); in perf_poll()
3930 rb = event->rb; in perf_poll()
3933 mutex_unlock(&event->mmap_mutex); in perf_poll()
3937 static void _perf_event_reset(struct perf_event *event) in _perf_event_reset() argument
3939 (void)perf_event_read(event); in _perf_event_reset()
3940 local64_set(&event->count, 0); in _perf_event_reset()
3941 perf_event_update_userpage(event); in _perf_event_reset()
3950 static void perf_event_for_each_child(struct perf_event *event, in perf_event_for_each_child() argument
3955 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_event_for_each_child()
3957 mutex_lock(&event->child_mutex); in perf_event_for_each_child()
3958 func(event); in perf_event_for_each_child()
3959 list_for_each_entry(child, &event->child_list, child_list) in perf_event_for_each_child()
3961 mutex_unlock(&event->child_mutex); in perf_event_for_each_child()
3964 static void perf_event_for_each(struct perf_event *event, in perf_event_for_each() argument
3967 struct perf_event_context *ctx = event->ctx; in perf_event_for_each()
3972 event = event->group_leader; in perf_event_for_each()
3974 perf_event_for_each_child(event, func); in perf_event_for_each()
3975 list_for_each_entry(sibling, &event->sibling_list, group_entry) in perf_event_for_each()
3980 struct perf_event *event; member
3987 struct perf_event *event = pe->event; in __perf_event_period() local
3988 struct perf_event_context *ctx = event->ctx; in __perf_event_period()
3993 if (event->attr.freq) { in __perf_event_period()
3994 event->attr.sample_freq = value; in __perf_event_period()
3996 event->attr.sample_period = value; in __perf_event_period()
3997 event->hw.sample_period = value; in __perf_event_period()
4000 active = (event->state == PERF_EVENT_STATE_ACTIVE); in __perf_event_period()
4003 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_period()
4006 local64_set(&event->hw.period_left, 0); in __perf_event_period()
4009 event->pmu->start(event, PERF_EF_RELOAD); in __perf_event_period()
4017 static int perf_event_period(struct perf_event *event, u64 __user *arg) in perf_event_period() argument
4019 struct period_event pe = { .event = event, }; in perf_event_period()
4020 struct perf_event_context *ctx = event->ctx; in perf_event_period()
4024 if (!is_sampling_event(event)) in perf_event_period()
4033 if (event->attr.freq && value > sysctl_perf_event_sample_rate) in perf_event_period()
4040 cpu_function_call(event->cpu, __perf_event_period, &pe); in perf_event_period()
4077 static int perf_event_set_output(struct perf_event *event,
4079 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
4080 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
4082 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) in _perf_ioctl() argument
4099 return _perf_event_refresh(event, arg); in _perf_ioctl()
4102 return perf_event_period(event, (u64 __user *)arg); in _perf_ioctl()
4106 u64 id = primary_event_id(event); in _perf_ioctl()
4123 ret = perf_event_set_output(event, output_event); in _perf_ioctl()
4126 ret = perf_event_set_output(event, NULL); in _perf_ioctl()
4132 return perf_event_set_filter(event, (void __user *)arg); in _perf_ioctl()
4135 return perf_event_set_bpf_prog(event, arg); in _perf_ioctl()
4142 perf_event_for_each(event, func); in _perf_ioctl()
4144 perf_event_for_each_child(event, func); in _perf_ioctl()
4151 struct perf_event *event = file->private_data; in perf_ioctl() local
4155 ctx = perf_event_ctx_lock(event); in perf_ioctl()
4156 ret = _perf_ioctl(event, cmd, arg); in perf_ioctl()
4157 perf_event_ctx_unlock(event, ctx); in perf_ioctl()
4185 struct perf_event *event; in perf_event_task_enable() local
4188 list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_enable()
4189 ctx = perf_event_ctx_lock(event); in perf_event_task_enable()
4190 perf_event_for_each_child(event, _perf_event_enable); in perf_event_task_enable()
4191 perf_event_ctx_unlock(event, ctx); in perf_event_task_enable()
4201 struct perf_event *event; in perf_event_task_disable() local
4204 list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_disable()
4205 ctx = perf_event_ctx_lock(event); in perf_event_task_disable()
4206 perf_event_for_each_child(event, _perf_event_disable); in perf_event_task_disable()
4207 perf_event_ctx_unlock(event, ctx); in perf_event_task_disable()
4214 static int perf_event_index(struct perf_event *event) in perf_event_index() argument
4216 if (event->hw.state & PERF_HES_STOPPED) in perf_event_index()
4219 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_event_index()
4222 return event->pmu->event_idx(event); in perf_event_index()
4225 static void calc_timer_values(struct perf_event *event, in calc_timer_values() argument
4233 ctx_time = event->shadow_ctx_time + *now; in calc_timer_values()
4234 *enabled = ctx_time - event->tstamp_enabled; in calc_timer_values()
4235 *running = ctx_time - event->tstamp_running; in calc_timer_values()
4238 static void perf_event_init_userpage(struct perf_event *event) in perf_event_init_userpage() argument
4244 rb = rcu_dereference(event->rb); in perf_event_init_userpage()
4261 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) in arch_perf_update_userpage() argument
4270 void perf_event_update_userpage(struct perf_event *event) in perf_event_update_userpage() argument
4277 rb = rcu_dereference(event->rb); in perf_event_update_userpage()
4290 calc_timer_values(event, &now, &enabled, &running); in perf_event_update_userpage()
4300 userpg->index = perf_event_index(event); in perf_event_update_userpage()
4301 userpg->offset = perf_event_count(event); in perf_event_update_userpage()
4303 userpg->offset -= local64_read(&event->hw.prev_count); in perf_event_update_userpage()
4306 atomic64_read(&event->child_total_time_enabled); in perf_event_update_userpage()
4309 atomic64_read(&event->child_total_time_running); in perf_event_update_userpage()
4311 arch_perf_update_userpage(event, userpg, now); in perf_event_update_userpage()
4322 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_fault() local
4333 rb = rcu_dereference(event->rb); in perf_mmap_fault()
4355 static void ring_buffer_attach(struct perf_event *event, in ring_buffer_attach() argument
4361 if (event->rb) { in ring_buffer_attach()
4366 WARN_ON_ONCE(event->rcu_pending); in ring_buffer_attach()
4368 old_rb = event->rb; in ring_buffer_attach()
4370 list_del_rcu(&event->rb_entry); in ring_buffer_attach()
4373 event->rcu_batches = get_state_synchronize_rcu(); in ring_buffer_attach()
4374 event->rcu_pending = 1; in ring_buffer_attach()
4378 if (event->rcu_pending) { in ring_buffer_attach()
4379 cond_synchronize_rcu(event->rcu_batches); in ring_buffer_attach()
4380 event->rcu_pending = 0; in ring_buffer_attach()
4384 list_add_rcu(&event->rb_entry, &rb->event_list); in ring_buffer_attach()
4388 rcu_assign_pointer(event->rb, rb); in ring_buffer_attach()
4397 wake_up_all(&event->waitq); in ring_buffer_attach()
4401 static void ring_buffer_wakeup(struct perf_event *event) in ring_buffer_wakeup() argument
4406 rb = rcu_dereference(event->rb); in ring_buffer_wakeup()
4408 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) in ring_buffer_wakeup()
4409 wake_up_all(&event->waitq); in ring_buffer_wakeup()
4414 struct ring_buffer *ring_buffer_get(struct perf_event *event) in ring_buffer_get() argument
4419 rb = rcu_dereference(event->rb); in ring_buffer_get()
4441 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_open() local
4443 atomic_inc(&event->mmap_count); in perf_mmap_open()
4444 atomic_inc(&event->rb->mmap_count); in perf_mmap_open()
4447 atomic_inc(&event->rb->aux_mmap_count); in perf_mmap_open()
4449 if (event->pmu->event_mapped) in perf_mmap_open()
4450 event->pmu->event_mapped(event); in perf_mmap_open()
4463 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_close() local
4465 struct ring_buffer *rb = ring_buffer_get(event); in perf_mmap_close()
4470 if (event->pmu->event_unmapped) in perf_mmap_close()
4471 event->pmu->event_unmapped(event); in perf_mmap_close()
4479 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) { in perf_mmap_close()
4484 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
4489 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) in perf_mmap_close()
4492 ring_buffer_attach(event, NULL); in perf_mmap_close()
4493 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
4506 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { in perf_mmap_close()
4507 if (!atomic_long_inc_not_zero(&event->refcount)) { in perf_mmap_close()
4516 mutex_lock(&event->mmap_mutex); in perf_mmap_close()
4527 if (event->rb == rb) in perf_mmap_close()
4528 ring_buffer_attach(event, NULL); in perf_mmap_close()
4530 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
4531 put_event(event); in perf_mmap_close()
4567 struct perf_event *event = file->private_data; in perf_mmap() local
4582 if (event->cpu == -1 && event->attr.inherit) in perf_mmap()
4600 if (!event->rb) in perf_mmap()
4605 mutex_lock(&event->mmap_mutex); in perf_mmap()
4608 rb = event->rb; in perf_mmap()
4660 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_mmap()
4662 mutex_lock(&event->mmap_mutex); in perf_mmap()
4663 if (event->rb) { in perf_mmap()
4664 if (event->rb->nr_pages != nr_pages) { in perf_mmap()
4669 if (!atomic_inc_not_zero(&event->rb->mmap_count)) { in perf_mmap()
4675 mutex_unlock(&event->mmap_mutex); in perf_mmap()
4707 WARN_ON(!rb && event->rb); in perf_mmap()
4714 event->attr.watermark ? event->attr.wakeup_watermark : 0, in perf_mmap()
4715 event->cpu, flags); in perf_mmap()
4726 ring_buffer_attach(event, rb); in perf_mmap()
4728 perf_event_init_userpage(event); in perf_mmap()
4729 perf_event_update_userpage(event); in perf_mmap()
4731 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, in perf_mmap()
4732 event->attr.aux_watermark, flags); in perf_mmap()
4742 atomic_inc(&event->mmap_count); in perf_mmap()
4747 mutex_unlock(&event->mmap_mutex); in perf_mmap()
4756 if (event->pmu->event_mapped) in perf_mmap()
4757 event->pmu->event_mapped(event); in perf_mmap()
4765 struct perf_event *event = filp->private_data; in perf_fasync() local
4769 retval = fasync_helper(fd, filp, on, &event->fasync); in perf_fasync()
4796 static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) in perf_event_fasync() argument
4799 if (event->parent) in perf_event_fasync()
4800 event = event->parent; in perf_event_fasync()
4801 return &event->fasync; in perf_event_fasync()
4804 void perf_event_wakeup(struct perf_event *event) in perf_event_wakeup() argument
4806 ring_buffer_wakeup(event); in perf_event_wakeup()
4808 if (event->pending_kill) { in perf_event_wakeup()
4809 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); in perf_event_wakeup()
4810 event->pending_kill = 0; in perf_event_wakeup()
4816 struct perf_event *event = container_of(entry, in perf_pending_event() local
4826 if (event->pending_disable) { in perf_pending_event()
4827 event->pending_disable = 0; in perf_pending_event()
4828 __perf_event_disable(event); in perf_pending_event()
4831 if (event->pending_wakeup) { in perf_pending_event()
4832 event->pending_wakeup = 0; in perf_pending_event()
4833 perf_event_wakeup(event); in perf_pending_event()
4996 struct perf_event *event) in __perf_event_header__init_id() argument
4998 u64 sample_type = event->attr.sample_type; in __perf_event_header__init_id()
5001 header->size += event->id_header_size; in __perf_event_header__init_id()
5005 data->tid_entry.pid = perf_event_pid(event, current); in __perf_event_header__init_id()
5006 data->tid_entry.tid = perf_event_tid(event, current); in __perf_event_header__init_id()
5010 data->time = perf_event_clock(event); in __perf_event_header__init_id()
5013 data->id = primary_event_id(event); in __perf_event_header__init_id()
5016 data->stream_id = event->id; in __perf_event_header__init_id()
5026 struct perf_event *event) in perf_event_header__init_id() argument
5028 if (event->attr.sample_id_all) in perf_event_header__init_id()
5029 __perf_event_header__init_id(header, data, event); in perf_event_header__init_id()
5056 void perf_event__output_id_sample(struct perf_event *event, in perf_event__output_id_sample() argument
5060 if (event->attr.sample_id_all) in perf_event__output_id_sample()
5065 struct perf_event *event, in perf_output_read_one() argument
5068 u64 read_format = event->attr.read_format; in perf_output_read_one()
5072 values[n++] = perf_event_count(event); in perf_output_read_one()
5075 atomic64_read(&event->child_total_time_enabled); in perf_output_read_one()
5079 atomic64_read(&event->child_total_time_running); in perf_output_read_one()
5082 values[n++] = primary_event_id(event); in perf_output_read_one()
5091 struct perf_event *event, in perf_output_read_group() argument
5094 struct perf_event *leader = event->group_leader, *sub; in perf_output_read_group()
5095 u64 read_format = event->attr.read_format; in perf_output_read_group()
5107 if (leader != event) in perf_output_read_group()
5119 if ((sub != event) && in perf_output_read_group()
5135 struct perf_event *event) in perf_output_read() argument
5138 u64 read_format = event->attr.read_format; in perf_output_read()
5150 calc_timer_values(event, &now, &enabled, &running); in perf_output_read()
5152 if (event->attr.read_format & PERF_FORMAT_GROUP) in perf_output_read()
5153 perf_output_read_group(handle, event, enabled, running); in perf_output_read()
5155 perf_output_read_one(handle, event, enabled, running); in perf_output_read()
5161 struct perf_event *event) in perf_output_sample() argument
5195 perf_output_read(handle, event); in perf_output_sample()
5258 u64 mask = event->attr.sample_regs_user; in perf_output_sample()
5289 u64 mask = event->attr.sample_regs_intr; in perf_output_sample()
5297 if (!event->attr.watermark) { in perf_output_sample()
5298 int wakeup_events = event->attr.wakeup_events; in perf_output_sample()
5314 struct perf_event *event, in perf_prepare_sample() argument
5317 u64 sample_type = event->attr.sample_type; in perf_prepare_sample()
5320 header->size = sizeof(*header) + event->header_size; in perf_prepare_sample()
5325 __perf_event_header__init_id(header, data, event); in perf_prepare_sample()
5333 data->callchain = perf_callchain(event, regs); in perf_prepare_sample()
5371 u64 mask = event->attr.sample_regs_user; in perf_prepare_sample()
5385 u16 stack_size = event->attr.sample_stack_user; in perf_prepare_sample()
5410 u64 mask = event->attr.sample_regs_intr; in perf_prepare_sample()
5419 static void perf_event_output(struct perf_event *event, in perf_event_output() argument
5429 perf_prepare_sample(&header, data, event, regs); in perf_event_output()
5431 if (perf_output_begin(&handle, event, header.size)) in perf_event_output()
5434 perf_output_sample(&handle, &header, data, event); in perf_event_output()
5454 perf_event_read_event(struct perf_event *event, in perf_event_read_event() argument
5463 .size = sizeof(read_event) + event->read_size, in perf_event_read_event()
5465 .pid = perf_event_pid(event, task), in perf_event_read_event()
5466 .tid = perf_event_tid(event, task), in perf_event_read_event()
5470 perf_event_header__init_id(&read_event.header, &sample, event); in perf_event_read_event()
5471 ret = perf_output_begin(&handle, event, read_event.header.size); in perf_event_read_event()
5476 perf_output_read(&handle, event); in perf_event_read_event()
5477 perf_event__output_id_sample(event, &handle, &sample); in perf_event_read_event()
5482 typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
5489 struct perf_event *event; in perf_event_aux_ctx() local
5491 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_event_aux_ctx()
5492 if (event->state < PERF_EVENT_STATE_INACTIVE) in perf_event_aux_ctx()
5494 if (!event_filter_match(event)) in perf_event_aux_ctx()
5496 output(event, data); in perf_event_aux_ctx()
5556 static int perf_event_task_match(struct perf_event *event) in perf_event_task_match() argument
5558 return event->attr.comm || event->attr.mmap || in perf_event_task_match()
5559 event->attr.mmap2 || event->attr.mmap_data || in perf_event_task_match()
5560 event->attr.task; in perf_event_task_match()
5563 static void perf_event_task_output(struct perf_event *event, in perf_event_task_output() argument
5572 if (!perf_event_task_match(event)) in perf_event_task_output()
5575 perf_event_header__init_id(&task_event->event_id.header, &sample, event); in perf_event_task_output()
5577 ret = perf_output_begin(&handle, event, in perf_event_task_output()
5582 task_event->event_id.pid = perf_event_pid(event, task); in perf_event_task_output()
5583 task_event->event_id.ppid = perf_event_pid(event, current); in perf_event_task_output()
5585 task_event->event_id.tid = perf_event_tid(event, task); in perf_event_task_output()
5586 task_event->event_id.ptid = perf_event_tid(event, current); in perf_event_task_output()
5588 task_event->event_id.time = perf_event_clock(event); in perf_event_task_output()
5592 perf_event__output_id_sample(event, &handle, &sample); in perf_event_task_output()
5654 static int perf_event_comm_match(struct perf_event *event) in perf_event_comm_match() argument
5656 return event->attr.comm; in perf_event_comm_match()
5659 static void perf_event_comm_output(struct perf_event *event, in perf_event_comm_output() argument
5668 if (!perf_event_comm_match(event)) in perf_event_comm_output()
5671 perf_event_header__init_id(&comm_event->event_id.header, &sample, event); in perf_event_comm_output()
5672 ret = perf_output_begin(&handle, event, in perf_event_comm_output()
5678 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); in perf_event_comm_output()
5679 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); in perf_event_comm_output()
5685 perf_event__output_id_sample(event, &handle, &sample); in perf_event_comm_output()
5761 static int perf_event_mmap_match(struct perf_event *event, in perf_event_mmap_match() argument
5768 return (!executable && event->attr.mmap_data) || in perf_event_mmap_match()
5769 (executable && (event->attr.mmap || event->attr.mmap2)); in perf_event_mmap_match()
5772 static void perf_event_mmap_output(struct perf_event *event, in perf_event_mmap_output() argument
5781 if (!perf_event_mmap_match(event, data)) in perf_event_mmap_output()
5784 if (event->attr.mmap2) { in perf_event_mmap_output()
5794 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); in perf_event_mmap_output()
5795 ret = perf_output_begin(&handle, event, in perf_event_mmap_output()
5800 mmap_event->event_id.pid = perf_event_pid(event, current); in perf_event_mmap_output()
5801 mmap_event->event_id.tid = perf_event_tid(event, current); in perf_event_mmap_output()
5805 if (event->attr.mmap2) { in perf_event_mmap_output()
5817 perf_event__output_id_sample(event, &handle, &sample); in perf_event_mmap_output()
5978 void perf_event_aux_event(struct perf_event *event, unsigned long head, in perf_event_aux_event() argument
6000 perf_event_header__init_id(&rec.header, &sample, event); in perf_event_aux_event()
6001 ret = perf_output_begin(&handle, event, rec.header.size); in perf_event_aux_event()
6007 perf_event__output_id_sample(event, &handle, &sample); in perf_event_aux_event()
6016 static void perf_log_throttle(struct perf_event *event, int enable) in perf_log_throttle() argument
6033 .time = perf_event_clock(event), in perf_log_throttle()
6034 .id = primary_event_id(event), in perf_log_throttle()
6035 .stream_id = event->id, in perf_log_throttle()
6041 perf_event_header__init_id(&throttle_event.header, &sample, event); in perf_log_throttle()
6043 ret = perf_output_begin(&handle, event, in perf_log_throttle()
6049 perf_event__output_id_sample(event, &handle, &sample); in perf_log_throttle()
6053 static void perf_log_itrace_start(struct perf_event *event) in perf_log_itrace_start() argument
6064 if (event->parent) in perf_log_itrace_start()
6065 event = event->parent; in perf_log_itrace_start()
6067 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || in perf_log_itrace_start()
6068 event->hw.itrace_started) in perf_log_itrace_start()
6071 event->hw.itrace_started = 1; in perf_log_itrace_start()
6076 rec.pid = perf_event_pid(event, current); in perf_log_itrace_start()
6077 rec.tid = perf_event_tid(event, current); in perf_log_itrace_start()
6079 perf_event_header__init_id(&rec.header, &sample, event); in perf_log_itrace_start()
6080 ret = perf_output_begin(&handle, event, rec.header.size); in perf_log_itrace_start()
6086 perf_event__output_id_sample(event, &handle, &sample); in perf_log_itrace_start()
6095 static int __perf_event_overflow(struct perf_event *event, in __perf_event_overflow() argument
6099 int events = atomic_read(&event->event_limit); in __perf_event_overflow()
6100 struct hw_perf_event *hwc = &event->hw; in __perf_event_overflow()
6108 if (unlikely(!is_sampling_event(event))) in __perf_event_overflow()
6121 perf_log_throttle(event, 0); in __perf_event_overflow()
6127 if (event->attr.freq) { in __perf_event_overflow()
6134 perf_adjust_period(event, delta, hwc->last_period, true); in __perf_event_overflow()
6142 event->pending_kill = POLL_IN; in __perf_event_overflow()
6143 if (events && atomic_dec_and_test(&event->event_limit)) { in __perf_event_overflow()
6145 event->pending_kill = POLL_HUP; in __perf_event_overflow()
6146 event->pending_disable = 1; in __perf_event_overflow()
6147 irq_work_queue(&event->pending); in __perf_event_overflow()
6150 if (event->overflow_handler) in __perf_event_overflow()
6151 event->overflow_handler(event, data, regs); in __perf_event_overflow()
6153 perf_event_output(event, data, regs); in __perf_event_overflow()
6155 if (*perf_event_fasync(event) && event->pending_kill) { in __perf_event_overflow()
6156 event->pending_wakeup = 1; in __perf_event_overflow()
6157 irq_work_queue(&event->pending); in __perf_event_overflow()
6163 int perf_event_overflow(struct perf_event *event, in perf_event_overflow() argument
6167 return __perf_event_overflow(event, 1, data, regs); in perf_event_overflow()
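
The __perf_event_overflow() lines above implement the event_limit countdown: each overflow decrements the remaining limit, and when it reaches zero the event is flagged POLL_HUP, marked pending_disable, and an irq_work is queued to disable it. A minimal user-space sketch of that countdown, assuming C11 atomics and an invented sketch_event type in place of the kernel's perf_event/irq_work machinery:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's event_limit countdown. */
struct sketch_event {
	atomic_int event_limit;   /* remaining overflows before auto-disable */
	bool pending_disable;
};

/* Returns true when the event should be disabled, mirroring the
 * "events && atomic_dec_and_test(&event->event_limit)" check above. */
static bool sketch_overflow(struct sketch_event *ev)
{
	int events = atomic_load(&ev->event_limit);

	if (events && atomic_fetch_sub(&ev->event_limit, 1) == 1) {
		ev->pending_disable = true;   /* kernel queues irq_work here */
		return true;
	}
	return false;
}

int main(void)
{
	struct sketch_event ev = { .event_limit = 3 };

	for (int i = 0; i < 5; i++)
		printf("overflow %d -> disable=%d\n", i, sketch_overflow(&ev));
	return 0;
}
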
6195 u64 perf_swevent_set_period(struct perf_event *event) in perf_swevent_set_period() argument
6197 struct hw_perf_event *hwc = &event->hw; in perf_swevent_set_period()
6218 static void perf_swevent_overflow(struct perf_event *event, u64 overflow, in perf_swevent_overflow() argument
6222 struct hw_perf_event *hwc = &event->hw; in perf_swevent_overflow()
6226 overflow = perf_swevent_set_period(event); in perf_swevent_overflow()
6232 if (__perf_event_overflow(event, throttle, in perf_swevent_overflow()
6244 static void perf_swevent_event(struct perf_event *event, u64 nr, in perf_swevent_event() argument
6248 struct hw_perf_event *hwc = &event->hw; in perf_swevent_event()
6250 local64_add(nr, &event->count); in perf_swevent_event()
6255 if (!is_sampling_event(event)) in perf_swevent_event()
6258 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { in perf_swevent_event()
6260 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
6262 data->period = event->hw.last_period; in perf_swevent_event()
6264 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) in perf_swevent_event()
6265 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
6270 perf_swevent_overflow(event, 0, data, regs); in perf_swevent_event()
6273 static int perf_exclude_event(struct perf_event *event, in perf_exclude_event() argument
6276 if (event->hw.state & PERF_HES_STOPPED) in perf_exclude_event()
6280 if (event->attr.exclude_user && user_mode(regs)) in perf_exclude_event()
6283 if (event->attr.exclude_kernel && !user_mode(regs)) in perf_exclude_event()
6290 static int perf_swevent_match(struct perf_event *event, in perf_swevent_match() argument
6296 if (event->attr.type != type) in perf_swevent_match()
6299 if (event->attr.config != event_id) in perf_swevent_match()
6302 if (perf_exclude_event(event, regs)) in perf_swevent_match()
6338 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) in find_swevent_head() argument
6341 u32 event_id = event->attr.config; in find_swevent_head()
6342 u64 type = event->attr.type; in find_swevent_head()
6350 lockdep_is_held(&event->ctx->lock)); in find_swevent_head()
6363 struct perf_event *event; in do_perf_sw_event() local
6371 hlist_for_each_entry_rcu(event, head, hlist_entry) { in do_perf_sw_event()
6372 if (perf_swevent_match(event, type, event_id, data, regs)) in do_perf_sw_event()
6373 perf_swevent_event(event, nr, data, regs); in do_perf_sw_event()
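
do_perf_sw_event() above walks the hashed bucket returned by find_swevent_head() and hands the sample to every event whose type, config, and exclusion bits match. A small illustrative sketch of that match-then-deliver walk over a plain singly linked list; listener, matches(), and deliver() are inventions for the example, not kernel APIs:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical listener: one per software event hashed into this bucket. */
struct listener {
	uint32_t type;
	uint64_t config;
	struct listener *next;
};

static int matches(const struct listener *l, uint32_t type, uint64_t config)
{
	return l->type == type && l->config == config;
}

static void deliver(struct listener *l, uint64_t nr)
{
	printf("deliver %llu to listener config=%llu\n",
	       (unsigned long long)nr, (unsigned long long)l->config);
}

/* Mirrors the hlist_for_each_entry_rcu() + perf_swevent_match() loop. */
static void sw_event_fanout(struct listener *head, uint32_t type,
			    uint64_t config, uint64_t nr)
{
	for (struct listener *l = head; l; l = l->next)
		if (matches(l, type, config))
			deliver(l, nr);
}

int main(void)
{
	struct listener b = { .type = 1, .config = 7, .next = NULL };
	struct listener a = { .type = 1, .config = 3, .next = &b };

	sw_event_fanout(&a, 1, 7, 1);
	return 0;
}
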
6423 static void perf_swevent_read(struct perf_event *event) in perf_swevent_read() argument
6427 static int perf_swevent_add(struct perf_event *event, int flags) in perf_swevent_add() argument
6430 struct hw_perf_event *hwc = &event->hw; in perf_swevent_add()
6433 if (is_sampling_event(event)) { in perf_swevent_add()
6435 perf_swevent_set_period(event); in perf_swevent_add()
6440 head = find_swevent_head(swhash, event); in perf_swevent_add()
6450 hlist_add_head_rcu(&event->hlist_entry, head); in perf_swevent_add()
6451 perf_event_update_userpage(event); in perf_swevent_add()
6456 static void perf_swevent_del(struct perf_event *event, int flags) in perf_swevent_del() argument
6458 hlist_del_rcu(&event->hlist_entry); in perf_swevent_del()
6461 static void perf_swevent_start(struct perf_event *event, int flags) in perf_swevent_start() argument
6463 event->hw.state = 0; in perf_swevent_start()
6466 static void perf_swevent_stop(struct perf_event *event, int flags) in perf_swevent_stop() argument
6468 event->hw.state = PERF_HES_STOPPED; in perf_swevent_stop()
6490 static void swevent_hlist_put_cpu(struct perf_event *event, int cpu) in swevent_hlist_put_cpu() argument
6502 static void swevent_hlist_put(struct perf_event *event) in swevent_hlist_put() argument
6507 swevent_hlist_put_cpu(event, cpu); in swevent_hlist_put()
6510 static int swevent_hlist_get_cpu(struct perf_event *event, int cpu) in swevent_hlist_get_cpu() argument
6534 static int swevent_hlist_get(struct perf_event *event) in swevent_hlist_get() argument
6541 err = swevent_hlist_get_cpu(event, cpu); in swevent_hlist_get()
6554 swevent_hlist_put_cpu(event, cpu); in swevent_hlist_get()
6563 static void sw_perf_event_destroy(struct perf_event *event) in sw_perf_event_destroy() argument
6565 u64 event_id = event->attr.config; in sw_perf_event_destroy()
6567 WARN_ON(event->parent); in sw_perf_event_destroy()
6570 swevent_hlist_put(event); in sw_perf_event_destroy()
6573 static int perf_swevent_init(struct perf_event *event) in perf_swevent_init() argument
6575 u64 event_id = event->attr.config; in perf_swevent_init()
6577 if (event->attr.type != PERF_TYPE_SOFTWARE) in perf_swevent_init()
6583 if (has_branch_stack(event)) in perf_swevent_init()
6598 if (!event->parent) { in perf_swevent_init()
6601 err = swevent_hlist_get(event); in perf_swevent_init()
6606 event->destroy = sw_perf_event_destroy; in perf_swevent_init()
6627 static int perf_tp_filter_match(struct perf_event *event, in perf_tp_filter_match() argument
6632 if (likely(!event->filter) || filter_match_preds(event->filter, record)) in perf_tp_filter_match()
6637 static int perf_tp_event_match(struct perf_event *event, in perf_tp_event_match() argument
6641 if (event->hw.state & PERF_HES_STOPPED) in perf_tp_event_match()
6646 if (event->attr.exclude_kernel) in perf_tp_event_match()
6649 if (!perf_tp_filter_match(event, data)) in perf_tp_event_match()
6660 struct perf_event *event; in perf_tp_event() local
6670 hlist_for_each_entry_rcu(event, head, hlist_entry) { in perf_tp_event()
6671 if (perf_tp_event_match(event, &data, regs)) in perf_tp_event()
6672 perf_swevent_event(event, count, &data, regs); in perf_tp_event()
6688 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_tp_event()
6689 if (event->attr.type != PERF_TYPE_TRACEPOINT) in perf_tp_event()
6691 if (event->attr.config != entry->type) in perf_tp_event()
6693 if (perf_tp_event_match(event, &data, regs)) in perf_tp_event()
6694 perf_swevent_event(event, count, &data, regs); in perf_tp_event()
6704 static void tp_perf_event_destroy(struct perf_event *event) in tp_perf_event_destroy() argument
6706 perf_trace_destroy(event); in tp_perf_event_destroy()
6709 static int perf_tp_event_init(struct perf_event *event) in perf_tp_event_init() argument
6713 if (event->attr.type != PERF_TYPE_TRACEPOINT) in perf_tp_event_init()
6719 if (has_branch_stack(event)) in perf_tp_event_init()
6722 err = perf_trace_init(event); in perf_tp_event_init()
6726 event->destroy = tp_perf_event_destroy; in perf_tp_event_init()
6747 static int perf_event_set_filter(struct perf_event *event, void __user *arg) in perf_event_set_filter() argument
6752 if (event->attr.type != PERF_TYPE_TRACEPOINT) in perf_event_set_filter()
6759 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); in perf_event_set_filter()
6765 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
6767 ftrace_profile_free_filter(event); in perf_event_free_filter()
6770 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) in perf_event_set_bpf_prog() argument
6774 if (event->attr.type != PERF_TYPE_TRACEPOINT) in perf_event_set_bpf_prog()
6777 if (event->tp_event->prog) in perf_event_set_bpf_prog()
6780 if (!(event->tp_event->flags & TRACE_EVENT_FL_KPROBE)) in perf_event_set_bpf_prog()
6794 event->tp_event->prog = prog; in perf_event_set_bpf_prog()
6799 static void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
6803 if (!event->tp_event) in perf_event_free_bpf_prog()
6806 prog = event->tp_event->prog; in perf_event_free_bpf_prog()
6808 event->tp_event->prog = NULL; in perf_event_free_bpf_prog()
6819 static int perf_event_set_filter(struct perf_event *event, void __user *arg) in perf_event_set_filter() argument
6824 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
6828 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) in perf_event_set_bpf_prog() argument
6833 static void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
6860 struct perf_event *event; in perf_swevent_hrtimer() local
6863 event = container_of(hrtimer, struct perf_event, hw.hrtimer); in perf_swevent_hrtimer()
6865 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_swevent_hrtimer()
6868 event->pmu->read(event); in perf_swevent_hrtimer()
6870 perf_sample_data_init(&data, 0, event->hw.last_period); in perf_swevent_hrtimer()
6873 if (regs && !perf_exclude_event(event, regs)) { in perf_swevent_hrtimer()
6874 if (!(event->attr.exclude_idle && is_idle_task(current))) in perf_swevent_hrtimer()
6875 if (__perf_event_overflow(event, 1, &data, regs)) in perf_swevent_hrtimer()
6879 period = max_t(u64, 10000, event->hw.sample_period); in perf_swevent_hrtimer()
6885 static void perf_swevent_start_hrtimer(struct perf_event *event) in perf_swevent_start_hrtimer() argument
6887 struct hw_perf_event *hwc = &event->hw; in perf_swevent_start_hrtimer()
6890 if (!is_sampling_event(event)) in perf_swevent_start_hrtimer()
6907 static void perf_swevent_cancel_hrtimer(struct perf_event *event) in perf_swevent_cancel_hrtimer() argument
6909 struct hw_perf_event *hwc = &event->hw; in perf_swevent_cancel_hrtimer()
6911 if (is_sampling_event(event)) { in perf_swevent_cancel_hrtimer()
6919 static void perf_swevent_init_hrtimer(struct perf_event *event) in perf_swevent_init_hrtimer() argument
6921 struct hw_perf_event *hwc = &event->hw; in perf_swevent_init_hrtimer()
6923 if (!is_sampling_event(event)) in perf_swevent_init_hrtimer()
6933 if (event->attr.freq) { in perf_swevent_init_hrtimer()
6934 long freq = event->attr.sample_freq; in perf_swevent_init_hrtimer()
6936 event->attr.sample_period = NSEC_PER_SEC / freq; in perf_swevent_init_hrtimer()
6937 hwc->sample_period = event->attr.sample_period; in perf_swevent_init_hrtimer()
6940 event->attr.freq = 0; in perf_swevent_init_hrtimer()
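
perf_swevent_init_hrtimer() above converts a frequency request into a fixed period up front, because the hrtimer path cannot re-adjust the period while the event runs: sample_period = NSEC_PER_SEC / sample_freq, after which attr.freq is cleared. A one-shot worked example of that conversion (the 4 kHz value is illustrative):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t sample_freq   = 4000;                      /* 4 kHz requested */
	uint64_t sample_period = NSEC_PER_SEC / sample_freq;

	/* 4000 Hz -> one sample every 250000 ns (250 us). */
	printf("period = %llu ns\n", (unsigned long long)sample_period);
	return 0;
}
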
6948 static void cpu_clock_event_update(struct perf_event *event) in cpu_clock_event_update() argument
6954 prev = local64_xchg(&event->hw.prev_count, now); in cpu_clock_event_update()
6955 local64_add(now - prev, &event->count); in cpu_clock_event_update()
6958 static void cpu_clock_event_start(struct perf_event *event, int flags) in cpu_clock_event_start() argument
6960 local64_set(&event->hw.prev_count, local_clock()); in cpu_clock_event_start()
6961 perf_swevent_start_hrtimer(event); in cpu_clock_event_start()
6964 static void cpu_clock_event_stop(struct perf_event *event, int flags) in cpu_clock_event_stop() argument
6966 perf_swevent_cancel_hrtimer(event); in cpu_clock_event_stop()
6967 cpu_clock_event_update(event); in cpu_clock_event_stop()
6970 static int cpu_clock_event_add(struct perf_event *event, int flags) in cpu_clock_event_add() argument
6973 cpu_clock_event_start(event, flags); in cpu_clock_event_add()
6974 perf_event_update_userpage(event); in cpu_clock_event_add()
6979 static void cpu_clock_event_del(struct perf_event *event, int flags) in cpu_clock_event_del() argument
6981 cpu_clock_event_stop(event, flags); in cpu_clock_event_del()
6984 static void cpu_clock_event_read(struct perf_event *event) in cpu_clock_event_read() argument
6986 cpu_clock_event_update(event); in cpu_clock_event_read()
6989 static int cpu_clock_event_init(struct perf_event *event) in cpu_clock_event_init() argument
6991 if (event->attr.type != PERF_TYPE_SOFTWARE) in cpu_clock_event_init()
6994 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) in cpu_clock_event_init()
7000 if (has_branch_stack(event)) in cpu_clock_event_init()
7003 perf_swevent_init_hrtimer(event); in cpu_clock_event_init()
7025 static void task_clock_event_update(struct perf_event *event, u64 now) in task_clock_event_update() argument
7030 prev = local64_xchg(&event->hw.prev_count, now); in task_clock_event_update()
7032 local64_add(delta, &event->count); in task_clock_event_update()
7035 static void task_clock_event_start(struct perf_event *event, int flags) in task_clock_event_start() argument
7037 local64_set(&event->hw.prev_count, event->ctx->time); in task_clock_event_start()
7038 perf_swevent_start_hrtimer(event); in task_clock_event_start()
7041 static void task_clock_event_stop(struct perf_event *event, int flags) in task_clock_event_stop() argument
7043 perf_swevent_cancel_hrtimer(event); in task_clock_event_stop()
7044 task_clock_event_update(event, event->ctx->time); in task_clock_event_stop()
7047 static int task_clock_event_add(struct perf_event *event, int flags) in task_clock_event_add() argument
7050 task_clock_event_start(event, flags); in task_clock_event_add()
7051 perf_event_update_userpage(event); in task_clock_event_add()
7056 static void task_clock_event_del(struct perf_event *event, int flags) in task_clock_event_del() argument
7058 task_clock_event_stop(event, PERF_EF_UPDATE); in task_clock_event_del()
7061 static void task_clock_event_read(struct perf_event *event) in task_clock_event_read() argument
7064 u64 delta = now - event->ctx->timestamp; in task_clock_event_read()
7065 u64 time = event->ctx->time + delta; in task_clock_event_read()
7067 task_clock_event_update(event, time); in task_clock_event_read()
7070 static int task_clock_event_init(struct perf_event *event) in task_clock_event_init() argument
7072 if (event->attr.type != PERF_TYPE_SOFTWARE) in task_clock_event_init()
7075 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) in task_clock_event_init()
7081 if (has_branch_stack(event)) in task_clock_event_init()
7084 perf_swevent_init_hrtimer(event); in task_clock_event_init()
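
Both clock events above accumulate elapsed time with the same swap-and-add step: exchange prev_count for the current timestamp and add the difference to the count, so no interval is ever counted twice. A standalone sketch of that pattern, assuming C11 atomics stand in for the kernel's local64_t helpers and CLOCK_MONOTONIC stands in for local_clock()/ctx->time:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct clock_counter {
	atomic_uint_fast64_t prev_count;  /* timestamp at last update */
	atomic_uint_fast64_t count;       /* accumulated nanoseconds */
};

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* Mirrors cpu_clock_event_update(): xchg prev_count, add the delta. */
static void clock_counter_update(struct clock_counter *c)
{
	uint64_t now  = now_ns();
	uint64_t prev = atomic_exchange(&c->prev_count, now);

	atomic_fetch_add(&c->count, now - prev);
}

int main(void)
{
	struct clock_counter c;

	atomic_init(&c.prev_count, now_ns());  /* "event start" */
	atomic_init(&c.count, 0);

	clock_counter_update(&c);              /* "event read/stop" */
	printf("elapsed: %llu ns\n",
	       (unsigned long long)atomic_load(&c.count));
	return 0;
}
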
7127 static int perf_event_idx_default(struct perf_event *event) in perf_event_idx_default() argument
7414 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) in perf_try_init_event() argument
7422 if (event->group_leader != event) { in perf_try_init_event()
7427 ctx = perf_event_ctx_lock_nested(event->group_leader, in perf_try_init_event()
7432 event->pmu = pmu; in perf_try_init_event()
7433 ret = pmu->event_init(event); in perf_try_init_event()
7436 perf_event_ctx_unlock(event->group_leader, ctx); in perf_try_init_event()
7444 struct pmu *perf_init_event(struct perf_event *event) in perf_init_event() argument
7453 pmu = idr_find(&pmu_idr, event->attr.type); in perf_init_event()
7456 ret = perf_try_init_event(pmu, event); in perf_init_event()
7463 ret = perf_try_init_event(pmu, event); in perf_init_event()
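
perf_init_event() above first resolves the PMU directly from attr.type via the pmu_idr and only falls back to probing every registered PMU when that lookup misses. A sketch of that look-up-then-probe shape over a plain array registry; struct provider, its init() callbacks, and pick_provider() are inventions for the illustration:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct provider {
	int type;                       /* matches the attr.type lookup */
	int (*init)(int config);        /* 0 on success, -ENOENT to pass */
};

static int init_cycles(int config) { return config == 0 ? 0 : -ENOENT; }
static int init_sw(int config)     { (void)config; return 0; }

static struct provider registry[] = {
	{ .type = 4, .init = init_cycles },
	{ .type = 1, .init = init_sw },
};

/* Mirrors perf_init_event(): direct lookup by type, then probe all. */
static struct provider *pick_provider(int type, int config)
{
	for (size_t i = 0; i < sizeof(registry) / sizeof(registry[0]); i++)
		if (registry[i].type == type && !registry[i].init(config))
			return &registry[i];

	for (size_t i = 0; i < sizeof(registry) / sizeof(registry[0]); i++) {
		int ret = registry[i].init(config);

		if (!ret)
			return &registry[i];
		if (ret != -ENOENT)
			break;               /* hard error: stop probing */
	}
	return NULL;
}

int main(void)
{
	printf("picked: %p\n", (void *)pick_provider(1, 5));
	return 0;
}
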
7479 static void account_event_cpu(struct perf_event *event, int cpu) in account_event_cpu() argument
7481 if (event->parent) in account_event_cpu()
7484 if (is_cgroup_event(event)) in account_event_cpu()
7488 static void account_event(struct perf_event *event) in account_event() argument
7490 if (event->parent) in account_event()
7493 if (event->attach_state & PERF_ATTACH_TASK) in account_event()
7495 if (event->attr.mmap || event->attr.mmap_data) in account_event()
7497 if (event->attr.comm) in account_event()
7499 if (event->attr.task) in account_event()
7501 if (event->attr.freq) { in account_event()
7505 if (has_branch_stack(event)) in account_event()
7507 if (is_cgroup_event(event)) in account_event()
7510 account_event_cpu(event, event->cpu); in account_event()
7525 struct perf_event *event; in perf_event_alloc() local
7534 event = kzalloc(sizeof(*event), GFP_KERNEL); in perf_event_alloc()
7535 if (!event) in perf_event_alloc()
7543 group_leader = event; in perf_event_alloc()
7545 mutex_init(&event->child_mutex); in perf_event_alloc()
7546 INIT_LIST_HEAD(&event->child_list); in perf_event_alloc()
7548 INIT_LIST_HEAD(&event->group_entry); in perf_event_alloc()
7549 INIT_LIST_HEAD(&event->event_entry); in perf_event_alloc()
7550 INIT_LIST_HEAD(&event->sibling_list); in perf_event_alloc()
7551 INIT_LIST_HEAD(&event->rb_entry); in perf_event_alloc()
7552 INIT_LIST_HEAD(&event->active_entry); in perf_event_alloc()
7553 INIT_HLIST_NODE(&event->hlist_entry); in perf_event_alloc()
7556 init_waitqueue_head(&event->waitq); in perf_event_alloc()
7557 init_irq_work(&event->pending, perf_pending_event); in perf_event_alloc()
7559 mutex_init(&event->mmap_mutex); in perf_event_alloc()
7561 atomic_long_set(&event->refcount, 1); in perf_event_alloc()
7562 event->cpu = cpu; in perf_event_alloc()
7563 event->attr = *attr; in perf_event_alloc()
7564 event->group_leader = group_leader; in perf_event_alloc()
7565 event->pmu = NULL; in perf_event_alloc()
7566 event->oncpu = -1; in perf_event_alloc()
7568 event->parent = parent_event; in perf_event_alloc()
7570 event->ns = get_pid_ns(task_active_pid_ns(current)); in perf_event_alloc()
7571 event->id = atomic64_inc_return(&perf_event_id); in perf_event_alloc()
7573 event->state = PERF_EVENT_STATE_INACTIVE; in perf_event_alloc()
7576 event->attach_state = PERF_ATTACH_TASK; in perf_event_alloc()
7582 event->hw.target = task; in perf_event_alloc()
7585 event->clock = &local_clock; in perf_event_alloc()
7587 event->clock = parent_event->clock; in perf_event_alloc()
7594 event->overflow_handler = overflow_handler; in perf_event_alloc()
7595 event->overflow_handler_context = context; in perf_event_alloc()
7597 perf_event__state_init(event); in perf_event_alloc()
7601 hwc = &event->hw; in perf_event_alloc()
7615 if (!has_branch_stack(event)) in perf_event_alloc()
7616 event->attr.branch_sample_type = 0; in perf_event_alloc()
7619 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); in perf_event_alloc()
7624 pmu = perf_init_event(event); in perf_event_alloc()
7632 err = exclusive_event_init(event); in perf_event_alloc()
7636 if (!event->parent) { in perf_event_alloc()
7637 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { in perf_event_alloc()
7645 account_event(event); in perf_event_alloc()
7647 return event; in perf_event_alloc()
7650 exclusive_event_destroy(event); in perf_event_alloc()
7653 if (event->destroy) in perf_event_alloc()
7654 event->destroy(event); in perf_event_alloc()
7657 if (is_cgroup_event(event)) in perf_event_alloc()
7658 perf_detach_cgroup(event); in perf_event_alloc()
7659 if (event->ns) in perf_event_alloc()
7660 put_pid_ns(event->ns); in perf_event_alloc()
7661 kfree(event); in perf_event_alloc()
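
The error path at the end of perf_event_alloc() above is the usual goto-based unwind: each failure label releases only what was acquired before the failure, in reverse order (exclusive state, pmu state via event->destroy, the cgroup reference, the pid namespace, and finally the allocation itself). A generic sketch of that idiom with invented resources:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy object whose setup mirrors the unwind order above:
 * allocate, acquire resource A, then resource B, undoing in reverse. */
struct toy {
	char *name;
	FILE *log;
};

static struct toy *toy_alloc(const char *name, const char *log_path)
{
	struct toy *t = calloc(1, sizeof(*t));

	if (!t)
		return NULL;

	t->name = strdup(name);
	if (!t->name)
		goto err_free;

	t->log = fopen(log_path, "w");
	if (!t->log)
		goto err_name;

	return t;

err_name:
	free(t->name);      /* undo strdup */
err_free:
	free(t);            /* undo calloc */
	return NULL;
}

int main(void)
{
	struct toy *t = toy_alloc("demo", "/tmp/toy.log");

	if (t) {
		fprintf(t->log, "hello\n");
		fclose(t->log);
		free(t->name);
		free(t);
	}
	return 0;
}
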
7797 perf_event_set_output(struct perf_event *event, struct perf_event *output_event) in perf_event_set_output() argument
7806 if (event == output_event) in perf_event_set_output()
7812 if (output_event->cpu != event->cpu) in perf_event_set_output()
7818 if (output_event->cpu == -1 && output_event->ctx != event->ctx) in perf_event_set_output()
7824 if (output_event->clock != event->clock) in perf_event_set_output()
7830 if (has_aux(event) && has_aux(output_event) && in perf_event_set_output()
7831 event->pmu != output_event->pmu) in perf_event_set_output()
7835 mutex_lock(&event->mmap_mutex); in perf_event_set_output()
7837 if (atomic_read(&event->mmap_count)) in perf_event_set_output()
7847 ring_buffer_attach(event, rb); in perf_event_set_output()
7851 mutex_unlock(&event->mmap_mutex); in perf_event_set_output()
7866 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) in perf_event_set_clock() argument
7872 event->clock = &ktime_get_mono_fast_ns; in perf_event_set_clock()
7877 event->clock = &ktime_get_raw_fast_ns; in perf_event_set_clock()
7882 event->clock = &ktime_get_real_ns; in perf_event_set_clock()
7886 event->clock = &ktime_get_boot_ns; in perf_event_set_clock()
7890 event->clock = &ktime_get_tai_ns; in perf_event_set_clock()
7897 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) in perf_event_set_clock()
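
perf_event_set_clock() above maps the requested clockid onto a kernel time-source callback and then rejects clocks that are not NMI-safe unless the PMU advertises PERF_PMU_CAP_NO_NMI. A user-space analogue of that clockid-to-reader mapping; clock_gettime() stands in for the ktime_get_*() helpers and the nmi_safe flag only marks which kernel readers are the _fast_ns variants, purely for illustration:

#include <errno.h>
#include <stdio.h>
#include <time.h>

struct clock_choice {
	clockid_t id;
	int nmi_safe;     /* illustrative: lockless fast readers only */
};

/* Pick a reader for the requested clock, mirroring the switch in
 * perf_event_set_clock(); returns -EINVAL for unsupported ids. */
static int choose_clock(clockid_t clk_id, struct clock_choice *out)
{
	switch (clk_id) {
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_RAW:
		*out = (struct clock_choice){ clk_id, 1 };
		return 0;
	case CLOCK_REALTIME:
	case CLOCK_BOOTTIME:
	case CLOCK_TAI:
		*out = (struct clock_choice){ clk_id, 0 };
		return 0;
	default:
		return -EINVAL;
	}
}

int main(void)
{
	struct clock_choice c;

	if (choose_clock(CLOCK_MONOTONIC, &c) == 0) {
		struct timespec ts;

		clock_gettime(c.id, &ts);
		printf("clock %d nmi_safe=%d now=%lld.%09ld\n",
		       (int)c.id, c.nmi_safe,
		       (long long)ts.tv_sec, ts.tv_nsec);
	}
	return 0;
}
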
7916 struct perf_event *event, *sibling; in SYSCALL_DEFINE5() local
7996 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, in SYSCALL_DEFINE5()
7998 if (IS_ERR(event)) { in SYSCALL_DEFINE5()
7999 err = PTR_ERR(event); in SYSCALL_DEFINE5()
8003 if (is_sampling_event(event)) { in SYSCALL_DEFINE5()
8004 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { in SYSCALL_DEFINE5()
8014 pmu = event->pmu; in SYSCALL_DEFINE5()
8017 err = perf_event_set_clock(event, attr.clockid); in SYSCALL_DEFINE5()
8023 (is_software_event(event) != is_software_event(group_leader))) { in SYSCALL_DEFINE5()
8024 if (is_software_event(event)) { in SYSCALL_DEFINE5()
8048 ctx = find_get_context(pmu, task, event); in SYSCALL_DEFINE5()
8078 if (group_leader->clock != event->clock) in SYSCALL_DEFINE5()
8098 if (group_leader->cpu != event->cpu) in SYSCALL_DEFINE5()
8113 err = perf_event_set_output(event, output_event); in SYSCALL_DEFINE5()
8118 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, in SYSCALL_DEFINE5()
8181 if (!exclusive_event_installable(event, ctx)) { in SYSCALL_DEFINE5()
8188 perf_install_in_context(ctx, event, event->cpu); in SYSCALL_DEFINE5()
8199 event->owner = current; in SYSCALL_DEFINE5()
8202 list_add_tail(&event->owner_entry, &current->perf_event_list); in SYSCALL_DEFINE5()
8208 perf_event__header_size(event); in SYSCALL_DEFINE5()
8209 perf_event__id_header_size(event); in SYSCALL_DEFINE5()
8230 free_event(event); in SYSCALL_DEFINE5()
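
The SYSCALL_DEFINE5 excerpt above is the kernel side of perf_event_open(2). From user space the call is normally issued through syscall(2), since glibc ships no wrapper; a minimal caller that counts CPU-clock software events (the config handled by cpu_clock_event_init() above) for the current thread might look like this, with error handling trimmed to the essentials:

#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size     = sizeof(attr);
	attr.type     = PERF_TYPE_SOFTWARE;
	attr.config   = PERF_COUNT_SW_CPU_CLOCK;
	attr.disabled = 1;

	fd = perf_event_open(&attr, 0 /* this thread */, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (volatile int i = 0; i < 1000000; i++)
		;                                 /* burn a little CPU time */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
		printf("cpu-clock: %llu ns\n", (unsigned long long)count);
	close(fd);
	return 0;
}
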
8257 struct perf_event *event; in perf_event_create_kernel_counter() local
8264 event = perf_event_alloc(attr, cpu, task, NULL, NULL, in perf_event_create_kernel_counter()
8266 if (IS_ERR(event)) { in perf_event_create_kernel_counter()
8267 err = PTR_ERR(event); in perf_event_create_kernel_counter()
8272 event->owner = EVENT_OWNER_KERNEL; in perf_event_create_kernel_counter()
8274 ctx = find_get_context(event->pmu, task, event); in perf_event_create_kernel_counter()
8282 if (!exclusive_event_installable(event, ctx)) { in perf_event_create_kernel_counter()
8290 perf_install_in_context(ctx, event, cpu); in perf_event_create_kernel_counter()
8294 return event; in perf_event_create_kernel_counter()
8297 free_event(event); in perf_event_create_kernel_counter()
8307 struct perf_event *event, *tmp; in perf_pmu_migrate_context() local
8318 list_for_each_entry_safe(event, tmp, &src_ctx->event_list, in perf_pmu_migrate_context()
8320 perf_remove_from_context(event, false); in perf_pmu_migrate_context()
8321 unaccount_event_cpu(event, src_cpu); in perf_pmu_migrate_context()
8323 list_add(&event->migrate_entry, &events); in perf_pmu_migrate_context()
8339 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { in perf_pmu_migrate_context()
8340 if (event->group_leader == event) in perf_pmu_migrate_context()
8343 list_del(&event->migrate_entry); in perf_pmu_migrate_context()
8344 if (event->state >= PERF_EVENT_STATE_OFF) in perf_pmu_migrate_context()
8345 event->state = PERF_EVENT_STATE_INACTIVE; in perf_pmu_migrate_context()
8346 account_event_cpu(event, dst_cpu); in perf_pmu_migrate_context()
8347 perf_install_in_context(dst_ctx, event, dst_cpu); in perf_pmu_migrate_context()
8355 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { in perf_pmu_migrate_context()
8356 list_del(&event->migrate_entry); in perf_pmu_migrate_context()
8357 if (event->state >= PERF_EVENT_STATE_OFF) in perf_pmu_migrate_context()
8358 event->state = PERF_EVENT_STATE_INACTIVE; in perf_pmu_migrate_context()
8359 account_event_cpu(event, dst_cpu); in perf_pmu_migrate_context()
8360 perf_install_in_context(dst_ctx, event, dst_cpu); in perf_pmu_migrate_context()
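
perf_pmu_migrate_context() above detaches every event from the source context, parks it on a local migrate list, and reinstalls events on the destination in two passes, siblings before group leaders, so the siblings are already present when their leader is installed and enabled. A toy sketch of that detach / park / two-pass reinstall shape; struct toy_event and migrate() are inventions for the example:

#include <stdio.h>

/* Toy event with a group-leader flag, standing in for perf_event. */
struct toy_event {
	const char *name;
	int is_group_leader;
	int installed_on;     /* cpu the event is currently installed on */
};

/* Detach everything from src_cpu, then reinstall in two passes:
 * pass 0 handles siblings, pass 1 handles group leaders. */
static void migrate(struct toy_event *evs, int n, int src_cpu, int dst_cpu)
{
	for (int i = 0; i < n; i++)
		if (evs[i].installed_on == src_cpu)
			evs[i].installed_on = -1;          /* detached */

	for (int pass = 0; pass < 2; pass++)
		for (int i = 0; i < n; i++) {
			if (evs[i].installed_on != -1)
				continue;
			if (evs[i].is_group_leader != pass)
				continue;
			evs[i].installed_on = dst_cpu;     /* reinstall */
		}
}

int main(void)
{
	struct toy_event evs[] = {
		{ "leader",  1, 0 },
		{ "sibling", 0, 0 },
	};

	migrate(evs, 2, 0, 1);
	for (int i = 0; i < 2; i++)
		printf("%s -> cpu %d\n", evs[i].name, evs[i].installed_on);
	return 0;
}
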
8515 struct perf_event *event, *tmp; in perf_event_exit_task() local
8519 list_for_each_entry_safe(event, tmp, &child->perf_event_list, in perf_event_exit_task()
8521 list_del_init(&event->owner_entry); in perf_event_exit_task()
8529 event->owner = NULL; in perf_event_exit_task()
8537 static void perf_free_event(struct perf_event *event, in perf_free_event() argument
8540 struct perf_event *parent = event->parent; in perf_free_event()
8546 list_del_init(&event->child_list); in perf_free_event()
8552 perf_group_detach(event); in perf_free_event()
8553 list_del_event(event, ctx); in perf_free_event()
8555 free_event(event); in perf_free_event()
8568 struct perf_event *event, *tmp; in perf_event_free_task() local
8578 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, in perf_event_free_task()
8580 perf_free_event(event, ctx); in perf_event_free_task()
8582 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, in perf_event_free_task()
8584 perf_free_event(event, ctx); in perf_event_free_task()
8717 inherit_task_group(struct perf_event *event, struct task_struct *parent, in inherit_task_group() argument
8725 if (!event->attr.inherit) { in inherit_task_group()
8746 ret = inherit_group(event, parent, parent_ctx, in inherit_task_group()
8762 struct perf_event *event; in perf_event_init_context() local
8796 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) { in perf_event_init_context()
8797 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
8812 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) { in perf_event_init_context()
8813 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
8909 list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry) in __perf_event_exit_context()