Lines Matching refs:ctx
317 static void update_context_time(struct perf_event_context *ctx);
338 __get_cpu_context(struct perf_event_context *ctx) in __get_cpu_context() argument
340 return this_cpu_ptr(ctx->pmu->pmu_cpu_context); in __get_cpu_context()
344 struct perf_event_context *ctx) in perf_ctx_lock() argument
346 raw_spin_lock(&cpuctx->ctx.lock); in perf_ctx_lock()
347 if (ctx) in perf_ctx_lock()
348 raw_spin_lock(&ctx->lock); in perf_ctx_lock()
352 struct perf_event_context *ctx) in perf_ctx_unlock() argument
354 if (ctx) in perf_ctx_unlock()
355 raw_spin_unlock(&ctx->lock); in perf_ctx_unlock()
356 raw_spin_unlock(&cpuctx->ctx.lock); in perf_ctx_unlock()
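The perf_ctx_lock()/perf_ctx_unlock() matches above (source lines 344-356) show the fixed nesting order: the per-CPU context lock is always taken before the optional task context lock and released after it. A minimal user-space model of that ordering, using pthread mutexes purely for illustration (cpu_lock/task_lock and ctx_lock()/ctx_unlock() are made-up names, not kernel symbols):

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-ins for cpuctx->ctx.lock and ctx->lock. */
static pthread_mutex_t cpu_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t task_lock = PTHREAD_MUTEX_INITIALIZER;

/* Outer lock first; inner lock only if a task context exists. */
static void ctx_lock(int have_task_ctx)
{
	pthread_mutex_lock(&cpu_lock);
	if (have_task_ctx)
		pthread_mutex_lock(&task_lock);
}

/* Release in the reverse order of acquisition. */
static void ctx_unlock(int have_task_ctx)
{
	if (have_task_ctx)
		pthread_mutex_unlock(&task_lock);
	pthread_mutex_unlock(&cpu_lock);
}

int main(void)
{
	ctx_lock(1);
	printf("both contexts locked, CPU lock taken first\n");
	ctx_unlock(1);
	return 0;
}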
364 struct perf_event_context *ctx = event->ctx; in perf_cgroup_match() local
365 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in perf_cgroup_match()
445 struct perf_event_context *ctx) in perf_cgroup_set_timestamp() argument
455 if (!task || !ctx->nr_cgroups) in perf_cgroup_set_timestamp()
460 info->timestamp = ctx->timestamp; in perf_cgroup_set_timestamp()
503 if (cpuctx->ctx.nr_cgroups > 0) { in perf_cgroup_switch()
505 perf_pmu_disable(cpuctx->ctx.pmu); in perf_cgroup_switch()
526 perf_pmu_enable(cpuctx->ctx.pmu); in perf_cgroup_switch()
645 struct perf_event_context *ctx) in perf_cgroup_mark_enabled() argument
711 struct perf_event_context *ctx) in perf_cgroup_set_timestamp() argument
737 struct perf_event_context *ctx) in perf_cgroup_mark_enabled() argument
804 struct pmu *pmu = cpuctx->ctx.pmu; in __perf_cpu_hrtimer_init()
828 struct pmu *pmu = cpuctx->ctx.pmu; in perf_cpu_hrtimer_restart()
864 static void perf_event_ctx_activate(struct perf_event_context *ctx) in perf_event_ctx_activate() argument
870 WARN_ON(!list_empty(&ctx->active_ctx_list)); in perf_event_ctx_activate()
872 list_add(&ctx->active_ctx_list, head); in perf_event_ctx_activate()
875 static void perf_event_ctx_deactivate(struct perf_event_context *ctx) in perf_event_ctx_deactivate() argument
879 WARN_ON(list_empty(&ctx->active_ctx_list)); in perf_event_ctx_deactivate()
881 list_del_init(&ctx->active_ctx_list); in perf_event_ctx_deactivate()
884 static void get_ctx(struct perf_event_context *ctx) in get_ctx() argument
886 WARN_ON(!atomic_inc_not_zero(&ctx->refcount)); in get_ctx()
891 struct perf_event_context *ctx; in free_ctx() local
893 ctx = container_of(head, struct perf_event_context, rcu_head); in free_ctx()
894 kfree(ctx->task_ctx_data); in free_ctx()
895 kfree(ctx); in free_ctx()
898 static void put_ctx(struct perf_event_context *ctx) in put_ctx() argument
900 if (atomic_dec_and_test(&ctx->refcount)) { in put_ctx()
901 if (ctx->parent_ctx) in put_ctx()
902 put_ctx(ctx->parent_ctx); in put_ctx()
903 if (ctx->task) in put_ctx()
904 put_task_struct(ctx->task); in put_ctx()
905 call_rcu(&ctx->rcu_head, free_ctx); in put_ctx()
973 struct perf_event_context *ctx; in perf_event_ctx_lock_nested() local
977 ctx = ACCESS_ONCE(event->ctx); in perf_event_ctx_lock_nested()
978 if (!atomic_inc_not_zero(&ctx->refcount)) { in perf_event_ctx_lock_nested()
984 mutex_lock_nested(&ctx->mutex, nesting); in perf_event_ctx_lock_nested()
985 if (event->ctx != ctx) { in perf_event_ctx_lock_nested()
986 mutex_unlock(&ctx->mutex); in perf_event_ctx_lock_nested()
987 put_ctx(ctx); in perf_event_ctx_lock_nested()
991 return ctx; in perf_event_ctx_lock_nested()
1001 struct perf_event_context *ctx) in perf_event_ctx_unlock() argument
1003 mutex_unlock(&ctx->mutex); in perf_event_ctx_unlock()
1004 put_ctx(ctx); in perf_event_ctx_unlock()
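Many of the matches above (get_ctx(), put_ctx(), perf_event_ctx_lock_nested(), source lines 884-1004) revolve around one lifetime rule: a context may only be pinned while its refcount is non-zero (atomic_inc_not_zero()), the last put_ctx() frees it, and perf_event_ctx_lock_nested() re-reads event->ctx after taking the mutex and retries if the context changed underneath it. A rough stand-alone C11 model of the reference-counting half of that pattern (the struct and function names below are illustrative only, and the kernel additionally defers the free behind an RCU grace period, which is omitted here):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative context object. */
struct ctx {
	atomic_int refcount;
};

/* Model of atomic_inc_not_zero(): take a reference only if the
 * object is not already on its way to being freed. */
static int get_ctx(struct ctx *c)
{
	int old = atomic_load(&c->refcount);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&c->refcount, &old, old + 1))
			return 1;
	}
	return 0;	/* refcount hit zero; caller must retry its lookup */
}

/* Model of put_ctx(): whoever drops the last reference frees. */
static void put_ctx(struct ctx *c)
{
	if (atomic_fetch_sub(&c->refcount, 1) == 1)
		free(c);
}

int main(void)
{
	struct ctx *c = malloc(sizeof(*c));

	atomic_init(&c->refcount, 1);
	if (get_ctx(c))
		printf("context pinned, refcount now 2\n");
	put_ctx(c);	/* drop the pin */
	put_ctx(c);	/* drop the initial reference; frees */
	return 0;
}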
1013 unclone_ctx(struct perf_event_context *ctx) in unclone_ctx() argument
1015 struct perf_event_context *parent_ctx = ctx->parent_ctx; in unclone_ctx()
1017 lockdep_assert_held(&ctx->lock); in unclone_ctx()
1020 ctx->parent_ctx = NULL; in unclone_ctx()
1021 ctx->generation++; in unclone_ctx()
1070 struct perf_event_context *ctx; in perf_lock_task_context() local
1084 ctx = rcu_dereference(task->perf_event_ctxp[ctxn]); in perf_lock_task_context()
1085 if (ctx) { in perf_lock_task_context()
1096 raw_spin_lock_irqsave(&ctx->lock, *flags); in perf_lock_task_context()
1097 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) { in perf_lock_task_context()
1098 raw_spin_unlock_irqrestore(&ctx->lock, *flags); in perf_lock_task_context()
1104 if (!atomic_inc_not_zero(&ctx->refcount)) { in perf_lock_task_context()
1105 raw_spin_unlock_irqrestore(&ctx->lock, *flags); in perf_lock_task_context()
1106 ctx = NULL; in perf_lock_task_context()
1111 return ctx; in perf_lock_task_context()
1122 struct perf_event_context *ctx; in perf_pin_task_context() local
1125 ctx = perf_lock_task_context(task, ctxn, &flags); in perf_pin_task_context()
1126 if (ctx) { in perf_pin_task_context()
1127 ++ctx->pin_count; in perf_pin_task_context()
1128 raw_spin_unlock_irqrestore(&ctx->lock, flags); in perf_pin_task_context()
1130 return ctx; in perf_pin_task_context()
1133 static void perf_unpin_context(struct perf_event_context *ctx) in perf_unpin_context() argument
1137 raw_spin_lock_irqsave(&ctx->lock, flags); in perf_unpin_context()
1138 --ctx->pin_count; in perf_unpin_context()
1139 raw_spin_unlock_irqrestore(&ctx->lock, flags); in perf_unpin_context()
1145 static void update_context_time(struct perf_event_context *ctx) in update_context_time() argument
1149 ctx->time += now - ctx->timestamp; in update_context_time()
1150 ctx->timestamp = now; in update_context_time()
1155 struct perf_event_context *ctx = event->ctx; in perf_event_time() local
1160 return ctx ? ctx->time : 0; in perf_event_time()
1169 struct perf_event_context *ctx = event->ctx; in update_event_times() local
1187 else if (ctx->is_active) in update_event_times()
1188 run_end = ctx->time; in update_event_times()
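update_context_time() and perf_event_time() (source lines 1145-1160 above) account per-context time by folding the wall-clock delta since the last timestamp into ctx->time whenever the context is inspected while active. A small stand-alone stopwatch model of that accumulation, assuming CLOCK_MONOTONIC as the time source (struct and function names here are illustrative, not the kernel's):

#include <stdio.h>
#include <time.h>

/* Illustrative model of the ctx->time / ctx->timestamp pair. */
struct ctx_clock {
	unsigned long long time;	/* total accumulated time, ns */
	unsigned long long timestamp;	/* instant of the last update, ns */
};

static unsigned long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* Mirrors update_context_time(): fold the elapsed delta into ->time. */
static void update_ctx_time(struct ctx_clock *c)
{
	unsigned long long now = now_ns();

	c->time += now - c->timestamp;
	c->timestamp = now;
}

int main(void)
{
	struct ctx_clock c = { .time = 0, .timestamp = now_ns() };

	/* ... context runs for a while ... */
	update_ctx_time(&c);
	printf("active for %llu ns so far\n", c.time);
	return 0;
}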
1216 ctx_group_list(struct perf_event *event, struct perf_event_context *ctx) in ctx_group_list() argument
1219 return &ctx->pinned_groups; in ctx_group_list()
1221 return &ctx->flexible_groups; in ctx_group_list()
1229 list_add_event(struct perf_event *event, struct perf_event_context *ctx) in list_add_event() argument
1245 list = ctx_group_list(event, ctx); in list_add_event()
1250 ctx->nr_cgroups++; in list_add_event()
1252 list_add_rcu(&event->event_entry, &ctx->event_list); in list_add_event()
1253 ctx->nr_events++; in list_add_event()
1255 ctx->nr_stat++; in list_add_event()
1257 ctx->generation++; in list_add_event()
1371 WARN_ON_ONCE(group_leader->ctx != event->ctx); in perf_group_attach()
1391 list_del_event(struct perf_event *event, struct perf_event_context *ctx) in list_del_event() argument
1395 WARN_ON_ONCE(event->ctx != ctx); in list_del_event()
1396 lockdep_assert_held(&ctx->lock); in list_del_event()
1407 ctx->nr_cgroups--; in list_del_event()
1408 cpuctx = __get_cpu_context(ctx); in list_del_event()
1414 if (!ctx->nr_cgroups) in list_del_event()
1418 ctx->nr_events--; in list_del_event()
1420 ctx->nr_stat--; in list_del_event()
1439 ctx->generation++; in list_del_event()
1480 WARN_ON_ONCE(sibling->ctx != event->ctx); in perf_group_detach()
1509 static void schedule_orphans_remove(struct perf_event_context *ctx) in schedule_orphans_remove() argument
1511 if (!ctx->task || ctx->orphans_remove_sched || !perf_wq) in schedule_orphans_remove()
1514 if (queue_delayed_work(perf_wq, &ctx->orphans_remove, 1)) { in schedule_orphans_remove()
1515 get_ctx(ctx); in schedule_orphans_remove()
1516 ctx->orphans_remove_sched = true; in schedule_orphans_remove()
1539 struct perf_event_context *ctx) in event_sched_out() argument
1544 WARN_ON_ONCE(event->ctx != ctx); in event_sched_out()
1545 lockdep_assert_held(&ctx->lock); in event_sched_out()
1576 if (!--ctx->nr_active) in event_sched_out()
1577 perf_event_ctx_deactivate(ctx); in event_sched_out()
1579 ctx->nr_freq--; in event_sched_out()
1584 schedule_orphans_remove(ctx); in event_sched_out()
1592 struct perf_event_context *ctx) in group_sched_out() argument
1597 event_sched_out(group_event, cpuctx, ctx); in group_sched_out()
1603 event_sched_out(event, cpuctx, ctx); in group_sched_out()
1624 struct perf_event_context *ctx = event->ctx; in __perf_remove_from_context() local
1625 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_remove_from_context()
1627 raw_spin_lock(&ctx->lock); in __perf_remove_from_context()
1628 event_sched_out(event, cpuctx, ctx); in __perf_remove_from_context()
1631 list_del_event(event, ctx); in __perf_remove_from_context()
1632 if (!ctx->nr_events && cpuctx->task_ctx == ctx) { in __perf_remove_from_context()
1633 ctx->is_active = 0; in __perf_remove_from_context()
1636 raw_spin_unlock(&ctx->lock); in __perf_remove_from_context()
1657 struct perf_event_context *ctx = event->ctx; in perf_remove_from_context() local
1658 struct task_struct *task = ctx->task; in perf_remove_from_context()
1664 lockdep_assert_held(&ctx->mutex); in perf_remove_from_context()
1681 raw_spin_lock_irq(&ctx->lock); in perf_remove_from_context()
1686 if (ctx->is_active) { in perf_remove_from_context()
1687 raw_spin_unlock_irq(&ctx->lock); in perf_remove_from_context()
1692 task = ctx->task; in perf_remove_from_context()
1702 list_del_event(event, ctx); in perf_remove_from_context()
1703 raw_spin_unlock_irq(&ctx->lock); in perf_remove_from_context()
1712 struct perf_event_context *ctx = event->ctx; in __perf_event_disable() local
1713 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_event_disable()
1722 if (ctx->task && cpuctx->task_ctx != ctx) in __perf_event_disable()
1725 raw_spin_lock(&ctx->lock); in __perf_event_disable()
1732 update_context_time(ctx); in __perf_event_disable()
1736 group_sched_out(event, cpuctx, ctx); in __perf_event_disable()
1738 event_sched_out(event, cpuctx, ctx); in __perf_event_disable()
1742 raw_spin_unlock(&ctx->lock); in __perf_event_disable()
1762 struct perf_event_context *ctx = event->ctx; in _perf_event_disable() local
1763 struct task_struct *task = ctx->task; in _perf_event_disable()
1777 raw_spin_lock_irq(&ctx->lock); in _perf_event_disable()
1782 raw_spin_unlock_irq(&ctx->lock); in _perf_event_disable()
1787 task = ctx->task; in _perf_event_disable()
1799 raw_spin_unlock_irq(&ctx->lock); in _perf_event_disable()
1808 struct perf_event_context *ctx; in perf_event_disable() local
1810 ctx = perf_event_ctx_lock(event); in perf_event_disable()
1812 perf_event_ctx_unlock(event, ctx); in perf_event_disable()
1817 struct perf_event_context *ctx, in perf_set_shadow_time() argument
1848 event->shadow_ctx_time = tstamp - ctx->timestamp; in perf_set_shadow_time()
1859 struct perf_event_context *ctx) in event_sched_in() argument
1864 lockdep_assert_held(&ctx->lock); in event_sched_in()
1889 perf_set_shadow_time(event, ctx, tstamp); in event_sched_in()
1904 if (!ctx->nr_active++) in event_sched_in()
1905 perf_event_ctx_activate(ctx); in event_sched_in()
1907 ctx->nr_freq++; in event_sched_in()
1913 schedule_orphans_remove(ctx); in event_sched_in()
1924 struct perf_event_context *ctx) in group_sched_in() argument
1927 struct pmu *pmu = ctx->pmu; in group_sched_in()
1928 u64 now = ctx->time; in group_sched_in()
1936 if (event_sched_in(group_event, cpuctx, ctx)) { in group_sched_in()
1946 if (event_sched_in(event, cpuctx, ctx)) { in group_sched_in()
1978 event_sched_out(event, cpuctx, ctx); in group_sched_in()
1981 event_sched_out(group_event, cpuctx, ctx); in group_sched_in()
2022 struct perf_event_context *ctx) in add_event_to_ctx() argument
2026 list_add_event(event, ctx); in add_event_to_ctx()
2033 static void task_ctx_sched_out(struct perf_event_context *ctx);
2035 ctx_sched_in(struct perf_event_context *ctx,
2041 struct perf_event_context *ctx, in perf_event_sched_in() argument
2045 if (ctx) in perf_event_sched_in()
2046 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task); in perf_event_sched_in()
2048 if (ctx) in perf_event_sched_in()
2049 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task); in perf_event_sched_in()
2060 struct perf_event_context *ctx = event->ctx; in __perf_install_in_context() local
2061 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_install_in_context()
2066 perf_pmu_disable(cpuctx->ctx.pmu); in __perf_install_in_context()
2078 if (ctx->task && task_ctx != ctx) { in __perf_install_in_context()
2081 raw_spin_lock(&ctx->lock); in __perf_install_in_context()
2082 task_ctx = ctx; in __perf_install_in_context()
2092 update_context_time(ctx); in __perf_install_in_context()
2100 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2107 perf_pmu_enable(cpuctx->ctx.pmu); in __perf_install_in_context()
2124 perf_install_in_context(struct perf_event_context *ctx, in perf_install_in_context() argument
2128 struct task_struct *task = ctx->task; in perf_install_in_context()
2130 lockdep_assert_held(&ctx->mutex); in perf_install_in_context()
2132 event->ctx = ctx; in perf_install_in_context()
2149 raw_spin_lock_irq(&ctx->lock); in perf_install_in_context()
2154 if (ctx->is_active) { in perf_install_in_context()
2155 raw_spin_unlock_irq(&ctx->lock); in perf_install_in_context()
2160 task = ctx->task; in perf_install_in_context()
2168 add_event_to_ctx(event, ctx); in perf_install_in_context()
2169 raw_spin_unlock_irq(&ctx->lock); in perf_install_in_context()
2199 struct perf_event_context *ctx = event->ctx; in __perf_event_enable() local
2201 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_event_enable()
2213 if (!ctx->is_active) in __perf_event_enable()
2216 raw_spin_lock(&ctx->lock); in __perf_event_enable()
2217 update_context_time(ctx); in __perf_event_enable()
2225 perf_cgroup_set_timestamp(current, ctx); in __perf_event_enable()
2246 err = group_sched_in(event, cpuctx, ctx); in __perf_event_enable()
2248 err = event_sched_in(event, cpuctx, ctx); in __perf_event_enable()
2257 group_sched_out(leader, cpuctx, ctx); in __perf_event_enable()
2267 raw_spin_unlock(&ctx->lock); in __perf_event_enable()
2283 struct perf_event_context *ctx = event->ctx; in _perf_event_enable() local
2284 struct task_struct *task = ctx->task; in _perf_event_enable()
2294 raw_spin_lock_irq(&ctx->lock); in _perf_event_enable()
2309 if (!ctx->is_active) { in _perf_event_enable()
2314 raw_spin_unlock_irq(&ctx->lock); in _perf_event_enable()
2319 raw_spin_lock_irq(&ctx->lock); in _perf_event_enable()
2325 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) { in _perf_event_enable()
2330 task = ctx->task; in _perf_event_enable()
2335 raw_spin_unlock_irq(&ctx->lock); in _perf_event_enable()
2343 struct perf_event_context *ctx; in perf_event_enable() local
2345 ctx = perf_event_ctx_lock(event); in perf_event_enable()
2347 perf_event_ctx_unlock(event, ctx); in perf_event_enable()
2370 struct perf_event_context *ctx; in perf_event_refresh() local
2373 ctx = perf_event_ctx_lock(event); in perf_event_refresh()
2375 perf_event_ctx_unlock(event, ctx); in perf_event_refresh()
2381 static void ctx_sched_out(struct perf_event_context *ctx, in ctx_sched_out() argument
2386 int is_active = ctx->is_active; in ctx_sched_out()
2388 ctx->is_active &= ~event_type; in ctx_sched_out()
2389 if (likely(!ctx->nr_events)) in ctx_sched_out()
2392 update_context_time(ctx); in ctx_sched_out()
2394 if (!ctx->nr_active) in ctx_sched_out()
2397 perf_pmu_disable(ctx->pmu); in ctx_sched_out()
2399 list_for_each_entry(event, &ctx->pinned_groups, group_entry) in ctx_sched_out()
2400 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
2404 list_for_each_entry(event, &ctx->flexible_groups, group_entry) in ctx_sched_out()
2405 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
2407 perf_pmu_enable(ctx->pmu); in ctx_sched_out()
2494 static void perf_event_sync_stat(struct perf_event_context *ctx, in perf_event_sync_stat() argument
2499 if (!ctx->nr_stat) in perf_event_sync_stat()
2502 update_context_time(ctx); in perf_event_sync_stat()
2504 event = list_first_entry(&ctx->event_list, in perf_event_sync_stat()
2510 while (&event->event_entry != &ctx->event_list && in perf_event_sync_stat()
2523 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn]; in perf_event_context_sched_out() local
2529 if (likely(!ctx)) in perf_event_context_sched_out()
2532 cpuctx = __get_cpu_context(ctx); in perf_event_context_sched_out()
2541 parent = rcu_dereference(ctx->parent_ctx); in perf_event_context_sched_out()
2548 if (next_parent == ctx || next_ctx == parent || next_parent == parent) { in perf_event_context_sched_out()
2558 raw_spin_lock(&ctx->lock); in perf_event_context_sched_out()
2560 if (context_equiv(ctx, next_ctx)) { in perf_event_context_sched_out()
2566 next->perf_event_ctxp[ctxn] = ctx; in perf_event_context_sched_out()
2567 ctx->task = next; in perf_event_context_sched_out()
2570 swap(ctx->task_ctx_data, next_ctx->task_ctx_data); in perf_event_context_sched_out()
2574 perf_event_sync_stat(ctx, next_ctx); in perf_event_context_sched_out()
2577 raw_spin_unlock(&ctx->lock); in perf_event_context_sched_out()
2583 raw_spin_lock(&ctx->lock); in perf_event_context_sched_out()
2584 ctx_sched_out(ctx, cpuctx, EVENT_ALL); in perf_event_context_sched_out()
2586 raw_spin_unlock(&ctx->lock); in perf_event_context_sched_out()
2674 static void task_ctx_sched_out(struct perf_event_context *ctx) in task_ctx_sched_out() argument
2676 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in task_ctx_sched_out()
2681 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) in task_ctx_sched_out()
2684 ctx_sched_out(ctx, cpuctx, EVENT_ALL); in task_ctx_sched_out()
2694 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type); in cpu_ctx_sched_out()
2698 ctx_pinned_sched_in(struct perf_event_context *ctx, in ctx_pinned_sched_in() argument
2703 list_for_each_entry(event, &ctx->pinned_groups, group_entry) { in ctx_pinned_sched_in()
2711 perf_cgroup_mark_enabled(event, ctx); in ctx_pinned_sched_in()
2714 group_sched_in(event, cpuctx, ctx); in ctx_pinned_sched_in()
2728 ctx_flexible_sched_in(struct perf_event_context *ctx, in ctx_flexible_sched_in() argument
2734 list_for_each_entry(event, &ctx->flexible_groups, group_entry) { in ctx_flexible_sched_in()
2747 perf_cgroup_mark_enabled(event, ctx); in ctx_flexible_sched_in()
2750 if (group_sched_in(event, cpuctx, ctx)) in ctx_flexible_sched_in()
2757 ctx_sched_in(struct perf_event_context *ctx, in ctx_sched_in() argument
2763 int is_active = ctx->is_active; in ctx_sched_in()
2765 ctx->is_active |= event_type; in ctx_sched_in()
2766 if (likely(!ctx->nr_events)) in ctx_sched_in()
2770 ctx->timestamp = now; in ctx_sched_in()
2771 perf_cgroup_set_timestamp(task, ctx); in ctx_sched_in()
2777 ctx_pinned_sched_in(ctx, cpuctx); in ctx_sched_in()
2781 ctx_flexible_sched_in(ctx, cpuctx); in ctx_sched_in()
2788 struct perf_event_context *ctx = &cpuctx->ctx; in cpu_ctx_sched_in() local
2790 ctx_sched_in(ctx, cpuctx, event_type, task); in cpu_ctx_sched_in()
2793 static void perf_event_context_sched_in(struct perf_event_context *ctx, in perf_event_context_sched_in() argument
2798 cpuctx = __get_cpu_context(ctx); in perf_event_context_sched_in()
2799 if (cpuctx->task_ctx == ctx) in perf_event_context_sched_in()
2802 perf_ctx_lock(cpuctx, ctx); in perf_event_context_sched_in()
2803 perf_pmu_disable(ctx->pmu); in perf_event_context_sched_in()
2811 if (ctx->nr_events) in perf_event_context_sched_in()
2812 cpuctx->task_ctx = ctx; in perf_event_context_sched_in()
2816 perf_pmu_enable(ctx->pmu); in perf_event_context_sched_in()
2817 perf_ctx_unlock(cpuctx, ctx); in perf_event_context_sched_in()
2834 struct perf_event_context *ctx; in __perf_event_task_sched_in() local
2838 ctx = task->perf_event_ctxp[ctxn]; in __perf_event_task_sched_in()
2839 if (likely(!ctx)) in __perf_event_task_sched_in()
2842 perf_event_context_sched_in(ctx, task); in __perf_event_task_sched_in()
2966 static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, in perf_adjust_freq_unthr_context() argument
2979 if (!(ctx->nr_freq || needs_unthr)) in perf_adjust_freq_unthr_context()
2982 raw_spin_lock(&ctx->lock); in perf_adjust_freq_unthr_context()
2983 perf_pmu_disable(ctx->pmu); in perf_adjust_freq_unthr_context()
2985 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_adjust_freq_unthr_context()
3029 perf_pmu_enable(ctx->pmu); in perf_adjust_freq_unthr_context()
3030 raw_spin_unlock(&ctx->lock); in perf_adjust_freq_unthr_context()
3036 static void rotate_ctx(struct perf_event_context *ctx) in rotate_ctx() argument
3042 if (!ctx->rotate_disable) in rotate_ctx()
3043 list_rotate_left(&ctx->flexible_groups); in rotate_ctx()
3048 struct perf_event_context *ctx = NULL; in perf_rotate_context() local
3051 if (cpuctx->ctx.nr_events) { in perf_rotate_context()
3052 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) in perf_rotate_context()
3056 ctx = cpuctx->task_ctx; in perf_rotate_context()
3057 if (ctx && ctx->nr_events) { in perf_rotate_context()
3058 if (ctx->nr_events != ctx->nr_active) in perf_rotate_context()
3066 perf_pmu_disable(cpuctx->ctx.pmu); in perf_rotate_context()
3069 if (ctx) in perf_rotate_context()
3070 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE); in perf_rotate_context()
3072 rotate_ctx(&cpuctx->ctx); in perf_rotate_context()
3073 if (ctx) in perf_rotate_context()
3074 rotate_ctx(ctx); in perf_rotate_context()
3076 perf_event_sched_in(cpuctx, ctx, current); in perf_rotate_context()
3078 perf_pmu_enable(cpuctx->ctx.pmu); in perf_rotate_context()
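The perf_rotate_context() matches above (source lines 3048-3078) show the multiplexing step: when more events exist than can be active at once, the flexible groups are scheduled out, the flexible list is rotated left (unless rotate_disable is set), and everything is scheduled back in, so a different group gets first pick of the hardware counters. A toy stand-alone sketch of what list_rotate_left() achieves, using a plain array instead of the kernel's linked list (rotate_left() is an invented helper for illustration):

#include <stdio.h>

/* Toy model of list_rotate_left() on the flexible-group list:
 * the first group moves to the tail. */
static void rotate_left(int *groups, int n)
{
	int first = groups[0];

	for (int i = 1; i < n; i++)
		groups[i - 1] = groups[i];
	groups[n - 1] = first;
}

int main(void)
{
	int groups[] = { 1, 2, 3, 4 };

	rotate_left(groups, 4);
	for (int i = 0; i < 4; i++)
		printf("%d ", groups[i]);	/* prints: 2 3 4 1 */
	printf("\n");
	return 0;
}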
3099 struct perf_event_context *ctx, *tmp; in perf_event_task_tick() local
3107 list_for_each_entry_safe(ctx, tmp, head, active_ctx_list) in perf_event_task_tick()
3108 perf_adjust_freq_unthr_context(ctx, throttled); in perf_event_task_tick()
3112 struct perf_event_context *ctx) in event_enable_on_exec() argument
3130 static void perf_event_enable_on_exec(struct perf_event_context *ctx) in perf_event_enable_on_exec() argument
3139 if (!ctx || !ctx->nr_events) in perf_event_enable_on_exec()
3151 raw_spin_lock(&ctx->lock); in perf_event_enable_on_exec()
3152 task_ctx_sched_out(ctx); in perf_event_enable_on_exec()
3154 list_for_each_entry(event, &ctx->event_list, event_entry) { in perf_event_enable_on_exec()
3155 ret = event_enable_on_exec(event, ctx); in perf_event_enable_on_exec()
3164 clone_ctx = unclone_ctx(ctx); in perf_event_enable_on_exec()
3166 raw_spin_unlock(&ctx->lock); in perf_event_enable_on_exec()
3171 perf_event_context_sched_in(ctx, ctx->task); in perf_event_enable_on_exec()
3181 struct perf_event_context *ctx; in perf_event_exec() local
3186 ctx = current->perf_event_ctxp[ctxn]; in perf_event_exec()
3187 if (!ctx) in perf_event_exec()
3190 perf_event_enable_on_exec(ctx); in perf_event_exec()
3201 struct perf_event_context *ctx = event->ctx; in __perf_event_read() local
3202 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_event_read()
3211 if (ctx->task && cpuctx->task_ctx != ctx) in __perf_event_read()
3214 raw_spin_lock(&ctx->lock); in __perf_event_read()
3215 if (ctx->is_active) { in __perf_event_read()
3216 update_context_time(ctx); in __perf_event_read()
3222 raw_spin_unlock(&ctx->lock); in __perf_event_read()
3243 struct perf_event_context *ctx = event->ctx; in perf_event_read() local
3246 raw_spin_lock_irqsave(&ctx->lock, flags); in perf_event_read()
3252 if (ctx->is_active) { in perf_event_read()
3253 update_context_time(ctx); in perf_event_read()
3257 raw_spin_unlock_irqrestore(&ctx->lock, flags); in perf_event_read()
3266 static void __perf_event_init_context(struct perf_event_context *ctx) in __perf_event_init_context() argument
3268 raw_spin_lock_init(&ctx->lock); in __perf_event_init_context()
3269 mutex_init(&ctx->mutex); in __perf_event_init_context()
3270 INIT_LIST_HEAD(&ctx->active_ctx_list); in __perf_event_init_context()
3271 INIT_LIST_HEAD(&ctx->pinned_groups); in __perf_event_init_context()
3272 INIT_LIST_HEAD(&ctx->flexible_groups); in __perf_event_init_context()
3273 INIT_LIST_HEAD(&ctx->event_list); in __perf_event_init_context()
3274 atomic_set(&ctx->refcount, 1); in __perf_event_init_context()
3275 INIT_DELAYED_WORK(&ctx->orphans_remove, orphans_remove_work); in __perf_event_init_context()
3281 struct perf_event_context *ctx; in alloc_perf_context() local
3283 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL); in alloc_perf_context()
3284 if (!ctx) in alloc_perf_context()
3287 __perf_event_init_context(ctx); in alloc_perf_context()
3289 ctx->task = task; in alloc_perf_context()
3292 ctx->pmu = pmu; in alloc_perf_context()
3294 return ctx; in alloc_perf_context()
3334 struct perf_event_context *ctx, *clone_ctx = NULL; in find_get_context() local
3355 ctx = &cpuctx->ctx; in find_get_context()
3356 get_ctx(ctx); in find_get_context()
3357 ++ctx->pin_count; in find_get_context()
3359 return ctx; in find_get_context()
3376 ctx = perf_lock_task_context(task, ctxn, &flags); in find_get_context()
3377 if (ctx) { in find_get_context()
3378 clone_ctx = unclone_ctx(ctx); in find_get_context()
3379 ++ctx->pin_count; in find_get_context()
3381 if (task_ctx_data && !ctx->task_ctx_data) { in find_get_context()
3382 ctx->task_ctx_data = task_ctx_data; in find_get_context()
3385 raw_spin_unlock_irqrestore(&ctx->lock, flags); in find_get_context()
3390 ctx = alloc_perf_context(pmu, task); in find_get_context()
3392 if (!ctx) in find_get_context()
3396 ctx->task_ctx_data = task_ctx_data; in find_get_context()
3411 get_ctx(ctx); in find_get_context()
3412 ++ctx->pin_count; in find_get_context()
3413 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx); in find_get_context()
3418 put_ctx(ctx); in find_get_context()
3427 return ctx; in find_get_context()
3552 struct perf_event_context *ctx) in exclusive_event_installable() argument
3560 list_for_each_entry(iter_event, &ctx->event_list, event_entry) { in exclusive_event_installable()
3580 if (event->ctx) in __free_event()
3581 put_ctx(event->ctx); in __free_event()
3683 struct perf_event_context *ctx; in put_event() local
3703 ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING); in put_event()
3704 WARN_ON_ONCE(ctx->parent_ctx); in put_event()
3706 perf_event_ctx_unlock(event, ctx); in put_event()
3732 struct perf_event_context *ctx; in orphans_remove_work() local
3735 ctx = container_of(work, struct perf_event_context, in orphans_remove_work()
3738 mutex_lock(&ctx->mutex); in orphans_remove_work()
3739 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) { in orphans_remove_work()
3755 raw_spin_lock_irq(&ctx->lock); in orphans_remove_work()
3756 ctx->orphans_remove_sched = false; in orphans_remove_work()
3757 raw_spin_unlock_irq(&ctx->lock); in orphans_remove_work()
3758 mutex_unlock(&ctx->mutex); in orphans_remove_work()
3760 put_ctx(ctx); in orphans_remove_work()
3793 struct perf_event_context *ctx = leader->ctx; in perf_event_read_group() local
3798 lockdep_assert_held(&ctx->mutex); in perf_event_read_group()
3891 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_read_hw()
3904 struct perf_event_context *ctx; in perf_read() local
3907 ctx = perf_event_ctx_lock(event); in perf_read()
3909 perf_event_ctx_unlock(event, ctx); in perf_read()
3955 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_event_for_each_child()
3967 struct perf_event_context *ctx = event->ctx; in perf_event_for_each() local
3970 lockdep_assert_held(&ctx->mutex); in perf_event_for_each()
3988 struct perf_event_context *ctx = event->ctx; in __perf_event_period() local
3992 raw_spin_lock(&ctx->lock); in __perf_event_period()
4002 perf_pmu_disable(ctx->pmu); in __perf_event_period()
4010 perf_pmu_enable(ctx->pmu); in __perf_event_period()
4012 raw_spin_unlock(&ctx->lock); in __perf_event_period()
4020 struct perf_event_context *ctx = event->ctx; in perf_event_period() local
4036 task = ctx->task; in perf_event_period()
4048 raw_spin_lock_irq(&ctx->lock); in perf_event_period()
4049 if (ctx->is_active) { in perf_event_period()
4050 raw_spin_unlock_irq(&ctx->lock); in perf_event_period()
4051 task = ctx->task; in perf_event_period()
4056 raw_spin_unlock_irq(&ctx->lock); in perf_event_period()
4152 struct perf_event_context *ctx; in perf_ioctl() local
4155 ctx = perf_event_ctx_lock(event); in perf_ioctl()
4157 perf_event_ctx_unlock(event, ctx); in perf_ioctl()
4184 struct perf_event_context *ctx; in perf_event_task_enable() local
4189 ctx = perf_event_ctx_lock(event); in perf_event_task_enable()
4191 perf_event_ctx_unlock(event, ctx); in perf_event_task_enable()
4200 struct perf_event_context *ctx; in perf_event_task_disable() local
4205 ctx = perf_event_ctx_lock(event); in perf_event_task_disable()
4207 perf_event_ctx_unlock(event, ctx); in perf_event_task_disable()
4660 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_mmap()
5485 perf_event_aux_ctx(struct perf_event_context *ctx, in perf_event_aux_ctx() argument
5491 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_event_aux_ctx()
5505 struct perf_event_context *ctx; in perf_event_aux() local
5514 perf_event_aux_ctx(&cpuctx->ctx, output, data); in perf_event_aux()
5520 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); in perf_event_aux()
5521 if (ctx) in perf_event_aux()
5522 perf_event_aux_ctx(ctx, output, data); in perf_event_aux()
6350 lockdep_is_held(&event->ctx->lock)); in find_swevent_head()
6680 struct perf_event_context *ctx; in perf_tp_event() local
6684 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]); in perf_tp_event()
6685 if (!ctx) in perf_tp_event()
6688 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_tp_event()
7037 local64_set(&event->hw.prev_count, event->ctx->time); in task_clock_event_start()
7044 task_clock_event_update(event, event->ctx->time); in task_clock_event_stop()
7064 u64 delta = now - event->ctx->timestamp; in task_clock_event_read()
7065 u64 time = event->ctx->time + delta; in task_clock_event_read()
7334 __perf_event_init_context(&cpuctx->ctx); in perf_pmu_register()
7335 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); in perf_pmu_register()
7336 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); in perf_pmu_register()
7337 cpuctx->ctx.pmu = pmu; in perf_pmu_register()
7416 struct perf_event_context *ctx = NULL; in perf_try_init_event() local
7427 ctx = perf_event_ctx_lock_nested(event->group_leader, in perf_try_init_event()
7429 BUG_ON(!ctx); in perf_try_init_event()
7435 if (ctx) in perf_try_init_event()
7436 perf_event_ctx_unlock(event->group_leader, ctx); in perf_try_init_event()
7818 if (output_event->cpu == -1 && output_event->ctx != event->ctx) in perf_event_set_output()
7918 struct perf_event_context *ctx, *uninitialized_var(gctx); in SYSCALL_DEFINE5() local
8048 ctx = find_get_context(pmu, task, event); in SYSCALL_DEFINE5()
8049 if (IS_ERR(ctx)) { in SYSCALL_DEFINE5()
8050 err = PTR_ERR(ctx); in SYSCALL_DEFINE5()
8090 if (group_leader->ctx->task != ctx->task) in SYSCALL_DEFINE5()
8101 if (group_leader->ctx != ctx) in SYSCALL_DEFINE5()
8126 gctx = group_leader->ctx; in SYSCALL_DEFINE5()
8132 mutex_lock_double(&gctx->mutex, &ctx->mutex); in SYSCALL_DEFINE5()
8142 mutex_lock(&ctx->mutex); in SYSCALL_DEFINE5()
8145 WARN_ON_ONCE(ctx->parent_ctx); in SYSCALL_DEFINE5()
8167 perf_install_in_context(ctx, sibling, sibling->cpu); in SYSCALL_DEFINE5()
8168 get_ctx(ctx); in SYSCALL_DEFINE5()
8177 perf_install_in_context(ctx, group_leader, group_leader->cpu); in SYSCALL_DEFINE5()
8178 get_ctx(ctx); in SYSCALL_DEFINE5()
8181 if (!exclusive_event_installable(event, ctx)) { in SYSCALL_DEFINE5()
8183 mutex_unlock(&ctx->mutex); in SYSCALL_DEFINE5()
8188 perf_install_in_context(ctx, event, event->cpu); in SYSCALL_DEFINE5()
8189 perf_unpin_context(ctx); in SYSCALL_DEFINE5()
8195 mutex_unlock(&ctx->mutex); in SYSCALL_DEFINE5()
8222 perf_unpin_context(ctx); in SYSCALL_DEFINE5()
8223 put_ctx(ctx); in SYSCALL_DEFINE5()
8256 struct perf_event_context *ctx; in perf_event_create_kernel_counter() local
8274 ctx = find_get_context(event->pmu, task, event); in perf_event_create_kernel_counter()
8275 if (IS_ERR(ctx)) { in perf_event_create_kernel_counter()
8276 err = PTR_ERR(ctx); in perf_event_create_kernel_counter()
8280 WARN_ON_ONCE(ctx->parent_ctx); in perf_event_create_kernel_counter()
8281 mutex_lock(&ctx->mutex); in perf_event_create_kernel_counter()
8282 if (!exclusive_event_installable(event, ctx)) { in perf_event_create_kernel_counter()
8283 mutex_unlock(&ctx->mutex); in perf_event_create_kernel_counter()
8284 perf_unpin_context(ctx); in perf_event_create_kernel_counter()
8285 put_ctx(ctx); in perf_event_create_kernel_counter()
8290 perf_install_in_context(ctx, event, cpu); in perf_event_create_kernel_counter()
8291 perf_unpin_context(ctx); in perf_event_create_kernel_counter()
8292 mutex_unlock(&ctx->mutex); in perf_event_create_kernel_counter()
8310 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx; in perf_pmu_migrate_context()
8311 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx; in perf_pmu_migrate_context()
8391 WARN_ON_ONCE(parent_event->ctx->parent_ctx); in sync_child_event()
8538 struct perf_event_context *ctx) in perf_free_event() argument
8551 raw_spin_lock_irq(&ctx->lock); in perf_free_event()
8553 list_del_event(event, ctx); in perf_free_event()
8554 raw_spin_unlock_irq(&ctx->lock); in perf_free_event()
8567 struct perf_event_context *ctx; in perf_event_free_task() local
8572 ctx = task->perf_event_ctxp[ctxn]; in perf_event_free_task()
8573 if (!ctx) in perf_event_free_task()
8576 mutex_lock(&ctx->mutex); in perf_event_free_task()
8578 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, in perf_event_free_task()
8580 perf_free_event(event, ctx); in perf_event_free_task()
8582 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, in perf_event_free_task()
8584 perf_free_event(event, ctx); in perf_event_free_task()
8586 if (!list_empty(&ctx->pinned_groups) || in perf_event_free_task()
8587 !list_empty(&ctx->flexible_groups)) in perf_event_free_task()
8590 mutex_unlock(&ctx->mutex); in perf_event_free_task()
8592 put_ctx(ctx); in perf_event_free_task()
8664 child_event->ctx = child_ctx; in inherit_event()
8685 WARN_ON_ONCE(parent_event->ctx->parent_ctx); in inherit_event()
8906 struct perf_event_context *ctx = __info; in __perf_event_exit_context() local
8909 list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry) in __perf_event_exit_context()
8916 struct perf_event_context *ctx; in perf_event_exit_cpu_context() local
8922 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx; in perf_event_exit_cpu_context()
8924 mutex_lock(&ctx->mutex); in perf_event_exit_cpu_context()
8925 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1); in perf_event_exit_cpu_context()
8926 mutex_unlock(&ctx->mutex); in perf_event_exit_cpu_context()