Lines matching refs: ctx (kernel/events/core.c)

320 static void update_context_time(struct perf_event_context *ctx);
341 __get_cpu_context(struct perf_event_context *ctx) in __get_cpu_context() argument
343 return this_cpu_ptr(ctx->pmu->pmu_cpu_context); in __get_cpu_context()
347 struct perf_event_context *ctx) in perf_ctx_lock() argument
349 raw_spin_lock(&cpuctx->ctx.lock); in perf_ctx_lock()
350 if (ctx) in perf_ctx_lock()
351 raw_spin_lock(&ctx->lock); in perf_ctx_lock()
355 struct perf_event_context *ctx) in perf_ctx_unlock() argument
357 if (ctx) in perf_ctx_unlock()
358 raw_spin_unlock(&ctx->lock); in perf_ctx_unlock()
359 raw_spin_unlock(&cpuctx->ctx.lock); in perf_ctx_unlock()
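
Note: the perf_ctx_lock()/perf_ctx_unlock() pair above establishes the lock ordering used throughout this file: the per-CPU context lock is taken first, the task context lock (if there is one) second, and they are released in the reverse order. A minimal userspace sketch of that discipline, using pthread mutexes and invented demo_* names rather than the kernel types:

    #include <pthread.h>

    struct demo_cpu_ctx  { pthread_mutex_t lock; };
    struct demo_task_ctx { pthread_mutex_t lock; };

    void demo_ctx_lock(struct demo_cpu_ctx *cpuctx, struct demo_task_ctx *ctx)
    {
        pthread_mutex_lock(&cpuctx->lock);       /* outer lock: always taken */
        if (ctx)
            pthread_mutex_lock(&ctx->lock);      /* inner lock: only when a task context exists */
    }

    void demo_ctx_unlock(struct demo_cpu_ctx *cpuctx, struct demo_task_ctx *ctx)
    {
        if (ctx)
            pthread_mutex_unlock(&ctx->lock);    /* release inner first */
        pthread_mutex_unlock(&cpuctx->lock);     /* then outer */
    }
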
367 struct perf_event_context *ctx = event->ctx; in perf_cgroup_match() local
368 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in perf_cgroup_match()
438 cgrp = perf_cgroup_from_task(current, event->ctx); in update_cgrp_time_from_event()
448 struct perf_event_context *ctx) in perf_cgroup_set_timestamp() argument
458 if (!task || !ctx->nr_cgroups) in perf_cgroup_set_timestamp()
461 cgrp = perf_cgroup_from_task(task, ctx); in perf_cgroup_set_timestamp()
463 info->timestamp = ctx->timestamp; in perf_cgroup_set_timestamp()
505 if (cpuctx->ctx.nr_cgroups > 0) { in perf_cgroup_switch()
507 perf_pmu_disable(cpuctx->ctx.pmu); in perf_cgroup_switch()
527 cpuctx->cgrp = perf_cgroup_from_task(task, &cpuctx->ctx); in perf_cgroup_switch()
530 perf_pmu_enable(cpuctx->ctx.pmu); in perf_cgroup_switch()
657 struct perf_event_context *ctx) in perf_cgroup_mark_enabled() argument
723 struct perf_event_context *ctx) in perf_cgroup_set_timestamp() argument
749 struct perf_event_context *ctx) in perf_cgroup_mark_enabled() argument
785 struct pmu *pmu = cpuctx->ctx.pmu; in __perf_mux_hrtimer_init()
810 struct pmu *pmu = cpuctx->ctx.pmu; in perf_mux_hrtimer_restart()
850 static void perf_event_ctx_activate(struct perf_event_context *ctx) in perf_event_ctx_activate() argument
856 WARN_ON(!list_empty(&ctx->active_ctx_list)); in perf_event_ctx_activate()
858 list_add(&ctx->active_ctx_list, head); in perf_event_ctx_activate()
861 static void perf_event_ctx_deactivate(struct perf_event_context *ctx) in perf_event_ctx_deactivate() argument
865 WARN_ON(list_empty(&ctx->active_ctx_list)); in perf_event_ctx_deactivate()
867 list_del_init(&ctx->active_ctx_list); in perf_event_ctx_deactivate()
870 static void get_ctx(struct perf_event_context *ctx) in get_ctx() argument
872 WARN_ON(!atomic_inc_not_zero(&ctx->refcount)); in get_ctx()
877 struct perf_event_context *ctx; in free_ctx() local
879 ctx = container_of(head, struct perf_event_context, rcu_head); in free_ctx()
880 kfree(ctx->task_ctx_data); in free_ctx()
881 kfree(ctx); in free_ctx()
884 static void put_ctx(struct perf_event_context *ctx) in put_ctx() argument
886 if (atomic_dec_and_test(&ctx->refcount)) { in put_ctx()
887 if (ctx->parent_ctx) in put_ctx()
888 put_ctx(ctx->parent_ctx); in put_ctx()
889 if (ctx->task) in put_ctx()
890 put_task_struct(ctx->task); in put_ctx()
891 call_rcu(&ctx->rcu_head, free_ctx); in put_ctx()
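
Note: get_ctx()/put_ctx()/free_ctx() above implement the context lifetime rules: a reference may only be taken while the count is still non-zero, and the final put drops the parent and task references and frees the structure through call_rcu(). A simplified userspace analogue of the counting itself (C11 atomics, invented demo_* names, plain free() standing in for the RCU-deferred free):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct demo_ctx {
        atomic_int refcount;
        /* ... payload ... */
    };

    /* "inc_not_zero": take a reference only while someone else still holds one. */
    int demo_get_ctx(struct demo_ctx *ctx)
    {
        int old = atomic_load(&ctx->refcount);
        while (old != 0) {
            if (atomic_compare_exchange_weak(&ctx->refcount, &old, old + 1))
                return 1;    /* got a reference */
        }
        return 0;            /* object is already being torn down */
    }

    void demo_put_ctx(struct demo_ctx *ctx)
    {
        if (atomic_fetch_sub(&ctx->refcount, 1) == 1)
            free(ctx);       /* last reference dropped (kernel: call_rcu(..., free_ctx)) */
    }
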
960 struct perf_event_context *ctx; in perf_event_ctx_lock_nested() local
964 ctx = ACCESS_ONCE(event->ctx); in perf_event_ctx_lock_nested()
965 if (!atomic_inc_not_zero(&ctx->refcount)) { in perf_event_ctx_lock_nested()
971 mutex_lock_nested(&ctx->mutex, nesting); in perf_event_ctx_lock_nested()
972 if (event->ctx != ctx) { in perf_event_ctx_lock_nested()
973 mutex_unlock(&ctx->mutex); in perf_event_ctx_lock_nested()
974 put_ctx(ctx); in perf_event_ctx_lock_nested()
978 return ctx; in perf_event_ctx_lock_nested()
988 struct perf_event_context *ctx) in perf_event_ctx_unlock() argument
990 mutex_unlock(&ctx->mutex); in perf_event_ctx_unlock()
991 put_ctx(ctx); in perf_event_ctx_unlock()
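
Note: perf_event_ctx_lock_nested() above uses a "pin, lock, recheck" loop: take a reference on the context the event currently points at, lock its mutex, and if event->ctx changed in the meantime, drop both and retry. A hedged userspace sketch of that loop (demo_* names are invented; demo_get_ctx()/demo_put_ctx() are the refcount helpers from the previous sketch, declared here but not defined):

    #include <pthread.h>
    #include <stdatomic.h>

    struct demo_ctx {
        pthread_mutex_t mutex;
        /* refcount omitted here; see the previous sketch */
    };

    struct demo_event {
        struct demo_ctx * _Atomic ctx;         /* may be re-pointed concurrently */
    };

    int  demo_get_ctx(struct demo_ctx *ctx);   /* as in the refcount sketch */
    void demo_put_ctx(struct demo_ctx *ctx);

    struct demo_ctx *demo_event_ctx_lock(struct demo_event *event)
    {
        struct demo_ctx *ctx;

        for (;;) {
            ctx = atomic_load(&event->ctx);        /* kernel: ACCESS_ONCE(event->ctx) */
            if (!demo_get_ctx(ctx))                /* context already going away: retry */
                continue;
            pthread_mutex_lock(&ctx->mutex);
            if (atomic_load(&event->ctx) == ctx)   /* still the same context? */
                return ctx;                        /* caller now holds mutex + reference */
            pthread_mutex_unlock(&ctx->mutex);     /* it moved: undo and retry */
            demo_put_ctx(ctx);
        }
    }
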
1000 unclone_ctx(struct perf_event_context *ctx) in unclone_ctx() argument
1002 struct perf_event_context *parent_ctx = ctx->parent_ctx; in unclone_ctx()
1004 lockdep_assert_held(&ctx->lock); in unclone_ctx()
1007 ctx->parent_ctx = NULL; in unclone_ctx()
1008 ctx->generation++; in unclone_ctx()
1057 struct perf_event_context *ctx; in perf_lock_task_context() local
1071 ctx = rcu_dereference(task->perf_event_ctxp[ctxn]); in perf_lock_task_context()
1072 if (ctx) { in perf_lock_task_context()
1083 raw_spin_lock(&ctx->lock); in perf_lock_task_context()
1084 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) { in perf_lock_task_context()
1085 raw_spin_unlock(&ctx->lock); in perf_lock_task_context()
1091 if (!atomic_inc_not_zero(&ctx->refcount)) { in perf_lock_task_context()
1092 raw_spin_unlock(&ctx->lock); in perf_lock_task_context()
1093 ctx = NULL; in perf_lock_task_context()
1097 if (!ctx) in perf_lock_task_context()
1099 return ctx; in perf_lock_task_context()
1110 struct perf_event_context *ctx; in perf_pin_task_context() local
1113 ctx = perf_lock_task_context(task, ctxn, &flags); in perf_pin_task_context()
1114 if (ctx) { in perf_pin_task_context()
1115 ++ctx->pin_count; in perf_pin_task_context()
1116 raw_spin_unlock_irqrestore(&ctx->lock, flags); in perf_pin_task_context()
1118 return ctx; in perf_pin_task_context()
1121 static void perf_unpin_context(struct perf_event_context *ctx) in perf_unpin_context() argument
1125 raw_spin_lock_irqsave(&ctx->lock, flags); in perf_unpin_context()
1126 --ctx->pin_count; in perf_unpin_context()
1127 raw_spin_unlock_irqrestore(&ctx->lock, flags); in perf_unpin_context()
1133 static void update_context_time(struct perf_event_context *ctx) in update_context_time() argument
1137 ctx->time += now - ctx->timestamp; in update_context_time()
1138 ctx->timestamp = now; in update_context_time()
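
Note: update_context_time() above is plain interval accounting: the context accumulates the time elapsed since the last update and then remembers the new timestamp. A tiny worked sketch (invented demo_* names):

    #include <stdint.h>

    struct demo_ctx_time {
        uint64_t time;       /* total time accumulated so far */
        uint64_t timestamp;  /* when it was last updated */
    };

    void demo_update_context_time(struct demo_ctx_time *ctx, uint64_t now)
    {
        ctx->time += now - ctx->timestamp;  /* add the interval since the last update */
        ctx->timestamp = now;               /* the next interval starts here */
    }

    /* Example: time=0, timestamp=100; an update at now=130 gives time=30, timestamp=130. */
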
1143 struct perf_event_context *ctx = event->ctx; in perf_event_time() local
1148 return ctx ? ctx->time : 0; in perf_event_time()
1157 struct perf_event_context *ctx = event->ctx; in update_event_times() local
1175 else if (ctx->is_active) in update_event_times()
1176 run_end = ctx->time; in update_event_times()
1204 ctx_group_list(struct perf_event *event, struct perf_event_context *ctx) in ctx_group_list() argument
1207 return &ctx->pinned_groups; in ctx_group_list()
1209 return &ctx->flexible_groups; in ctx_group_list()
1217 list_add_event(struct perf_event *event, struct perf_event_context *ctx) in list_add_event() argument
1233 list = ctx_group_list(event, ctx); in list_add_event()
1238 ctx->nr_cgroups++; in list_add_event()
1240 list_add_rcu(&event->event_entry, &ctx->event_list); in list_add_event()
1241 ctx->nr_events++; in list_add_event()
1243 ctx->nr_stat++; in list_add_event()
1245 ctx->generation++; in list_add_event()
1384 WARN_ON_ONCE(group_leader->ctx != event->ctx); in perf_group_attach()
1404 list_del_event(struct perf_event *event, struct perf_event_context *ctx) in list_del_event() argument
1408 WARN_ON_ONCE(event->ctx != ctx); in list_del_event()
1409 lockdep_assert_held(&ctx->lock); in list_del_event()
1420 ctx->nr_cgroups--; in list_del_event()
1421 cpuctx = __get_cpu_context(ctx); in list_del_event()
1427 if (!ctx->nr_cgroups) in list_del_event()
1431 ctx->nr_events--; in list_del_event()
1433 ctx->nr_stat--; in list_del_event()
1452 ctx->generation++; in list_del_event()
1493 WARN_ON_ONCE(sibling->ctx != event->ctx); in perf_group_detach()
1522 static void schedule_orphans_remove(struct perf_event_context *ctx) in schedule_orphans_remove() argument
1524 if (!ctx->task || ctx->orphans_remove_sched || !perf_wq) in schedule_orphans_remove()
1527 if (queue_delayed_work(perf_wq, &ctx->orphans_remove, 1)) { in schedule_orphans_remove()
1528 get_ctx(ctx); in schedule_orphans_remove()
1529 ctx->orphans_remove_sched = true; in schedule_orphans_remove()
1558 struct perf_event_context *ctx) in event_sched_out() argument
1563 WARN_ON_ONCE(event->ctx != ctx); in event_sched_out()
1564 lockdep_assert_held(&ctx->lock); in event_sched_out()
1595 if (!--ctx->nr_active) in event_sched_out()
1596 perf_event_ctx_deactivate(ctx); in event_sched_out()
1598 ctx->nr_freq--; in event_sched_out()
1603 schedule_orphans_remove(ctx); in event_sched_out()
1611 struct perf_event_context *ctx) in group_sched_out() argument
1616 event_sched_out(group_event, cpuctx, ctx); in group_sched_out()
1622 event_sched_out(event, cpuctx, ctx); in group_sched_out()
1643 struct perf_event_context *ctx = event->ctx; in __perf_remove_from_context() local
1644 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_remove_from_context()
1646 raw_spin_lock(&ctx->lock); in __perf_remove_from_context()
1647 event_sched_out(event, cpuctx, ctx); in __perf_remove_from_context()
1650 list_del_event(event, ctx); in __perf_remove_from_context()
1651 if (!ctx->nr_events && cpuctx->task_ctx == ctx) { in __perf_remove_from_context()
1652 ctx->is_active = 0; in __perf_remove_from_context()
1655 raw_spin_unlock(&ctx->lock); in __perf_remove_from_context()
1676 struct perf_event_context *ctx = event->ctx; in perf_remove_from_context() local
1677 struct task_struct *task = ctx->task; in perf_remove_from_context()
1683 lockdep_assert_held(&ctx->mutex); in perf_remove_from_context()
1700 raw_spin_lock_irq(&ctx->lock); in perf_remove_from_context()
1705 if (ctx->is_active) { in perf_remove_from_context()
1706 raw_spin_unlock_irq(&ctx->lock); in perf_remove_from_context()
1711 task = ctx->task; in perf_remove_from_context()
1721 list_del_event(event, ctx); in perf_remove_from_context()
1722 raw_spin_unlock_irq(&ctx->lock); in perf_remove_from_context()
1731 struct perf_event_context *ctx = event->ctx; in __perf_event_disable() local
1732 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_event_disable()
1741 if (ctx->task && cpuctx->task_ctx != ctx) in __perf_event_disable()
1744 raw_spin_lock(&ctx->lock); in __perf_event_disable()
1751 update_context_time(ctx); in __perf_event_disable()
1755 group_sched_out(event, cpuctx, ctx); in __perf_event_disable()
1757 event_sched_out(event, cpuctx, ctx); in __perf_event_disable()
1761 raw_spin_unlock(&ctx->lock); in __perf_event_disable()
1781 struct perf_event_context *ctx = event->ctx; in _perf_event_disable() local
1782 struct task_struct *task = ctx->task; in _perf_event_disable()
1796 raw_spin_lock_irq(&ctx->lock); in _perf_event_disable()
1801 raw_spin_unlock_irq(&ctx->lock); in _perf_event_disable()
1806 task = ctx->task; in _perf_event_disable()
1818 raw_spin_unlock_irq(&ctx->lock); in _perf_event_disable()
1827 struct perf_event_context *ctx; in perf_event_disable() local
1829 ctx = perf_event_ctx_lock(event); in perf_event_disable()
1831 perf_event_ctx_unlock(event, ctx); in perf_event_disable()
1836 struct perf_event_context *ctx, in perf_set_shadow_time() argument
1867 event->shadow_ctx_time = tstamp - ctx->timestamp; in perf_set_shadow_time()
1878 struct perf_event_context *ctx) in event_sched_in() argument
1883 lockdep_assert_held(&ctx->lock); in event_sched_in()
1908 perf_set_shadow_time(event, ctx, tstamp); in event_sched_in()
1923 if (!ctx->nr_active++) in event_sched_in()
1924 perf_event_ctx_activate(ctx); in event_sched_in()
1926 ctx->nr_freq++; in event_sched_in()
1932 schedule_orphans_remove(ctx); in event_sched_in()
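
Note: event_sched_in() and event_sched_out() above keep a per-context count of active events and add the context to (or remove it from) the per-CPU list of active contexts exactly on the 0-to-1 and 1-to-0 transitions. A minimal sketch of that transition counting (invented demo_* names; a flag stands in for the active_ctx_list linkage):

    struct demo_ctx {
        int nr_active;
        int on_active_list;
    };

    void demo_sched_in(struct demo_ctx *ctx)
    {
        if (!ctx->nr_active++)       /* 0 -> 1: first active event */
            ctx->on_active_list = 1;
    }

    void demo_sched_out(struct demo_ctx *ctx)
    {
        if (!--ctx->nr_active)       /* 1 -> 0: last active event gone */
            ctx->on_active_list = 0;
    }
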
1943 struct perf_event_context *ctx) in group_sched_in() argument
1946 struct pmu *pmu = ctx->pmu; in group_sched_in()
1947 u64 now = ctx->time; in group_sched_in()
1955 if (event_sched_in(group_event, cpuctx, ctx)) { in group_sched_in()
1965 if (event_sched_in(event, cpuctx, ctx)) { in group_sched_in()
1997 event_sched_out(event, cpuctx, ctx); in group_sched_in()
2000 event_sched_out(group_event, cpuctx, ctx); in group_sched_in()
2041 struct perf_event_context *ctx) in add_event_to_ctx() argument
2045 list_add_event(event, ctx); in add_event_to_ctx()
2052 static void task_ctx_sched_out(struct perf_event_context *ctx);
2054 ctx_sched_in(struct perf_event_context *ctx,
2060 struct perf_event_context *ctx, in perf_event_sched_in() argument
2064 if (ctx) in perf_event_sched_in()
2065 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task); in perf_event_sched_in()
2067 if (ctx) in perf_event_sched_in()
2068 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task); in perf_event_sched_in()
2079 struct perf_event_context *ctx = event->ctx; in __perf_install_in_context() local
2080 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_install_in_context()
2085 perf_pmu_disable(cpuctx->ctx.pmu); in __perf_install_in_context()
2097 if (ctx->task && task_ctx != ctx) { in __perf_install_in_context()
2100 raw_spin_lock(&ctx->lock); in __perf_install_in_context()
2101 task_ctx = ctx; in __perf_install_in_context()
2111 update_context_time(ctx); in __perf_install_in_context()
2119 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2126 perf_pmu_enable(cpuctx->ctx.pmu); in __perf_install_in_context()
2143 perf_install_in_context(struct perf_event_context *ctx, in perf_install_in_context() argument
2147 struct task_struct *task = ctx->task; in perf_install_in_context()
2149 lockdep_assert_held(&ctx->mutex); in perf_install_in_context()
2151 event->ctx = ctx; in perf_install_in_context()
2168 raw_spin_lock_irq(&ctx->lock); in perf_install_in_context()
2173 if (ctx->is_active) { in perf_install_in_context()
2174 raw_spin_unlock_irq(&ctx->lock); in perf_install_in_context()
2179 task = ctx->task; in perf_install_in_context()
2187 add_event_to_ctx(event, ctx); in perf_install_in_context()
2188 raw_spin_unlock_irq(&ctx->lock); in perf_install_in_context()
2218 struct perf_event_context *ctx = event->ctx; in __perf_event_enable() local
2220 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_event_enable()
2232 if (!ctx->is_active) in __perf_event_enable()
2235 raw_spin_lock(&ctx->lock); in __perf_event_enable()
2236 update_context_time(ctx); in __perf_event_enable()
2244 perf_cgroup_set_timestamp(current, ctx); in __perf_event_enable()
2265 err = group_sched_in(event, cpuctx, ctx); in __perf_event_enable()
2267 err = event_sched_in(event, cpuctx, ctx); in __perf_event_enable()
2276 group_sched_out(leader, cpuctx, ctx); in __perf_event_enable()
2286 raw_spin_unlock(&ctx->lock); in __perf_event_enable()
2302 struct perf_event_context *ctx = event->ctx; in _perf_event_enable() local
2303 struct task_struct *task = ctx->task; in _perf_event_enable()
2313 raw_spin_lock_irq(&ctx->lock); in _perf_event_enable()
2328 if (!ctx->is_active) { in _perf_event_enable()
2333 raw_spin_unlock_irq(&ctx->lock); in _perf_event_enable()
2338 raw_spin_lock_irq(&ctx->lock); in _perf_event_enable()
2344 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) { in _perf_event_enable()
2349 task = ctx->task; in _perf_event_enable()
2354 raw_spin_unlock_irq(&ctx->lock); in _perf_event_enable()
2362 struct perf_event_context *ctx; in perf_event_enable() local
2364 ctx = perf_event_ctx_lock(event); in perf_event_enable()
2366 perf_event_ctx_unlock(event, ctx); in perf_event_enable()
2389 struct perf_event_context *ctx; in perf_event_refresh() local
2392 ctx = perf_event_ctx_lock(event); in perf_event_refresh()
2394 perf_event_ctx_unlock(event, ctx); in perf_event_refresh()
2400 static void ctx_sched_out(struct perf_event_context *ctx, in ctx_sched_out() argument
2405 int is_active = ctx->is_active; in ctx_sched_out()
2407 ctx->is_active &= ~event_type; in ctx_sched_out()
2408 if (likely(!ctx->nr_events)) in ctx_sched_out()
2411 update_context_time(ctx); in ctx_sched_out()
2413 if (!ctx->nr_active) in ctx_sched_out()
2416 perf_pmu_disable(ctx->pmu); in ctx_sched_out()
2418 list_for_each_entry(event, &ctx->pinned_groups, group_entry) in ctx_sched_out()
2419 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
2423 list_for_each_entry(event, &ctx->flexible_groups, group_entry) in ctx_sched_out()
2424 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
2426 perf_pmu_enable(ctx->pmu); in ctx_sched_out()
2513 static void perf_event_sync_stat(struct perf_event_context *ctx, in perf_event_sync_stat() argument
2518 if (!ctx->nr_stat) in perf_event_sync_stat()
2521 update_context_time(ctx); in perf_event_sync_stat()
2523 event = list_first_entry(&ctx->event_list, in perf_event_sync_stat()
2529 while (&event->event_entry != &ctx->event_list && in perf_event_sync_stat()
2542 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn]; in perf_event_context_sched_out() local
2548 if (likely(!ctx)) in perf_event_context_sched_out()
2551 cpuctx = __get_cpu_context(ctx); in perf_event_context_sched_out()
2560 parent = rcu_dereference(ctx->parent_ctx); in perf_event_context_sched_out()
2567 if (next_parent == ctx || next_ctx == parent || next_parent == parent) { in perf_event_context_sched_out()
2577 raw_spin_lock(&ctx->lock); in perf_event_context_sched_out()
2579 if (context_equiv(ctx, next_ctx)) { in perf_event_context_sched_out()
2585 next->perf_event_ctxp[ctxn] = ctx; in perf_event_context_sched_out()
2586 ctx->task = next; in perf_event_context_sched_out()
2589 swap(ctx->task_ctx_data, next_ctx->task_ctx_data); in perf_event_context_sched_out()
2593 perf_event_sync_stat(ctx, next_ctx); in perf_event_context_sched_out()
2596 raw_spin_unlock(&ctx->lock); in perf_event_context_sched_out()
2602 raw_spin_lock(&ctx->lock); in perf_event_context_sched_out()
2603 ctx_sched_out(ctx, cpuctx, EVENT_ALL); in perf_event_context_sched_out()
2605 raw_spin_unlock(&ctx->lock); in perf_event_context_sched_out()
2699 static void task_ctx_sched_out(struct perf_event_context *ctx) in task_ctx_sched_out() argument
2701 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in task_ctx_sched_out()
2706 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) in task_ctx_sched_out()
2709 ctx_sched_out(ctx, cpuctx, EVENT_ALL); in task_ctx_sched_out()
2719 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type); in cpu_ctx_sched_out()
2723 ctx_pinned_sched_in(struct perf_event_context *ctx, in ctx_pinned_sched_in() argument
2728 list_for_each_entry(event, &ctx->pinned_groups, group_entry) { in ctx_pinned_sched_in()
2736 perf_cgroup_mark_enabled(event, ctx); in ctx_pinned_sched_in()
2739 group_sched_in(event, cpuctx, ctx); in ctx_pinned_sched_in()
2753 ctx_flexible_sched_in(struct perf_event_context *ctx, in ctx_flexible_sched_in() argument
2759 list_for_each_entry(event, &ctx->flexible_groups, group_entry) { in ctx_flexible_sched_in()
2772 perf_cgroup_mark_enabled(event, ctx); in ctx_flexible_sched_in()
2775 if (group_sched_in(event, cpuctx, ctx)) in ctx_flexible_sched_in()
2782 ctx_sched_in(struct perf_event_context *ctx, in ctx_sched_in() argument
2788 int is_active = ctx->is_active; in ctx_sched_in()
2790 ctx->is_active |= event_type; in ctx_sched_in()
2791 if (likely(!ctx->nr_events)) in ctx_sched_in()
2795 ctx->timestamp = now; in ctx_sched_in()
2796 perf_cgroup_set_timestamp(task, ctx); in ctx_sched_in()
2802 ctx_pinned_sched_in(ctx, cpuctx); in ctx_sched_in()
2806 ctx_flexible_sched_in(ctx, cpuctx); in ctx_sched_in()
2813 struct perf_event_context *ctx = &cpuctx->ctx; in cpu_ctx_sched_in() local
2815 ctx_sched_in(ctx, cpuctx, event_type, task); in cpu_ctx_sched_in()
2818 static void perf_event_context_sched_in(struct perf_event_context *ctx, in perf_event_context_sched_in() argument
2823 cpuctx = __get_cpu_context(ctx); in perf_event_context_sched_in()
2824 if (cpuctx->task_ctx == ctx) in perf_event_context_sched_in()
2827 perf_ctx_lock(cpuctx, ctx); in perf_event_context_sched_in()
2828 perf_pmu_disable(ctx->pmu); in perf_event_context_sched_in()
2836 if (ctx->nr_events) in perf_event_context_sched_in()
2837 cpuctx->task_ctx = ctx; in perf_event_context_sched_in()
2841 perf_pmu_enable(ctx->pmu); in perf_event_context_sched_in()
2842 perf_ctx_unlock(cpuctx, ctx); in perf_event_context_sched_in()
2859 struct perf_event_context *ctx; in __perf_event_task_sched_in() local
2863 ctx = task->perf_event_ctxp[ctxn]; in __perf_event_task_sched_in()
2864 if (likely(!ctx)) in __perf_event_task_sched_in()
2867 perf_event_context_sched_in(ctx, task); in __perf_event_task_sched_in()
2994 static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, in perf_adjust_freq_unthr_context() argument
3007 if (!(ctx->nr_freq || needs_unthr)) in perf_adjust_freq_unthr_context()
3010 raw_spin_lock(&ctx->lock); in perf_adjust_freq_unthr_context()
3011 perf_pmu_disable(ctx->pmu); in perf_adjust_freq_unthr_context()
3013 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_adjust_freq_unthr_context()
3057 perf_pmu_enable(ctx->pmu); in perf_adjust_freq_unthr_context()
3058 raw_spin_unlock(&ctx->lock); in perf_adjust_freq_unthr_context()
3064 static void rotate_ctx(struct perf_event_context *ctx) in rotate_ctx() argument
3070 if (!ctx->rotate_disable) in rotate_ctx()
3071 list_rotate_left(&ctx->flexible_groups); in rotate_ctx()
3076 struct perf_event_context *ctx = NULL; in perf_rotate_context() local
3079 if (cpuctx->ctx.nr_events) { in perf_rotate_context()
3080 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) in perf_rotate_context()
3084 ctx = cpuctx->task_ctx; in perf_rotate_context()
3085 if (ctx && ctx->nr_events) { in perf_rotate_context()
3086 if (ctx->nr_events != ctx->nr_active) in perf_rotate_context()
3094 perf_pmu_disable(cpuctx->ctx.pmu); in perf_rotate_context()
3097 if (ctx) in perf_rotate_context()
3098 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE); in perf_rotate_context()
3100 rotate_ctx(&cpuctx->ctx); in perf_rotate_context()
3101 if (ctx) in perf_rotate_context()
3102 rotate_ctx(ctx); in perf_rotate_context()
3104 perf_event_sched_in(cpuctx, ctx, current); in perf_rotate_context()
3106 perf_pmu_enable(cpuctx->ctx.pmu); in perf_rotate_context()
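
Note: rotate_ctx() above (called from perf_rotate_context()) moves the head of the flexible-group list to its tail with list_rotate_left(), so that when not every group fits on the PMU a different group gets first pick after the next rotation. A small array-based sketch of the same round-robin step (illustrative only, not the kernel list implementation):

    #include <stddef.h>

    /* Rotate left by one: {A, B, C, D} becomes {B, C, D, A}. */
    void demo_rotate_left(int *groups, size_t n)
    {
        if (n < 2)
            return;

        int first = groups[0];
        for (size_t i = 1; i < n; i++)
            groups[i - 1] = groups[i];
        groups[n - 1] = first;
    }
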
3127 struct perf_event_context *ctx, *tmp; in perf_event_task_tick() local
3135 list_for_each_entry_safe(ctx, tmp, head, active_ctx_list) in perf_event_task_tick()
3136 perf_adjust_freq_unthr_context(ctx, throttled); in perf_event_task_tick()
3140 struct perf_event_context *ctx) in event_enable_on_exec() argument
3160 struct perf_event_context *ctx, *clone_ctx = NULL; in perf_event_enable_on_exec() local
3167 ctx = current->perf_event_ctxp[ctxn]; in perf_event_enable_on_exec()
3168 if (!ctx || !ctx->nr_events) in perf_event_enable_on_exec()
3180 raw_spin_lock(&ctx->lock); in perf_event_enable_on_exec()
3181 task_ctx_sched_out(ctx); in perf_event_enable_on_exec()
3183 list_for_each_entry(event, &ctx->event_list, event_entry) { in perf_event_enable_on_exec()
3184 ret = event_enable_on_exec(event, ctx); in perf_event_enable_on_exec()
3193 clone_ctx = unclone_ctx(ctx); in perf_event_enable_on_exec()
3195 raw_spin_unlock(&ctx->lock); in perf_event_enable_on_exec()
3200 perf_event_context_sched_in(ctx, ctx->task); in perf_event_enable_on_exec()
3231 struct perf_event_context *ctx = event->ctx; in __perf_event_read() local
3232 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_event_read()
3242 if (ctx->task && cpuctx->task_ctx != ctx) in __perf_event_read()
3245 raw_spin_lock(&ctx->lock); in __perf_event_read()
3246 if (ctx->is_active) { in __perf_event_read()
3247 update_context_time(ctx); in __perf_event_read()
3279 raw_spin_unlock(&ctx->lock); in __perf_event_read()
3361 struct perf_event_context *ctx = event->ctx; in perf_event_read() local
3364 raw_spin_lock_irqsave(&ctx->lock, flags); in perf_event_read()
3370 if (ctx->is_active) { in perf_event_read()
3371 update_context_time(ctx); in perf_event_read()
3378 raw_spin_unlock_irqrestore(&ctx->lock, flags); in perf_event_read()
3387 static void __perf_event_init_context(struct perf_event_context *ctx) in __perf_event_init_context() argument
3389 raw_spin_lock_init(&ctx->lock); in __perf_event_init_context()
3390 mutex_init(&ctx->mutex); in __perf_event_init_context()
3391 INIT_LIST_HEAD(&ctx->active_ctx_list); in __perf_event_init_context()
3392 INIT_LIST_HEAD(&ctx->pinned_groups); in __perf_event_init_context()
3393 INIT_LIST_HEAD(&ctx->flexible_groups); in __perf_event_init_context()
3394 INIT_LIST_HEAD(&ctx->event_list); in __perf_event_init_context()
3395 atomic_set(&ctx->refcount, 1); in __perf_event_init_context()
3396 INIT_DELAYED_WORK(&ctx->orphans_remove, orphans_remove_work); in __perf_event_init_context()
3402 struct perf_event_context *ctx; in alloc_perf_context() local
3404 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL); in alloc_perf_context()
3405 if (!ctx) in alloc_perf_context()
3408 __perf_event_init_context(ctx); in alloc_perf_context()
3410 ctx->task = task; in alloc_perf_context()
3413 ctx->pmu = pmu; in alloc_perf_context()
3415 return ctx; in alloc_perf_context()
3445 struct perf_event_context *ctx, *clone_ctx = NULL; in find_get_context() local
3466 ctx = &cpuctx->ctx; in find_get_context()
3467 get_ctx(ctx); in find_get_context()
3468 ++ctx->pin_count; in find_get_context()
3470 return ctx; in find_get_context()
3487 ctx = perf_lock_task_context(task, ctxn, &flags); in find_get_context()
3488 if (ctx) { in find_get_context()
3489 clone_ctx = unclone_ctx(ctx); in find_get_context()
3490 ++ctx->pin_count; in find_get_context()
3492 if (task_ctx_data && !ctx->task_ctx_data) { in find_get_context()
3493 ctx->task_ctx_data = task_ctx_data; in find_get_context()
3496 raw_spin_unlock_irqrestore(&ctx->lock, flags); in find_get_context()
3501 ctx = alloc_perf_context(pmu, task); in find_get_context()
3503 if (!ctx) in find_get_context()
3507 ctx->task_ctx_data = task_ctx_data; in find_get_context()
3522 get_ctx(ctx); in find_get_context()
3523 ++ctx->pin_count; in find_get_context()
3524 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx); in find_get_context()
3529 put_ctx(ctx); in find_get_context()
3538 return ctx; in find_get_context()
3667 struct perf_event_context *ctx) in exclusive_event_installable() argument
3675 list_for_each_entry(iter_event, &ctx->event_list, event_entry) { in exclusive_event_installable()
3695 if (event->ctx) in __free_event()
3696 put_ctx(event->ctx); in __free_event()
3798 struct perf_event_context *ctx; in put_event() local
3818 ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING); in put_event()
3819 WARN_ON_ONCE(ctx->parent_ctx); in put_event()
3821 perf_event_ctx_unlock(event, ctx); in put_event()
3847 struct perf_event_context *ctx; in orphans_remove_work() local
3850 ctx = container_of(work, struct perf_event_context, in orphans_remove_work()
3853 mutex_lock(&ctx->mutex); in orphans_remove_work()
3854 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) { in orphans_remove_work()
3870 raw_spin_lock_irq(&ctx->lock); in orphans_remove_work()
3871 ctx->orphans_remove_sched = false; in orphans_remove_work()
3872 raw_spin_unlock_irq(&ctx->lock); in orphans_remove_work()
3873 mutex_unlock(&ctx->mutex); in orphans_remove_work()
3875 put_ctx(ctx); in orphans_remove_work()
3954 struct perf_event_context *ctx = leader->ctx; in perf_read_group() local
3958 lockdep_assert_held(&ctx->mutex); in perf_read_group()
4050 WARN_ON_ONCE(event->ctx->parent_ctx); in __perf_read()
4063 struct perf_event_context *ctx; in perf_read() local
4066 ctx = perf_event_ctx_lock(event); in perf_read()
4068 perf_event_ctx_unlock(event, ctx); in perf_read()
4114 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_event_for_each_child()
4126 struct perf_event_context *ctx = event->ctx; in perf_event_for_each() local
4129 lockdep_assert_held(&ctx->mutex); in perf_event_for_each()
4147 struct perf_event_context *ctx = event->ctx; in __perf_event_period() local
4151 raw_spin_lock(&ctx->lock); in __perf_event_period()
4161 perf_pmu_disable(ctx->pmu); in __perf_event_period()
4169 perf_pmu_enable(ctx->pmu); in __perf_event_period()
4171 raw_spin_unlock(&ctx->lock); in __perf_event_period()
4179 struct perf_event_context *ctx = event->ctx; in perf_event_period() local
4195 task = ctx->task; in perf_event_period()
4207 raw_spin_lock_irq(&ctx->lock); in perf_event_period()
4208 if (ctx->is_active) { in perf_event_period()
4209 raw_spin_unlock_irq(&ctx->lock); in perf_event_period()
4210 task = ctx->task; in perf_event_period()
4222 raw_spin_unlock_irq(&ctx->lock); in perf_event_period()
4318 struct perf_event_context *ctx; in perf_ioctl() local
4321 ctx = perf_event_ctx_lock(event); in perf_ioctl()
4323 perf_event_ctx_unlock(event, ctx); in perf_ioctl()
4350 struct perf_event_context *ctx; in perf_event_task_enable() local
4355 ctx = perf_event_ctx_lock(event); in perf_event_task_enable()
4357 perf_event_ctx_unlock(event, ctx); in perf_event_task_enable()
4366 struct perf_event_context *ctx; in perf_event_task_disable() local
4371 ctx = perf_event_ctx_lock(event); in perf_event_task_disable()
4373 perf_event_ctx_unlock(event, ctx); in perf_event_task_disable()
4826 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_mmap()
5656 perf_event_aux_ctx(struct perf_event_context *ctx, in perf_event_aux_ctx() argument
5662 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_event_aux_ctx()
5687 struct perf_event_context *ctx; in perf_event_aux() local
5707 perf_event_aux_ctx(&cpuctx->ctx, output, data); in perf_event_aux()
5711 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); in perf_event_aux()
5712 if (ctx) in perf_event_aux()
5713 perf_event_aux_ctx(ctx, output, data); in perf_event_aux()
6261 if (event->ctx->task) { in perf_event_switch_output()
6279 if (event->ctx->task) in perf_event_switch_output()
6648 lockdep_is_held(&event->ctx->lock)); in find_swevent_head()
6975 struct perf_event_context *ctx; in perf_tp_event() local
6979 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]); in perf_tp_event()
6980 if (!ctx) in perf_tp_event()
6983 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_tp_event()
7331 local64_set(&event->hw.prev_count, event->ctx->time); in task_clock_event_start()
7338 task_clock_event_update(event, event->ctx->time); in task_clock_event_stop()
7358 u64 delta = now - event->ctx->timestamp; in task_clock_event_read()
7359 u64 time = event->ctx->time + delta; in task_clock_event_read()
7659 __perf_event_init_context(&cpuctx->ctx); in perf_pmu_register()
7660 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); in perf_pmu_register()
7661 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); in perf_pmu_register()
7662 cpuctx->ctx.pmu = pmu; in perf_pmu_register()
7741 struct perf_event_context *ctx = NULL; in perf_try_init_event() local
7752 ctx = perf_event_ctx_lock_nested(event->group_leader, in perf_try_init_event()
7754 BUG_ON(!ctx); in perf_try_init_event()
7760 if (ctx) in perf_try_init_event()
7761 perf_event_ctx_unlock(event->group_leader, ctx); in perf_try_init_event()
8147 if (output_event->cpu == -1 && output_event->ctx != event->ctx) in perf_event_set_output()
8247 struct perf_event_context *ctx, *uninitialized_var(gctx); in SYSCALL_DEFINE5() local
8395 ctx = find_get_context(pmu, task, event); in SYSCALL_DEFINE5()
8396 if (IS_ERR(ctx)) { in SYSCALL_DEFINE5()
8397 err = PTR_ERR(ctx); in SYSCALL_DEFINE5()
8432 if (group_leader->ctx->task != ctx->task) in SYSCALL_DEFINE5()
8443 if (group_leader->ctx != ctx) in SYSCALL_DEFINE5()
8468 gctx = group_leader->ctx; in SYSCALL_DEFINE5()
8469 mutex_lock_double(&gctx->mutex, &ctx->mutex); in SYSCALL_DEFINE5()
8471 mutex_lock(&ctx->mutex); in SYSCALL_DEFINE5()
8483 if (!exclusive_event_installable(event, ctx)) { in SYSCALL_DEFINE5()
8491 WARN_ON_ONCE(ctx->parent_ctx); in SYSCALL_DEFINE5()
8530 perf_install_in_context(ctx, sibling, sibling->cpu); in SYSCALL_DEFINE5()
8531 get_ctx(ctx); in SYSCALL_DEFINE5()
8540 perf_install_in_context(ctx, group_leader, group_leader->cpu); in SYSCALL_DEFINE5()
8541 get_ctx(ctx); in SYSCALL_DEFINE5()
8560 perf_install_in_context(ctx, event, event->cpu); in SYSCALL_DEFINE5()
8561 perf_unpin_context(ctx); in SYSCALL_DEFINE5()
8565 mutex_unlock(&ctx->mutex); in SYSCALL_DEFINE5()
8593 mutex_unlock(&ctx->mutex); in SYSCALL_DEFINE5()
8597 perf_unpin_context(ctx); in SYSCALL_DEFINE5()
8598 put_ctx(ctx); in SYSCALL_DEFINE5()
8634 struct perf_event_context *ctx; in perf_event_create_kernel_counter() local
8652 ctx = find_get_context(event->pmu, task, event); in perf_event_create_kernel_counter()
8653 if (IS_ERR(ctx)) { in perf_event_create_kernel_counter()
8654 err = PTR_ERR(ctx); in perf_event_create_kernel_counter()
8658 WARN_ON_ONCE(ctx->parent_ctx); in perf_event_create_kernel_counter()
8659 mutex_lock(&ctx->mutex); in perf_event_create_kernel_counter()
8660 if (!exclusive_event_installable(event, ctx)) { in perf_event_create_kernel_counter()
8661 mutex_unlock(&ctx->mutex); in perf_event_create_kernel_counter()
8662 perf_unpin_context(ctx); in perf_event_create_kernel_counter()
8663 put_ctx(ctx); in perf_event_create_kernel_counter()
8668 perf_install_in_context(ctx, event, cpu); in perf_event_create_kernel_counter()
8669 perf_unpin_context(ctx); in perf_event_create_kernel_counter()
8670 mutex_unlock(&ctx->mutex); in perf_event_create_kernel_counter()
8688 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx; in perf_pmu_migrate_context()
8689 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx; in perf_pmu_migrate_context()
8769 WARN_ON_ONCE(parent_event->ctx->parent_ctx); in sync_child_event()
8925 struct perf_event_context *ctx) in perf_free_event() argument
8938 raw_spin_lock_irq(&ctx->lock); in perf_free_event()
8940 list_del_event(event, ctx); in perf_free_event()
8941 raw_spin_unlock_irq(&ctx->lock); in perf_free_event()
8954 struct perf_event_context *ctx; in perf_event_free_task() local
8959 ctx = task->perf_event_ctxp[ctxn]; in perf_event_free_task()
8960 if (!ctx) in perf_event_free_task()
8963 mutex_lock(&ctx->mutex); in perf_event_free_task()
8965 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, in perf_event_free_task()
8967 perf_free_event(event, ctx); in perf_event_free_task()
8969 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, in perf_event_free_task()
8971 perf_free_event(event, ctx); in perf_event_free_task()
8973 if (!list_empty(&ctx->pinned_groups) || in perf_event_free_task()
8974 !list_empty(&ctx->flexible_groups)) in perf_event_free_task()
8977 mutex_unlock(&ctx->mutex); in perf_event_free_task()
8979 put_ctx(ctx); in perf_event_free_task()
9076 child_event->ctx = child_ctx; in inherit_event()
9097 WARN_ON_ONCE(parent_event->ctx->parent_ctx); in inherit_event()
9317 struct perf_event_context *ctx = __info; in __perf_event_exit_context() local
9320 list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry) in __perf_event_exit_context()
9327 struct perf_event_context *ctx; in perf_event_exit_cpu_context() local
9333 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx; in perf_event_exit_cpu_context()
9335 mutex_lock(&ctx->mutex); in perf_event_exit_cpu_context()
9336 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1); in perf_event_exit_cpu_context()
9337 mutex_unlock(&ctx->mutex); in perf_event_exit_cpu_context()