cpuctx            156 kernel/events/core.c static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
cpuctx            159 kernel/events/core.c 	raw_spin_lock(&cpuctx->ctx.lock);
cpuctx            164 kernel/events/core.c static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
cpuctx            169 kernel/events/core.c 	raw_spin_unlock(&cpuctx->ctx.lock);
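
Note: the two helpers above always take cpuctx->ctx.lock first and release it last; a task context lock, when present, nests inside it. A minimal sketch of both helpers, reconstructed around the listed lines (the "if (ctx)" guards are an assumption from the usual pattern, not shown in the listing):

static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
			  struct perf_event_context *ctx)
{
	/* the per-CPU context lock is always taken first ... */
	raw_spin_lock(&cpuctx->ctx.lock);
	if (ctx)			/* assumed guard for an optional task context */
		raw_spin_lock(&ctx->lock);
}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
			    struct perf_event_context *ctx)
{
	if (ctx)
		raw_spin_unlock(&ctx->lock);
	/* ... and released last, giving the order cpuctx->ctx.lock -> ctx->lock */
	raw_spin_unlock(&cpuctx->ctx.lock);
}
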
cpuctx            212 kernel/events/core.c 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
cpuctx            213 kernel/events/core.c 	struct perf_event_context *task_ctx = cpuctx->task_ctx;
cpuctx            218 kernel/events/core.c 	perf_ctx_lock(cpuctx, task_ctx);
cpuctx            243 kernel/events/core.c 		WARN_ON_ONCE(&cpuctx->ctx != ctx);
cpuctx            246 kernel/events/core.c 	efs->func(event, cpuctx, ctx, efs->data);
cpuctx            248 kernel/events/core.c 	perf_ctx_unlock(cpuctx, task_ctx);
cpuctx            309 kernel/events/core.c 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
cpuctx            322 kernel/events/core.c 	perf_ctx_lock(cpuctx, task_ctx);
cpuctx            338 kernel/events/core.c 			if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
cpuctx            342 kernel/events/core.c 		WARN_ON_ONCE(&cpuctx->ctx != ctx);
cpuctx            345 kernel/events/core.c 	func(event, cpuctx, ctx, data);
cpuctx            347 kernel/events/core.c 	perf_ctx_unlock(cpuctx, task_ctx);
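
Note: the two call sites above (event_function, 212-248, and event_function_local, 309-347) share one pattern: resolve the per-CPU context with __get_cpu_context(), lock cpuctx plus the installed task context, check that ctx is still the context it claims to be (cpuctx->task_ctx for task events, &cpuctx->ctx for CPU events), then run the callback. A simplified, hedged sketch of that shared body; the -ESRCH/current re-checks and early exits are omitted, and the combined WARN form is a simplification of the listed asserts:

	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	struct perf_event_context *task_ctx = cpuctx->task_ctx;

	perf_ctx_lock(cpuctx, task_ctx);
	if (ctx->task) {
		/* task event: ctx must (still) be the installed task context */
		WARN_ON_ONCE(cpuctx->task_ctx && cpuctx->task_ctx != ctx);
	} else {
		/* CPU event: ctx must be this CPU's own context */
		WARN_ON_ONCE(&cpuctx->ctx != ctx);
	}
	func(event, cpuctx, ctx, data);		/* the event_f callback */
	perf_ctx_unlock(cpuctx, task_ctx);
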
cpuctx            439 kernel/events/core.c static bool perf_rotate_context(struct perf_cpu_context *cpuctx);
cpuctx            564 kernel/events/core.c static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
cpuctx            567 kernel/events/core.c static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
cpuctx            679 kernel/events/core.c 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
cpuctx            686 kernel/events/core.c 	if (!cpuctx->cgrp)
cpuctx            695 kernel/events/core.c 	return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
cpuctx            731 kernel/events/core.c static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
cpuctx            733 kernel/events/core.c 	struct perf_cgroup *cgrp = cpuctx->cgrp;
cpuctx            801 kernel/events/core.c 	struct perf_cpu_context *cpuctx;
cpuctx            812 kernel/events/core.c 	list_for_each_entry(cpuctx, list, cgrp_cpuctx_entry) {
cpuctx            813 kernel/events/core.c 		WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
cpuctx            815 kernel/events/core.c 		perf_ctx_lock(cpuctx, cpuctx->task_ctx);
cpuctx            816 kernel/events/core.c 		perf_pmu_disable(cpuctx->ctx.pmu);
cpuctx            819 kernel/events/core.c 			cpu_ctx_sched_out(cpuctx, EVENT_ALL);
cpuctx            824 kernel/events/core.c 			cpuctx->cgrp = NULL;
cpuctx            828 kernel/events/core.c 			WARN_ON_ONCE(cpuctx->cgrp);
cpuctx            836 kernel/events/core.c 			cpuctx->cgrp = perf_cgroup_from_task(task,
cpuctx            837 kernel/events/core.c 							     &cpuctx->ctx);
cpuctx            838 kernel/events/core.c 			cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
cpuctx            840 kernel/events/core.c 		perf_pmu_enable(cpuctx->ctx.pmu);
cpuctx            841 kernel/events/core.c 		perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
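
Note: lines 801-841 are the per-CPU cgroup switch: every cpuctx on this CPU's cgrp_cpuctx_list is locked, its PMU disabled, its events scheduled out and/or back in, and cpuctx->cgrp updated in between. A sketch of one loop iteration, reconstructed around the listed lines; the PERF_CGROUP_SWOUT/SWIN mode flags come from the surrounding function and may differ by kernel version:

	list_for_each_entry(cpuctx, list, cgrp_cpuctx_entry) {
		WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);

		perf_ctx_lock(cpuctx, cpuctx->task_ctx);
		perf_pmu_disable(cpuctx->ctx.pmu);

		if (mode & PERF_CGROUP_SWOUT) {
			cpu_ctx_sched_out(cpuctx, EVENT_ALL);
			/* cgrp is cleared only after the events are gone */
			cpuctx->cgrp = NULL;
		}

		if (mode & PERF_CGROUP_SWIN) {
			WARN_ON_ONCE(cpuctx->cgrp);
			/* set cgrp before scheduling in so event filtering can match */
			cpuctx->cgrp = perf_cgroup_from_task(task,
							     &cpuctx->ctx);
			cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
		}

		perf_pmu_enable(cpuctx->ctx.pmu);
		perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
	}
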
cpuctx            951 kernel/events/core.c 	struct perf_cpu_context *cpuctx;
cpuctx            961 kernel/events/core.c 	cpuctx = __get_cpu_context(ctx);
cpuctx            969 kernel/events/core.c 	if (add && !cpuctx->cgrp) {
cpuctx            973 kernel/events/core.c 			cpuctx->cgrp = cgrp;
cpuctx            983 kernel/events/core.c 		cpuctx->cgrp = NULL;
cpuctx            985 kernel/events/core.c 	cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
cpuctx           1012 kernel/events/core.c static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
cpuctx           1072 kernel/events/core.c 	struct perf_cpu_context *cpuctx;
cpuctx           1077 kernel/events/core.c 	cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
cpuctx           1078 kernel/events/core.c 	rotations = perf_rotate_context(cpuctx);
cpuctx           1080 kernel/events/core.c 	raw_spin_lock(&cpuctx->hrtimer_lock);
cpuctx           1082 kernel/events/core.c 		hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
cpuctx           1084 kernel/events/core.c 		cpuctx->hrtimer_active = 0;
cpuctx           1085 kernel/events/core.c 	raw_spin_unlock(&cpuctx->hrtimer_lock);
cpuctx           1090 kernel/events/core.c static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
cpuctx           1092 kernel/events/core.c 	struct hrtimer *timer = &cpuctx->hrtimer;
cpuctx           1093 kernel/events/core.c 	struct pmu *pmu = cpuctx->ctx.pmu;
cpuctx           1108 kernel/events/core.c 	cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);
cpuctx           1110 kernel/events/core.c 	raw_spin_lock_init(&cpuctx->hrtimer_lock);
cpuctx           1115 kernel/events/core.c static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
cpuctx           1117 kernel/events/core.c 	struct hrtimer *timer = &cpuctx->hrtimer;
cpuctx           1118 kernel/events/core.c 	struct pmu *pmu = cpuctx->ctx.pmu;
cpuctx           1125 kernel/events/core.c 	raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
cpuctx           1126 kernel/events/core.c 	if (!cpuctx->hrtimer_active) {
cpuctx           1127 kernel/events/core.c 		cpuctx->hrtimer_active = 1;
cpuctx           1128 kernel/events/core.c 		hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
cpuctx           1131 kernel/events/core.c 	raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);
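
Note: the multiplexing block (1072-1131) keeps one hrtimer per cpuctx; the handler calls perf_rotate_context() and re-arms itself while rotation is still needed, and perf_mux_hrtimer_restart() arms it lazily under hrtimer_lock. A sketch of the restart path, reconstructed around the listed lines; the software-PMU early return and the hrtimer_start_expires() call (and its exact mode) are assumptions from the usual pattern:

static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
{
	struct hrtimer *timer = &cpuctx->hrtimer;
	struct pmu *pmu = cpuctx->ctx.pmu;
	unsigned long flags;

	/* software PMUs never multiplex, so they never arm the timer */
	if (pmu->task_ctx_nr == perf_sw_context)
		return 0;

	raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
	if (!cpuctx->hrtimer_active) {
		cpuctx->hrtimer_active = 1;
		hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
	}
	raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);

	return 0;
}
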
cpuctx           1909 kernel/events/core.c 			    struct perf_cpu_context *cpuctx,
cpuctx           1915 kernel/events/core.c 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
cpuctx           1944 kernel/events/core.c 		event_sched_out(iter, cpuctx, ctx);
cpuctx           2079 kernel/events/core.c 		  struct perf_cpu_context *cpuctx,
cpuctx           2109 kernel/events/core.c 		cpuctx->active_oncpu--;
cpuctx           2114 kernel/events/core.c 	if (event->attr.exclusive || !cpuctx->active_oncpu)
cpuctx           2115 kernel/events/core.c 		cpuctx->exclusive = 0;
cpuctx           2122 kernel/events/core.c 		struct perf_cpu_context *cpuctx,
cpuctx           2132 kernel/events/core.c 	event_sched_out(group_event, cpuctx, ctx);
cpuctx           2138 kernel/events/core.c 		event_sched_out(event, cpuctx, ctx);
cpuctx           2143 kernel/events/core.c 		cpuctx->exclusive = 0;
cpuctx           2156 kernel/events/core.c 			   struct perf_cpu_context *cpuctx,
cpuctx           2164 kernel/events/core.c 		update_cgrp_time_from_cpuctx(cpuctx);
cpuctx           2167 kernel/events/core.c 	event_sched_out(event, cpuctx, ctx);
cpuctx           2175 kernel/events/core.c 			WARN_ON_ONCE(cpuctx->task_ctx != ctx);
cpuctx           2176 kernel/events/core.c 			cpuctx->task_ctx = NULL;
cpuctx           2222 kernel/events/core.c 				 struct perf_cpu_context *cpuctx,
cpuctx           2235 kernel/events/core.c 		group_sched_out(event, cpuctx, ctx);
cpuctx           2237 kernel/events/core.c 		event_sched_out(event, cpuctx, ctx);
cpuctx           2337 kernel/events/core.c 		 struct perf_cpu_context *cpuctx,
cpuctx           2380 kernel/events/core.c 		cpuctx->active_oncpu++;
cpuctx           2387 kernel/events/core.c 		cpuctx->exclusive = 1;
cpuctx           2397 kernel/events/core.c 	       struct perf_cpu_context *cpuctx,
cpuctx           2408 kernel/events/core.c 	if (event_sched_in(group_event, cpuctx, ctx)) {
cpuctx           2410 kernel/events/core.c 		perf_mux_hrtimer_restart(cpuctx);
cpuctx           2418 kernel/events/core.c 		if (event_sched_in(event, cpuctx, ctx)) {
cpuctx           2437 kernel/events/core.c 		event_sched_out(event, cpuctx, ctx);
cpuctx           2439 kernel/events/core.c 	event_sched_out(group_event, cpuctx, ctx);
cpuctx           2443 kernel/events/core.c 	perf_mux_hrtimer_restart(cpuctx);
cpuctx           2452 kernel/events/core.c 			   struct perf_cpu_context *cpuctx,
cpuctx           2464 kernel/events/core.c 	if (cpuctx->exclusive)
cpuctx           2470 kernel/events/core.c 	if (event->attr.exclusive && cpuctx->active_oncpu)
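
Note: the exclusive/active_oncpu tests at 2464 and 2470 decide whether a group may go onto the PMU at all. A sketch of that predicate, reconstructed around the listed lines; the software-group shortcut is an assumption from the usual logic:

static int group_can_go_on(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/* groups consisting entirely of software events can always go on */
	if (event->group_caps & PERF_EV_CAP_SOFTWARE)
		return 1;
	/* an exclusive group already on the PMU blocks everyone else */
	if (cpuctx->exclusive)
		return 0;
	/* an exclusive group cannot join events that are already on */
	if (event->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/* otherwise, only if all previous groups could be added */
	return can_add_hw;
}
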
cpuctx           2487 kernel/events/core.c 			  struct perf_cpu_context *cpuctx,
cpuctx           2491 kernel/events/core.c 	     struct perf_cpu_context *cpuctx,
cpuctx           2495 kernel/events/core.c static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
cpuctx           2499 kernel/events/core.c 	if (!cpuctx->task_ctx)
cpuctx           2502 kernel/events/core.c 	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
cpuctx           2505 kernel/events/core.c 	ctx_sched_out(ctx, cpuctx, event_type);
cpuctx           2508 kernel/events/core.c static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
cpuctx           2512 kernel/events/core.c 	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
cpuctx           2514 kernel/events/core.c 		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
cpuctx           2515 kernel/events/core.c 	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
cpuctx           2517 kernel/events/core.c 		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
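
Note: lines 2508-2517 give the scheduling order: CPU pinned groups first, then task pinned, then CPU flexible, then task flexible, so pinned events are never starved by flexible ones. The listed lines cover almost the whole helper; a sketch with only the missing NULL checks filled in:

static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
				struct perf_event_context *ctx,
				struct task_struct *task)
{
	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
}
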
cpuctx           2535 kernel/events/core.c static void ctx_resched(struct perf_cpu_context *cpuctx,
cpuctx           2551 kernel/events/core.c 	perf_pmu_disable(cpuctx->ctx.pmu);
cpuctx           2553 kernel/events/core.c 		task_ctx_sched_out(cpuctx, task_ctx, event_type);
cpuctx           2563 kernel/events/core.c 		cpu_ctx_sched_out(cpuctx, ctx_event_type);
cpuctx           2565 kernel/events/core.c 		cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
cpuctx           2567 kernel/events/core.c 	perf_event_sched_in(cpuctx, task_ctx, current);
cpuctx           2568 kernel/events/core.c 	perf_pmu_enable(cpuctx->ctx.pmu);
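
Note: ctx_resched() (2535-2568) reschedules under a disabled PMU: schedule the task context out, schedule out the affected CPU groups, then bring everything back in via perf_event_sched_in(). A sketch reconstructed around the listed lines; the event_type/ctx_event_type bookkeeping at the top is an assumption from the surrounding code:

static void ctx_resched(struct perf_cpu_context *cpuctx,
			struct perf_event_context *task_ctx,
			enum event_type_t event_type)
{
	enum event_type_t ctx_event_type;
	bool cpu_event = !!(event_type & EVENT_CPU);

	/* rescheduling pinned groups forces the flexible groups to move too */
	if (event_type & EVENT_PINNED)
		event_type |= EVENT_FLEXIBLE;
	ctx_event_type = event_type & EVENT_ALL;

	perf_pmu_disable(cpuctx->ctx.pmu);
	if (task_ctx)
		task_ctx_sched_out(cpuctx, task_ctx, event_type);

	if (cpu_event)
		cpu_ctx_sched_out(cpuctx, ctx_event_type);
	else if (ctx_event_type & EVENT_PINNED)
		cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);

	perf_event_sched_in(cpuctx, task_ctx, current);
	perf_pmu_enable(cpuctx->ctx.pmu);
}
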
cpuctx           2573 kernel/events/core.c 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
cpuctx           2574 kernel/events/core.c 	struct perf_event_context *task_ctx = cpuctx->task_ctx;
cpuctx           2576 kernel/events/core.c 	perf_ctx_lock(cpuctx, task_ctx);
cpuctx           2577 kernel/events/core.c 	ctx_resched(cpuctx, task_ctx, EVENT_ALL|EVENT_CPU);
cpuctx           2578 kernel/events/core.c 	perf_ctx_unlock(cpuctx, task_ctx);
cpuctx           2591 kernel/events/core.c 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
cpuctx           2592 kernel/events/core.c 	struct perf_event_context *task_ctx = cpuctx->task_ctx;
cpuctx           2596 kernel/events/core.c 	raw_spin_lock(&cpuctx->ctx.lock);
cpuctx           2615 kernel/events/core.c 		WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx);
cpuctx           2633 kernel/events/core.c 		ctx_sched_out(ctx, cpuctx, EVENT_TIME);
cpuctx           2635 kernel/events/core.c 		ctx_resched(cpuctx, task_ctx, get_event_type(event));
cpuctx           2641 kernel/events/core.c 	perf_ctx_unlock(cpuctx, task_ctx);
cpuctx           2747 kernel/events/core.c 				struct perf_cpu_context *cpuctx,
cpuctx           2759 kernel/events/core.c 		ctx_sched_out(ctx, cpuctx, EVENT_TIME);
cpuctx           2767 kernel/events/core.c 		ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
cpuctx           2776 kernel/events/core.c 		ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
cpuctx           2780 kernel/events/core.c 	task_ctx = cpuctx->task_ctx;
cpuctx           2784 kernel/events/core.c 	ctx_resched(cpuctx, task_ctx, get_event_type(event));
cpuctx           3001 kernel/events/core.c 			  struct perf_cpu_context *cpuctx,
cpuctx           3015 kernel/events/core.c 			WARN_ON_ONCE(cpuctx->task_ctx);
cpuctx           3024 kernel/events/core.c 		WARN_ON_ONCE(cpuctx->task_ctx != ctx);
cpuctx           3026 kernel/events/core.c 			cpuctx->task_ctx = NULL;
cpuctx           3042 kernel/events/core.c 		update_cgrp_time_from_cpuctx(cpuctx);
cpuctx           3059 kernel/events/core.c 			group_sched_out(event, cpuctx, ctx);
cpuctx           3064 kernel/events/core.c 			group_sched_out(event, cpuctx, ctx);
cpuctx           3177 kernel/events/core.c 	struct perf_cpu_context *cpuctx;
cpuctx           3183 kernel/events/core.c 	cpuctx = __get_cpu_context(ctx);
cpuctx           3184 kernel/events/core.c 	if (!cpuctx->task_ctx)
cpuctx           3239 kernel/events/core.c 		task_ctx_sched_out(cpuctx, ctx, EVENT_ALL);
cpuctx           3248 kernel/events/core.c 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
cpuctx           3252 kernel/events/core.c 	if (!--cpuctx->sched_cb_usage)
cpuctx           3253 kernel/events/core.c 		list_del(&cpuctx->sched_cb_entry);
cpuctx           3259 kernel/events/core.c 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
cpuctx           3261 kernel/events/core.c 	if (!cpuctx->sched_cb_usage++)
cpuctx           3262 kernel/events/core.c 		list_add(&cpuctx->sched_cb_entry, this_cpu_ptr(&sched_cb_list));
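
Note: sched_cb_usage (3248-3262) reference-counts how many users of a PMU want sched_task() callbacks; the cpuctx joins the per-CPU sched_cb_list on the 0->1 transition and leaves on the 1->0 transition. A sketch of the pair, reconstructed around the listed lines; any global perf_sched_cb_usages accounting present in some versions is omitted:

void perf_sched_cb_dec(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

	if (!--cpuctx->sched_cb_usage)
		list_del(&cpuctx->sched_cb_entry);
}

void perf_sched_cb_inc(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

	if (!cpuctx->sched_cb_usage++)
		list_add(&cpuctx->sched_cb_entry, this_cpu_ptr(&sched_cb_list));
}
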
cpuctx           3279 kernel/events/core.c 	struct perf_cpu_context *cpuctx;
cpuctx           3285 kernel/events/core.c 	list_for_each_entry(cpuctx, this_cpu_ptr(&sched_cb_list), sched_cb_entry) {
cpuctx           3286 kernel/events/core.c 		pmu = cpuctx->ctx.pmu; /* software PMUs will not have sched_task */
cpuctx           3291 kernel/events/core.c 		perf_ctx_lock(cpuctx, cpuctx->task_ctx);
cpuctx           3294 kernel/events/core.c 		pmu->sched_task(cpuctx->task_ctx, sched_in);
cpuctx           3297 kernel/events/core.c 		perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
cpuctx           3344 kernel/events/core.c static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
cpuctx           3347 kernel/events/core.c 	ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
cpuctx           3383 kernel/events/core.c 	struct perf_cpu_context *cpuctx;
cpuctx           3397 kernel/events/core.c 	if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
cpuctx           3398 kernel/events/core.c 		if (!group_sched_in(event, sid->cpuctx, sid->ctx))
cpuctx           3422 kernel/events/core.c 	if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
cpuctx           3423 kernel/events/core.c 		int ret = group_sched_in(event, sid->cpuctx, sid->ctx);
cpuctx           3437 kernel/events/core.c 		    struct perf_cpu_context *cpuctx)
cpuctx           3441 kernel/events/core.c 		.cpuctx = cpuctx,
cpuctx           3452 kernel/events/core.c 		      struct perf_cpu_context *cpuctx)
cpuctx           3456 kernel/events/core.c 		.cpuctx = cpuctx,
cpuctx           3467 kernel/events/core.c 	     struct perf_cpu_context *cpuctx,
cpuctx           3482 kernel/events/core.c 			cpuctx->task_ctx = ctx;
cpuctx           3484 kernel/events/core.c 			WARN_ON_ONCE(cpuctx->task_ctx != ctx);
cpuctx           3501 kernel/events/core.c 		ctx_pinned_sched_in(ctx, cpuctx);
cpuctx           3505 kernel/events/core.c 		ctx_flexible_sched_in(ctx, cpuctx);
cpuctx           3508 kernel/events/core.c static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
cpuctx           3512 kernel/events/core.c 	struct perf_event_context *ctx = &cpuctx->ctx;
cpuctx           3514 kernel/events/core.c 	ctx_sched_in(ctx, cpuctx, event_type, task);
cpuctx           3520 kernel/events/core.c 	struct perf_cpu_context *cpuctx;
cpuctx           3522 kernel/events/core.c 	cpuctx = __get_cpu_context(ctx);
cpuctx           3523 kernel/events/core.c 	if (cpuctx->task_ctx == ctx)
cpuctx           3526 kernel/events/core.c 	perf_ctx_lock(cpuctx, ctx);
cpuctx           3544 kernel/events/core.c 		cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
cpuctx           3545 kernel/events/core.c 	perf_event_sched_in(cpuctx, ctx, task);
cpuctx           3549 kernel/events/core.c 	perf_ctx_unlock(cpuctx, ctx);
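
Note: perf_event_context_sched_in() (3518-3549) is the task sched-in entry point: bail out if the context is already installed, otherwise lock, optionally push CPU flexible groups out so task pinned groups can win, and call perf_event_sched_in(). A sketch reconstructed around the listed lines; the nr_events shortcut and the pinned-groups test are assumptions from the surrounding code and may differ by kernel version:

static void perf_event_context_sched_in(struct perf_event_context *ctx,
					struct task_struct *task)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = __get_cpu_context(ctx);
	if (cpuctx->task_ctx == ctx)
		return;

	perf_ctx_lock(cpuctx, ctx);
	if (!ctx->nr_events)
		goto unlock;

	perf_pmu_disable(ctx->pmu);
	/*
	 * Keep the priority order cpu pinned, task pinned, cpu flexible,
	 * task flexible: only displace cpu flexible groups when this task
	 * actually carries pinned groups.
	 */
	if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree))
		cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
	perf_event_sched_in(cpuctx, ctx, task);
	perf_pmu_enable(ctx->pmu);

unlock:
	perf_ctx_unlock(cpuctx, ctx);
}
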
cpuctx           3806 kernel/events/core.c static bool perf_rotate_context(struct perf_cpu_context *cpuctx)
cpuctx           3817 kernel/events/core.c 	cpu_rotate = cpuctx->ctx.rotate_necessary;
cpuctx           3818 kernel/events/core.c 	task_ctx = cpuctx->task_ctx;
cpuctx           3824 kernel/events/core.c 	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
cpuctx           3825 kernel/events/core.c 	perf_pmu_disable(cpuctx->ctx.pmu);
cpuctx           3830 kernel/events/core.c 		cpu_event = ctx_event_to_rotate(&cpuctx->ctx);
cpuctx           3837 kernel/events/core.c 		ctx_sched_out(task_ctx, cpuctx, EVENT_FLEXIBLE);
cpuctx           3839 kernel/events/core.c 		cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
cpuctx           3844 kernel/events/core.c 		rotate_ctx(&cpuctx->ctx, cpu_event);
cpuctx           3846 kernel/events/core.c 	perf_event_sched_in(cpuctx, task_ctx, current);
cpuctx           3848 kernel/events/core.c 	perf_pmu_enable(cpuctx->ctx.pmu);
cpuctx           3849 kernel/events/core.c 	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
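
Note: perf_rotate_context() (3806-3849) runs from the mux hrtimer: under the usual lock/pmu-disable pair it schedules the flexible groups out, rotates each context's flexible list, and schedules everything back in, returning whether another rotation is still needed. A sketch reconstructed around the listed lines; the task-side handling mirrors the cpu side and is partly assumed:

static bool perf_rotate_context(struct perf_cpu_context *cpuctx)
{
	struct perf_event *cpu_event = NULL, *task_event = NULL;
	struct perf_event_context *task_ctx;
	int cpu_rotate, task_rotate;

	cpu_rotate = cpuctx->ctx.rotate_necessary;
	task_ctx = cpuctx->task_ctx;
	task_rotate = task_ctx ? task_ctx->rotate_necessary : 0;

	if (!(cpu_rotate || task_rotate))
		return false;

	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
	perf_pmu_disable(cpuctx->ctx.pmu);

	if (task_rotate)
		task_event = ctx_event_to_rotate(task_ctx);
	if (cpu_rotate)
		cpu_event = ctx_event_to_rotate(&cpuctx->ctx);

	/* as in ctx_resched(): pop task flexible first, then CPU flexible */
	if (task_event || (task_ctx && cpu_event))
		ctx_sched_out(task_ctx, cpuctx, EVENT_FLEXIBLE);
	if (cpu_event)
		cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);

	if (task_event)
		rotate_ctx(task_ctx, task_event);
	if (cpu_event)
		rotate_ctx(&cpuctx->ctx, cpu_event);

	perf_event_sched_in(cpuctx, task_ctx, current);

	perf_pmu_enable(cpuctx->ctx.pmu);
	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);

	return true;
}
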
cpuctx           3893 kernel/events/core.c 	struct perf_cpu_context *cpuctx;
cpuctx           3903 kernel/events/core.c 	cpuctx = __get_cpu_context(ctx);
cpuctx           3904 kernel/events/core.c 	perf_ctx_lock(cpuctx, ctx);
cpuctx           3905 kernel/events/core.c 	ctx_sched_out(ctx, cpuctx, EVENT_TIME);
cpuctx           3916 kernel/events/core.c 		ctx_resched(cpuctx, ctx, event_type);
cpuctx           3918 kernel/events/core.c 		ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
cpuctx           3920 kernel/events/core.c 	perf_ctx_unlock(cpuctx, ctx);
cpuctx           3960 kernel/events/core.c 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
cpuctx           3970 kernel/events/core.c 	if (ctx->task && cpuctx->task_ctx != ctx)
cpuctx           4229 kernel/events/core.c 	struct perf_cpu_context *cpuctx;
cpuctx           4240 kernel/events/core.c 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
cpuctx           4241 kernel/events/core.c 		ctx = &cpuctx->ctx;
cpuctx           5073 kernel/events/core.c 				struct perf_cpu_context *cpuctx,
cpuctx           6967 kernel/events/core.c 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
cpuctx           6973 kernel/events/core.c 	perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false);
cpuctx           6974 kernel/events/core.c 	if (cpuctx->task_ctx)
cpuctx           6975 kernel/events/core.c 		perf_iterate_ctx(cpuctx->task_ctx, __perf_event_output_stop,
cpuctx           9976 kernel/events/core.c 		struct perf_cpu_context *cpuctx;
cpuctx           9977 kernel/events/core.c 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
cpuctx           9978 kernel/events/core.c 		cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
cpuctx           9981 kernel/events/core.c 			(remote_function_f)perf_mux_hrtimer_restart, cpuctx);
cpuctx           10112 kernel/events/core.c 		struct perf_cpu_context *cpuctx;
cpuctx           10114 kernel/events/core.c 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
cpuctx           10115 kernel/events/core.c 		__perf_event_init_context(&cpuctx->ctx);
cpuctx           10116 kernel/events/core.c 		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
cpuctx           10117 kernel/events/core.c 		lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
cpuctx           10118 kernel/events/core.c 		cpuctx->ctx.pmu = pmu;
cpuctx           10119 kernel/events/core.c 		cpuctx->online = cpumask_test_cpu(cpu, perf_online_mask);
cpuctx           10121 kernel/events/core.c 		__perf_mux_hrtimer_init(cpuctx, cpu);
cpuctx           11199 kernel/events/core.c 		struct perf_cpu_context *cpuctx =
cpuctx           11202 kernel/events/core.c 		if (!cpuctx->online) {
cpuctx           11397 kernel/events/core.c 		struct perf_cpu_context *cpuctx =
cpuctx           11399 kernel/events/core.c 		if (!cpuctx->online) {
cpuctx           12148 kernel/events/core.c 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
cpuctx           12152 kernel/events/core.c 	ctx_sched_out(ctx, cpuctx, EVENT_TIME);
cpuctx           12154 kernel/events/core.c 		__perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
cpuctx           12160 kernel/events/core.c 	struct perf_cpu_context *cpuctx;
cpuctx           12166 kernel/events/core.c 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
cpuctx           12167 kernel/events/core.c 		ctx = &cpuctx->ctx;
cpuctx           12171 kernel/events/core.c 		cpuctx->online = 0;
cpuctx           12185 kernel/events/core.c 	struct perf_cpu_context *cpuctx;
cpuctx           12194 kernel/events/core.c 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
cpuctx           12195 kernel/events/core.c 		ctx = &cpuctx->ctx;
cpuctx           12198 kernel/events/core.c 		cpuctx->online = 1;
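
Note: the hotplug tail (12148-12198) tears a CPU's contexts down and brings them back: on offline, each PMU's cpuctx has its events removed on the dying CPU and its online flag cleared under ctx->mutex; on online, the flag is simply set again. A sketch of the offline side, reconstructed around the listed lines; the smp_call_function_single() dispatch and the perf_online_mask update are assumptions from the usual pattern:

static void __perf_event_exit_context(void *__info)
{
	struct perf_event_context *ctx = __info;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	struct perf_event *event;

	raw_spin_lock(&ctx->lock);
	ctx_sched_out(ctx, cpuctx, EVENT_TIME);
	list_for_each_entry(event, &ctx->event_list, event_entry)
		__perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
	raw_spin_unlock(&ctx->lock);
}

static void perf_event_exit_cpu_context(int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	struct pmu *pmu;

	mutex_lock(&pmus_lock);
	list_for_each_entry(pmu, &pmus, entry) {
		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
		ctx = &cpuctx->ctx;

		mutex_lock(&ctx->mutex);
		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
		cpuctx->online = 0;
		mutex_unlock(&ctx->mutex);
	}
	cpumask_clear_cpu(cpu, perf_online_mask);
	mutex_unlock(&pmus_lock);
}
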