cpu_of            154 kernel/sched/core.c 	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
cpu_of            179 kernel/sched/core.c 		steal = paravirt_steal_clock(cpu_of(rq));
cpu_of            214 kernel/sched/core.c 	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
cpu_of            242 kernel/sched/core.c 	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
cpu_of            298 kernel/sched/core.c 		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
cpu_of            517 kernel/sched/core.c 	cpu = cpu_of(rq);
cpu_of           1681 kernel/sched/core.c 		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
cpu_of           1184 kernel/sched/deadline.c 	int cpu = cpu_of(rq);
cpu_of           2250 kernel/sched/deadline.c 		src_dl_b = dl_bw_of(cpu_of(rq));
cpu_of            294 kernel/sched/fair.c 	int cpu = cpu_of(rq);
cpu_of            785 kernel/sched/fair.c 	long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq)));
cpu_of           3780 kernel/sched/fair.c 	cpu = cpu_of(rq_of(cfs_rq));
cpu_of           3822 kernel/sched/fair.c 	if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) {
cpu_of           4468 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
cpu_of           4486 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
cpu_of           4506 kernel/sched/fair.c 	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
cpu_of           4566 kernel/sched/fair.c 	se = cfs_rq->tg->se[cpu_of(rq)];
cpu_of           5045 kernel/sched/fair.c 		struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
cpu_of           5063 kernel/sched/fair.c 		struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
cpu_of           5858 kernel/sched/fair.c 	int core = cpu_of(rq);
cpu_of           7591 kernel/sched/fair.c 	int cpu = cpu_of(rq);
cpu_of           7635 kernel/sched/fair.c 	struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
cpu_of           8973 kernel/sched/fair.c 			__cpumask_clear_cpu(cpu_of(busiest), cpus);
cpu_of           9032 kernel/sched/fair.c 				stop_one_cpu_nowait(cpu_of(busiest),
cpu_of           9140 kernel/sched/fair.c 	int busiest_cpu = cpu_of(busiest_rq);
cpu_of           10590 kernel/sched/fair.c 	return cfs_rq ? cpu_of(rq_of(cfs_rq)) : -1;
cpu_of           10626 kernel/sched/fair.c 	return rq ? cpu_of(rq) : -1;
cpu_of            375 kernel/sched/pelt.c 	running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));
cpu_of            376 kernel/sched/pelt.c 	running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq)));
cpu_of             82 kernel/sched/pelt.h 	delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
cpu_of             83 kernel/sched/pelt.h 	delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));
cpu_of            473 kernel/sched/rt.c 		(rt_rq = iter->rt_rq[cpu_of(rq)]);)
cpu_of            492 kernel/sched/rt.c 	int cpu = cpu_of(rq);
cpu_of            510 kernel/sched/rt.c 	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
cpu_of           2233 kernel/sched/rt.c 		if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
cpu_of           1913 kernel/sched/sched.h 	cpu = cpu_of(rq);
cpu_of           1970 kernel/sched/sched.h 	if (!cpu_active(cpu_of(rq)))
cpu_of           2309 kernel/sched/sched.h 						  cpu_of(rq)));
cpu_of            135 kernel/sched/stats.h 		psi_memstall_tick(rq->curr, cpu_of(rq));
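
For reference, every hit above funnels through the same accessor: cpu_of() maps a runqueue back to the CPU that owns it. A minimal sketch of its definition, assuming the contemporary kernel/sched/sched.h of this era (where struct rq carries a cpu field on SMP builds):

	/* sketch of the accessor in kernel/sched/sched.h */
	static inline int cpu_of(struct rq *rq)
	{
	#ifdef CONFIG_SMP
		return rq->cpu;		/* per-CPU runqueue: return its owning CPU */
	#else
		return 0;		/* UP build: the single runqueue is CPU 0 */
	#endif
	}

This is why call sites such as sched_clock_cpu(cpu_of(rq)) or stop_one_cpu(cpu_of(rq), ...) above are cheap: the lookup is a plain field read, not a search.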