
Search for refs:cpu_of: results 1 – 5 of 5, sorted by relevance

/linux-4.1.27/kernel/sched/
sched.h:691    static inline int cpu_of(struct rq *rq)   in cpu_of() (definition)
sched.h:1373   if (!cpu_active(cpu_of(rq)))   in hrtick_enabled()
sched.h:1402   rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));   in sched_rt_avg_update()
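
For context, the definition indexed at sched.h:691 maps a per-CPU runqueue back to the id of the CPU that owns it; every caller below uses it for exactly that. A minimal sketch of the 4.1-era body (paraphrased, comments added):

    /* kernel/sched/sched.h (4.1-era, paraphrased): runqueue -> CPU id. */
    static inline int cpu_of(struct rq *rq)
    {
    #ifdef CONFIG_SMP
            return rq->cpu;         /* each CPU owns one struct rq */
    #else
            return 0;               /* UP build: the single rq belongs to CPU 0 */
    #endif
    }
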
fair.c:299     cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {   in list_add_leaf_cfs_rq()
fair.c:2386    se = tg->se[cpu_of(rq_of(cfs_rq))];   in update_cfs_shares()
fair.c:2705    __update_entity_runnable_avg(rq_clock_task(rq), cpu_of(rq), &rq->avg,   in update_rq_runnable_avg()
fair.c:2784    int cpu = cpu_of(rq_of(cfs_rq));   in update_entity_load_avg()
fair.c:3604    struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];   in tg_unthrottle_up()
fair.c:3621    struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];   in tg_throttle_down()
fair.c:3638    se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];   in throttle_cfs_rq()
fair.c:3684    se = cfs_rq->tg->se[cpu_of(rq)];   in unthrottle_cfs_rq()
fair.c:5878    struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];   in update_cfs_rq_h_load()
fair.c:7061    cpumask_clear_cpu(cpu_of(busiest), cpus);   in load_balance()
[all …]
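
Most of the fair.c hits share one idiom: a task_group keeps per-CPU arrays (tg->se[], tg->cfs_rq[]) and cpu_of() supplies the index for the runqueue at hand. A hedged sketch of the pattern follows; the helper name se_of_group() is hypothetical, not a kernel symbol:

    /* Hypothetical helper showing the recurring fair.c pattern, cf.
     * update_cfs_shares() (fair.c:2386) and throttle_cfs_rq() (fair.c:3638):
     * find the group's scheduling entity on the CPU that owns this cfs_rq. */
    static struct sched_entity *se_of_group(struct cfs_rq *cfs_rq)
    {
            int cpu = cpu_of(rq_of(cfs_rq));        /* runqueue -> CPU id */

            return cfs_rq->tg->se[cpu];             /* per-CPU entity array */
    }
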
core.c:127     delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;   in update_rq_clock()
core.c:346     WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());   in hrtick()
core.c:402     smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);   in hrtick_start()
core.c:561     cpu = cpu_of(rq);   in resched_curr()
core.c:845     irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;   in update_rq_clock_task()
core.c:870     steal = paravirt_steal_clock(cpu_of(rq));   in update_rq_clock_task()
core.c:1455    wq_worker_waking_up(p, cpu_of(rq));   in ttwu_activate()
core.c:2806    cpu = cpu_of(rq);   in __schedule()
core.c:4816    stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);   in set_cpus_allowed_ptr()
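
The core.c hits mostly feed cpu_of() into per-CPU time sources and cross-CPU calls. A simplified sketch of update_rq_clock() around core.c:127, with the lockdep assertion and clock-skip handling trimmed:

    /* Simplified from kernel/sched/core.c (4.1): advance rq->clock by the
     * delta reported by this runqueue's per-CPU scheduler clock. */
    void update_rq_clock(struct rq *rq)
    {
            s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;

            if (delta < 0)
                    return;
            rq->clock += delta;
            update_rq_clock_task(rq, delta);        /* discount irq/steal time */
    }
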
rt.c:462       (rt_rq = iter->rt_rq[cpu_of(rq)]);)
rt.c:481       int cpu = cpu_of(rq);   in sched_rt_rq_enqueue()
rt.c:499       int cpu = cpu_of(rq_of_rt_rq(rt_rq));   in sched_rt_rq_dequeue()
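
rt.c applies the same per-CPU-array idiom to RT group scheduling. A rough sketch of the lookup step in sched_rt_rq_enqueue() (rt.c:481), with the surrounding enqueue logic omitted; the helper name rt_se_of_group() is hypothetical:

    /* Rough sketch of the rt.c:481 pattern: resolve the owning CPU once,
     * then index the task group's per-CPU RT entity array with it. */
    static struct sched_rt_entity *rt_se_of_group(struct rt_rq *rt_rq)
    {
            struct rq *rq = rq_of_rt_rq(rt_rq);
            int cpu = cpu_of(rq);

            return rt_rq->tg->rt_se[cpu];
    }
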
deadline.c:1597   src_dl_b = dl_bw_of(cpu_of(rq));   in set_cpus_allowed_dl()
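
The lone deadline.c hit follows suit: deadline bandwidth is accounted per CPU, so set_cpus_allowed_dl() locates the source bandwidth pool via the runqueue's CPU. Sketch of the lookup only, with the affinity-change logic around it omitted:

    /* Sketch of deadline.c:1597: find the dl_bw accounting structure for
     * the CPU that owns this runqueue before moving bandwidth off it. */
    struct dl_bw *src_dl_b = dl_bw_of(cpu_of(rq));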