cpu_rq 533 kernel/sched/core.c struct rq *rq = cpu_rq(cpu);
cpu_rq 592 kernel/sched/core.c struct rq *rq = cpu_rq(cpu);
cpu_rq 1250 kernel/sched/core.c memset(&cpu_rq(cpu)->uclamp, 0,
cpu_rq 1252 kernel/sched/core.c cpu_rq(cpu)->uclamp_flags = 0;
cpu_rq 1494 kernel/sched/core.c rq = cpu_rq(new_cpu);
cpu_rq 1762 kernel/sched/core.c dst_rq = cpu_rq(cpu);
cpu_rq 1799 kernel/sched/core.c src_rq = cpu_rq(arg->src_cpu);
cpu_rq 1800 kernel/sched/core.c dst_rq = cpu_rq(arg->dst_cpu);
cpu_rq 2133 kernel/sched/core.c struct task_struct *old_stop = cpu_rq(cpu)->stop;
cpu_rq 2149 kernel/sched/core.c cpu_rq(cpu)->stop = stop;
cpu_rq 2348 kernel/sched/core.c struct rq *rq = cpu_rq(cpu);
cpu_rq 2352 kernel/sched/core.c if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
cpu_rq 2362 kernel/sched/core.c struct rq *rq = cpu_rq(cpu);
cpu_rq 2392 kernel/sched/core.c struct rq *rq = cpu_rq(cpu);
cpu_rq 3397 kernel/sched/core.c sum += cpu_rq(i)->nr_running;
cpu_rq 3427 kernel/sched/core.c sum += cpu_rq(i)->nr_switches;
cpu_rq 3441 kernel/sched/core.c return atomic_read(&cpu_rq(cpu)->nr_iowait);
cpu_rq 3588 kernel/sched/core.c struct rq *rq = cpu_rq(cpu);
cpu_rq 3653 kernel/sched/core.c struct rq *rq = cpu_rq(cpu);
cpu_rq 4006 kernel/sched/core.c rq = cpu_rq(cpu);
cpu_rq 4618 kernel/sched/core.c struct rq *rq = cpu_rq(cpu);
cpu_rq 4659 kernel/sched/core.c return cpu_rq(cpu)->idle;
cpu_rq 6018 kernel/sched/core.c struct rq *rq = cpu_rq(cpu);
cpu_rq 6384 kernel/sched/core.c struct rq *rq = cpu_rq(cpu);
cpu_rq 6456 kernel/sched/core.c struct rq *rq = cpu_rq(cpu);
cpu_rq 6472 kernel/sched/core.c struct rq *rq = cpu_rq(cpu);
cpu_rq 6622 kernel/sched/core.c rq = cpu_rq(i);

cpu_rq 114 kernel/sched/cpuacct.c raw_spin_lock_irq(&cpu_rq(cpu)->lock);
cpu_rq 128 kernel/sched/cpuacct.c raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
cpu_rq 143 kernel/sched/cpuacct.c raw_spin_lock_irq(&cpu_rq(cpu)->lock);
cpu_rq 150 kernel/sched/cpuacct.c raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
cpu_rq 255 kernel/sched/cpuacct.c raw_spin_lock_irq(&cpu_rq(cpu)->lock);
cpu_rq 261 kernel/sched/cpuacct.c raw_spin_unlock_irq(&cpu_rq(cpu)->lock);

cpu_rq 211 kernel/sched/cpufreq_schedutil.c struct rq *rq = cpu_rq(cpu);
cpu_rq 294 kernel/sched/cpufreq_schedutil.c struct rq *rq = cpu_rq(sg_cpu->cpu);
cpu_rq 446 kernel/sched/cpufreq_schedutil.c if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)

cpu_rq 51 kernel/sched/deadline.c return &cpu_rq(i)->rd->dl_bw;
cpu_rq 56 kernel/sched/deadline.c struct root_domain *rd = cpu_rq(i)->rd;
cpu_rq 69 kernel/sched/deadline.c return &cpu_rq(i)->dl.dl_bw;
cpu_rq 557 kernel/sched/deadline.c later_rq = cpu_rq(cpu);
cpu_rq 1610 kernel/sched/deadline.c rq = cpu_rq(cpu);
cpu_rq 1632 kernel/sched/deadline.c cpu_rq(target)->dl.earliest_dl.curr) ||
cpu_rq 1633 kernel/sched/deadline.c (cpu_rq(target)->dl.dl_nr_running == 0)))
cpu_rq 1976 kernel/sched/deadline.c later_rq = cpu_rq(cpu);
cpu_rq 2155 kernel/sched/deadline.c src_rq = cpu_rq(cpu);
cpu_rq 2540 kernel/sched/deadline.c init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
cpu_rq 2792 kernel/sched/deadline.c print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);

cpu_rq 487 kernel/sched/debug.c struct rq *rq = cpu_rq(cpu);
cpu_rq 508 kernel/sched/debug.c rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
cpu_rq 604 kernel/sched/debug.c dl_bw = &cpu_rq(cpu)->rd->dl_bw;
cpu_rq 616 kernel/sched/debug.c struct rq *rq = cpu_rq(cpu);

cpu_rq 1496 kernel/sched/fair.c struct rq *rq = cpu_rq(cpu);
cpu_rq 1523 kernel/sched/fair.c struct rq *rq = cpu_rq(env->dst_cpu);
cpu_rq 1534 kernel/sched/fair.c rq = cpu_rq(env->best_cpu);
cpu_rq 1593 kernel/sched/fair.c struct rq *dst_rq = cpu_rq(env->dst_cpu);
cpu_rq 1850 kernel/sched/fair.c best_rq = cpu_rq(env.best_cpu);
cpu_rq 2280 kernel/sched/fair.c tsk = READ_ONCE(cpu_rq(cpu)->curr);
cpu_rq 4895 kernel/sched/fair.c cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
cpu_rq 5392 kernel/sched/fair.c struct rq *rq = cpu_rq(cpu);
cpu_rq 5405 kernel/sched/fair.c return cpu_rq(cpu)->cpu_capacity;
cpu_rq 5410 kernel/sched/fair.c struct rq *rq = cpu_rq(cpu);
cpu_rq 5497 kernel/sched/fair.c if (sync && cpu_rq(this_cpu)->nr_running == 1)
cpu_rq 5510 kernel/sched/fair.c this_eff_load = cpu_runnable_load(cpu_rq(this_cpu));
cpu_rq 5528 kernel/sched/fair.c prev_eff_load = cpu_runnable_load(cpu_rq(prev_cpu));
cpu_rq 5616 kernel/sched/fair.c load = cpu_runnable_load(cpu_rq(i));
cpu_rq 5619 kernel/sched/fair.c avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
cpu_rq 5730 kernel/sched/fair.c struct rq *rq = cpu_rq(i);
cpu_rq 5757 kernel/sched/fair.c load = cpu_runnable_load(cpu_rq(i));
cpu_rq 6106 kernel/sched/fair.c cfs_rq = &cpu_rq(cpu)->cfs;
cpu_rq 6137 kernel/sched/fair.c cfs_rq = &cpu_rq(cpu)->cfs;
cpu_rq 6219 kernel/sched/fair.c max_cap = cpu_rq(cpu)->rd->max_cpu_capacity;
cpu_rq 6237 kernel/sched/fair.c struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
cpu_rq 6363 kernel/sched/fair.c struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
cpu_rq 7695 kernel/sched/fair.c struct rq *rq = cpu_rq(cpu);
cpu_rq 7773 kernel/sched/fair.c struct rq *rq = cpu_rq(cpu);
cpu_rq 7799 kernel/sched/fair.c cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
cpu_rq 7804 kernel/sched/fair.c cpu_rq(cpu)->cpu_capacity = capacity;
cpu_rq 7838 kernel/sched/fair.c struct rq *rq = cpu_rq(cpu);
cpu_rq 8062 kernel/sched/fair.c struct rq *rq = cpu_rq(i);
cpu_rq 8647 kernel/sched/fair.c rq = cpu_rq(i);
cpu_rq 8948 kernel/sched/fair.c env.dst_rq = cpu_rq(env.new_dst_cpu);
cpu_rq 9142 kernel/sched/fair.c struct rq *target_rq = cpu_rq(target_cpu);
cpu_rq 9563 kernel/sched/fair.c struct rq *rq = cpu_rq(cpu);
cpu_rq 9669 kernel/sched/fair.c rq = cpu_rq(balance_cpu);
cpu_rq 10327 kernel/sched/fair.c rq = cpu_rq(i);
cpu_rq 10354 kernel/sched/fair.c rq = cpu_rq(cpu);
cpu_rq 10366 kernel/sched/fair.c struct rq *rq = cpu_rq(cpu);
cpu_rq 10413 kernel/sched/fair.c struct rq *rq = cpu_rq(i);
cpu_rq 10516 kernel/sched/fair.c for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)

cpu_rq 99 kernel/sched/membarrier.c if (!(READ_ONCE(cpu_rq(cpu)->membarrier_state) &
cpu_rq 108 kernel/sched/membarrier.c p = rcu_dereference(cpu_rq(cpu)->curr);
cpu_rq 177 kernel/sched/membarrier.c p = rcu_dereference(cpu_rq(cpu)->curr);
cpu_rq 240 kernel/sched/membarrier.c struct rq *rq = cpu_rq(cpu);

cpu_rq 160 kernel/sched/rt.c struct rq *rq = cpu_rq(cpu);
cpu_rq 617 kernel/sched/rt.c return &cpu_rq(cpu)->rt;
cpu_rq 1399 kernel/sched/rt.c rq = cpu_rq(cpu);
cpu_rq 1436 kernel/sched/rt.c p->prio < cpu_rq(target)->rt.highest_prio.curr)
cpu_rq 1723 kernel/sched/rt.c lowest_rq = cpu_rq(cpu);
cpu_rq 2082 kernel/sched/rt.c src_rq = cpu_rq(cpu);
cpu_rq 2619 kernel/sched/rt.c struct rt_rq *rt_rq = &cpu_rq(i)->rt;
cpu_rq 2719 kernel/sched/rt.c for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))

cpu_rq 1053 kernel/sched/sched.h #define task_rq(p) cpu_rq(task_cpu(p))
cpu_rq 1054 kernel/sched/sched.h #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
cpu_rq 1343 kernel/sched/sched.h for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
cpu_rq 2215 kernel/sched/sched.h #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
cpu_rq 2233 kernel/sched/sched.h struct rq *rq = cpu_rq(i);
cpu_rq 2370 kernel/sched/sched.h return cpu_rq(cpu)->cpu_capacity_orig;

cpu_rq 29 kernel/sched/stats.c rq = cpu_rq(cpu);

cpu_rq 347 kernel/sched/topology.c struct root_domain *rd = cpu_rq(cpu)->rd;
cpu_rq 663 kernel/sched/topology.c struct rq *rq = cpu_rq(cpu);
cpu_rq 2059 kernel/sched/topology.c rq = cpu_rq(i);
cpu_rq 2262 kernel/sched/topology.c rd = cpu_rq(cpumask_any(doms_cur[i]))->rd;
cpu_rq 2299 kernel/sched/topology.c cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) {
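
Every reference above reads or writes the per-CPU runqueue returned by cpu_rq(). In kernel/sched/sched.h the macro resolves to a lookup into the static per-CPU runqueues array, and the sibling macros task_rq() and cpu_curr() listed at sched.h lines 1053-1054 are built on top of it. The sketch below is a minimal user-space model of that accessor pattern, not kernel code: NR_CPUS is fixed at 4, struct rq is reduced to two counters, and the per-CPU lookup is modeled as a plain array index. Only the shape of cpu_rq() and the nr_running accumulation (cf. kernel/sched/core.c line 3397 above) mirror the real source.

    #include <stdio.h>

    #define NR_CPUS 4

    /* Reduced stand-in for the kernel's struct rq (kernel/sched/sched.h). */
    struct rq {
            unsigned int nr_running;        /* runnable tasks on this CPU */
            unsigned int nr_switches;       /* context switches performed */
    };

    static struct rq runqueues[NR_CPUS];

    /* Models the kernel's #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))):
     * in this single-threaded model the per-CPU lookup is an array index. */
    #define cpu_rq(cpu)     (&runqueues[(cpu)])

    int main(void)
    {
            unsigned int sum = 0;
            int i;

            cpu_rq(1)->nr_running = 3;
            cpu_rq(2)->nr_running = 1;

            /* Same accumulation shape as kernel/sched/core.c:3397. */
            for (i = 0; i < NR_CPUS; i++)
                    sum += cpu_rq(i)->nr_running;

            printf("total runnable tasks: %u\n", sum);
            return 0;
    }

In the kernel proper the lookup goes through the per-CPU infrastructure rather than a plain array, and call sites synchronize before touching runqueue fields: the kernel/sched/cpuacct.c entries above bracket their accesses with raw_spin_lock_irq(&cpu_rq(cpu)->lock) / raw_spin_unlock_irq(&cpu_rq(cpu)->lock), while the kernel/sched/membarrier.c entries dereference cpu_rq(cpu)->curr under RCU via rcu_dereference().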