Lines Matching refs:rq in kernel/sched/fair.c. Each entry gives the source line number and text, annotated with the enclosing function and, where applicable, whether rq is an argument or a local on that line.
249 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of()
251 return cfs_rq->rq; in rq_of()
317 #define for_each_leaf_cfs_rq(rq, cfs_rq) \ argument
318 list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
374 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of()
376 return container_of(cfs_rq, struct rq, cfs); in rq_of()
392 struct rq *rq = task_rq(p); in cfs_rq_of() local
394 return &rq->cfs; in cfs_rq_of()
411 #define for_each_leaf_cfs_rq(rq, cfs_rq) \ argument
412 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
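The two rq_of() variants listed above differ only in how they reach the enclosing runqueue: with CONFIG_FAIR_GROUP_SCHED the cfs_rq carries an explicit back-pointer (line 251), without it the cfs_rq is embedded in struct rq and container_of() walks back to the parent (line 376). A minimal, self-contained user-space sketch of that container_of pattern, with toy_* names that are not the kernel's:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_cfs_rq { unsigned int nr_running; };
struct toy_rq { int cpu; struct toy_cfs_rq cfs; };      /* cfs embedded, as in the !CONFIG_FAIR_GROUP_SCHED case */

static struct toy_rq *toy_rq_of(struct toy_cfs_rq *cfs_rq)
{
        return container_of(cfs_rq, struct toy_rq, cfs);
}

int main(void)
{
        struct toy_rq rq = { .cpu = 3 };

        printf("%d\n", toy_rq_of(&rq.cfs)->cpu);        /* prints 3 */
        return 0;
}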
736 static void update_curr_fair(struct rq *rq) in update_curr_fair() argument
738 update_curr(cfs_rq_of(&rq->curr->se)); in update_curr_fair()
865 static void account_numa_enqueue(struct rq *rq, struct task_struct *p) in account_numa_enqueue() argument
867 rq->nr_numa_running += (p->numa_preferred_nid != -1); in account_numa_enqueue()
868 rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p)); in account_numa_enqueue()
871 static void account_numa_dequeue(struct rq *rq, struct task_struct *p) in account_numa_dequeue() argument
873 rq->nr_numa_running -= (p->numa_preferred_nid != -1); in account_numa_dequeue()
874 rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p)); in account_numa_dequeue()
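account_numa_enqueue()/account_numa_dequeue() above keep two per-runqueue counters in lockstep by adding and subtracting boolean expressions directly: nr_numa_running counts tasks with any preferred NUMA node, nr_preferred_running counts tasks currently running on that node. A stand-alone sketch of the same counting idiom; the toy_* types and the p->node field are stand-ins (p->node plays the role of task_node(p)):

struct toy_rq { unsigned int nr_numa_running, nr_preferred_running; };
struct toy_task { int numa_preferred_nid; int node; };  /* node stands in for task_node(p) */

/* a C comparison evaluates to 0 or 1, so each counter moves only for matching tasks */
static void toy_account_numa_enqueue(struct toy_rq *rq, struct toy_task *p)
{
        rq->nr_numa_running += (p->numa_preferred_nid != -1);
        rq->nr_preferred_running += (p->numa_preferred_nid == p->node);
}

static void toy_account_numa_dequeue(struct toy_rq *rq, struct toy_task *p)
{
        rq->nr_numa_running -= (p->numa_preferred_nid != -1);
        rq->nr_preferred_running -= (p->numa_preferred_nid == p->node);
}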
1146 struct rq *rq = cpu_rq(cpu); in update_numa_stats() local
1148 ns->nr_running += rq->nr_running; in update_numa_stats()
1257 struct rq *src_rq = cpu_rq(env->src_cpu); in task_numa_compare()
1258 struct rq *dst_rq = cpu_rq(env->dst_cpu); in task_numa_compare()
2285 void task_tick_numa(struct rq *rq, struct task_struct *curr) in task_tick_numa() argument
2317 static void task_tick_numa(struct rq *rq, struct task_struct *curr) in task_tick_numa() argument
2321 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p) in account_numa_enqueue() argument
2325 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p) in account_numa_dequeue() argument
2338 struct rq *rq = rq_of(cfs_rq); in account_entity_enqueue() local
2340 account_numa_enqueue(rq, task_of(se)); in account_entity_enqueue()
2341 list_add(&se->group_node, &rq->cfs_tasks); in account_entity_enqueue()
2843 void idle_enter_fair(struct rq *this_rq) in idle_enter_fair()
2852 void idle_exit_fair(struct rq *this_rq) in idle_exit_fair()
2866 static int idle_balance(struct rq *this_rq);
2882 static inline int idle_balance(struct rq *rq) in idle_balance() argument
3527 struct rq *rq = data; in tg_unthrottle_up() local
3528 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_unthrottle_up()
3534 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) - in tg_unthrottle_up()
3544 struct rq *rq = data; in tg_throttle_down() local
3545 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_throttle_down()
3549 cfs_rq->throttled_clock_task = rq_clock_task(rq); in tg_throttle_down()
3557 struct rq *rq = rq_of(cfs_rq); in throttle_cfs_rq() local
3567 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); in throttle_cfs_rq()
3586 sub_nr_running(rq, task_delta); in throttle_cfs_rq()
3589 cfs_rq->throttled_clock = rq_clock(rq); in throttle_cfs_rq()
3611 struct rq *rq = rq_of(cfs_rq); in unthrottle_cfs_rq() local
3617 se = cfs_rq->tg->se[cpu_of(rq)]; in unthrottle_cfs_rq()
3621 update_rq_clock(rq); in unthrottle_cfs_rq()
3624 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock; in unthrottle_cfs_rq()
3629 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); in unthrottle_cfs_rq()
3649 add_nr_running(rq, task_delta); in unthrottle_cfs_rq()
3652 if (rq->curr == rq->idle && rq->cfs.nr_running) in unthrottle_cfs_rq()
3653 resched_curr(rq); in unthrottle_cfs_rq()
3666 struct rq *rq = rq_of(cfs_rq); in distribute_cfs_runtime() local
3668 raw_spin_lock(&rq->lock); in distribute_cfs_runtime()
3685 raw_spin_unlock(&rq->lock); in distribute_cfs_runtime()
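The throttle/unthrottle/distribute lines above also show how throttled time is accounted: rq_clock(rq) is stored into cfs_rq->throttled_clock when the group is throttled (line 3589) and the elapsed delta is added to cfs_b->throttled_time on unthrottle (line 3624). A toy version of that bracketed-interval bookkeeping, with simplified types that are not the kernel's:

struct toy_cfs_rq { unsigned long long throttled_clock; int throttled; };
struct toy_cfs_bandwidth { unsigned long long throttled_time; };

static void toy_throttle(struct toy_cfs_rq *cfs_rq, unsigned long long now)
{
        cfs_rq->throttled = 1;
        cfs_rq->throttled_clock = now;          /* rq_clock(rq) at throttle time */
}

static void toy_unthrottle(struct toy_cfs_rq *cfs_rq, struct toy_cfs_bandwidth *cfs_b,
                           unsigned long long now)
{
        cfs_rq->throttled = 0;
        cfs_b->throttled_time += now - cfs_rq->throttled_clock; /* whole throttled interval */
}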
3997 static void __maybe_unused update_runtime_enabled(struct rq *rq) in update_runtime_enabled() argument
4001 for_each_leaf_cfs_rq(rq, cfs_rq) { in update_runtime_enabled()
4010 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq) in unthrottle_offline_cfs_rqs() argument
4014 for_each_leaf_cfs_rq(rq, cfs_rq) { in unthrottle_offline_cfs_rqs()
4072 static inline void update_runtime_enabled(struct rq *rq) {} in update_runtime_enabled() argument
4073 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {} in unthrottle_offline_cfs_rqs() argument
4082 static void hrtick_start_fair(struct rq *rq, struct task_struct *p) in hrtick_start_fair() argument
4087 WARN_ON(task_rq(p) != rq); in hrtick_start_fair()
4095 if (rq->curr == p) in hrtick_start_fair()
4096 resched_curr(rq); in hrtick_start_fair()
4099 hrtick_start(rq, delta); in hrtick_start_fair()
4108 static void hrtick_update(struct rq *rq) in hrtick_update() argument
4110 struct task_struct *curr = rq->curr; in hrtick_update()
4112 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class) in hrtick_update()
4116 hrtick_start_fair(rq, curr); in hrtick_update()
4120 hrtick_start_fair(struct rq *rq, struct task_struct *p) in hrtick_start_fair() argument
4124 static inline void hrtick_update(struct rq *rq) in hrtick_update() argument
4135 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) in enqueue_task_fair() argument
4171 add_nr_running(rq, 1); in enqueue_task_fair()
4173 hrtick_update(rq); in enqueue_task_fair()
4183 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) in dequeue_task_fair() argument
4231 sub_nr_running(rq, 1); in dequeue_task_fair()
4233 hrtick_update(rq); in dequeue_task_fair()
4314 static void __update_cpu_load(struct rq *this_rq, unsigned long this_load, in __update_cpu_load()
4369 static void update_idle_cpu_load(struct rq *this_rq) in update_idle_cpu_load()
4392 struct rq *this_rq = this_rq(); in update_cpu_load_nohz()
4416 void update_cpu_load_active(struct rq *this_rq) in update_cpu_load_active()
4435 struct rq *rq = cpu_rq(cpu); in source_load() local
4441 return min(rq->cpu_load[type-1], total); in source_load()
4450 struct rq *rq = cpu_rq(cpu); in target_load() local
4456 return max(rq->cpu_load[type-1], total); in target_load()
4471 struct rq *rq = cpu_rq(cpu); in cpu_avg_load_per_task() local
4472 unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running); in cpu_avg_load_per_task()
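source_load() and target_load() above are intentionally asymmetric: the CPU a task might be pulled from is viewed through min(cpu_load[type-1], current load) while the CPU it might be pushed to is viewed through max(), which biases the balancer against reacting to short load spikes. A self-contained sketch of that asymmetry, using a toy_cpu type in place of struct rq:

struct toy_cpu { unsigned long cpu_load[5]; unsigned long runnable_load; };

static unsigned long toy_min(unsigned long a, unsigned long b) { return a < b ? a : b; }
static unsigned long toy_max(unsigned long a, unsigned long b) { return a > b ? a : b; }

/* low estimate for a CPU load might be pulled from */
static unsigned long toy_source_load(const struct toy_cpu *c, int type)
{
        if (type == 0)
                return c->runnable_load;
        return toy_min(c->cpu_load[type - 1], c->runnable_load);
}

/* high estimate for a CPU load might be pushed to */
static unsigned long toy_target_load(const struct toy_cpu *c, int type)
{
        if (type == 0)
                return c->runnable_load;
        return toy_max(c->cpu_load[type - 1], c->runnable_load);
}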
4801 struct rq *rq = cpu_rq(i); in find_idlest_cpu() local
4802 struct cpuidle_state *idle = idle_get_state(rq); in find_idlest_cpu()
4810 latest_idle_timestamp = rq->idle_stamp; in find_idlest_cpu()
4813 rq->idle_stamp > latest_idle_timestamp) { in find_idlest_cpu()
4819 latest_idle_timestamp = rq->idle_stamp; in find_idlest_cpu()
5114 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) in check_preempt_wakeup() argument
5116 struct task_struct *curr = rq->curr; in check_preempt_wakeup()
5180 resched_curr(rq); in check_preempt_wakeup()
5190 if (unlikely(!se->on_rq || curr == rq->idle)) in check_preempt_wakeup()
5198 pick_next_task_fair(struct rq *rq, struct task_struct *prev) in pick_next_task_fair() argument
5200 struct cfs_rq *cfs_rq = &rq->cfs; in pick_next_task_fair()
5278 if (hrtick_enabled(rq)) in pick_next_task_fair()
5279 hrtick_start_fair(rq, p); in pick_next_task_fair()
5283 cfs_rq = &rq->cfs; in pick_next_task_fair()
5289 put_prev_task(rq, prev); in pick_next_task_fair()
5299 if (hrtick_enabled(rq)) in pick_next_task_fair()
5300 hrtick_start_fair(rq, p); in pick_next_task_fair()
5311 lockdep_unpin_lock(&rq->lock); in pick_next_task_fair()
5312 new_tasks = idle_balance(rq); in pick_next_task_fair()
5313 lockdep_pin_lock(&rq->lock); in pick_next_task_fair()
5331 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) in put_prev_task_fair() argument
5347 static void yield_task_fair(struct rq *rq) in yield_task_fair() argument
5349 struct task_struct *curr = rq->curr; in yield_task_fair()
5356 if (unlikely(rq->nr_running == 1)) in yield_task_fair()
5362 update_rq_clock(rq); in yield_task_fair()
5372 rq_clock_skip_update(rq, true); in yield_task_fair()
5378 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt) in yield_to_task_fair() argument
5389 yield_task_fair(rq); in yield_to_task_fair()
5525 struct rq *src_rq;
5529 struct rq *dst_rq;
5853 static void attach_task(struct rq *rq, struct task_struct *p) in attach_task() argument
5855 lockdep_assert_held(&rq->lock); in attach_task()
5857 BUG_ON(task_rq(p) != rq); in attach_task()
5859 activate_task(rq, p, 0); in attach_task()
5860 check_preempt_curr(rq, p, 0); in attach_task()
5867 static void attach_one_task(struct rq *rq, struct task_struct *p) in attach_one_task() argument
5869 raw_spin_lock(&rq->lock); in attach_one_task()
5870 attach_task(rq, p); in attach_one_task()
5871 raw_spin_unlock(&rq->lock); in attach_one_task()
5898 struct rq *rq = cpu_rq(cpu); in update_blocked_averages() local
5902 raw_spin_lock_irqsave(&rq->lock, flags); in update_blocked_averages()
5903 update_rq_clock(rq); in update_blocked_averages()
5909 for_each_leaf_cfs_rq(rq, cfs_rq) { in update_blocked_averages()
5917 raw_spin_unlock_irqrestore(&rq->lock, flags); in update_blocked_averages()
5927 struct rq *rq = rq_of(cfs_rq); in update_cfs_rq_h_load() local
5928 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; in update_cfs_rq_h_load()
5969 struct rq *rq = cpu_rq(cpu); in update_blocked_averages() local
5970 struct cfs_rq *cfs_rq = &rq->cfs; in update_blocked_averages()
5973 raw_spin_lock_irqsave(&rq->lock, flags); in update_blocked_averages()
5974 update_rq_clock(rq); in update_blocked_averages()
5976 raw_spin_unlock_irqrestore(&rq->lock, flags); in update_blocked_averages()
6080 struct rq *rq = cpu_rq(cpu); in scale_rt_capacity() local
6088 age_stamp = READ_ONCE(rq->age_stamp); in scale_rt_capacity()
6089 avg = READ_ONCE(rq->rt_avg); in scale_rt_capacity()
6090 delta = __rq_clock_broken(rq) - age_stamp; in scale_rt_capacity()
6148 struct rq *rq = cpu_rq(cpu); in update_group_capacity() local
6161 if (unlikely(!rq->sd)) { in update_group_capacity()
6166 sgc = rq->sd->groups->sgc; in update_group_capacity()
6191 check_cpu_capacity(struct rq *rq, struct sched_domain *sd) in check_cpu_capacity() argument
6193 return ((rq->cpu_capacity * sd->imbalance_pct) < in check_cpu_capacity()
6194 (rq->cpu_capacity_orig * 100)); in check_cpu_capacity()
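check_cpu_capacity() above is a pure integer comparison: the capacity left for CFS (rq->cpu_capacity, after scale_rt_capacity() removes RT/IRQ time) is scaled by sd->imbalance_pct and compared against 100 times the original capacity, so with a typical imbalance_pct of 125 it reports pressure once less than roughly 80% of the CPU remains. Restated as stand-alone C with plain parameters instead of the rq/sd fields:

/* nonzero once the capacity left for CFS drops below capacity_orig * 100 / imbalance_pct */
static int toy_check_cpu_capacity(unsigned long capacity, unsigned long capacity_orig,
                                  unsigned int imbalance_pct)
{
        return (capacity * imbalance_pct) < (capacity_orig * 100);
}
/* e.g. imbalance_pct == 125: pressure is flagged when capacity < 80% of capacity_orig */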
6310 struct rq *rq = cpu_rq(i); in update_sg_lb_stats() local
6320 sgs->sum_nr_running += rq->cfs.h_nr_running; in update_sg_lb_stats()
6322 if (rq->nr_running > 1) in update_sg_lb_stats()
6326 sgs->nr_numa_running += rq->nr_numa_running; in update_sg_lb_stats()
6327 sgs->nr_preferred_running += rq->nr_preferred_running; in update_sg_lb_stats()
6406 static inline enum fbq_type fbq_classify_rq(struct rq *rq) in fbq_classify_rq() argument
6408 if (rq->nr_running > rq->nr_numa_running) in fbq_classify_rq()
6410 if (rq->nr_running > rq->nr_preferred_running) in fbq_classify_rq()
6420 static inline enum fbq_type fbq_classify_rq(struct rq *rq) in fbq_classify_rq() argument
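fbq_classify_rq() above (the CONFIG_NUMA_BALANCING variant at line 6406; line 6420 is the stub) folds the two NUMA counters into a coarse class for find_busiest_queue(): runqueues that still hold tasks with no NUMA preference, runqueues whose tasks all have a preference but are not all on it, and runqueues where every task already runs where it wants to. A hedged stand-alone sketch, assuming the usual regular/remote/all ordering of the fbq_type values:

enum toy_fbq_type { TOY_REGULAR, TOY_REMOTE, TOY_ALL };

struct toy_rq { unsigned int nr_running, nr_numa_running, nr_preferred_running; };

static enum toy_fbq_type toy_fbq_classify_rq(const struct toy_rq *rq)
{
        if (rq->nr_running > rq->nr_numa_running)
                return TOY_REGULAR;     /* some tasks have no NUMA preference at all */
        if (rq->nr_running > rq->nr_preferred_running)
                return TOY_REMOTE;      /* all have a preference, not all are on it */
        return TOY_ALL;                 /* everything already on its preferred node */
}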
6799 static struct rq *find_busiest_queue(struct lb_env *env, in find_busiest_queue()
6802 struct rq *busiest = NULL, *rq; in find_busiest_queue() local
6810 rq = cpu_rq(i); in find_busiest_queue()
6811 rt = fbq_classify_rq(rq); in find_busiest_queue()
6844 if (rq->nr_running == 1 && wl > env->imbalance && in find_busiest_queue()
6845 !check_cpu_capacity(rq, env->sd)) in find_busiest_queue()
6862 busiest = rq; in find_busiest_queue()
6949 static int load_balance(int this_cpu, struct rq *this_rq, in load_balance()
6956 struct rq *busiest; in load_balance()
7243 static int idle_balance(struct rq *this_rq) in idle_balance()
7349 struct rq *busiest_rq = data; in active_load_balance_cpu_stop()
7352 struct rq *target_rq = cpu_rq(target_cpu); in active_load_balance_cpu_stop()
7413 static inline int on_null_domain(struct rq *rq) in on_null_domain() argument
7415 return unlikely(!rcu_dereference_sched(rq->sd)); in on_null_domain()
7573 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle) in rebalance_domains() argument
7576 int cpu = rq->cpu; in rebalance_domains()
7624 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) { in rebalance_domains()
7648 rq->max_idle_balance_cost = in rebalance_domains()
7659 rq->next_balance = next_balance; in rebalance_domains()
7670 if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance)) in rebalance_domains()
7671 nohz.next_balance = rq->next_balance; in rebalance_domains()
7681 static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) in nohz_idle_balance()
7684 struct rq *rq; in nohz_idle_balance() local
7706 rq = cpu_rq(balance_cpu); in nohz_idle_balance()
7712 if (time_after_eq(jiffies, rq->next_balance)) { in nohz_idle_balance()
7713 raw_spin_lock_irq(&rq->lock); in nohz_idle_balance()
7714 update_rq_clock(rq); in nohz_idle_balance()
7715 update_idle_cpu_load(rq); in nohz_idle_balance()
7716 raw_spin_unlock_irq(&rq->lock); in nohz_idle_balance()
7717 rebalance_domains(rq, CPU_IDLE); in nohz_idle_balance()
7720 if (time_after(next_balance, rq->next_balance)) { in nohz_idle_balance()
7721 next_balance = rq->next_balance; in nohz_idle_balance()
7748 static inline bool nohz_kick_needed(struct rq *rq) in nohz_kick_needed() argument
7753 int nr_busy, cpu = rq->cpu; in nohz_kick_needed()
7756 if (unlikely(rq->idle_balance)) in nohz_kick_needed()
7776 if (rq->nr_running >= 2) in nohz_kick_needed()
7792 sd = rcu_dereference(rq->sd); in nohz_kick_needed()
7794 if ((rq->cfs.h_nr_running >= 1) && in nohz_kick_needed()
7795 check_cpu_capacity(rq, sd)) { in nohz_kick_needed()
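nohz_kick_needed() above decides whether to IPI an idle CPU to balance on this CPU's behalf: never from a CPU that is itself idle-balancing, yes when at least two tasks are runnable, and also when a lone CFS task runs on a CPU whose capacity check_cpu_capacity() reports as eroded by RT/IRQ pressure. A compressed stand-alone sketch of that decision; it deliberately omits the sched-domain and nohz-mask checks the real function also performs, and uses toy fields:

struct toy_rq {
        int idle_balance;               /* this CPU is itself in the idle-balance path */
        unsigned int nr_running;        /* all runnable tasks */
        unsigned int cfs_h_nr_running;  /* runnable CFS tasks, including group hierarchy */
        unsigned long capacity, capacity_orig;
};

static int toy_capacity_pressed(const struct toy_rq *rq, unsigned int imbalance_pct)
{
        return (rq->capacity * imbalance_pct) < (rq->capacity_orig * 100);
}

static int toy_nohz_kick_needed(const struct toy_rq *rq, unsigned int imbalance_pct)
{
        if (rq->idle_balance)
                return 0;
        if (rq->nr_running >= 2)
                return 1;
        if (rq->cfs_h_nr_running >= 1 && toy_capacity_pressed(rq, imbalance_pct))
                return 1;
        return 0;
}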
7813 static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { } in nohz_idle_balance()
7822 struct rq *this_rq = this_rq(); in run_rebalance_domains()
7841 void trigger_load_balance(struct rq *rq) in trigger_load_balance() argument
7844 if (unlikely(on_null_domain(rq))) in trigger_load_balance()
7847 if (time_after_eq(jiffies, rq->next_balance)) in trigger_load_balance()
7850 if (nohz_kick_needed(rq)) in trigger_load_balance()
7855 static void rq_online_fair(struct rq *rq) in rq_online_fair() argument
7859 update_runtime_enabled(rq); in rq_online_fair()
7862 static void rq_offline_fair(struct rq *rq) in rq_offline_fair() argument
7867 unthrottle_offline_cfs_rqs(rq); in rq_offline_fair()
7875 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) in task_tick_fair() argument
7886 task_tick_numa(rq, curr); in task_tick_fair()
7899 struct rq *rq = this_rq(); in task_fork_fair() local
7902 raw_spin_lock_irqsave(&rq->lock, flags); in task_fork_fair()
7904 update_rq_clock(rq); in task_fork_fair()
7931 resched_curr(rq); in task_fork_fair()
7936 raw_spin_unlock_irqrestore(&rq->lock, flags); in task_fork_fair()
7944 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) in prio_changed_fair() argument
7954 if (rq->curr == p) { in prio_changed_fair()
7956 resched_curr(rq); in prio_changed_fair()
7958 check_preempt_curr(rq, p, 0); in prio_changed_fair()
8026 static void switched_from_fair(struct rq *rq, struct task_struct *p) in switched_from_fair() argument
8031 static void switched_to_fair(struct rq *rq, struct task_struct *p) in switched_to_fair() argument
8041 if (rq->curr == p) in switched_to_fair()
8042 resched_curr(rq); in switched_to_fair()
8044 check_preempt_curr(rq, p, 0); in switched_to_fair()
8053 static void set_curr_task_fair(struct rq *rq) in set_curr_task_fair() argument
8055 struct sched_entity *se = &rq->curr->se; in set_curr_task_fair()
8155 struct rq *rq = cpu_rq(cpu); in unregister_fair_sched_group() local
8165 raw_spin_lock_irqsave(&rq->lock, flags); in unregister_fair_sched_group()
8167 raw_spin_unlock_irqrestore(&rq->lock, flags); in unregister_fair_sched_group()
8174 struct rq *rq = cpu_rq(cpu); in init_tg_cfs_entry() local
8177 cfs_rq->rq = rq; in init_tg_cfs_entry()
8188 se->cfs_rq = &rq->cfs; in init_tg_cfs_entry()
8222 struct rq *rq = cpu_rq(i); in sched_group_set_shares() local
8227 raw_spin_lock_irqsave(&rq->lock, flags); in sched_group_set_shares()
8230 update_rq_clock(rq); in sched_group_set_shares()
8233 raw_spin_unlock_irqrestore(&rq->lock, flags); in sched_group_set_shares()
8254 static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task) in get_rr_interval_fair() argument
8263 if (rq->cfs.load.weight) in get_rr_interval_fair()