Lines Matching defs:rq
Definitions of the identifier rq in the kernel's CFS scheduler, kernel/sched/fair.c. Each entry gives the source line number, the matching line, and the enclosing definition; a trailing 'argument' or 'local' tag marks macro-argument and local-variable definitions where the indexer distinguishes them.
322 #define for_each_leaf_cfs_rq(rq, cfs_rq) \ argument
397 struct rq *rq = task_rq(p); in cfs_rq_of() local
416 #define for_each_leaf_cfs_rq(rq, cfs_rq) \ argument
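
The two definitions at 322 and 416 are the CONFIG_FAIR_GROUP_SCHED and non-group variants of the same iterator. As a hedged sketch of kernels from this era (exact bodies vary by version), with group scheduling the macro walks rq->leaf_cfs_rq_list, and without it the only leaf is the root cfs_rq embedded in the rq:

    /* Sketch, assuming a kernel of this vintage; check your tree. */
    #ifdef CONFIG_FAIR_GROUP_SCHED
    #define for_each_leaf_cfs_rq(rq, cfs_rq) \
        list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
    #else
    #define for_each_leaf_cfs_rq(rq, cfs_rq) \
        for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
    #endif
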
730 static void update_curr_fair(struct rq *rq) in update_curr_fair()
859 static void account_numa_enqueue(struct rq *rq, struct task_struct *p) in account_numa_enqueue()
865 static void account_numa_dequeue(struct rq *rq, struct task_struct *p) in account_numa_dequeue()
1140 struct rq *rq = cpu_rq(cpu); in update_numa_stats() local
2244 void task_tick_numa(struct rq *rq, struct task_struct *curr) in task_tick_numa()
2276 static void task_tick_numa(struct rq *rq, struct task_struct *curr) in task_tick_numa()
2280 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p) in account_numa_enqueue()
2284 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p) in account_numa_dequeue()
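
The pair at 859/865 is the CONFIG_NUMA_BALANCING implementation; the 2276-2284 entries are the empty inline stubs compiled in when that option is off, so callers need no #ifdef of their own. A hedged sketch of what the enqueue-side accounting looks like in kernels of this era:

    /* Sketch of the NUMA-balancing counters; field names per this era. */
    static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
    {
        rq->nr_numa_running += (p->numa_preferred_nid != -1);
        rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
    }
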
2297 struct rq *rq = rq_of(cfs_rq); in account_entity_enqueue() local
2703 static inline void update_rq_runnable_avg(struct rq *rq, int runnable) in update_rq_runnable_avg()
2715 static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {} in update_rq_runnable_avg()
2938 static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {} in update_rq_runnable_avg()
2948 static inline int idle_balance(struct rq *rq) in idle_balance()
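
update_rq_runnable_avg() feeds the per-rq runnable average maintained by the scheduler's load tracking (PELT): time is chopped into roughly 1ms periods and older periods decay geometrically, with the decay factor chosen so that y^32 = 1/2. The arithmetic as a self-contained toy in plain C (the constant 1024 mirrors the kernel's per-period contribution; this is an illustration, not kernel code):

    /* build: cc pelt_toy.c -lm */
    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double y = pow(0.5, 1.0 / 32.0);  /* decay chosen so y^32 == 1/2 */
        double avg = 0.0;

        /* A task 100% runnable for 64 consecutive ~1ms periods. */
        for (int i = 0; i < 64; i++)
            avg = avg * y + 1024;         /* 1024 == one fully busy period */

        printf("runnable sum after 64 busy periods: %.0f\n", avg);
        printf("geometric-series ceiling 1024/(1-y): %.0f\n", 1024 / (1 - y));
        return 0;
    }
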
3603 struct rq *rq = data; in tg_unthrottle_up() local
3620 struct rq *rq = data; in tg_throttle_down() local
3633 struct rq *rq = rq_of(cfs_rq); in throttle_cfs_rq() local
3678 struct rq *rq = rq_of(cfs_rq); in unthrottle_cfs_rq() local
3733 struct rq *rq = rq_of(cfs_rq); in distribute_cfs_runtime() local
4084 static void __maybe_unused update_runtime_enabled(struct rq *rq) in update_runtime_enabled()
4097 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq) in unthrottle_offline_cfs_rqs()
4159 static inline void update_runtime_enabled(struct rq *rq) {} in update_runtime_enabled()
4160 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {} in unthrottle_offline_cfs_rqs()
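
Entries 3603 through 4160 are the CONFIG_CFS_BANDWIDTH machinery: a group's cfs_rq is dequeued (throttled) once its runtime quota for the current period is spent, distribute_cfs_runtime() hands the period timer's refill out to throttled queues, and unthrottle_cfs_rq() re-enqueues them; 4159/4160 are again the no-op stubs. A toy, self-contained model of that accounting (types and names here are illustrative, not the kernel's):

    #include <stdio.h>

    struct toy_cfs_rq {
        long runtime_remaining;  /* microseconds left in this period */
        int throttled;
    };

    static void account_runtime(struct toy_cfs_rq *cfs_rq, long delta_us)
    {
        cfs_rq->runtime_remaining -= delta_us;
        if (cfs_rq->runtime_remaining <= 0)
            cfs_rq->throttled = 1;       /* kernel: dequeue the hierarchy */
    }

    static void refill(struct toy_cfs_rq *cfs_rq, long quota_us)
    {
        cfs_rq->runtime_remaining = quota_us;
        cfs_rq->throttled = 0;           /* kernel: unthrottle_cfs_rq() */
    }

    int main(void)
    {
        struct toy_cfs_rq q = { .runtime_remaining = 20000 }; /* 20ms quota */

        for (int tick = 0; tick < 30 && !q.throttled; tick++)
            account_runtime(&q, 1000);   /* 1ms of execution per tick */
        printf("throttled=%d remaining=%ld\n", q.throttled, q.runtime_remaining);

        refill(&q, 20000);               /* period timer fires */
        printf("after refill: throttled=%d\n", q.throttled);
        return 0;
    }
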
4169 static void hrtick_start_fair(struct rq *rq, struct task_struct *p) in hrtick_start_fair()
4195 static void hrtick_update(struct rq *rq) in hrtick_update()
4207 hrtick_start_fair(struct rq *rq, struct task_struct *p) in hrtick_start_fair()
4211 static inline void hrtick_update(struct rq *rq) in hrtick_update()
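
The hrtick entries at 4169-4211 arm a high-resolution timer so the running task is preempted when its ideal slice ends rather than at the next periodic tick; 4207/4211 are the stubs for builds without CONFIG_SCHED_HRTICK. Roughly, as a hedged fragment of the slice arithmetic (se here is p's sched_entity; not the exact body):

    u64 slice = sched_slice(cfs_rq_of(se), se);  /* ideal slice length */
    u64 ran   = se->sum_exec_runtime - se->prev_sum_exec_runtime;
    s64 delta = (s64)(slice - ran);

    if (delta < 0)
        resched_curr(rq);         /* already overran: reschedule now */
    else
        hrtick_start(rq, delta);  /* fire a hrtimer when the slice ends */
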
4222 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) in enqueue_task_fair()
4271 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) in dequeue_task_fair()
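
enqueue_task_fair()/dequeue_task_fair() walk the task's sched_entity chain upward: with group scheduling, putting a task on its group's cfs_rq may in turn make the group's own entity runnable on the parent's cfs_rq, recursively up to the root (the kernel stops early when a parent entity is already queued). A minimal stand-alone illustration of that walk, using toy types rather than the kernel's:

    #include <stdio.h>

    struct toy_cfs_rq { int nr_running; };
    struct toy_se {
        struct toy_se *parent;      /* group entity one level up, or NULL */
        struct toy_cfs_rq *cfs_rq;  /* the queue this entity runs on */
    };

    static void toy_enqueue(struct toy_se *se)
    {
        for (; se; se = se->parent)
            se->cfs_rq->nr_running++;  /* kernel also updates weights/PELT */
    }

    int main(void)
    {
        struct toy_cfs_rq root = {0}, group = {0};
        struct toy_se group_se = { .parent = NULL, .cfs_rq = &root };
        struct toy_se task_se  = { .parent = &group_se, .cfs_rq = &group };

        toy_enqueue(&task_se);
        printf("group nr_running=%d, root nr_running=%d\n",
               group.nr_running, root.nr_running);
        return 0;
    }
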
4341 struct rq *rq = cpu_rq(cpu); in source_load() local
4356 struct rq *rq = cpu_rq(cpu); in target_load() local
4377 struct rq *rq = cpu_rq(cpu); in cpu_avg_load_per_task() local
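
source_load()/target_load() at 4341/4356 deliberately skew the estimate: a CPU being considered as a pull source reports the minimum of its decayed and instantaneous load, while a push target reports the maximum, which damps task ping-ponging on transient spikes. The min/max idea in a few lines (illustrative values):

    #include <stdio.h>

    static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }
    static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }

    int main(void)
    {
        unsigned long decayed = 800, instant = 1200;  /* a transient spike */

        printf("as pull source: %lu\n", min_ul(decayed, instant)); /* lighter */
        printf("as push target: %lu\n", max_ul(decayed, instant)); /* heavier */
        return 0;
    }
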
4711 struct rq *rq = cpu_rq(i); in find_idlest_cpu() local
5013 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) in check_preempt_wakeup()
5097 pick_next_task_fair(struct rq *rq, struct task_struct *prev) in pick_next_task_fair()
5222 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) in put_prev_task_fair()
5238 static void yield_task_fair(struct rq *rq) in yield_task_fair()
5269 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt) in yield_to_task_fair()
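
The cluster from check_preempt_wakeup() at 5013 through yield_to_task_fair() at 5269 is the core pick/preempt path. pick_next_task_fair() descends the cfs_rq hierarchy choosing, at each level, the runnable entity with the smallest vruntime (the leftmost rbtree node). The selection rule as a tiny stand-alone version over a flat array (the kernel uses a red-black tree with a cached leftmost pointer):

    #include <stdio.h>

    struct toy_se { const char *name; unsigned long long vruntime; };

    static struct toy_se *pick_min_vruntime(struct toy_se *se, int n)
    {
        struct toy_se *best = &se[0];
        for (int i = 1; i < n; i++)
            if (se[i].vruntime < best->vruntime)
                best = &se[i];
        return best;
    }

    int main(void)
    {
        struct toy_se rq[] = { {"A", 3000}, {"B", 1500}, {"C", 2200} };
        printf("next: %s\n", pick_min_vruntime(rq, 3)->name);  /* B */
        return 0;
    }
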
5770 static void attach_task(struct rq *rq, struct task_struct *p) in attach_task()
5784 static void attach_one_task(struct rq *rq, struct task_struct *p) in attach_one_task()
5841 struct rq *rq = rq_of(cfs_rq); in __update_blocked_averages_cpu() local
5848 struct rq *rq = cpu_rq(cpu); in update_blocked_averages() local
5877 struct rq *rq = rq_of(cfs_rq); in update_cfs_rq_h_load() local
6035 struct rq *rq = cpu_rq(cpu); in scale_rt_capacity() local
6110 struct rq *rq = cpu_rq(cpu); in update_group_capacity() local
6153 check_cpu_capacity(struct rq *rq, struct sched_domain *sd) in check_cpu_capacity()
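
check_cpu_capacity() at 6153 flags a CPU whose capacity available to CFS (after scale_rt_capacity() at 6035 subtracts RT and IRQ time) has dropped well below its original capacity. The comparison is integer-scaled by sd->imbalance_pct; worked through with the common default of 125 (a hedged example, the value varies per domain):

    #include <stdio.h>

    int main(void)
    {
        unsigned long cpu_capacity_orig = 1024;  /* full capacity */
        unsigned long imbalance_pct     = 125;   /* common sd default */

        /* Fires when capacity * pct < orig * 100, i.e. below orig*100/pct. */
        for (unsigned long cap = 900; cap >= 700; cap -= 100)
            printf("capacity=%lu reduced=%d\n", cap,
                   cap * imbalance_pct < cpu_capacity_orig * 100);
        /* 1024*100/125 = 819, so 900 -> 0, 800 -> 1, 700 -> 1. */
        return 0;
    }
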
6272 struct rq *rq = cpu_rq(i); in update_sg_lb_stats() local
6368 static inline enum fbq_type fbq_classify_rq(struct rq *rq) in fbq_classify_rq()
6382 static inline enum fbq_type fbq_classify_rq(struct rq *rq) in fbq_classify_rq()
6764 struct rq *busiest = NULL, *rq; in find_busiest_queue() local
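
find_busiest_queue() at 6764 ranks candidate rqs by weighted load relative to capacity, not raw load, and does so without division by cross-multiplying, since a/b > c/d is equivalent to a*d > c*b for positive values. A stand-alone demonstration of that comparison (toy numbers):

    #include <stdio.h>

    int main(void)
    {
        unsigned long wl[]  = { 2048, 1536 };  /* weighted load per CPU */
        unsigned long cap[] = { 1024,  512 };  /* capacity per CPU */
        int n = 2, busiest = 0;

        for (int i = 1; i < n; i++)
            if (wl[i] * cap[busiest] > wl[busiest] * cap[i])
                busiest = i;

        printf("busiest: cpu%d\n", busiest);  /* cpu1: 3.0x vs 2.0x capacity */
        return 0;
    }
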
7378 static inline int on_null_domain(struct rq *rq) in on_null_domain()
7538 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle) in rebalance_domains()
7635 struct rq *rq; in nohz_idle_balance() local
7687 static inline bool nohz_kick_needed(struct rq *rq) in nohz_kick_needed()
7780 void trigger_load_balance(struct rq *rq) in trigger_load_balance()
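
trigger_load_balance() at 7780 is the tick-side entry point: when the rq is due (jiffies has reached rq->next_balance) it raises SCHED_SOFTIRQ, whose handler ends up in rebalance_domains() at 7538; on NO_HZ kernels it may also kick an idle CPU to balance on behalf of sleeping ones (nohz_kick_needed() at 7687). A hedged sketch of its shape in kernels of this era:

    /* Sketch only; the real function is at 7780 in this tree. */
    void trigger_load_balance(struct rq *rq)
    {
        if (unlikely(on_null_domain(rq)))  /* isolated CPU: nothing to do */
            return;
        if (time_after_eq(jiffies, rq->next_balance))
            raise_softirq(SCHED_SOFTIRQ);
    #ifdef CONFIG_NO_HZ_COMMON
        if (nohz_kick_needed(rq))
            nohz_balancer_kick();
    #endif
    }
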
7794 static void rq_online_fair(struct rq *rq) in rq_online_fair()
7801 static void rq_offline_fair(struct rq *rq) in rq_offline_fair()
7814 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) in task_tick_fair()
7840 struct rq *rq = this_rq(); in task_fork_fair() local
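
task_fork_fair() at 7840 places the child's initial vruntime relative to the parent so that forking cannot starve existing tasks; with the START_DEBIT feature the child is charged one slice up front. The placement idea as stand-alone arithmetic (toy values; the real debit comes from sched_vslice()):

    #include <stdio.h>

    int main(void)
    {
        unsigned long long parent_vruntime = 100000;
        unsigned long long vslice = 3000;  /* stand-in for sched_vslice() */

        /* START_DEBIT: the child starts one slice "in the future". */
        unsigned long long child_vruntime = parent_vruntime + vslice;

        printf("child vruntime: %llu (parent %llu + debit %llu)\n",
               child_vruntime, parent_vruntime, vslice);
        return 0;
    }
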
7885 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) in prio_changed_fair()
7902 static void switched_from_fair(struct rq *rq, struct task_struct *p) in switched_from_fair()
7941 static void switched_to_fair(struct rq *rq, struct task_struct *p) in switched_to_fair()
7970 static void set_curr_task_fair(struct rq *rq) in set_curr_task_fair()
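
Most of the functions in this listing are never called directly by the core scheduler; they are wired into the fair_sched_class method table and dispatched through it. A hedged, abridged sketch of that registration in kernels of this era (SMP-only hooks such as rq_online_fair/rq_offline_fair omitted):

    const struct sched_class fair_sched_class = {
        .next               = &idle_sched_class,
        .enqueue_task       = enqueue_task_fair,
        .dequeue_task       = dequeue_task_fair,
        .yield_task         = yield_task_fair,
        .yield_to_task      = yield_to_task_fair,
        .check_preempt_curr = check_preempt_wakeup,
        .pick_next_task     = pick_next_task_fair,
        .put_prev_task      = put_prev_task_fair,
        .set_curr_task      = set_curr_task_fair,
        .task_tick          = task_tick_fair,
        .task_fork          = task_fork_fair,
        .prio_changed       = prio_changed_fair,
        .switched_from      = switched_from_fair,
        .switched_to        = switched_to_fair,
        .get_rr_interval    = get_rr_interval_fair,
    };
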
8108 struct rq *rq = cpu_rq(cpu); in unregister_fair_sched_group() local
8127 struct rq *rq = cpu_rq(cpu); in init_tg_cfs_entry() local
8175 struct rq *rq = cpu_rq(i); in sched_group_set_shares() local
8207 static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task) in get_rr_interval_fair()
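
get_rr_interval_fair() at 8207 backs sched_rr_get_interval(2) for SCHED_NORMAL tasks: it reports the task's sched_slice(), the share of the scheduling period proportional to its load weight. The proportionality as a stand-alone calculation (toy period and weights; the kernel's weights come from the nice-level table, where nice 0 is 1024 and nice +5 is 335):

    #include <stdio.h>

    int main(void)
    {
        unsigned long long period_ns = 24000000ULL;   /* 24ms period */
        unsigned long long w[] = { 1024, 1024, 335 }; /* nice 0, 0, +5 */
        unsigned long long total = 0;

        for (int i = 0; i < 3; i++)
            total += w[i];
        for (int i = 0; i < 3; i++)  /* slice_i = period * w_i / total */
            printf("task%d slice: %llu ns\n", i, period_ns * w[i] / total);
        return 0;
    }
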