Lines Matching defs:cfs_rq

249 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of()
289 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_add_leaf_cfs_rq()
313 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_del_leaf_cfs_rq()
322 #define for_each_leaf_cfs_rq(rq, cfs_rq) \ argument
379 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of()
408 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_add_leaf_cfs_rq()
412 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_del_leaf_cfs_rq()
416 #define for_each_leaf_cfs_rq(rq, cfs_rq) \ argument
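
The two clusters of rq_of() / list_add_leaf_cfs_rq() / list_del_leaf_cfs_rq() / for_each_leaf_cfs_rq() entries above are the CONFIG_FAIR_GROUP_SCHED variants followed by the flat (non-group) stubs of the same helpers. A minimal standalone sketch of the distinction, using stand-in struct layouts rather than the kernel's:

/*
 * Simplified model of the two rq_of() flavours in the listing above;
 * the struct layouts here are stand-ins, not the kernel's definitions.
 */
#include <stddef.h>

/* Group-scheduling build: every cfs_rq carries a back-pointer to its rq. */
struct rq_grouped { int cpu; };
struct cfs_rq_grouped { struct rq_grouped *rq; };

static inline struct rq_grouped *rq_of_grouped(struct cfs_rq_grouped *cfs_rq)
{
	return cfs_rq->rq;
}

/*
 * Flat build: the single cfs_rq is embedded in struct rq, so the
 * enclosing rq is recovered with container_of()-style arithmetic.
 */
struct cfs_rq_flat { unsigned int nr_running; };
struct rq_flat { int cpu; struct cfs_rq_flat cfs; };

static inline struct rq_flat *rq_of_flat(struct cfs_rq_flat *cfs_rq)
{
	return (struct rq_flat *)((char *)cfs_rq - offsetof(struct rq_flat, cfs));
}
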
462 static void update_min_vruntime(struct cfs_rq *cfs_rq) in update_min_vruntime()
491 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __enqueue_entity()
527 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __dequeue_entity()
539 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq) in __pick_first_entity()
560 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) in __pick_last_entity()
636 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) in sched_slice()
663 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se) in sched_vslice()
695 static void update_curr(struct cfs_rq *cfs_rq) in update_curr()
736 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_start()
744 static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_enqueue()
755 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_end()
772 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_dequeue()
786 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_curr_start()
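
update_curr(), sched_slice() and sched_vslice() in the block above carry CFS's weighted time accounting: wall-clock runtime is scaled by NICE_0_LOAD / weight before it is added to an entity's vruntime, and each entity's slice of the scheduling period is proportional to its share of the queue's total weight. A simplified arithmetic sketch, which omits the kernel's fixed-point inverse-weight tricks and the per-level walk for group entities:

/*
 * Arithmetic sketch of the relations behind update_curr(),
 * sched_slice() and sched_vslice(); plain 64-bit division where the
 * kernel uses precomputed inverse weights.
 */
#include <stdint.h>

#define NICE_0_LOAD	1024ULL		/* load.weight of a nice-0 task */

/* vruntime advance for delta_exec ns of real runtime at weight w. */
static uint64_t calc_delta_fair_sketch(uint64_t delta_exec, uint64_t weight)
{
	return delta_exec * NICE_0_LOAD / weight;
}

/*
 * The target latency is stretched once the queue holds more tasks
 * than fit at the minimum granularity (cf. __sched_period()).
 */
static uint64_t sched_period_sketch(unsigned int nr_running,
				    uint64_t latency, uint64_t min_gran)
{
	unsigned int nr_latency = latency / min_gran;

	return nr_running <= nr_latency ? latency : nr_running * min_gran;
}

/* Wall-clock slice: the entity's share of the period, by weight. */
static uint64_t sched_slice_sketch(uint64_t period, uint64_t se_weight,
				   uint64_t cfs_rq_weight)
{
	return period * se_weight / cfs_rq_weight;
}

/* Virtual slice: the same interval expressed in vruntime units. */
static uint64_t sched_vslice_sketch(uint64_t period, uint64_t se_weight,
				    uint64_t cfs_rq_weight)
{
	return calc_delta_fair_sketch(
		sched_slice_sketch(period, se_weight, cfs_rq_weight),
		se_weight);
}
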
2290 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_enqueue()
2307 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_dequeue()
2321 static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq) in calc_tg_weight()
2337 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg) in calc_cfs_shares()
2356 static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg) in calc_cfs_shares()
2361 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, in reweight_entity()
2379 static void update_cfs_shares(struct cfs_rq *cfs_rq) in update_cfs_shares()
2398 static inline void update_cfs_shares(struct cfs_rq *cfs_rq) in update_cfs_shares()
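
calc_tg_weight(), calc_cfs_shares() and update_cfs_shares() above repartition a task group's shares across CPUs in proportion to how much of the group's load sits on each CPU's cfs_rq. A rough sketch of that proportion follows; the real calc_tg_weight() corrects the group's averaged load with this queue's instantaneous weight, and the result is clamped between the kernel's MIN_SHARES and tg->shares:

/*
 * Sketch of the proportional split behind calc_cfs_shares(): this
 * CPU's cfs_rq receives tg_shares scaled by its fraction of the
 * group's total weight, clamped so it never degenerates to zero.
 */
#include <stdint.h>

static uint64_t calc_cfs_shares_sketch(uint64_t tg_shares,
				       uint64_t cfs_rq_load,
				       uint64_t tg_total_weight,
				       uint64_t min_shares)
{
	uint64_t shares;

	if (tg_total_weight == 0)
		tg_total_weight = cfs_rq_load ? cfs_rq_load : 1;

	shares = tg_shares * cfs_rq_load / tg_total_weight;

	if (shares < min_shares)
		shares = min_shares;
	if (shares > tg_shares)
		shares = tg_shares;

	return shares;
}
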
2606 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __synchronize_entity_decay() local
2622 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq, in __update_cfs_rq_tg_load_contrib()
2645 struct cfs_rq *cfs_rq) in __update_tg_runnable_avg()
2663 struct cfs_rq *cfs_rq = group_cfs_rq(se); in __update_group_entity_contrib() local
2710 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq, in __update_cfs_rq_tg_load_contrib()
2713 struct cfs_rq *cfs_rq) {} in __update_tg_runnable_avg()
2767 static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq, in subtract_blocked_load_contrib()
2782 struct cfs_rq *cfs_rq = cfs_rq_of(se); in update_entity_load_avg() local
2818 static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update) in update_cfs_rq_blocked_load()
2844 static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq, in enqueue_entity_load_avg()
2896 static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq, in dequeue_entity_load_avg()
2939 static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq, in enqueue_entity_load_avg()
2942 static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq, in dequeue_entity_load_avg()
2945 static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, in update_cfs_rq_blocked_load()
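
The __synchronize_entity_decay() / update_entity_load_avg() / update_cfs_rq_blocked_load() cluster above belongs to per-entity load tracking: runnable time is accumulated in ~1 ms (1024 µs) periods and, for every period that elapses, older history is multiplied by a constant y chosen so that y^32 = 1/2, giving a 32 ms half-life. A floating-point sketch of that decay; the kernel naturally does this in fixed point with precomputed tables rather than doubles:

/*
 * Floating-point sketch of the geometric decay used by per-entity
 * load tracking: y^32 == 1/2, applied once per ~1 ms period.
 */
#include <math.h>
#include <stdint.h>

static double pelt_y(void)
{
	return pow(0.5, 1.0 / 32.0);	/* half-life of 32 periods */
}

/* Decay an accumulated sum by n elapsed periods. */
static double decay_load_sketch(double sum, unsigned int n_periods)
{
	return sum * pow(pelt_y(), n_periods);
}

/* Fold one new period's contribution onto the decayed history. */
static double accumulate_sketch(double old_sum, double contrib,
				unsigned int n_periods)
{
	return decay_load_sketch(old_sum, n_periods) + contrib;
}
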
2955 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) in enqueue_sleeper()
3017 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) in check_spread()
3031 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) in place_entity()
3065 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in enqueue_entity()
3102 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_last() local
3113 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_next() local
3124 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_skip() local
3132 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) in clear_buddies()
3147 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in dequeue_entity()
3195 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) in check_preempt_tick()
3232 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in set_next_entity()
3273 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) in pick_next_entity()
3325 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) in put_prev_entity()
3349 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) in entity_tick()
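
place_entity(), enqueue_entity() and pick_next_entity() above are the heart of run-queue manipulation: newly created or newly woken entities get a vruntime derived from the queue's min_vruntime, and __enqueue_entity() inserts them into a red-black tree keyed by vruntime so the leftmost node is always the next candidate. A compressed sketch of the placement policy, with the START_DEBIT and GENTLE_FAIR_SLEEPERS feature handling reduced to fixed parameters:

/*
 * Sketch of place_entity(): new entities start one virtual slice
 * beyond min_vruntime, entities waking from sleep get a bounded
 * credit below it.  Constants are simplified stand-ins.
 */
#include <stdint.h>

struct cfs_rq_sketch {
	uint64_t min_vruntime;
};

struct se_sketch {
	uint64_t vruntime;
};

static void place_entity_sketch(struct cfs_rq_sketch *cfs_rq,
				struct se_sketch *se, int initial,
				uint64_t vslice, uint64_t sleeper_thresh)
{
	uint64_t vruntime = cfs_rq->min_vruntime;

	if (initial)
		vruntime += vslice;          /* new task: queue at the back */
	else
		vruntime -= sleeper_thresh;  /* sleeper: bounded wakeup credit */

	/* Never hand back credit the entity had already spent. */
	se->vruntime = se->vruntime > vruntime ? se->vruntime : vruntime;
}
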
3457 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq) in cfs_rq_clock_task()
3466 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) in assign_cfs_rq_runtime()
3515 static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq) in expire_cfs_rq_runtime()
3546 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) in __account_cfs_rq_runtime()
3564 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) in account_cfs_rq_runtime()
3572 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) in cfs_rq_throttled()
3578 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) in throttled_hierarchy()
3604 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_unthrottle_up() local
3621 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_throttle_down() local
3631 static void throttle_cfs_rq(struct cfs_rq *cfs_rq) in throttle_cfs_rq()
3676 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) in unthrottle_cfs_rq()
3726 struct cfs_rq *cfs_rq; in distribute_cfs_runtime() local
3886 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) in __return_cfs_rq_runtime()
3910 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) in return_cfs_rq_runtime()
3959 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) in check_enqueue_throttle()
3979 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) in check_cfs_rq_runtime()
4044 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) in init_cfs_rq_runtime()
4086 struct cfs_rq *cfs_rq; in update_runtime_enabled() local
4099 struct cfs_rq *cfs_rq; in unthrottle_offline_cfs_rqs() local
4122 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq) in cfs_rq_clock_task()
4127 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {} in account_cfs_rq_runtime()
4128 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; } in check_cfs_rq_runtime()
4129 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} in check_enqueue_throttle()
4130 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} in return_cfs_rq_runtime()
4132 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) in cfs_rq_throttled()
4137 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) in throttled_hierarchy()
4151 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} in init_cfs_rq_runtime()
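
The entries from cfs_rq_clock_task() through the stub init_cfs_rq_runtime() above implement CFS bandwidth control: each throttlable cfs_rq holds a local runtime_remaining budget, account_cfs_rq_runtime() drains it as tasks run, assign_cfs_rq_runtime() refills it in slices from the task group's global quota, and when both are exhausted the cfs_rq is throttled until the period timer replenishes the pool. A condensed sketch of the local accounting path; pull_slice_from_tg() is a hypothetical stand-in for the real pool handling and its locking:

/*
 * Condensed model of the local accounting behind
 * account_cfs_rq_runtime(): drain the local budget, try to refill
 * from the group pool, flag a resched (and eventual throttle) if
 * that fails.  Types and the pool helper are stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>

struct bw_cfs_rq {
	int64_t runtime_remaining;	/* local budget, may go negative */
	bool	runtime_enabled;	/* quota configured for this group */
	bool	need_resched;		/* curr should be preempted/throttled */
};

/* Hypothetical stand-in for the tg quota pool: one global pool here. */
static int64_t tg_pool_ns = 100 * 1000 * 1000;	/* 100 ms of quota */

static int64_t pull_slice_from_tg(int64_t slice_ns)
{
	int64_t grant = tg_pool_ns < slice_ns ? tg_pool_ns : slice_ns;

	tg_pool_ns -= grant;
	return grant;
}

static void account_cfs_rq_runtime_sketch(struct bw_cfs_rq *cfs_rq,
					  int64_t delta_exec,
					  int64_t slice_ns)
{
	if (!cfs_rq->runtime_enabled)
		return;

	cfs_rq->runtime_remaining -= delta_exec;
	if (cfs_rq->runtime_remaining > 0)
		return;

	/* Out of local budget: top up from the group's global quota. */
	cfs_rq->runtime_remaining += pull_slice_from_tg(slice_ns);

	/* Still dry: the tick/put path will throttle this cfs_rq. */
	if (cfs_rq->runtime_remaining <= 0)
		cfs_rq->need_resched = true;
}
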
4172 struct cfs_rq *cfs_rq = cfs_rq_of(se); in hrtick_start_fair() local
4224 struct cfs_rq *cfs_rq; in enqueue_task_fair() local
4273 struct cfs_rq *cfs_rq; in dequeue_task_fair() local
4408 struct cfs_rq *cfs_rq = cfs_rq_of(se); in task_waking_fair() local
4917 struct cfs_rq *cfs_rq = cfs_rq_of(se); in migrate_task_rq_fair() local
5017 struct cfs_rq *cfs_rq = task_cfs_rq(curr); in check_preempt_wakeup() local
5099 struct cfs_rq *cfs_rq = &rq->cfs; in pick_next_task_fair() local
5225 struct cfs_rq *cfs_rq; in put_prev_task_fair() local
5241 struct cfs_rq *cfs_rq = task_cfs_rq(curr); in yield_task_fair() local
5819 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu]; in __update_blocked_averages_cpu() local
5849 struct cfs_rq *cfs_rq; in update_blocked_averages() local
5875 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq) in update_cfs_rq_h_load()
5910 struct cfs_rq *cfs_rq = task_cfs_rq(p); in task_h_load() local
7816 struct cfs_rq *cfs_rq; in task_tick_fair() local
7837 struct cfs_rq *cfs_rq; in task_fork_fair() local
7905 struct cfs_rq *cfs_rq = cfs_rq_of(se); in switched_from_fair() local
7975 struct cfs_rq *cfs_rq = cfs_rq_of(se); in set_curr_task_fair() local
7983 void init_cfs_rq(struct cfs_rq *cfs_rq) in init_cfs_rq()
8000 struct cfs_rq *cfs_rq; in task_move_group_fair() local
8068 struct cfs_rq *cfs_rq; in alloc_fair_sched_group() local
8123 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, in init_tg_cfs_entry()
8267 struct cfs_rq *cfs_rq; in print_cfs_stats() local
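
The tail of the listing (enqueue_task_fair(), dequeue_task_fair(), pick_next_task_fair(), task_tick_fair(), init_tg_cfs_entry(), ...) is the sched_class glue: with group scheduling each task sits under a chain of sched_entities, one per nesting task group, so the task-level hooks walk that chain and apply the per-cfs_rq operations listed earlier at every level. A structural sketch of that walk; the types and parent linkage are stand-ins for the kernel's for_each_sched_entity() loop:

/*
 * Structural sketch of the hierarchy walk done by hooks such as
 * enqueue_task_fair(): apply the per-level operation from the task's
 * own entity up to the root cfs_rq, stopping where a parent entity
 * is already queued.
 */
struct sk_cfs_rq;

struct sk_sched_entity {
	struct sk_sched_entity	*parent;	/* NULL at the root level */
	struct sk_cfs_rq	*my_cfs_rq;	/* queue this entity is on */
	int			on_rq;
};

static void enqueue_entity_op(struct sk_cfs_rq *cfs_rq,
			      struct sk_sched_entity *se)
{
	/* placeholder for enqueue_entity() plus bookkeeping at one level */
	se->on_rq = 1;
	(void)cfs_rq;
}

static void enqueue_task_fair_sketch(struct sk_sched_entity *task_se)
{
	struct sk_sched_entity *se;

	for (se = task_se; se; se = se->parent) {
		if (se->on_rq)		/* parents may already be queued */
			break;
		enqueue_entity_op(se->my_cfs_rq, se);
	}
	/* the real code then updates shares/load for the remaining parents */
}
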