Lines matching refs:cfs_rq in kernel/sched/fair.c

249 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of() argument
251 return cfs_rq->rq; in rq_of()
269 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) in task_cfs_rq()
271 return p->se.cfs_rq; in task_cfs_rq()
275 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) in cfs_rq_of()
277 return se->cfs_rq; in cfs_rq_of()
281 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) in group_cfs_rq()
286 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_add_leaf_cfs_rq() argument
288 if (!cfs_rq->on_list) { in list_add_leaf_cfs_rq()
295 if (cfs_rq->tg->parent && in list_add_leaf_cfs_rq()
296 cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) { in list_add_leaf_cfs_rq()
297 list_add_rcu(&cfs_rq->leaf_cfs_rq_list, in list_add_leaf_cfs_rq()
298 &rq_of(cfs_rq)->leaf_cfs_rq_list); in list_add_leaf_cfs_rq()
300 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, in list_add_leaf_cfs_rq()
301 &rq_of(cfs_rq)->leaf_cfs_rq_list); in list_add_leaf_cfs_rq()
304 cfs_rq->on_list = 1; in list_add_leaf_cfs_rq()
308 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_del_leaf_cfs_rq() argument
310 if (cfs_rq->on_list) { in list_del_leaf_cfs_rq()
311 list_del_rcu(&cfs_rq->leaf_cfs_rq_list); in list_del_leaf_cfs_rq()
312 cfs_rq->on_list = 0; in list_del_leaf_cfs_rq()
317 #define for_each_leaf_cfs_rq(rq, cfs_rq) \ argument
318 list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
321 static inline struct cfs_rq *
324 if (se->cfs_rq == pse->cfs_rq) in is_same_group()
325 return se->cfs_rq; in is_same_group()
374 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of() argument
376 return container_of(cfs_rq, struct rq, cfs); in rq_of()
384 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) in task_cfs_rq()
389 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) in cfs_rq_of()
398 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) in group_cfs_rq()
403 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_add_leaf_cfs_rq() argument
407 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_del_leaf_cfs_rq() argument
411 #define for_each_leaf_cfs_rq(rq, cfs_rq) \ argument
412 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
427 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
457 static void update_min_vruntime(struct cfs_rq *cfs_rq) in update_min_vruntime() argument
459 u64 vruntime = cfs_rq->min_vruntime; in update_min_vruntime()
461 if (cfs_rq->curr) in update_min_vruntime()
462 vruntime = cfs_rq->curr->vruntime; in update_min_vruntime()
464 if (cfs_rq->rb_leftmost) { in update_min_vruntime()
465 struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost, in update_min_vruntime()
469 if (!cfs_rq->curr) in update_min_vruntime()
476 cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime); in update_min_vruntime()
479 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; in update_min_vruntime()
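
The update_min_vruntime() lines above advance cfs_rq->min_vruntime toward the smallest vruntime still in use (the running entity's, or the leftmost queued entity's) and never let it move backwards. A minimal user-space sketch of that logic, with the rb-tree and sched_entity replaced by plain values; the function and parameter names here are illustrative, not the kernel API:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;
typedef int64_t s64;

/* Signed-difference compares, so the logic still works once vruntime wraps. */
static u64 max_vruntime(u64 max, u64 v)
{
        s64 delta = (s64)(v - max);
        return delta > 0 ? v : max;
}

static u64 min_vruntime(u64 min, u64 v)
{
        s64 delta = (s64)(v - min);
        return delta < 0 ? v : min;
}

/*
 * have_curr/have_left stand in for cfs_rq->curr and cfs_rq->rb_leftmost;
 * returns the new, monotonically non-decreasing min_vruntime.
 */
static u64 update_min_vruntime(u64 cur_min, int have_curr, u64 curr_vr,
                               int have_left, u64 left_vr)
{
        u64 vruntime = cur_min;

        if (have_curr)
                vruntime = curr_vr;

        if (have_left) {
                if (!have_curr)
                        vruntime = left_vr;
                else
                        vruntime = min_vruntime(vruntime, left_vr);
        }

        /* ensure we never go backward */
        return max_vruntime(cur_min, vruntime);
}

int main(void)
{
        u64 m = 1000;
        m = update_min_vruntime(m, 1, 1200, 1, 1100);  /* leftmost lags curr */
        printf("min_vruntime = %llu\n", (unsigned long long)m); /* 1100 */
        m = update_min_vruntime(m, 0, 0, 1, 900);      /* a stale value cannot pull it back */
        printf("min_vruntime = %llu\n", (unsigned long long)m); /* still 1100 */
        return 0;
}

The signed-difference comparisons are what keep the ordering correct after the u64 vruntime counters wrap.
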
486 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __enqueue_entity() argument
488 struct rb_node **link = &cfs_rq->tasks_timeline.rb_node; in __enqueue_entity()
516 cfs_rq->rb_leftmost = &se->run_node; in __enqueue_entity()
519 rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline); in __enqueue_entity()
522 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __dequeue_entity() argument
524 if (cfs_rq->rb_leftmost == &se->run_node) { in __dequeue_entity()
528 cfs_rq->rb_leftmost = next_node; in __dequeue_entity()
531 rb_erase(&se->run_node, &cfs_rq->tasks_timeline); in __dequeue_entity()
534 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq) in __pick_first_entity() argument
536 struct rb_node *left = cfs_rq->rb_leftmost; in __pick_first_entity()
555 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) in __pick_last_entity() argument
557 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline); in __pick_last_entity()
626 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) in sched_slice() argument
628 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq); in sched_slice()
634 cfs_rq = cfs_rq_of(se); in sched_slice()
635 load = &cfs_rq->load; in sched_slice()
638 lw = cfs_rq->load; in sched_slice()
653 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se) in sched_vslice() argument
655 return calc_delta_fair(sched_slice(cfs_rq, se), se); in sched_vslice()
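
sched_slice() splits one scheduling period among the runnable entities in proportion to load weight (walking up the group hierarchy), and sched_vslice() converts that wall-clock slice into vruntime units. A flat, single-level sketch of the arithmetic, assuming the unscaled default tunables (the kernel multiplies them by a factor derived from the online CPU count):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

/* Base values of the tunables, before CPU-count scaling. */
#define SCHED_LATENCY_NS        6000000ULL   /* sysctl_sched_latency: 6 ms   */
#define SCHED_MIN_GRAN_NS        750000ULL   /* sysctl_sched_min_granularity */
#define SCHED_NR_LATENCY              8      /* one period covers up to 8 tasks */
#define NICE_0_LOAD                1024ULL

static u64 sched_period(unsigned long nr_running)
{
        if (nr_running > SCHED_NR_LATENCY)
                return nr_running * SCHED_MIN_GRAN_NS;
        return SCHED_LATENCY_NS;
}

/* Wall-clock slice: the entity's weighted share of one period. */
static u64 sched_slice(unsigned long nr_running, u64 se_weight, u64 rq_weight)
{
        return sched_period(nr_running) * se_weight / rq_weight;
}

/* Virtual slice: the same share expressed in vruntime (nice-0) units. */
static u64 sched_vslice(unsigned long nr_running, u64 se_weight, u64 rq_weight)
{
        return sched_slice(nr_running, se_weight, rq_weight) * NICE_0_LOAD / se_weight;
}

int main(void)
{
        /* Two nice-0 tasks share one 6 ms period: 3 ms of wall clock each. */
        printf("slice  = %llu ns\n",
               (unsigned long long)sched_slice(2, 1024, 2048));
        /* A heavier task (weight 2048) next to a nice-0 task gets 4 ms ... */
        printf("slice  = %llu ns\n",
               (unsigned long long)sched_slice(2, 2048, 3072));
        /* ... but both advance the same amount of vruntime per slice. */
        printf("vslice = %llu ns\n",
               (unsigned long long)sched_vslice(2, 2048, 3072));
        printf("vslice = %llu ns\n",
               (unsigned long long)sched_vslice(2, 1024, 3072));
        return 0;
}

The two vslice values coming out equal is the point: a heavier entity gets more wall-clock time per period but advances its vruntime at the same rate as everyone else.
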
690 static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
691 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
701 static void update_curr(struct cfs_rq *cfs_rq) in update_curr() argument
703 struct sched_entity *curr = cfs_rq->curr; in update_curr()
704 u64 now = rq_clock_task(rq_of(cfs_rq)); in update_curr()
720 schedstat_add(cfs_rq, exec_clock, delta_exec); in update_curr()
723 update_min_vruntime(cfs_rq); in update_curr()
733 account_cfs_rq_runtime(cfs_rq, delta_exec); in update_curr()
742 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_start() argument
744 schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq))); in update_stats_wait_start()
750 static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_enqueue() argument
756 if (se != cfs_rq->curr) in update_stats_enqueue()
757 update_stats_wait_start(cfs_rq, se); in update_stats_enqueue()
761 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_end() argument
764 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start)); in update_stats_wait_end()
767 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start); in update_stats_wait_end()
771 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start); in update_stats_wait_end()
778 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_dequeue() argument
784 if (se != cfs_rq->curr) in update_stats_dequeue()
785 update_stats_wait_end(cfs_rq, se); in update_stats_dequeue()
792 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_curr_start() argument
797 se->exec_start = rq_clock_task(rq_of(cfs_rq)); in update_stats_curr_start()
2331 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_enqueue() argument
2333 update_load_add(&cfs_rq->load, se->load.weight); in account_entity_enqueue()
2335 update_load_add(&rq_of(cfs_rq)->load, se->load.weight); in account_entity_enqueue()
2338 struct rq *rq = rq_of(cfs_rq); in account_entity_enqueue()
2344 cfs_rq->nr_running++; in account_entity_enqueue()
2348 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_dequeue() argument
2350 update_load_sub(&cfs_rq->load, se->load.weight); in account_entity_dequeue()
2352 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight); in account_entity_dequeue()
2354 account_numa_dequeue(rq_of(cfs_rq), task_of(se)); in account_entity_dequeue()
2357 cfs_rq->nr_running--; in account_entity_dequeue()
2362 static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq) in calc_tg_weight() argument
2372 tg_weight -= cfs_rq->tg_load_avg_contrib; in calc_tg_weight()
2373 tg_weight += cfs_rq->load.weight; in calc_tg_weight()
2378 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg) in calc_cfs_shares() argument
2382 tg_weight = calc_tg_weight(tg, cfs_rq); in calc_cfs_shares()
2383 load = cfs_rq->load.weight; in calc_cfs_shares()
2397 static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg) in calc_cfs_shares() argument
2402 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, in reweight_entity() argument
2407 if (cfs_rq->curr == se) in reweight_entity()
2408 update_curr(cfs_rq); in reweight_entity()
2409 account_entity_dequeue(cfs_rq, se); in reweight_entity()
2415 account_entity_enqueue(cfs_rq, se); in reweight_entity()
2418 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
2420 static void update_cfs_shares(struct cfs_rq *cfs_rq) in update_cfs_shares() argument
2426 tg = cfs_rq->tg; in update_cfs_shares()
2427 se = tg->se[cpu_of(rq_of(cfs_rq))]; in update_cfs_shares()
2428 if (!se || throttled_hierarchy(cfs_rq)) in update_cfs_shares()
2434 shares = calc_cfs_shares(cfs_rq, tg); in update_cfs_shares()
2439 static inline void update_cfs_shares(struct cfs_rq *cfs_rq) in update_cfs_shares() argument
2561 unsigned long weight, int running, struct cfs_rq *cfs_rq) in __update_load_avg() argument
2607 if (cfs_rq) { in __update_load_avg()
2608 cfs_rq->runnable_load_sum += in __update_load_avg()
2622 if (cfs_rq) { in __update_load_avg()
2623 cfs_rq->runnable_load_sum = in __update_load_avg()
2624 decay_load(cfs_rq->runnable_load_sum, periods + 1); in __update_load_avg()
2633 if (cfs_rq) in __update_load_avg()
2634 cfs_rq->runnable_load_sum += weight * contrib; in __update_load_avg()
2644 if (cfs_rq) in __update_load_avg()
2645 cfs_rq->runnable_load_sum += weight * scaled_delta; in __update_load_avg()
2654 if (cfs_rq) { in __update_load_avg()
2655 cfs_rq->runnable_load_avg = in __update_load_avg()
2656 div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX); in __update_load_avg()
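
__update_load_avg() is the PELT core that also accumulates cfs_rq->runnable_load_sum above: runnable time is summed in 1024 us periods, and each elapsed period decays the whole sum by a constant y chosen so that contributions halve every 32 periods. A floating-point sketch of that arithmetic; the kernel uses precomputed fixed-point tables, which is why its LOAD_AVG_MAX constant (47742) comes out slightly below the exact-arithmetic sum printed here:

#include <stdio.h>
#include <math.h>

/*
 * PELT decays each elapsed 1024 us period by y, with y chosen so that
 * y^32 == 1/2; LOAD_AVG_MAX is the kernel's fixed-point limit of the
 * series 1024 * (1 + y + y^2 + ...).
 */
#define LOAD_AVG_PERIOD 32
#define LOAD_AVG_MAX    47742

int main(void)
{
        double y = pow(0.5, 1.0 / LOAD_AVG_PERIOD);
        double sum = 0.0;
        int period;

        /* An entity that was runnable in every one of the last 345 periods. */
        for (period = 0; period < 345; period++)
                sum = sum * y + 1024.0;

        printf("y    = %.6f  (y^32 = %.3f)\n", y, pow(y, LOAD_AVG_PERIOD));
        printf("sum  = %.0f  (kernel's LOAD_AVG_MAX = %d)\n", sum, LOAD_AVG_MAX);

        /*
         * load_avg = load_sum / LOAD_AVG_MAX, so an always-runnable entity
         * of weight w settles at load_avg of roughly w; and a period stops
         * mattering quickly: its weight halves every 32 periods (~32 ms).
         */
        printf("decay of one period after 32 periods: %.0f/1024\n",
               1024.0 * pow(y, 32));
        printf("decay of one period after 64 periods: %.0f/1024\n",
               1024.0 * pow(y, 64));
        return 0;
}
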
2669 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) in update_tg_load_avg() argument
2671 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib; in update_tg_load_avg()
2673 if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) { in update_tg_load_avg()
2674 atomic_long_add(delta, &cfs_rq->tg->load_avg); in update_tg_load_avg()
2675 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg; in update_tg_load_avg()
2680 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {} in update_tg_load_avg() argument
2683 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
2686 static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) in update_cfs_rq_load_avg() argument
2688 struct sched_avg *sa = &cfs_rq->avg; in update_cfs_rq_load_avg()
2691 if (atomic_long_read(&cfs_rq->removed_load_avg)) { in update_cfs_rq_load_avg()
2692 s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0); in update_cfs_rq_load_avg()
2698 if (atomic_long_read(&cfs_rq->removed_util_avg)) { in update_cfs_rq_load_avg()
2699 long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0); in update_cfs_rq_load_avg()
2704 decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa, in update_cfs_rq_load_avg()
2705 scale_load_down(cfs_rq->load.weight), cfs_rq->curr != NULL, cfs_rq); in update_cfs_rq_load_avg()
2709 cfs_rq->load_last_update_time_copy = sa->last_update_time; in update_cfs_rq_load_avg()
2718 struct cfs_rq *cfs_rq = cfs_rq_of(se); in update_load_avg() local
2719 u64 now = cfs_rq_clock_task(cfs_rq); in update_load_avg()
2720 int cpu = cpu_of(rq_of(cfs_rq)); in update_load_avg()
2728 cfs_rq->curr == se, NULL); in update_load_avg()
2730 if (update_cfs_rq_load_avg(now, cfs_rq) && update_tg) in update_load_avg()
2731 update_tg_load_avg(cfs_rq, 0); in update_load_avg()
2734 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in attach_entity_load_avg() argument
2744 __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)), in attach_entity_load_avg()
2754 se->avg.last_update_time = cfs_rq->avg.last_update_time; in attach_entity_load_avg()
2755 cfs_rq->avg.load_avg += se->avg.load_avg; in attach_entity_load_avg()
2756 cfs_rq->avg.load_sum += se->avg.load_sum; in attach_entity_load_avg()
2757 cfs_rq->avg.util_avg += se->avg.util_avg; in attach_entity_load_avg()
2758 cfs_rq->avg.util_sum += se->avg.util_sum; in attach_entity_load_avg()
2761 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in detach_entity_load_avg() argument
2763 __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)), in detach_entity_load_avg()
2765 cfs_rq->curr == se, NULL); in detach_entity_load_avg()
2767 cfs_rq->avg.load_avg = max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0); in detach_entity_load_avg()
2768 cfs_rq->avg.load_sum = max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0); in detach_entity_load_avg()
2769 cfs_rq->avg.util_avg = max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0); in detach_entity_load_avg()
2770 cfs_rq->avg.util_sum = max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0); in detach_entity_load_avg()
2775 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in enqueue_entity_load_avg() argument
2778 u64 now = cfs_rq_clock_task(cfs_rq); in enqueue_entity_load_avg()
2783 __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa, in enqueue_entity_load_avg()
2785 cfs_rq->curr == se, NULL); in enqueue_entity_load_avg()
2788 decayed = update_cfs_rq_load_avg(now, cfs_rq); in enqueue_entity_load_avg()
2790 cfs_rq->runnable_load_avg += sa->load_avg; in enqueue_entity_load_avg()
2791 cfs_rq->runnable_load_sum += sa->load_sum; in enqueue_entity_load_avg()
2794 attach_entity_load_avg(cfs_rq, se); in enqueue_entity_load_avg()
2797 update_tg_load_avg(cfs_rq, 0); in enqueue_entity_load_avg()
2802 dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in dequeue_entity_load_avg() argument
2806 cfs_rq->runnable_load_avg = in dequeue_entity_load_avg()
2807 max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0); in dequeue_entity_load_avg()
2808 cfs_rq->runnable_load_sum = in dequeue_entity_load_avg()
2809 max_t(s64, cfs_rq->runnable_load_sum - se->avg.load_sum, 0); in dequeue_entity_load_avg()
2818 struct cfs_rq *cfs_rq = cfs_rq_of(se); in remove_entity_load_avg() local
2825 last_update_time_copy = cfs_rq->load_last_update_time_copy; in remove_entity_load_avg()
2827 last_update_time = cfs_rq->avg.last_update_time; in remove_entity_load_avg()
2830 last_update_time = cfs_rq->avg.last_update_time; in remove_entity_load_avg()
2833 __update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL); in remove_entity_load_avg()
2834 atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg); in remove_entity_load_avg()
2835 atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg); in remove_entity_load_avg()
2856 static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq) in cfs_rq_runnable_load_avg() argument
2858 return cfs_rq->runnable_load_avg; in cfs_rq_runnable_load_avg()
2861 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq) in cfs_rq_load_avg() argument
2863 return cfs_rq->avg.load_avg; in cfs_rq_load_avg()
2872 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in enqueue_entity_load_avg() argument
2874 dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in dequeue_entity_load_avg() argument
2878 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in attach_entity_load_avg() argument
2880 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in detach_entity_load_avg() argument
2889 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) in enqueue_sleeper() argument
2898 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start; in enqueue_sleeper()
2915 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start; in enqueue_sleeper()
2951 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) in check_spread() argument
2954 s64 d = se->vruntime - cfs_rq->min_vruntime; in check_spread()
2960 schedstat_inc(cfs_rq, nr_spread_over); in check_spread()
2965 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) in place_entity() argument
2967 u64 vruntime = cfs_rq->min_vruntime; in place_entity()
2976 vruntime += sched_vslice(cfs_rq, se); in place_entity()
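
place_entity() picks the vruntime at which an entity is (re)inserted: a freshly forked entity is debited one virtual slice past min_vruntime so it cannot immediately preempt everything, while a waking sleeper is credited at most half a latency period. Only the initial branch shows up in the cfs_rq-matching lines above; the sketch below covers both, assuming the default scheduler features (START_DEBIT, GENTLE_FAIR_SLEEPERS) and the unscaled 6 ms latency:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef int64_t s64;

#define SCHED_LATENCY_NS 6000000ULL   /* unscaled default sysctl_sched_latency */

static u64 max_vruntime(u64 max, u64 v)
{
        return (s64)(v - max) > 0 ? v : max;
}

/*
 * initial != 0: a newly forked entity is pushed one virtual slice past
 * min_vruntime (START_DEBIT) so it does not starve the current queue.
 * initial == 0: a waking sleeper is placed up to half a latency period
 * before min_vruntime (GENTLE_FAIR_SLEEPERS) as a bounded wakeup bonus.
 * Either way the entity's own (possibly larger) vruntime wins.
 */
static u64 place_entity(u64 min_vruntime, u64 se_vruntime, u64 vslice, int initial)
{
        u64 vruntime = min_vruntime;

        if (initial)
                vruntime += vslice;
        else
                vruntime -= SCHED_LATENCY_NS / 2;

        return max_vruntime(se_vruntime, vruntime);
}

int main(void)
{
        printf("forked : %llu\n",
               (unsigned long long)place_entity(10000000, 0, 3000000, 1));
        printf("sleeper: %llu\n",
               (unsigned long long)place_entity(10000000, 2000000, 0, 0));
        return 0;
}
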
2996 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
2999 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in enqueue_entity() argument
3006 se->vruntime += cfs_rq->min_vruntime; in enqueue_entity()
3011 update_curr(cfs_rq); in enqueue_entity()
3012 enqueue_entity_load_avg(cfs_rq, se); in enqueue_entity()
3013 account_entity_enqueue(cfs_rq, se); in enqueue_entity()
3014 update_cfs_shares(cfs_rq); in enqueue_entity()
3017 place_entity(cfs_rq, se, 0); in enqueue_entity()
3018 enqueue_sleeper(cfs_rq, se); in enqueue_entity()
3021 update_stats_enqueue(cfs_rq, se); in enqueue_entity()
3022 check_spread(cfs_rq, se); in enqueue_entity()
3023 if (se != cfs_rq->curr) in enqueue_entity()
3024 __enqueue_entity(cfs_rq, se); in enqueue_entity()
3027 if (cfs_rq->nr_running == 1) { in enqueue_entity()
3028 list_add_leaf_cfs_rq(cfs_rq); in enqueue_entity()
3029 check_enqueue_throttle(cfs_rq); in enqueue_entity()
3036 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_last() local
3037 if (cfs_rq->last != se) in __clear_buddies_last()
3040 cfs_rq->last = NULL; in __clear_buddies_last()
3047 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_next() local
3048 if (cfs_rq->next != se) in __clear_buddies_next()
3051 cfs_rq->next = NULL; in __clear_buddies_next()
3058 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_skip() local
3059 if (cfs_rq->skip != se) in __clear_buddies_skip()
3062 cfs_rq->skip = NULL; in __clear_buddies_skip()
3066 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) in clear_buddies() argument
3068 if (cfs_rq->last == se) in clear_buddies()
3071 if (cfs_rq->next == se) in clear_buddies()
3074 if (cfs_rq->skip == se) in clear_buddies()
3078 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
3081 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in dequeue_entity() argument
3086 update_curr(cfs_rq); in dequeue_entity()
3087 dequeue_entity_load_avg(cfs_rq, se); in dequeue_entity()
3089 update_stats_dequeue(cfs_rq, se); in dequeue_entity()
3096 se->statistics.sleep_start = rq_clock(rq_of(cfs_rq)); in dequeue_entity()
3098 se->statistics.block_start = rq_clock(rq_of(cfs_rq)); in dequeue_entity()
3103 clear_buddies(cfs_rq, se); in dequeue_entity()
3105 if (se != cfs_rq->curr) in dequeue_entity()
3106 __dequeue_entity(cfs_rq, se); in dequeue_entity()
3108 account_entity_dequeue(cfs_rq, se); in dequeue_entity()
3116 se->vruntime -= cfs_rq->min_vruntime; in dequeue_entity()
3119 return_cfs_rq_runtime(cfs_rq); in dequeue_entity()
3121 update_min_vruntime(cfs_rq); in dequeue_entity()
3122 update_cfs_shares(cfs_rq); in dequeue_entity()
3129 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) in check_preempt_tick() argument
3135 ideal_runtime = sched_slice(cfs_rq, curr); in check_preempt_tick()
3138 resched_curr(rq_of(cfs_rq)); in check_preempt_tick()
3143 clear_buddies(cfs_rq, curr); in check_preempt_tick()
3155 se = __pick_first_entity(cfs_rq); in check_preempt_tick()
3162 resched_curr(rq_of(cfs_rq)); in check_preempt_tick()
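
check_preempt_tick() decides at each tick whether the running entity must give up the CPU: either its wall-clock slice is used up, or it has run at least a minimum granularity and its vruntime is more than a full slice ahead of the leftmost queued entity. A sketch of that decision as a pure function; the function name and the unscaled 0.75 ms granularity are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef int64_t s64;

#define SCHED_MIN_GRAN_NS 750000ULL   /* unscaled default min granularity */

/*
 * Returns nonzero when the currently running entity should be preempted
 * at the tick: either it has used up its wall-clock slice, or it has run
 * at least a minimum granularity and is a full slice of vruntime ahead
 * of the leftmost (neediest) queued entity.
 */
static int should_resched_at_tick(u64 ideal_runtime, u64 delta_exec,
                                  u64 curr_vruntime, u64 leftmost_vruntime)
{
        s64 vdelta;

        if (delta_exec > ideal_runtime)
                return 1;
        if (delta_exec < SCHED_MIN_GRAN_NS)
                return 0;

        vdelta = (s64)(curr_vruntime - leftmost_vruntime);
        return vdelta > 0 && (u64)vdelta > ideal_runtime;
}

int main(void)
{
        /* slice exhausted */
        printf("%d\n", should_resched_at_tick(3000000, 3200000, 0, 0));
        /* ran only 0.5 ms: too early to preempt */
        printf("%d\n", should_resched_at_tick(3000000,  500000, 9000000, 1000000));
        /* far ahead of the leftmost entity in vruntime */
        printf("%d\n", should_resched_at_tick(3000000, 1000000, 9000000, 1000000));
        return 0;
}
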
3166 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in set_next_entity() argument
3175 update_stats_wait_end(cfs_rq, se); in set_next_entity()
3176 __dequeue_entity(cfs_rq, se); in set_next_entity()
3180 update_stats_curr_start(cfs_rq, se); in set_next_entity()
3181 cfs_rq->curr = se; in set_next_entity()
3188 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) { in set_next_entity()
3207 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) in pick_next_entity() argument
3209 struct sched_entity *left = __pick_first_entity(cfs_rq); in pick_next_entity()
3225 if (cfs_rq->skip == se) { in pick_next_entity()
3229 second = __pick_first_entity(cfs_rq); in pick_next_entity()
3243 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) in pick_next_entity()
3244 se = cfs_rq->last; in pick_next_entity()
3249 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) in pick_next_entity()
3250 se = cfs_rq->next; in pick_next_entity()
3252 clear_buddies(cfs_rq, se); in pick_next_entity()
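
In pick_next_entity(), the skip/next/last buddy hints may override the leftmost (smallest-vruntime) choice, but only while the buddy has not accumulated more than one wakeup granularity of extra vruntime, which is what the wakeup_preempt_entity(..., left) < 1 tests above express. A sketch of just that acceptance test, assuming the granularity has already been converted to vruntime units:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef int64_t s64;

/*
 * wakeup_preempt_entity(buddy, left) < 1 boils down to: the candidate's
 * vruntime exceeds the leftmost entity's by no more than one wakeup
 * granularity (in vruntime units), so preferring it costs little fairness.
 */
static int buddy_within_gran(u64 cand_vruntime, u64 left_vruntime, u64 vgran)
{
        return (s64)(cand_vruntime - left_vruntime) <= (s64)vgran;
}

int main(void)
{
        u64 left = 5000000, vgran = 1000000;

        /* the "next" buddy has only 0.4 ms more vruntime: acceptable */
        printf("next ok:  %d\n", buddy_within_gran(5400000, left, vgran));
        /* 2.2 ms more vruntime would be unfair: fall back to the leftmost */
        printf("stale ok: %d\n", buddy_within_gran(7200000, left, vgran));
        return 0;
}
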
3257 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
3259 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) in put_prev_entity() argument
3266 update_curr(cfs_rq); in put_prev_entity()
3269 check_cfs_rq_runtime(cfs_rq); in put_prev_entity()
3271 check_spread(cfs_rq, prev); in put_prev_entity()
3273 update_stats_wait_start(cfs_rq, prev); in put_prev_entity()
3275 __enqueue_entity(cfs_rq, prev); in put_prev_entity()
3279 cfs_rq->curr = NULL; in put_prev_entity()
3283 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) in entity_tick() argument
3288 update_curr(cfs_rq); in entity_tick()
3294 update_cfs_shares(cfs_rq); in entity_tick()
3302 resched_curr(rq_of(cfs_rq)); in entity_tick()
3309 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer)) in entity_tick()
3313 if (cfs_rq->nr_running > 1) in entity_tick()
3314 check_preempt_tick(cfs_rq, curr); in entity_tick()
3390 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq) in cfs_rq_clock_task() argument
3392 if (unlikely(cfs_rq->throttle_count)) in cfs_rq_clock_task()
3393 return cfs_rq->throttled_clock_task; in cfs_rq_clock_task()
3395 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time; in cfs_rq_clock_task()
3399 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) in assign_cfs_rq_runtime() argument
3401 struct task_group *tg = cfs_rq->tg; in assign_cfs_rq_runtime()
3406 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining; in assign_cfs_rq_runtime()
3423 cfs_rq->runtime_remaining += amount; in assign_cfs_rq_runtime()
3429 if ((s64)(expires - cfs_rq->runtime_expires) > 0) in assign_cfs_rq_runtime()
3430 cfs_rq->runtime_expires = expires; in assign_cfs_rq_runtime()
3432 return cfs_rq->runtime_remaining > 0; in assign_cfs_rq_runtime()
3439 static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq) in expire_cfs_rq_runtime() argument
3441 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in expire_cfs_rq_runtime()
3444 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0)) in expire_cfs_rq_runtime()
3447 if (cfs_rq->runtime_remaining < 0) in expire_cfs_rq_runtime()
3461 if (cfs_rq->runtime_expires != cfs_b->runtime_expires) { in expire_cfs_rq_runtime()
3463 cfs_rq->runtime_expires += TICK_NSEC; in expire_cfs_rq_runtime()
3466 cfs_rq->runtime_remaining = 0; in expire_cfs_rq_runtime()
3470 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) in __account_cfs_rq_runtime() argument
3473 cfs_rq->runtime_remaining -= delta_exec; in __account_cfs_rq_runtime()
3474 expire_cfs_rq_runtime(cfs_rq); in __account_cfs_rq_runtime()
3476 if (likely(cfs_rq->runtime_remaining > 0)) in __account_cfs_rq_runtime()
3483 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) in __account_cfs_rq_runtime()
3484 resched_curr(rq_of(cfs_rq)); in __account_cfs_rq_runtime()
3488 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) in account_cfs_rq_runtime() argument
3490 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) in account_cfs_rq_runtime()
3493 __account_cfs_rq_runtime(cfs_rq, delta_exec); in account_cfs_rq_runtime()
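
account_cfs_rq_runtime() and the helpers above implement the two-level bookkeeping of CFS bandwidth control: each cfs_rq burns a local runtime_remaining budget, refills it a slice at a time from the task group's global pool, and is throttled when the pool is empty. A simplified user-space model of that flow, assuming the default 5 ms slice and ignoring the runtime_expires period-boundary handling:

#include <stdint.h>
#include <stdio.h>

typedef int64_t s64;
typedef uint64_t u64;

#define BANDWIDTH_SLICE_NS 5000000LL  /* default sched_cfs_bandwidth_slice: 5 ms */

/* Global pool for one task group (refilled every bandwidth period). */
struct bw_pool { s64 runtime; };

/* Per-CPU local runtime for that group's cfs_rq. */
struct local_rt { s64 runtime_remaining; };

/* Pull up to one slice (plus the local deficit) from the global pool. */
static int assign_runtime(struct bw_pool *pool, struct local_rt *l)
{
        s64 want = BANDWIDTH_SLICE_NS - l->runtime_remaining;
        s64 got = want < pool->runtime ? want : pool->runtime;

        if (got < 0)
                got = 0;
        pool->runtime -= got;
        l->runtime_remaining += got;
        return l->runtime_remaining > 0;   /* 0 means: throttle this cfs_rq */
}

/* Called from update_curr() with the just-consumed execution time. */
static int account_runtime(struct bw_pool *pool, struct local_rt *l, u64 delta_exec)
{
        l->runtime_remaining -= (s64)delta_exec;
        if (l->runtime_remaining > 0)
                return 1;
        return assign_runtime(pool, l);
}

int main(void)
{
        struct bw_pool pool = { .runtime = 20000000 };  /* 20 ms of quota left */
        struct local_rt l = { .runtime_remaining = 0 };
        int i;

        for (i = 0; i < 8; i++) {
                int running = account_runtime(&pool, &l, 4000000); /* ran 4 ms */
                printf("tick %d: local=%lld global=%lld %s\n", i,
                       (long long)l.runtime_remaining, (long long)pool.runtime,
                       running ? "" : "-> throttle");
                if (!running)
                        break;
        }
        return 0;
}
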
3496 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) in cfs_rq_throttled() argument
3498 return cfs_bandwidth_used() && cfs_rq->throttled; in cfs_rq_throttled()
3502 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) in throttled_hierarchy() argument
3504 return cfs_bandwidth_used() && cfs_rq->throttle_count; in throttled_hierarchy()
3515 struct cfs_rq *src_cfs_rq, *dest_cfs_rq; in throttled_lb_pair()
3517 src_cfs_rq = tg->cfs_rq[src_cpu]; in throttled_lb_pair()
3518 dest_cfs_rq = tg->cfs_rq[dest_cpu]; in throttled_lb_pair()
3528 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_unthrottle_up() local
3530 cfs_rq->throttle_count--; in tg_unthrottle_up()
3532 if (!cfs_rq->throttle_count) { in tg_unthrottle_up()
3534 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) - in tg_unthrottle_up()
3535 cfs_rq->throttled_clock_task; in tg_unthrottle_up()
3545 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_throttle_down() local
3548 if (!cfs_rq->throttle_count) in tg_throttle_down()
3549 cfs_rq->throttled_clock_task = rq_clock_task(rq); in tg_throttle_down()
3550 cfs_rq->throttle_count++; in tg_throttle_down()
3555 static void throttle_cfs_rq(struct cfs_rq *cfs_rq) in throttle_cfs_rq() argument
3557 struct rq *rq = rq_of(cfs_rq); in throttle_cfs_rq()
3558 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in throttle_cfs_rq()
3563 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; in throttle_cfs_rq()
3567 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); in throttle_cfs_rq()
3570 task_delta = cfs_rq->h_nr_running; in throttle_cfs_rq()
3572 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in throttle_cfs_rq()
3588 cfs_rq->throttled = 1; in throttle_cfs_rq()
3589 cfs_rq->throttled_clock = rq_clock(rq); in throttle_cfs_rq()
3597 list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); in throttle_cfs_rq()
3609 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) in unthrottle_cfs_rq() argument
3611 struct rq *rq = rq_of(cfs_rq); in unthrottle_cfs_rq()
3612 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in unthrottle_cfs_rq()
3617 se = cfs_rq->tg->se[cpu_of(rq)]; in unthrottle_cfs_rq()
3619 cfs_rq->throttled = 0; in unthrottle_cfs_rq()
3624 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock; in unthrottle_cfs_rq()
3625 list_del_rcu(&cfs_rq->throttled_list); in unthrottle_cfs_rq()
3629 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); in unthrottle_cfs_rq()
3631 if (!cfs_rq->load.weight) in unthrottle_cfs_rq()
3634 task_delta = cfs_rq->h_nr_running; in unthrottle_cfs_rq()
3639 cfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
3641 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP); in unthrottle_cfs_rq()
3642 cfs_rq->h_nr_running += task_delta; in unthrottle_cfs_rq()
3644 if (cfs_rq_throttled(cfs_rq)) in unthrottle_cfs_rq()
3659 struct cfs_rq *cfs_rq; in distribute_cfs_runtime() local
3664 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq, in distribute_cfs_runtime()
3666 struct rq *rq = rq_of(cfs_rq); in distribute_cfs_runtime()
3669 if (!cfs_rq_throttled(cfs_rq)) in distribute_cfs_runtime()
3672 runtime = -cfs_rq->runtime_remaining + 1; in distribute_cfs_runtime()
3677 cfs_rq->runtime_remaining += runtime; in distribute_cfs_runtime()
3678 cfs_rq->runtime_expires = expires; in distribute_cfs_runtime()
3681 if (cfs_rq->runtime_remaining > 0) in distribute_cfs_runtime()
3682 unthrottle_cfs_rq(cfs_rq); in distribute_cfs_runtime()
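
distribute_cfs_runtime() runs when the bandwidth period timer refills the group's pool: every throttled cfs_rq is handed just enough runtime to lift runtime_remaining above zero and is unthrottled, and whatever the pool cannot cover stays throttled until the next refill. A small model of that distribution, with made-up deficit values:

#include <stdint.h>
#include <stdio.h>

typedef int64_t s64;

/*
 * Hand each throttled runqueue just enough to push its deficit one
 * nanosecond above zero; stop once the pool is exhausted.
 */
static s64 distribute(s64 pool, s64 *deficits, int n)
{
        int i;

        for (i = 0; i < n && pool > 0; i++) {
                s64 need = -deficits[i] + 1;     /* runtime_remaining is <= 0 */

                if (need > pool)
                        need = pool;
                pool -= need;
                deficits[i] += need;
                printf("cfs_rq %d: runtime_remaining=%lld%s\n", i,
                       (long long)deficits[i],
                       deficits[i] > 0 ? " (unthrottled)" : "");
        }
        return pool;
}

int main(void)
{
        s64 deficits[] = { -3000000, -1500000, -4000000 };  /* ns over budget */

        s64 left = distribute(6000000, deficits, 3);
        printf("runtime left in pool: %lld\n", (long long)left);
        return 0;
}
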
3812 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) in __return_cfs_rq_runtime() argument
3814 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in __return_cfs_rq_runtime()
3815 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime; in __return_cfs_rq_runtime()
3822 cfs_rq->runtime_expires == cfs_b->runtime_expires) { in __return_cfs_rq_runtime()
3833 cfs_rq->runtime_remaining -= slack_runtime; in __return_cfs_rq_runtime()
3836 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) in return_cfs_rq_runtime() argument
3841 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running) in return_cfs_rq_runtime()
3844 __return_cfs_rq_runtime(cfs_rq); in return_cfs_rq_runtime()
3885 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) in check_enqueue_throttle() argument
3891 if (!cfs_rq->runtime_enabled || cfs_rq->curr) in check_enqueue_throttle()
3895 if (cfs_rq_throttled(cfs_rq)) in check_enqueue_throttle()
3899 account_cfs_rq_runtime(cfs_rq, 0); in check_enqueue_throttle()
3900 if (cfs_rq->runtime_remaining <= 0) in check_enqueue_throttle()
3901 throttle_cfs_rq(cfs_rq); in check_enqueue_throttle()
3905 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) in check_cfs_rq_runtime() argument
3910 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0)) in check_cfs_rq_runtime()
3917 if (cfs_rq_throttled(cfs_rq)) in check_cfs_rq_runtime()
3920 throttle_cfs_rq(cfs_rq); in check_cfs_rq_runtime()
3970 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) in init_cfs_rq_runtime() argument
3972 cfs_rq->runtime_enabled = 0; in init_cfs_rq_runtime()
3973 INIT_LIST_HEAD(&cfs_rq->throttled_list); in init_cfs_rq_runtime()
3999 struct cfs_rq *cfs_rq; in update_runtime_enabled() local
4001 for_each_leaf_cfs_rq(rq, cfs_rq) { in update_runtime_enabled()
4002 struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth; in update_runtime_enabled()
4005 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF; in update_runtime_enabled()
4012 struct cfs_rq *cfs_rq; in unthrottle_offline_cfs_rqs() local
4014 for_each_leaf_cfs_rq(rq, cfs_rq) { in unthrottle_offline_cfs_rqs()
4015 if (!cfs_rq->runtime_enabled) in unthrottle_offline_cfs_rqs()
4022 cfs_rq->runtime_remaining = 1; in unthrottle_offline_cfs_rqs()
4027 cfs_rq->runtime_enabled = 0; in unthrottle_offline_cfs_rqs()
4029 if (cfs_rq_throttled(cfs_rq)) in unthrottle_offline_cfs_rqs()
4030 unthrottle_cfs_rq(cfs_rq); in unthrottle_offline_cfs_rqs()
4035 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq) in cfs_rq_clock_task() argument
4037 return rq_clock_task(rq_of(cfs_rq)); in cfs_rq_clock_task()
4040 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {} in account_cfs_rq_runtime() argument
4041 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; } in check_cfs_rq_runtime() argument
4042 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} in check_enqueue_throttle() argument
4043 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} in return_cfs_rq_runtime() argument
4045 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) in cfs_rq_throttled() argument
4050 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) in throttled_hierarchy() argument
4064 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} in init_cfs_rq_runtime() argument
4085 struct cfs_rq *cfs_rq = cfs_rq_of(se); in hrtick_start_fair() local
4089 if (cfs_rq->nr_running > 1) { in hrtick_start_fair()
4090 u64 slice = sched_slice(cfs_rq, se); in hrtick_start_fair()
4137 struct cfs_rq *cfs_rq; in enqueue_task_fair() local
4143 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
4144 enqueue_entity(cfs_rq, se, flags); in enqueue_task_fair()
4152 if (cfs_rq_throttled(cfs_rq)) in enqueue_task_fair()
4154 cfs_rq->h_nr_running++; in enqueue_task_fair()
4160 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
4161 cfs_rq->h_nr_running++; in enqueue_task_fair()
4163 if (cfs_rq_throttled(cfs_rq)) in enqueue_task_fair()
4167 update_cfs_shares(cfs_rq); in enqueue_task_fair()
4185 struct cfs_rq *cfs_rq; in dequeue_task_fair() local
4190 cfs_rq = cfs_rq_of(se); in dequeue_task_fair()
4191 dequeue_entity(cfs_rq, se, flags); in dequeue_task_fair()
4199 if (cfs_rq_throttled(cfs_rq)) in dequeue_task_fair()
4201 cfs_rq->h_nr_running--; in dequeue_task_fair()
4204 if (cfs_rq->load.weight) { in dequeue_task_fair()
4220 cfs_rq = cfs_rq_of(se); in dequeue_task_fair()
4221 cfs_rq->h_nr_running--; in dequeue_task_fair()
4223 if (cfs_rq_throttled(cfs_rq)) in dequeue_task_fair()
4227 update_cfs_shares(cfs_rq); in dequeue_task_fair()
4502 struct cfs_rq *cfs_rq = cfs_rq_of(se); in task_waking_fair() local
4509 min_vruntime_copy = cfs_rq->min_vruntime_copy; in task_waking_fair()
4511 min_vruntime = cfs_rq->min_vruntime; in task_waking_fair()
4514 min_vruntime = cfs_rq->min_vruntime; in task_waking_fair()
5118 struct cfs_rq *cfs_rq = task_cfs_rq(curr); in check_preempt_wakeup() local
5119 int scale = cfs_rq->nr_running >= sched_nr_latency; in check_preempt_wakeup()
5200 struct cfs_rq *cfs_rq = &rq->cfs; in pick_next_task_fair() local
5207 if (!cfs_rq->nr_running) in pick_next_task_fair()
5222 struct sched_entity *curr = cfs_rq->curr; in pick_next_task_fair()
5232 update_curr(cfs_rq); in pick_next_task_fair()
5242 if (unlikely(check_cfs_rq_runtime(cfs_rq))) in pick_next_task_fair()
5246 se = pick_next_entity(cfs_rq, curr); in pick_next_task_fair()
5247 cfs_rq = group_cfs_rq(se); in pick_next_task_fair()
5248 } while (cfs_rq); in pick_next_task_fair()
5260 while (!(cfs_rq = is_same_group(se, pse))) { in pick_next_task_fair()
5274 put_prev_entity(cfs_rq, pse); in pick_next_task_fair()
5275 set_next_entity(cfs_rq, se); in pick_next_task_fair()
5283 cfs_rq = &rq->cfs; in pick_next_task_fair()
5286 if (!cfs_rq->nr_running) in pick_next_task_fair()
5292 se = pick_next_entity(cfs_rq, NULL); in pick_next_task_fair()
5293 set_next_entity(cfs_rq, se); in pick_next_task_fair()
5294 cfs_rq = group_cfs_rq(se); in pick_next_task_fair()
5295 } while (cfs_rq); in pick_next_task_fair()
5334 struct cfs_rq *cfs_rq; in put_prev_task_fair() local
5337 cfs_rq = cfs_rq_of(se); in put_prev_task_fair()
5338 put_prev_entity(cfs_rq, se); in put_prev_task_fair()
5350 struct cfs_rq *cfs_rq = task_cfs_rq(curr); in yield_task_fair() local
5359 clear_buddies(cfs_rq, se); in yield_task_fair()
5366 update_curr(cfs_rq); in yield_task_fair()
5899 struct cfs_rq *cfs_rq; in update_blocked_averages() local
5909 for_each_leaf_cfs_rq(rq, cfs_rq) { in update_blocked_averages()
5911 if (throttled_hierarchy(cfs_rq)) in update_blocked_averages()
5914 if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq)) in update_blocked_averages()
5915 update_tg_load_avg(cfs_rq, 0); in update_blocked_averages()
5925 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq) in update_cfs_rq_h_load() argument
5927 struct rq *rq = rq_of(cfs_rq); in update_cfs_rq_h_load()
5928 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; in update_cfs_rq_h_load()
5932 if (cfs_rq->last_h_load_update == now) in update_cfs_rq_h_load()
5935 cfs_rq->h_load_next = NULL; in update_cfs_rq_h_load()
5937 cfs_rq = cfs_rq_of(se); in update_cfs_rq_h_load()
5938 cfs_rq->h_load_next = se; in update_cfs_rq_h_load()
5939 if (cfs_rq->last_h_load_update == now) in update_cfs_rq_h_load()
5944 cfs_rq->h_load = cfs_rq_load_avg(cfs_rq); in update_cfs_rq_h_load()
5945 cfs_rq->last_h_load_update = now; in update_cfs_rq_h_load()
5948 while ((se = cfs_rq->h_load_next) != NULL) { in update_cfs_rq_h_load()
5949 load = cfs_rq->h_load; in update_cfs_rq_h_load()
5951 cfs_rq_load_avg(cfs_rq) + 1); in update_cfs_rq_h_load()
5952 cfs_rq = group_cfs_rq(se); in update_cfs_rq_h_load()
5953 cfs_rq->h_load = load; in update_cfs_rq_h_load()
5954 cfs_rq->last_h_load_update = now; in update_cfs_rq_h_load()
5960 struct cfs_rq *cfs_rq = task_cfs_rq(p); in task_h_load() local
5962 update_cfs_rq_h_load(cfs_rq); in task_h_load()
5963 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, in task_h_load()
5964 cfs_rq_load_avg(cfs_rq) + 1); in task_h_load()
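
task_h_load() translates a task's load_avg on its own, possibly nested, cfs_rq into its effective weight at the root: update_cfs_rq_h_load() propagates h_load down the hierarchy, scaling by each group entity's share of its parent, and the task then takes its share of the bottom-level h_load. A sketch of one level of that arithmetic with made-up numbers (the + 1 mirrors the kernel's divide-by-zero guard):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/*
 * The child cfs_rq inherits the parent's h_load scaled by the fraction
 * of the parent's load that the group's sched_entity represents.
 */
static u64 child_h_load(u64 parent_h_load, u64 group_se_load_avg,
                        u64 parent_cfs_rq_load_avg)
{
        return parent_h_load * group_se_load_avg / (parent_cfs_rq_load_avg + 1);
}

/* A task's hierarchical load is its share of its own cfs_rq's h_load. */
static u64 task_h_load(u64 cfs_rq_h_load, u64 task_load_avg, u64 cfs_rq_load_avg)
{
        return task_load_avg * cfs_rq_h_load / (cfs_rq_load_avg + 1);
}

int main(void)
{
        /* Root cfs_rq load 2048, of which group A's entity contributes 1024. */
        u64 root_h = 2048;
        u64 a_h = child_h_load(root_h, 1024, 2048);

        printf("group A h_load = %llu\n", (unsigned long long)a_h);
        /* A task with load_avg 512 on A's cfs_rq, whose total load is 1024. */
        printf("task h_load    = %llu\n",
               (unsigned long long)task_h_load(a_h, 512, 1024));
        return 0;
}
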
5970 struct cfs_rq *cfs_rq = &rq->cfs; in update_blocked_averages() local
5975 update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq); in update_blocked_averages()
7877 struct cfs_rq *cfs_rq; in task_tick_fair() local
7881 cfs_rq = cfs_rq_of(se); in task_tick_fair()
7882 entity_tick(cfs_rq, se, queued); in task_tick_fair()
7896 struct cfs_rq *cfs_rq; in task_fork_fair() local
7906 cfs_rq = task_cfs_rq(current); in task_fork_fair()
7907 curr = cfs_rq->curr; in task_fork_fair()
7919 update_curr(cfs_rq); in task_fork_fair()
7923 place_entity(cfs_rq, se, 1); in task_fork_fair()
7934 se->vruntime -= cfs_rq->min_vruntime; in task_fork_fair()
7991 struct cfs_rq *cfs_rq = cfs_rq_of(se); in detach_task_cfs_rq() local
7998 place_entity(cfs_rq, se, 0); in detach_task_cfs_rq()
7999 se->vruntime -= cfs_rq->min_vruntime; in detach_task_cfs_rq()
8003 detach_entity_load_avg(cfs_rq, se); in detach_task_cfs_rq()
8009 struct cfs_rq *cfs_rq = cfs_rq_of(se); in attach_task_cfs_rq() local
8020 attach_entity_load_avg(cfs_rq, se); in attach_task_cfs_rq()
8023 se->vruntime += cfs_rq->min_vruntime; in attach_task_cfs_rq()
8058 struct cfs_rq *cfs_rq = cfs_rq_of(se); in set_curr_task_fair() local
8060 set_next_entity(cfs_rq, se); in set_curr_task_fair()
8062 account_cfs_rq_runtime(cfs_rq, 0); in set_curr_task_fair()
8066 void init_cfs_rq(struct cfs_rq *cfs_rq) in init_cfs_rq() argument
8068 cfs_rq->tasks_timeline = RB_ROOT; in init_cfs_rq()
8069 cfs_rq->min_vruntime = (u64)(-(1LL << 20)); in init_cfs_rq()
8071 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; in init_cfs_rq()
8074 atomic_long_set(&cfs_rq->removed_load_avg, 0); in init_cfs_rq()
8075 atomic_long_set(&cfs_rq->removed_util_avg, 0); in init_cfs_rq()
8099 if (tg->cfs_rq) in free_fair_sched_group()
8100 kfree(tg->cfs_rq[i]); in free_fair_sched_group()
8108 kfree(tg->cfs_rq); in free_fair_sched_group()
8114 struct cfs_rq *cfs_rq; in alloc_fair_sched_group() local
8118 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL); in alloc_fair_sched_group()
8119 if (!tg->cfs_rq) in alloc_fair_sched_group()
8130 cfs_rq = kzalloc_node(sizeof(struct cfs_rq), in alloc_fair_sched_group()
8132 if (!cfs_rq) in alloc_fair_sched_group()
8140 init_cfs_rq(cfs_rq); in alloc_fair_sched_group()
8141 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); in alloc_fair_sched_group()
8148 kfree(cfs_rq); in alloc_fair_sched_group()
8162 if (!tg->cfs_rq[cpu]->on_list) in unregister_fair_sched_group()
8166 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]); in unregister_fair_sched_group()
8170 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, in init_tg_cfs_entry() argument
8176 cfs_rq->tg = tg; in init_tg_cfs_entry()
8177 cfs_rq->rq = rq; in init_tg_cfs_entry()
8178 init_cfs_rq_runtime(cfs_rq); in init_tg_cfs_entry()
8180 tg->cfs_rq[cpu] = cfs_rq; in init_tg_cfs_entry()
8188 se->cfs_rq = &rq->cfs; in init_tg_cfs_entry()
8191 se->cfs_rq = parent->my_q; in init_tg_cfs_entry()
8195 se->my_q = cfs_rq; in init_tg_cfs_entry()
8316 struct cfs_rq *cfs_rq; in print_cfs_stats() local
8319 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq) in print_cfs_stats()
8320 print_cfs_rq(m, cpu, cfs_rq); in print_cfs_stats()