Searched refs:cfs_rq (Results 1 - 5 of 5) sorted by relevance

/linux-4.1.27/kernel/sched/
fair.c
105 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
106 * each time a cfs_rq requests quota.
248 /* cpu runqueue to which this cfs_rq is attached */ rq_of()
249 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) rq_of() argument
251 return cfs_rq->rq; rq_of()
269 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) task_cfs_rq()
271 return p->se.cfs_rq; task_cfs_rq()
275 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) cfs_rq_of()
277 return se->cfs_rq; cfs_rq_of()
281 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) group_cfs_rq()
286 static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
289 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) list_add_leaf_cfs_rq() argument
291 if (!cfs_rq->on_list) { list_add_leaf_cfs_rq()
298 if (cfs_rq->tg->parent && list_add_leaf_cfs_rq()
299 cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) { list_add_leaf_cfs_rq()
300 list_add_rcu(&cfs_rq->leaf_cfs_rq_list, list_add_leaf_cfs_rq()
301 &rq_of(cfs_rq)->leaf_cfs_rq_list); list_add_leaf_cfs_rq()
303 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, list_add_leaf_cfs_rq()
304 &rq_of(cfs_rq)->leaf_cfs_rq_list); list_add_leaf_cfs_rq()
307 cfs_rq->on_list = 1; list_add_leaf_cfs_rq()
309 update_cfs_rq_blocked_load(cfs_rq, 0); list_add_leaf_cfs_rq()
313 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) list_del_leaf_cfs_rq() argument
315 if (cfs_rq->on_list) { list_del_leaf_cfs_rq()
316 list_del_rcu(&cfs_rq->leaf_cfs_rq_list); list_del_leaf_cfs_rq()
317 cfs_rq->on_list = 0; list_del_leaf_cfs_rq()
321 /* Iterate thr' all leaf cfs_rq's on a runqueue */
322 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
323 list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
326 static inline struct cfs_rq * is_same_group()
329 if (se->cfs_rq == pse->cfs_rq) is_same_group()
330 return se->cfs_rq; is_same_group()
347 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of find_matching_se()
379 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) rq_of() argument
381 return container_of(cfs_rq, struct rq, cfs); rq_of()
389 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) task_cfs_rq()
394 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) cfs_rq_of()
403 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) group_cfs_rq()
408 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) list_add_leaf_cfs_rq() argument
412 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) list_del_leaf_cfs_rq() argument
416 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
417 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
432 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
462 static void update_min_vruntime(struct cfs_rq *cfs_rq) update_min_vruntime() argument
464 u64 vruntime = cfs_rq->min_vruntime; update_min_vruntime()
466 if (cfs_rq->curr) update_min_vruntime()
467 vruntime = cfs_rq->curr->vruntime; update_min_vruntime()
469 if (cfs_rq->rb_leftmost) { update_min_vruntime()
470 struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost, update_min_vruntime()
474 if (!cfs_rq->curr) update_min_vruntime()
481 cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime); update_min_vruntime()
484 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; update_min_vruntime()
491 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) __enqueue_entity() argument
493 struct rb_node **link = &cfs_rq->tasks_timeline.rb_node; __enqueue_entity()
521 cfs_rq->rb_leftmost = &se->run_node; __enqueue_entity()
524 rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline); __enqueue_entity()
527 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) __dequeue_entity() argument
529 if (cfs_rq->rb_leftmost == &se->run_node) { __dequeue_entity()
533 cfs_rq->rb_leftmost = next_node; __dequeue_entity()
536 rb_erase(&se->run_node, &cfs_rq->tasks_timeline); __dequeue_entity()
539 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq) __pick_first_entity() argument
541 struct rb_node *left = cfs_rq->rb_leftmost; __pick_first_entity()
560 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) __pick_last_entity() argument
562 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline); __pick_last_entity()
636 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) sched_slice() argument
638 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq); sched_slice()
644 cfs_rq = cfs_rq_of(se); for_each_sched_entity()
645 load = &cfs_rq->load; for_each_sched_entity()
648 lw = cfs_rq->load; for_each_sched_entity()
663 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se) sched_vslice() argument
665 return calc_delta_fair(sched_slice(cfs_rq, se), se); sched_vslice()
695 static void update_curr(struct cfs_rq *cfs_rq) update_curr() argument
697 struct sched_entity *curr = cfs_rq->curr; update_curr()
698 u64 now = rq_clock_task(rq_of(cfs_rq)); update_curr()
714 schedstat_add(cfs_rq, exec_clock, delta_exec); update_curr()
717 update_min_vruntime(cfs_rq); update_curr()
727 account_cfs_rq_runtime(cfs_rq, delta_exec); update_curr()
736 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_wait_start() argument
738 schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq))); update_stats_wait_start()
744 static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_enqueue() argument
750 if (se != cfs_rq->curr) update_stats_enqueue()
751 update_stats_wait_start(cfs_rq, se); update_stats_enqueue()
755 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_wait_end() argument
758 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start)); update_stats_wait_end()
761 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start); update_stats_wait_end()
765 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start); update_stats_wait_end()
772 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_dequeue() argument
778 if (se != cfs_rq->curr) update_stats_dequeue()
779 update_stats_wait_end(cfs_rq, se); update_stats_dequeue()
786 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_curr_start() argument
791 se->exec_start = rq_clock_task(rq_of(cfs_rq)); update_stats_curr_start()
2290 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) account_entity_enqueue() argument
2292 update_load_add(&cfs_rq->load, se->load.weight); account_entity_enqueue()
2294 update_load_add(&rq_of(cfs_rq)->load, se->load.weight); account_entity_enqueue()
2297 struct rq *rq = rq_of(cfs_rq); account_entity_enqueue()
2303 cfs_rq->nr_running++; account_entity_enqueue()
2307 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) account_entity_dequeue() argument
2309 update_load_sub(&cfs_rq->load, se->load.weight); account_entity_dequeue()
2311 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight); account_entity_dequeue()
2313 account_numa_dequeue(rq_of(cfs_rq), task_of(se)); account_entity_dequeue()
2316 cfs_rq->nr_running--; account_entity_dequeue()
2321 static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq) calc_tg_weight() argument
2331 tg_weight -= cfs_rq->tg_load_contrib; calc_tg_weight()
2332 tg_weight += cfs_rq->load.weight; calc_tg_weight()
2337 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg) calc_cfs_shares() argument
2341 tg_weight = calc_tg_weight(tg, cfs_rq); calc_cfs_shares()
2342 load = cfs_rq->load.weight; calc_cfs_shares()
2356 static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg) calc_cfs_shares() argument
2361 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, reweight_entity() argument
2366 if (cfs_rq->curr == se) reweight_entity()
2367 update_curr(cfs_rq); reweight_entity()
2368 account_entity_dequeue(cfs_rq, se); reweight_entity()
2374 account_entity_enqueue(cfs_rq, se); reweight_entity()
2377 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
2379 static void update_cfs_shares(struct cfs_rq *cfs_rq) update_cfs_shares() argument
2385 tg = cfs_rq->tg; update_cfs_shares()
2386 se = tg->se[cpu_of(rq_of(cfs_rq))]; update_cfs_shares()
2387 if (!se || throttled_hierarchy(cfs_rq)) update_cfs_shares()
2393 shares = calc_cfs_shares(cfs_rq, tg); update_cfs_shares()
2398 static inline void update_cfs_shares(struct cfs_rq *cfs_rq) update_cfs_shares() argument
2603 /* Synchronize an entity's decay with its parenting cfs_rq.*/ __synchronize_entity_decay()
2606 struct cfs_rq *cfs_rq = cfs_rq_of(se); __synchronize_entity_decay() local
2607 u64 decays = atomic64_read(&cfs_rq->decay_counter); __synchronize_entity_decay()
2622 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq, __update_cfs_rq_tg_load_contrib() argument
2625 struct task_group *tg = cfs_rq->tg; __update_cfs_rq_tg_load_contrib()
2628 tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg; __update_cfs_rq_tg_load_contrib()
2629 tg_contrib -= cfs_rq->tg_load_contrib; __update_cfs_rq_tg_load_contrib()
2634 if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) { __update_cfs_rq_tg_load_contrib()
2636 cfs_rq->tg_load_contrib += tg_contrib; __update_cfs_rq_tg_load_contrib()
2641 * Aggregate cfs_rq runnable averages into an equivalent task_group
2645 struct cfs_rq *cfs_rq) __update_tg_runnable_avg()
2647 struct task_group *tg = cfs_rq->tg; __update_tg_runnable_avg()
2650 /* The fraction of a cpu used by this cfs_rq */ __update_tg_runnable_avg()
2653 contrib -= cfs_rq->tg_runnable_contrib; __update_tg_runnable_avg()
2655 if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) { __update_tg_runnable_avg()
2657 cfs_rq->tg_runnable_contrib += contrib; __update_tg_runnable_avg()
2663 struct cfs_rq *cfs_rq = group_cfs_rq(se); __update_group_entity_contrib() local
2664 struct task_group *tg = cfs_rq->tg; __update_group_entity_contrib()
2669 contrib = cfs_rq->tg_load_contrib * tg->shares; __update_group_entity_contrib()
2710 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq, __update_cfs_rq_tg_load_contrib() argument
2713 struct cfs_rq *cfs_rq) {} __update_group_entity_contrib()
2767 static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq, subtract_blocked_load_contrib() argument
2770 if (likely(load_contrib < cfs_rq->blocked_load_avg)) subtract_blocked_load_contrib()
2771 cfs_rq->blocked_load_avg -= load_contrib; subtract_blocked_load_contrib()
2773 cfs_rq->blocked_load_avg = 0; subtract_blocked_load_contrib()
2776 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
2782 struct cfs_rq *cfs_rq = cfs_rq_of(se); update_entity_load_avg() local
2784 int cpu = cpu_of(rq_of(cfs_rq)); update_entity_load_avg()
2792 now = cfs_rq_clock_task(cfs_rq); update_entity_load_avg()
2797 cfs_rq->curr == se)) update_entity_load_avg()
2807 cfs_rq->runnable_load_avg += contrib_delta; update_entity_load_avg()
2808 cfs_rq->utilization_load_avg += utilization_delta; update_entity_load_avg()
2810 subtract_blocked_load_contrib(cfs_rq, -contrib_delta); update_entity_load_avg()
2818 static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update) update_cfs_rq_blocked_load() argument
2820 u64 now = cfs_rq_clock_task(cfs_rq) >> 20; update_cfs_rq_blocked_load()
2823 decays = now - cfs_rq->last_decay; update_cfs_rq_blocked_load()
2827 if (atomic_long_read(&cfs_rq->removed_load)) { update_cfs_rq_blocked_load()
2829 removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0); update_cfs_rq_blocked_load()
2830 subtract_blocked_load_contrib(cfs_rq, removed_load); update_cfs_rq_blocked_load()
2834 cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg, update_cfs_rq_blocked_load()
2836 atomic64_add(decays, &cfs_rq->decay_counter); update_cfs_rq_blocked_load()
2837 cfs_rq->last_decay = now; update_cfs_rq_blocked_load()
2840 __update_cfs_rq_tg_load_contrib(cfs_rq, force_update); update_cfs_rq_blocked_load()
2843 /* Add the load generated by se into cfs_rq's child load-average */ enqueue_entity_load_avg()
2844 static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq, enqueue_entity_load_avg() argument
2858 se->avg.last_runnable_update = rq_clock_task(rq_of(cfs_rq)); enqueue_entity_load_avg()
2881 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib); enqueue_entity_load_avg()
2885 cfs_rq->runnable_load_avg += se->avg.load_avg_contrib; enqueue_entity_load_avg()
2886 cfs_rq->utilization_load_avg += se->avg.utilization_avg_contrib; enqueue_entity_load_avg()
2888 update_cfs_rq_blocked_load(cfs_rq, !wakeup); enqueue_entity_load_avg()
2892 * Remove se's load from this cfs_rq child load-average, if the entity is
2896 static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq, dequeue_entity_load_avg() argument
2902 update_cfs_rq_blocked_load(cfs_rq, !sleep); dequeue_entity_load_avg()
2904 cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib; dequeue_entity_load_avg()
2905 cfs_rq->utilization_load_avg -= se->avg.utilization_avg_contrib; dequeue_entity_load_avg()
2907 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib; dequeue_entity_load_avg()
2908 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter); dequeue_entity_load_avg()
2939 static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq, enqueue_entity_load_avg() argument
2942 static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq, dequeue_entity_load_avg() argument
2945 static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, update_cfs_rq_blocked_load() argument
2955 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) enqueue_sleeper() argument
2964 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start; enqueue_sleeper()
2981 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start; enqueue_sleeper()
3017 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) check_spread() argument
3020 s64 d = se->vruntime - cfs_rq->min_vruntime; check_spread()
3026 schedstat_inc(cfs_rq, nr_spread_over); check_spread()
3031 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) place_entity() argument
3033 u64 vruntime = cfs_rq->min_vruntime; place_entity()
3042 vruntime += sched_vslice(cfs_rq, se); place_entity()
3062 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
3065 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) enqueue_entity() argument
3072 se->vruntime += cfs_rq->min_vruntime; enqueue_entity()
3077 update_curr(cfs_rq); enqueue_entity()
3078 enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP); enqueue_entity()
3079 account_entity_enqueue(cfs_rq, se); enqueue_entity()
3080 update_cfs_shares(cfs_rq); enqueue_entity()
3083 place_entity(cfs_rq, se, 0); enqueue_entity()
3084 enqueue_sleeper(cfs_rq, se); enqueue_entity()
3087 update_stats_enqueue(cfs_rq, se); enqueue_entity()
3088 check_spread(cfs_rq, se); enqueue_entity()
3089 if (se != cfs_rq->curr) enqueue_entity()
3090 __enqueue_entity(cfs_rq, se); enqueue_entity()
3093 if (cfs_rq->nr_running == 1) { enqueue_entity()
3094 list_add_leaf_cfs_rq(cfs_rq); enqueue_entity()
3095 check_enqueue_throttle(cfs_rq); enqueue_entity()
3102 struct cfs_rq *cfs_rq = cfs_rq_of(se); for_each_sched_entity() local
3103 if (cfs_rq->last != se) for_each_sched_entity()
3106 cfs_rq->last = NULL; for_each_sched_entity()
3113 struct cfs_rq *cfs_rq = cfs_rq_of(se); for_each_sched_entity() local
3114 if (cfs_rq->next != se) for_each_sched_entity()
3117 cfs_rq->next = NULL; for_each_sched_entity()
3124 struct cfs_rq *cfs_rq = cfs_rq_of(se); for_each_sched_entity() local
3125 if (cfs_rq->skip != se) for_each_sched_entity()
3128 cfs_rq->skip = NULL; for_each_sched_entity()
3132 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) clear_buddies() argument
3134 if (cfs_rq->last == se) clear_buddies()
3137 if (cfs_rq->next == se) clear_buddies()
3140 if (cfs_rq->skip == se) clear_buddies()
3144 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
3147 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) dequeue_entity() argument
3152 update_curr(cfs_rq); dequeue_entity()
3153 dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP); dequeue_entity()
3155 update_stats_dequeue(cfs_rq, se); dequeue_entity()
3162 se->statistics.sleep_start = rq_clock(rq_of(cfs_rq)); dequeue_entity()
3164 se->statistics.block_start = rq_clock(rq_of(cfs_rq)); dequeue_entity()
3169 clear_buddies(cfs_rq, se); dequeue_entity()
3171 if (se != cfs_rq->curr) dequeue_entity()
3172 __dequeue_entity(cfs_rq, se); dequeue_entity()
3174 account_entity_dequeue(cfs_rq, se); dequeue_entity()
3182 se->vruntime -= cfs_rq->min_vruntime; dequeue_entity()
3185 return_cfs_rq_runtime(cfs_rq); dequeue_entity()
3187 update_min_vruntime(cfs_rq); dequeue_entity()
3188 update_cfs_shares(cfs_rq); dequeue_entity()
3195 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) check_preempt_tick() argument
3201 ideal_runtime = sched_slice(cfs_rq, curr); check_preempt_tick()
3204 resched_curr(rq_of(cfs_rq)); check_preempt_tick()
3209 clear_buddies(cfs_rq, curr); check_preempt_tick()
3221 se = __pick_first_entity(cfs_rq); check_preempt_tick()
3228 resched_curr(rq_of(cfs_rq)); check_preempt_tick()
3232 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) set_next_entity() argument
3241 update_stats_wait_end(cfs_rq, se); set_next_entity()
3242 __dequeue_entity(cfs_rq, se); set_next_entity()
3246 update_stats_curr_start(cfs_rq, se); set_next_entity()
3247 cfs_rq->curr = se; set_next_entity()
3254 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) { set_next_entity()
3273 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) pick_next_entity() argument
3275 struct sched_entity *left = __pick_first_entity(cfs_rq); pick_next_entity()
3291 if (cfs_rq->skip == se) { pick_next_entity()
3295 second = __pick_first_entity(cfs_rq); pick_next_entity()
3309 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) pick_next_entity()
3310 se = cfs_rq->last; pick_next_entity()
3315 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) pick_next_entity()
3316 se = cfs_rq->next; pick_next_entity()
3318 clear_buddies(cfs_rq, se); pick_next_entity()
3323 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
3325 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) put_prev_entity() argument
3332 update_curr(cfs_rq); put_prev_entity()
3335 check_cfs_rq_runtime(cfs_rq); put_prev_entity()
3337 check_spread(cfs_rq, prev); put_prev_entity()
3339 update_stats_wait_start(cfs_rq, prev); put_prev_entity()
3341 __enqueue_entity(cfs_rq, prev); put_prev_entity()
3345 cfs_rq->curr = NULL; put_prev_entity()
3349 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) entity_tick() argument
3354 update_curr(cfs_rq); entity_tick()
3360 update_cfs_rq_blocked_load(cfs_rq, 1); entity_tick()
3361 update_cfs_shares(cfs_rq); entity_tick()
3369 resched_curr(rq_of(cfs_rq)); entity_tick()
3376 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer)) entity_tick()
3380 if (cfs_rq->nr_running > 1) entity_tick()
3381 check_preempt_tick(cfs_rq, curr); entity_tick()
3456 /* rq->task_clock normalized against any time this cfs_rq has spent throttled */ cfs_rq_clock_task()
3457 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq) cfs_rq_clock_task() argument
3459 if (unlikely(cfs_rq->throttle_count)) cfs_rq_clock_task()
3460 return cfs_rq->throttled_clock_task; cfs_rq_clock_task()
3462 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time; cfs_rq_clock_task()
3466 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) assign_cfs_rq_runtime() argument
3468 struct task_group *tg = cfs_rq->tg; assign_cfs_rq_runtime()
3473 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining; assign_cfs_rq_runtime()
3499 cfs_rq->runtime_remaining += amount; assign_cfs_rq_runtime()
3505 if ((s64)(expires - cfs_rq->runtime_expires) > 0) assign_cfs_rq_runtime()
3506 cfs_rq->runtime_expires = expires; assign_cfs_rq_runtime()
3508 return cfs_rq->runtime_remaining > 0; assign_cfs_rq_runtime()
3515 static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq) expire_cfs_rq_runtime() argument
3517 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); expire_cfs_rq_runtime()
3520 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0)) expire_cfs_rq_runtime()
3523 if (cfs_rq->runtime_remaining < 0) expire_cfs_rq_runtime()
3537 if (cfs_rq->runtime_expires != cfs_b->runtime_expires) { expire_cfs_rq_runtime()
3539 cfs_rq->runtime_expires += TICK_NSEC; expire_cfs_rq_runtime()
3542 cfs_rq->runtime_remaining = 0; expire_cfs_rq_runtime()
3546 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) __account_cfs_rq_runtime() argument
3549 cfs_rq->runtime_remaining -= delta_exec; __account_cfs_rq_runtime()
3550 expire_cfs_rq_runtime(cfs_rq); __account_cfs_rq_runtime()
3552 if (likely(cfs_rq->runtime_remaining > 0)) __account_cfs_rq_runtime()
3559 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) __account_cfs_rq_runtime()
3560 resched_curr(rq_of(cfs_rq)); __account_cfs_rq_runtime()
3564 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) account_cfs_rq_runtime() argument
3566 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) account_cfs_rq_runtime()
3569 __account_cfs_rq_runtime(cfs_rq, delta_exec); account_cfs_rq_runtime()
3572 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) cfs_rq_throttled() argument
3574 return cfs_bandwidth_used() && cfs_rq->throttled; cfs_rq_throttled()
3577 /* check whether cfs_rq, or any parent, is throttled */ throttled_hierarchy()
3578 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) throttled_hierarchy() argument
3580 return cfs_bandwidth_used() && cfs_rq->throttle_count; throttled_hierarchy()
3591 struct cfs_rq *src_cfs_rq, *dest_cfs_rq; throttled_lb_pair()
3593 src_cfs_rq = tg->cfs_rq[src_cpu]; throttled_lb_pair()
3594 dest_cfs_rq = tg->cfs_rq[dest_cpu]; throttled_lb_pair()
3604 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; tg_unthrottle_up() local
3606 cfs_rq->throttle_count--; tg_unthrottle_up()
3608 if (!cfs_rq->throttle_count) { tg_unthrottle_up()
3610 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) - tg_unthrottle_up()
3611 cfs_rq->throttled_clock_task; tg_unthrottle_up()
3621 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; tg_throttle_down() local
3624 if (!cfs_rq->throttle_count) tg_throttle_down()
3625 cfs_rq->throttled_clock_task = rq_clock_task(rq); tg_throttle_down()
3626 cfs_rq->throttle_count++; tg_throttle_down()
3631 static void throttle_cfs_rq(struct cfs_rq *cfs_rq) throttle_cfs_rq() argument
3633 struct rq *rq = rq_of(cfs_rq); throttle_cfs_rq()
3634 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); throttle_cfs_rq()
3638 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; throttle_cfs_rq()
3642 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); throttle_cfs_rq()
3645 task_delta = cfs_rq->h_nr_running; for_each_sched_entity()
3647 struct cfs_rq *qcfs_rq = cfs_rq_of(se); for_each_sched_entity()
3663 cfs_rq->throttled = 1;
3664 cfs_rq->throttled_clock = rq_clock(rq);
3670 list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
3676 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) unthrottle_cfs_rq() argument
3678 struct rq *rq = rq_of(cfs_rq); unthrottle_cfs_rq()
3679 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); unthrottle_cfs_rq()
3684 se = cfs_rq->tg->se[cpu_of(rq)]; unthrottle_cfs_rq()
3686 cfs_rq->throttled = 0; unthrottle_cfs_rq()
3691 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock; unthrottle_cfs_rq()
3692 list_del_rcu(&cfs_rq->throttled_list); unthrottle_cfs_rq()
3696 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); unthrottle_cfs_rq()
3698 if (!cfs_rq->load.weight) unthrottle_cfs_rq()
3701 task_delta = cfs_rq->h_nr_running; for_each_sched_entity()
3706 cfs_rq = cfs_rq_of(se); for_each_sched_entity()
3708 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP); for_each_sched_entity()
3709 cfs_rq->h_nr_running += task_delta; for_each_sched_entity()
3711 if (cfs_rq_throttled(cfs_rq)) for_each_sched_entity()
3726 struct cfs_rq *cfs_rq; distribute_cfs_runtime() local
3731 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq, distribute_cfs_runtime()
3733 struct rq *rq = rq_of(cfs_rq); distribute_cfs_runtime()
3736 if (!cfs_rq_throttled(cfs_rq)) distribute_cfs_runtime()
3739 runtime = -cfs_rq->runtime_remaining + 1; distribute_cfs_runtime()
3744 cfs_rq->runtime_remaining += runtime; distribute_cfs_runtime()
3745 cfs_rq->runtime_expires = expires; distribute_cfs_runtime()
3748 if (cfs_rq->runtime_remaining > 0) distribute_cfs_runtime()
3749 unthrottle_cfs_rq(cfs_rq); distribute_cfs_runtime()
3842 /* a cfs_rq won't donate quota below this amount */
3886 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) __return_cfs_rq_runtime() argument
3888 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); __return_cfs_rq_runtime()
3889 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime; __return_cfs_rq_runtime()
3896 cfs_rq->runtime_expires == cfs_b->runtime_expires) { __return_cfs_rq_runtime()
3907 cfs_rq->runtime_remaining -= slack_runtime; __return_cfs_rq_runtime()
3910 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) return_cfs_rq_runtime() argument
3915 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running) return_cfs_rq_runtime()
3918 __return_cfs_rq_runtime(cfs_rq); return_cfs_rq_runtime()
3959 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) check_enqueue_throttle() argument
3965 if (!cfs_rq->runtime_enabled || cfs_rq->curr) check_enqueue_throttle()
3969 if (cfs_rq_throttled(cfs_rq)) check_enqueue_throttle()
3973 account_cfs_rq_runtime(cfs_rq, 0); check_enqueue_throttle()
3974 if (cfs_rq->runtime_remaining <= 0) check_enqueue_throttle()
3975 throttle_cfs_rq(cfs_rq); check_enqueue_throttle()
3978 /* conditionally throttle active cfs_rq's from put_prev_entity() */ check_cfs_rq_runtime()
3979 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) check_cfs_rq_runtime() argument
3984 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0)) check_cfs_rq_runtime()
3991 if (cfs_rq_throttled(cfs_rq)) check_cfs_rq_runtime()
3994 throttle_cfs_rq(cfs_rq); check_cfs_rq_runtime()
4044 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) init_cfs_rq_runtime() argument
4046 cfs_rq->runtime_enabled = 0; init_cfs_rq_runtime()
4047 INIT_LIST_HEAD(&cfs_rq->throttled_list); init_cfs_rq_runtime()
4086 struct cfs_rq *cfs_rq; update_runtime_enabled() local
4088 for_each_leaf_cfs_rq(rq, cfs_rq) { for_each_leaf_cfs_rq()
4089 struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth; for_each_leaf_cfs_rq()
4092 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF; for_each_leaf_cfs_rq()
4099 struct cfs_rq *cfs_rq; unthrottle_offline_cfs_rqs() local
4101 for_each_leaf_cfs_rq(rq, cfs_rq) { for_each_leaf_cfs_rq()
4102 if (!cfs_rq->runtime_enabled) for_each_leaf_cfs_rq()
4109 cfs_rq->runtime_remaining = 1; for_each_leaf_cfs_rq()
4114 cfs_rq->runtime_enabled = 0; for_each_leaf_cfs_rq()
4116 if (cfs_rq_throttled(cfs_rq)) for_each_leaf_cfs_rq()
4117 unthrottle_cfs_rq(cfs_rq); for_each_leaf_cfs_rq()
4122 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq) cfs_rq_clock_task() argument
4124 return rq_clock_task(rq_of(cfs_rq)); cfs_rq_clock_task()
4127 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {} check_cfs_rq_runtime() argument
4128 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; } check_enqueue_throttle() argument
4129 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} return_cfs_rq_runtime() argument
4130 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} return_cfs_rq_runtime() argument
4132 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) cfs_rq_throttled() argument
4137 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) throttled_hierarchy() argument
4151 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} init_cfs_rq_runtime() argument
4172 struct cfs_rq *cfs_rq = cfs_rq_of(se); hrtick_start_fair() local
4176 if (cfs_rq->nr_running > 1) { hrtick_start_fair()
4177 u64 slice = sched_slice(cfs_rq, se); hrtick_start_fair()
4224 struct cfs_rq *cfs_rq; enqueue_task_fair() local
4230 cfs_rq = cfs_rq_of(se); for_each_sched_entity()
4231 enqueue_entity(cfs_rq, se, flags); for_each_sched_entity()
4234 * end evaluation on encountering a throttled cfs_rq for_each_sched_entity()
4236 * note: in the case of encountering a throttled cfs_rq we will for_each_sched_entity()
4239 if (cfs_rq_throttled(cfs_rq)) for_each_sched_entity()
4241 cfs_rq->h_nr_running++; for_each_sched_entity()
4247 cfs_rq = cfs_rq_of(se); for_each_sched_entity()
4248 cfs_rq->h_nr_running++; for_each_sched_entity()
4250 if (cfs_rq_throttled(cfs_rq)) for_each_sched_entity()
4253 update_cfs_shares(cfs_rq); for_each_sched_entity()
4273 struct cfs_rq *cfs_rq; dequeue_task_fair() local
4278 cfs_rq = cfs_rq_of(se); for_each_sched_entity()
4279 dequeue_entity(cfs_rq, se, flags); for_each_sched_entity()
4282 * end evaluation on encountering a throttled cfs_rq for_each_sched_entity()
4284 * note: in the case of encountering a throttled cfs_rq we will for_each_sched_entity()
4287 if (cfs_rq_throttled(cfs_rq)) for_each_sched_entity()
4289 cfs_rq->h_nr_running--; for_each_sched_entity()
4292 if (cfs_rq->load.weight) { for_each_sched_entity()
4294 * Bias pick_next to pick a task from this cfs_rq, as for_each_sched_entity()
4308 cfs_rq = cfs_rq_of(se); for_each_sched_entity()
4309 cfs_rq->h_nr_running--; for_each_sched_entity()
4311 if (cfs_rq_throttled(cfs_rq)) for_each_sched_entity()
4314 update_cfs_shares(cfs_rq); for_each_sched_entity()
4408 struct cfs_rq *cfs_rq = cfs_rq_of(se); task_waking_fair() local
4415 min_vruntime_copy = cfs_rq->min_vruntime_copy; task_waking_fair()
4417 min_vruntime = cfs_rq->min_vruntime; task_waking_fair()
4420 min_vruntime = cfs_rq->min_vruntime; task_waking_fair()
4917 struct cfs_rq *cfs_rq = cfs_rq_of(se); migrate_task_rq_fair() local
4921 * when we next update owning cfs_rq under rq->lock. Tasks contribute migrate_task_rq_fair()
4928 &cfs_rq->removed_load); migrate_task_rq_fair()
5017 struct cfs_rq *cfs_rq = task_cfs_rq(curr); check_preempt_wakeup() local
5018 int scale = cfs_rq->nr_running >= sched_nr_latency; check_preempt_wakeup()
5099 struct cfs_rq *cfs_rq = &rq->cfs; pick_next_task_fair() local
5106 if (!cfs_rq->nr_running) pick_next_task_fair()
5121 struct sched_entity *curr = cfs_rq->curr; pick_next_task_fair()
5125 * have to consider cfs_rq->curr. If it is still a runnable pick_next_task_fair()
5131 update_curr(cfs_rq); pick_next_task_fair()
5141 if (unlikely(check_cfs_rq_runtime(cfs_rq))) pick_next_task_fair()
5145 se = pick_next_entity(cfs_rq, curr); pick_next_task_fair()
5146 cfs_rq = group_cfs_rq(se); pick_next_task_fair()
5147 } while (cfs_rq); pick_next_task_fair()
5159 while (!(cfs_rq = is_same_group(se, pse))) { pick_next_task_fair()
5173 put_prev_entity(cfs_rq, pse); pick_next_task_fair()
5174 set_next_entity(cfs_rq, se); pick_next_task_fair()
5182 cfs_rq = &rq->cfs; pick_next_task_fair()
5185 if (!cfs_rq->nr_running) pick_next_task_fair()
5191 se = pick_next_entity(cfs_rq, NULL); pick_next_task_fair()
5192 set_next_entity(cfs_rq, se); pick_next_task_fair()
5193 cfs_rq = group_cfs_rq(se); pick_next_task_fair()
5194 } while (cfs_rq); pick_next_task_fair()
5225 struct cfs_rq *cfs_rq; put_prev_task_fair() local
5228 cfs_rq = cfs_rq_of(se); for_each_sched_entity()
5229 put_prev_entity(cfs_rq, se); for_each_sched_entity()
5241 struct cfs_rq *cfs_rq = task_cfs_rq(curr); yield_task_fair() local
5250 clear_buddies(cfs_rq, se); yield_task_fair()
5257 update_curr(cfs_rq); yield_task_fair()
5819 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu]; __update_blocked_averages_cpu() local
5822 if (throttled_hierarchy(cfs_rq)) __update_blocked_averages_cpu()
5825 update_cfs_rq_blocked_load(cfs_rq, 1); __update_blocked_averages_cpu()
5838 if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running) __update_blocked_averages_cpu()
5839 list_del_leaf_cfs_rq(cfs_rq); __update_blocked_averages_cpu()
5841 struct rq *rq = rq_of(cfs_rq); __update_blocked_averages_cpu()
5849 struct cfs_rq *cfs_rq; update_blocked_averages() local
5858 for_each_leaf_cfs_rq(rq, cfs_rq) { for_each_leaf_cfs_rq()
5864 __update_blocked_averages_cpu(cfs_rq->tg, rq->cpu); for_each_leaf_cfs_rq()
5871 * Compute the hierarchical load factor for cfs_rq and all its ascendants.
5875 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq) update_cfs_rq_h_load() argument
5877 struct rq *rq = rq_of(cfs_rq); update_cfs_rq_h_load()
5878 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; update_cfs_rq_h_load()
5882 if (cfs_rq->last_h_load_update == now) update_cfs_rq_h_load()
5885 cfs_rq->h_load_next = NULL; for_each_sched_entity()
5887 cfs_rq = cfs_rq_of(se); for_each_sched_entity()
5888 cfs_rq->h_load_next = se; for_each_sched_entity()
5889 if (cfs_rq->last_h_load_update == now) for_each_sched_entity()
5894 cfs_rq->h_load = cfs_rq->runnable_load_avg;
5895 cfs_rq->last_h_load_update = now;
5898 while ((se = cfs_rq->h_load_next) != NULL) {
5899 load = cfs_rq->h_load;
5901 cfs_rq->runnable_load_avg + 1);
5902 cfs_rq = group_cfs_rq(se);
5903 cfs_rq->h_load = load;
5904 cfs_rq->last_h_load_update = now;
5910 struct cfs_rq *cfs_rq = task_cfs_rq(p); task_h_load() local
5912 update_cfs_rq_h_load(cfs_rq); task_h_load()
5913 return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load, task_h_load()
5914 cfs_rq->runnable_load_avg + 1); task_h_load()
7816 struct cfs_rq *cfs_rq; task_tick_fair() local
7820 cfs_rq = cfs_rq_of(se); for_each_sched_entity()
7821 entity_tick(cfs_rq, se, queued); for_each_sched_entity()
7837 struct cfs_rq *cfs_rq; task_fork_fair() local
7847 cfs_rq = task_cfs_rq(current); task_fork_fair()
7848 curr = cfs_rq->curr; task_fork_fair()
7852 * been changed after parent->se.parent,cfs_rq were copied to task_fork_fair()
7853 * child->se.parent,cfs_rq. So call __set_task_cpu() to make those task_fork_fair()
7860 update_curr(cfs_rq); task_fork_fair()
7864 place_entity(cfs_rq, se, 1); task_fork_fair()
7875 se->vruntime -= cfs_rq->min_vruntime; task_fork_fair()
7905 struct cfs_rq *cfs_rq = cfs_rq_of(se); switched_from_fair() local
7921 place_entity(cfs_rq, se, 0); switched_from_fair()
7922 se->vruntime -= cfs_rq->min_vruntime; switched_from_fair()
7933 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib); switched_from_fair()
7967 * This routine is mostly called to set cfs_rq->curr field when a task
7975 struct cfs_rq *cfs_rq = cfs_rq_of(se); for_each_sched_entity() local
7977 set_next_entity(cfs_rq, se); for_each_sched_entity()
7978 /* ensure bandwidth has been allocated on our new cfs_rq */ for_each_sched_entity()
7979 account_cfs_rq_runtime(cfs_rq, 0); for_each_sched_entity()
7983 void init_cfs_rq(struct cfs_rq *cfs_rq) init_cfs_rq() argument
7985 cfs_rq->tasks_timeline = RB_ROOT; init_cfs_rq()
7986 cfs_rq->min_vruntime = (u64)(-(1LL << 20)); init_cfs_rq()
7988 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; init_cfs_rq()
7991 atomic64_set(&cfs_rq->decay_counter, 1); init_cfs_rq()
7992 atomic_long_set(&cfs_rq->removed_load, 0); init_cfs_rq()
8000 struct cfs_rq *cfs_rq; task_move_group_fair() local
8024 * To prevent boost or penalty in the new cfs_rq caused by delta task_move_group_fair()
8035 cfs_rq = cfs_rq_of(se); task_move_group_fair()
8036 se->vruntime += cfs_rq->min_vruntime; task_move_group_fair()
8043 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter); task_move_group_fair()
8044 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib; task_move_group_fair()
8056 if (tg->cfs_rq) for_each_possible_cpu()
8057 kfree(tg->cfs_rq[i]); for_each_possible_cpu()
8062 kfree(tg->cfs_rq);
8068 struct cfs_rq *cfs_rq; alloc_fair_sched_group() local
8072 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL); alloc_fair_sched_group()
8073 if (!tg->cfs_rq) alloc_fair_sched_group()
8084 cfs_rq = kzalloc_node(sizeof(struct cfs_rq), for_each_possible_cpu()
8086 if (!cfs_rq) for_each_possible_cpu()
8094 init_cfs_rq(cfs_rq); for_each_possible_cpu()
8095 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); for_each_possible_cpu()
8101 kfree(cfs_rq);
8115 if (!tg->cfs_rq[cpu]->on_list) unregister_fair_sched_group()
8119 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]); unregister_fair_sched_group()
8123 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, init_tg_cfs_entry() argument
8129 cfs_rq->tg = tg; init_tg_cfs_entry()
8130 cfs_rq->rq = rq; init_tg_cfs_entry()
8131 init_cfs_rq_runtime(cfs_rq); init_tg_cfs_entry()
8133 tg->cfs_rq[cpu] = cfs_rq; init_tg_cfs_entry()
8141 se->cfs_rq = &rq->cfs; init_tg_cfs_entry()
8144 se->cfs_rq = parent->my_q; init_tg_cfs_entry()
8148 se->my_q = cfs_rq; init_tg_cfs_entry()
8267 struct cfs_rq *cfs_rq; print_cfs_stats() local
8270 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq) print_cfs_stats()
8271 print_cfs_rq(m, cpu, cfs_rq); print_cfs_stats()
2644 __update_tg_runnable_avg(struct sched_avg *sa, struct cfs_rq *cfs_rq) __update_tg_runnable_avg() argument
2712 __update_tg_runnable_avg(struct sched_avg *sa, struct cfs_rq *cfs_rq) __update_tg_runnable_avg() argument
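The fair.c matches above revolve around the CFS timeline: entities are kept in an rbtree ordered by vruntime (__enqueue_entity; __pick_first_entity returns the leftmost), update_curr() charges weighted execution time, and update_min_vruntime() advances a monotonic floor used when placing new entities. Below is a minimal userspace sketch of that ordering logic only; a sorted array stands in for the rbtree, the weight scaling is crudely simplified, and all names (toy_entity, toy_cfs_rq, run_for) are illustrative rather than kernel API.

/* toy_cfs.c - simplified, userspace illustration of vruntime ordering.
 * Not kernel code: a sorted array replaces the rbtree keyed on vruntime. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define MAX_ENT 8

struct toy_entity {
        const char *name;
        uint64_t vruntime;      /* virtual runtime, grows as the entity runs */
        unsigned int weight;    /* heavier entities accrue vruntime more slowly */
};

struct toy_cfs_rq {
        struct toy_entity *queue[MAX_ENT];      /* kept sorted by vruntime */
        int nr_running;
        uint64_t min_vruntime;                  /* monotonic floor */
};

static void enqueue(struct toy_cfs_rq *rq, struct toy_entity *se)
{
        int i = rq->nr_running++;

        /* insertion sort: slot 0 ends up holding the smallest vruntime */
        while (i > 0 && rq->queue[i - 1]->vruntime > se->vruntime) {
                rq->queue[i] = rq->queue[i - 1];
                i--;
        }
        rq->queue[i] = se;
}

static struct toy_entity *pick_first(struct toy_cfs_rq *rq)
{
        return rq->nr_running ? rq->queue[0] : NULL;
}

static void update_min_vruntime(struct toy_cfs_rq *rq)
{
        /* advance the floor toward the leftmost entity, never backwards */
        if (rq->nr_running && rq->queue[0]->vruntime > rq->min_vruntime)
                rq->min_vruntime = rq->queue[0]->vruntime;
}

static void run_for(struct toy_entity *se, uint64_t delta_ns)
{
        /* charge weighted runtime; 1024 plays the role of the nice-0 weight */
        se->vruntime += delta_ns * 1024 / se->weight;
}

int main(void)
{
        struct toy_cfs_rq rq = { .min_vruntime = 0 };
        struct toy_entity a = { "nice0", 0, 1024 };
        struct toy_entity b = { "heavy", 0, 2048 };

        enqueue(&rq, &a);
        enqueue(&rq, &b);

        for (int tick = 0; tick < 6; tick++) {
                struct toy_entity *cur = pick_first(&rq);

                /* dequeue the leftmost, run it for 1ms, put it back in order */
                memmove(&rq.queue[0], &rq.queue[1],
                        --rq.nr_running * sizeof(rq.queue[0]));
                run_for(cur, 1000000);
                enqueue(&rq, cur);
                update_min_vruntime(&rq);
                printf("tick %d: ran %-5s vruntime=%llu min_vruntime=%llu\n",
                       tick, cur->name, (unsigned long long)cur->vruntime,
                       (unsigned long long)rq.min_vruntime);
        }
        return 0;
}

Over these six ticks the weight-2048 entity is picked four times and the weight-1024 entity twice, i.e. CPU time roughly in proportion to weight, which is the effect calc_delta_fair()/sched_slice() produce in the real code.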
debug.c
173 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) print_cfs_rq() argument
182 SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg)); print_cfs_rq()
187 SPLIT_NS(cfs_rq->exec_clock)); print_cfs_rq()
190 if (cfs_rq->rb_leftmost) print_cfs_rq()
191 MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime; print_cfs_rq()
192 last = __pick_last_entity(cfs_rq); print_cfs_rq()
195 min_vruntime = cfs_rq->min_vruntime; print_cfs_rq()
211 cfs_rq->nr_spread_over); print_cfs_rq()
212 SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running); print_cfs_rq()
213 SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight); print_cfs_rq()
216 cfs_rq->runnable_load_avg); print_cfs_rq()
218 cfs_rq->blocked_load_avg); print_cfs_rq()
220 cfs_rq->utilization_load_avg); print_cfs_rq()
223 cfs_rq->tg_load_contrib); print_cfs_rq()
225 cfs_rq->tg_runnable_contrib); print_cfs_rq()
227 atomic_long_read(&cfs_rq->tg->load_avg)); print_cfs_rq()
229 atomic_read(&cfs_rq->tg->runnable_avg)); print_cfs_rq()
234 cfs_rq->tg->cfs_bandwidth.timer_active); print_cfs_rq()
236 cfs_rq->throttled); print_cfs_rq()
238 cfs_rq->throttle_count); print_cfs_rq()
242 print_cfs_group_stats(m, cpu, cfs_rq->tg); print_cfs_rq()
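print_cfs_rq() above emits the per-cfs_rq block of the scheduler debug output; on this kernel that text is exposed through /proc/sched_debug when CONFIG_SCHED_DEBUG is set (treat the path and exact field set as version-dependent assumptions). A small reader that pulls out just the cfs_rq headers and a few of the counters printed by the SEQ_printf calls shown:

/* dump_cfs_rq.c - print the cfs_rq[...] lines from /proc/sched_debug.
 * Requires CONFIG_SCHED_DEBUG; field layout varies between kernel versions. */
#include <stdio.h>
#include <string.h>

int main(void)
{
        FILE *f = fopen("/proc/sched_debug", "r");
        char line[512];

        if (!f) {
                perror("fopen /proc/sched_debug");
                return 1;
        }
        while (fgets(line, sizeof(line), f)) {
                /* "cfs_rq[%d]:%s" headers plus a few per-runqueue counters */
                if (strstr(line, "cfs_rq[") ||
                    strstr(line, ".nr_running") ||
                    strstr(line, ".runnable_load_avg") ||
                    strstr(line, ".throttled"))
                        fputs(line, stdout);
        }
        fclose(f);
        return 0;
}

The field names mirror the strings passed to SEQ_printf above (nr_running, runnable_load_avg, throttled); everything else in the file is skipped.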
sched.h
205 struct cfs_rq;
236 struct cfs_rq **cfs_rq; member in struct:task_group
271 * A weight of a cfs_rq is the sum of weights of which entities
272 * are queued on this cfs_rq, so a weight of a entity should not be
302 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
310 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
337 struct cfs_rq { struct
351 * 'curr' points to currently running entity on this cfs_rq.
396 struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
403 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
584 struct cfs_rq cfs;
589 /* list of leaf cfs_rq on this cpu: */
667 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
911 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ set_task_rq()
919 p->se.cfs_rq = tg->cfs_rq[cpu]; set_task_rq()
1668 extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
1669 extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
1674 extern void init_cfs_rq(struct cfs_rq *cfs_rq);
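The sched.h matches show the shape of the group-scheduling hierarchy: each struct rq embeds a root cfs_rq, a task_group owns one cfs_rq and one group sched_entity per CPU, and set_task_rq() repoints p->se.cfs_rq at the group's queue for the task's CPU. Here is a heavily reduced sketch of the wiring that init_tg_cfs_entry() (seen in the fair.c results) performs, with trimmed, purely illustrative struct layouts:

/* toy_tg.c - simplified picture of a task group owning one cfs_rq per CPU,
 * with its group entity queued on the parent's cfs_rq while owning its own
 * queue (my_q). Field names echo the kernel; the structs do not. */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 2

struct toy_cfs_rq {
        const char *owner;      /* which group, for printing */
        int cpu;
};

struct toy_sched_entity {
        struct toy_cfs_rq *cfs_rq;      /* queue this entity is enqueued on */
        struct toy_cfs_rq *my_q;        /* queue this group entity owns; NULL for tasks */
};

struct toy_task_group {
        const char *name;
        struct toy_cfs_rq *cfs_rq[NR_CPUS];     /* one runqueue per CPU */
        struct toy_sched_entity *se[NR_CPUS];   /* one group entity per CPU */
};

/* roughly what init_tg_cfs_entry() wires up for each CPU */
static void init_tg_entry(struct toy_task_group *tg, int cpu,
                          struct toy_cfs_rq *parent_q)
{
        struct toy_cfs_rq *q = calloc(1, sizeof(*q));
        struct toy_sched_entity *se = calloc(1, sizeof(*se));

        q->owner = tg->name;
        q->cpu = cpu;
        se->cfs_rq = parent_q;  /* the group entity queues on the parent's cfs_rq */
        se->my_q = q;           /* and represents its own per-CPU queue */
        tg->cfs_rq[cpu] = q;
        tg->se[cpu] = se;
}

int main(void)
{
        /* the root runqueues stand in for rq->cfs on each CPU */
        struct toy_cfs_rq root[NR_CPUS] = { { "root", 0 }, { "root", 1 } };
        struct toy_task_group tg = { .name = "grpA" };

        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                init_tg_entry(&tg, cpu, &root[cpu]);

        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                printf("cpu%d: %s's entity queued on %s, owns a queue for %s\n",
                       cpu, tg.name, tg.se[cpu]->cfs_rq->owner,
                       tg.se[cpu]->my_q->owner);
        return 0;
}

This is the structure that for_each_sched_entity() walks upward at enqueue/dequeue time: a task's entity sits on its group's cfs_rq, whose group entity sits on the parent's, ending at rq->cfs.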
core.c
7124 root_task_group.cfs_rq = (struct cfs_rq **)ptr; sched_init()
7537 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
8100 * Prevent race between setting of cfs_rq->runtime_enabled and tg_set_cfs_bandwidth()
8130 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; for_each_online_cpu() local
8131 struct rq *rq = cfs_rq->rq; for_each_online_cpu()
8134 cfs_rq->runtime_enabled = runtime_enabled; for_each_online_cpu()
8135 cfs_rq->runtime_remaining = 0; for_each_online_cpu()
8137 if (cfs_rq->throttled) for_each_online_cpu()
8138 unthrottle_cfs_rq(cfs_rq); for_each_online_cpu()
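The core.c hits show tg_set_cfs_bandwidth() pushing a new quota down to every per-CPU cfs_rq (setting runtime_enabled, clearing runtime_remaining, unthrottling anything stuck), while the fair.c hits carry the per-tick side: __account_cfs_rq_runtime() subtracts executed time and assign_cfs_rq_runtime() pulls another slice from the group pool, throttling when nothing is left. A compact sketch of just that arithmetic; the 5ms slice and 20ms budget are example numbers, and the names are illustrative rather than kernel tunables:

/* toy_bw.c - sketch of CFS bandwidth accounting: a group pool handed out to
 * a runqueue in fixed slices, decremented by executed time, throttling when
 * both the local slice and the pool are exhausted. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define SLICE_NS 5000000LL      /* 5ms slice handed out per request */

struct toy_bandwidth {
        int64_t quota;          /* what a period timer would refill runtime to */
        int64_t runtime;        /* what is left in the global pool this period */
};

struct toy_cfs_rq_bw {
        bool runtime_enabled;
        bool throttled;
        int64_t runtime_remaining;      /* local slice, like cfs_rq->runtime_remaining */
};

/* mirror of assign_cfs_rq_runtime(): pull up to one slice (plus any debt) */
static bool assign_runtime(struct toy_bandwidth *b, struct toy_cfs_rq_bw *q)
{
        int64_t want = SLICE_NS - q->runtime_remaining;
        int64_t got = want < b->runtime ? want : b->runtime;

        b->runtime -= got;
        q->runtime_remaining += got;
        return q->runtime_remaining > 0;
}

/* mirror of __account_cfs_rq_runtime(): charge executed time, throttle on empty */
static void account_runtime(struct toy_bandwidth *b, struct toy_cfs_rq_bw *q,
                            int64_t delta_exec)
{
        if (!q->runtime_enabled)
                return;
        q->runtime_remaining -= delta_exec;
        if (q->runtime_remaining > 0)
                return;
        if (!assign_runtime(b, q))
                q->throttled = true;    /* the kernel would dequeue the hierarchy here */
}

int main(void)
{
        struct toy_bandwidth b = { .quota = 20000000, .runtime = 20000000 };
        struct toy_cfs_rq_bw q = { .runtime_enabled = true };

        /* charge 6ms of execution at a time against the 20ms pool */
        for (int i = 0; i < 5 && !q.throttled; i++) {
                account_runtime(&b, &q, 6000000);
                printf("after %2dms: pool=%lldns local=%lldns throttled=%d\n",
                       (i + 1) * 6, (long long)b.runtime,
                       (long long)q.runtime_remaining, q.throttled);
        }
        return 0;
}

The queue keeps topping up its local slice until the pool runs dry and then marks itself throttled, which is the point where the real code dequeues the hierarchy and waits for the period timer's refill to unthrottle it.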
/linux-4.1.27/include/linux/
sched.h
183 struct cfs_rq;
189 print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
1201 struct cfs_rq *cfs_rq; member in struct:sched_entity
1203 struct cfs_rq *my_q;

Completed in 594 milliseconds