fair.c

 105   * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
 106   * each time a cfs_rq requests quota.

 248  /* cpu runqueue to which this cfs_rq is attached */
 249  static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
 251      return cfs_rq->rq;

 269  static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
 271      return p->se.cfs_rq;

 275  static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
 277      return se->cfs_rq;

 281  static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)

 286  static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 288      if (!cfs_rq->on_list) {
 295          if (cfs_rq->tg->parent &&
 296              cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
 297              list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
 298                  &rq_of(cfs_rq)->leaf_cfs_rq_list);
 300              list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
 301                  &rq_of(cfs_rq)->leaf_cfs_rq_list);
 304          cfs_rq->on_list = 1;

 308  static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 310      if (cfs_rq->on_list) {
 311          list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
 312          cfs_rq->on_list = 0;

 316  /* Iterate thr' all leaf cfs_rq's on a runqueue */
 317  #define for_each_leaf_cfs_rq(rq, cfs_rq) \
 318      list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

 321  static inline struct cfs_rq *
 324      if (se->cfs_rq == pse->cfs_rq)
 325          return se->cfs_rq;
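The helpers above only make sense against the data layout they navigate. Below is a stripped-down sketch of that containment, with the structs reduced to just the fields these helpers touch; everything else is omitted, so this is an illustration rather than the kernel's real definitions.

struct rq;
struct cfs_rq;

struct sched_entity {
    struct cfs_rq *cfs_rq;  /* the queue this entity is queued on (cfs_rq_of) */
    struct cfs_rq *my_q;    /* for a group entity: the queue it owns (group_cfs_rq) */
};

struct cfs_rq {
    struct rq *rq;          /* group-scheduling back-pointer used by rq_of() */
    int on_list;            /* linked into rq->leaf_cfs_rq_list? */
};

struct rq {
    struct cfs_rq cfs;      /* root cfs_rq; the flat rq_of() uses container_of() on this */
};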
 342   * same cfs_rq i.e who have a common parent. Walk up the hierarchy of

 374  static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
 376      return container_of(cfs_rq, struct rq, cfs);

 384  static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)

 389  static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)

 398  static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)

 403  static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)

 407  static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)

 411  #define for_each_leaf_cfs_rq(rq, cfs_rq) \
 412      for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

 427  void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);

 457  static void update_min_vruntime(struct cfs_rq *cfs_rq)
 459      u64 vruntime = cfs_rq->min_vruntime;
 461      if (cfs_rq->curr)
 462          vruntime = cfs_rq->curr->vruntime;
 464      if (cfs_rq->rb_leftmost) {
 465          struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
 469          if (!cfs_rq->curr)
 476      cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
 479      cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;

 486  static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 488      struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
 516          cfs_rq->rb_leftmost = &se->run_node;
 519      rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);

 522  static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 524      if (cfs_rq->rb_leftmost == &se->run_node) {
 528          cfs_rq->rb_leftmost = next_node;
 531      rb_erase(&se->run_node, &cfs_rq->tasks_timeline);

 534  struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
 536      struct rb_node *left = cfs_rq->rb_leftmost;

 555  struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 557      struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

 626  static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 628      u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
 634          cfs_rq = cfs_rq_of(se);
 635          load = &cfs_rq->load;
 638              lw = cfs_rq->load;

 653  static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 655      return calc_delta_fair(sched_slice(cfs_rq, se), se);

 687      /* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */

 690  static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
 691  static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);

 701  static void update_curr(struct cfs_rq *cfs_rq)
 703      struct sched_entity *curr = cfs_rq->curr;
 704      u64 now = rq_clock_task(rq_of(cfs_rq));
 720      schedstat_add(cfs_rq, exec_clock, delta_exec);
 723      update_min_vruntime(cfs_rq);
 733      account_cfs_rq_runtime(cfs_rq, delta_exec);
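sched_slice() above spreads one latency period across the runnable entities in proportion to their weight. The user-space sketch below shows that arithmetic; the constants mirror the usual defaults (6 ms latency, 0.75 ms minimum granularity, nr_latency of 8) but should be treated as assumptions, and sched_period()/sched_slice() here are stand-ins rather than the kernel functions.

#include <stdio.h>
#include <stdint.h>

#define SCHED_LATENCY_NS   6000000ULL  /* assumed default sysctl_sched_latency */
#define SCHED_MIN_GRAN_NS   750000ULL  /* assumed default sysctl_sched_min_granularity */
#define SCHED_NR_LATENCY   8           /* latency / min granularity */

static uint64_t sched_period(unsigned int nr_running)
{
    /* the period stretches once there are more tasks than sched_nr_latency */
    if (nr_running > SCHED_NR_LATENCY)
        return nr_running * SCHED_MIN_GRAN_NS;
    return SCHED_LATENCY_NS;
}

static uint64_t sched_slice(unsigned int nr_running, unsigned long weight,
                            unsigned long total_weight)
{
    /* an entity's wall-clock slice is its weight share of the period */
    return sched_period(nr_running) * weight / total_weight;
}

int main(void)
{
    /* two nice-0 tasks (weight 1024) plus one heavier task (weight 2048) */
    unsigned long total = 1024 + 1024 + 2048;

    printf("nice-0 slice: %llu ns\n",
           (unsigned long long)sched_slice(3, 1024, total));
    printf("heavy slice:  %llu ns\n",
           (unsigned long long)sched_slice(3, 2048, total));
    return 0;
}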
 742  update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 744      schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));

 750  static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 756      if (se != cfs_rq->curr)
 757          update_stats_wait_start(cfs_rq, se);

 761  update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 764          rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
 767          rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
 771          rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);

 778  update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 784      if (se != cfs_rq->curr)
 785          update_stats_wait_end(cfs_rq, se);

 792  update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 797      se->exec_start = rq_clock_task(rq_of(cfs_rq));

2331  account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2333      update_load_add(&cfs_rq->load, se->load.weight);
2335          update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
2338          struct rq *rq = rq_of(cfs_rq);
2344      cfs_rq->nr_running++;

2348  account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2350      update_load_sub(&cfs_rq->load, se->load.weight);
2352          update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
2354          account_numa_dequeue(rq_of(cfs_rq), task_of(se));
2357      cfs_rq->nr_running--;

2362  static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
2372      tg_weight -= cfs_rq->tg_load_avg_contrib;
2373      tg_weight += cfs_rq->load.weight;

2378  static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2382      tg_weight = calc_tg_weight(tg, cfs_rq);
2383      load = cfs_rq->load.weight;

2397  static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)

2402  static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
2407      if (cfs_rq->curr == se)
2408          update_curr(cfs_rq);
2409      account_entity_dequeue(cfs_rq, se);
2415      account_entity_enqueue(cfs_rq, se);

2418  static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);

2420  static void update_cfs_shares(struct cfs_rq *cfs_rq)
2426      tg = cfs_rq->tg;
2427      se = tg->se[cpu_of(rq_of(cfs_rq))];
2428      if (!se || throttled_hierarchy(cfs_rq))
2434      shares = calc_cfs_shares(cfs_rq, tg);

2439  static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
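calc_tg_weight()/calc_cfs_shares() above scale a task group's shares by one CPU's fraction of the group's load. The plain C sketch below reproduces that math with made-up inputs; the MIN_SHARES lower bound and the clamp to the group's total shares follow the usual kernel behaviour, but the function itself is only an illustration.

#include <stdio.h>

#define MIN_SHARES 2L

static long calc_cfs_shares(long tg_shares, long cfs_rq_load, long tg_load)
{
    long shares;

    /* this CPU's slice of the group weight: shares * (local load / group load) */
    shares = tg_shares * cfs_rq_load;
    if (tg_load)
        shares /= tg_load;

    if (shares < MIN_SHARES)
        shares = MIN_SHARES;    /* never give a runnable group entity zero weight */
    if (shares > tg_shares)
        shares = tg_shares;     /* never exceed the group's configured shares */
    return shares;
}

int main(void)
{
    /* a group with 1024 shares whose load on this CPU is 1/4 of the group total */
    printf("per-cpu group weight: %ld\n", calc_cfs_shares(1024, 512, 2048));
    return 0;
}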
2560  __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
2561        unsigned long weight, int running, struct cfs_rq *cfs_rq)
2607      if (cfs_rq) {
2608          cfs_rq->runnable_load_sum +=
2622          if (cfs_rq) {
2623              cfs_rq->runnable_load_sum =
2624                  decay_load(cfs_rq->runnable_load_sum, periods + 1);
2633      if (cfs_rq)
2634          cfs_rq->runnable_load_sum += weight * contrib;
2644          if (cfs_rq)
2645              cfs_rq->runnable_load_sum += weight * scaled_delta;
2654      if (cfs_rq) {
2655          cfs_rq->runnable_load_avg =
2656              div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX);

2669  static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
2671      long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
2673      if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
2674          atomic_long_add(delta, &cfs_rq->tg->load_avg);
2675          cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;

2680  static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}

2683  static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);

2685  /* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
2686  static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
2688      struct sched_avg *sa = &cfs_rq->avg;
2691      if (atomic_long_read(&cfs_rq->removed_load_avg)) {
2692          s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
2698      if (atomic_long_read(&cfs_rq->removed_util_avg)) {
2699          long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
2704      decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
2705          scale_load_down(cfs_rq->load.weight), cfs_rq->curr != NULL, cfs_rq);
2709      cfs_rq->load_last_update_time_copy = sa->last_update_time;

2715  /* Update task and its cfs_rq load average */
2718      struct cfs_rq *cfs_rq = cfs_rq_of(se);
2719      u64 now = cfs_rq_clock_task(cfs_rq);
2720      int cpu = cpu_of(rq_of(cfs_rq));
2728          cfs_rq->curr == se, NULL);
2730      if (update_cfs_rq_load_avg(now, cfs_rq) && update_tg)
2731          update_tg_load_avg(cfs_rq, 0);

2734  static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
2744          __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
2754      se->avg.last_update_time = cfs_rq->avg.last_update_time;
2755      cfs_rq->avg.load_avg += se->avg.load_avg;
2756      cfs_rq->avg.load_sum += se->avg.load_sum;
2757      cfs_rq->avg.util_avg += se->avg.util_avg;
2758      cfs_rq->avg.util_sum += se->avg.util_sum;
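__update_load_avg() above is the PELT core: for every elapsed ~1 ms period the accumulated sums decay geometrically, with the decay factor y chosen so that y^32 = 1/2. The kernel does this in fixed point via decay_load() and a lookup table; the floating-point sketch below only illustrates the half-life behaviour and is not the kernel's implementation.

#include <stdio.h>
#include <math.h>

#define LOAD_AVG_PERIOD 32  /* periods after which a contribution has halved */

/* decay val by y^n, computed with floating point for clarity */
static double decay_load(double val, unsigned int n)
{
    return val * pow(0.5, (double)n / LOAD_AVG_PERIOD);
}

int main(void)
{
    double sum = 1024.0;    /* one fully loaded recent period */
    unsigned int ms;

    for (ms = 0; ms <= 96; ms += 32)
        printf("after %3u ms: %.1f\n", ms, decay_load(sum, ms));
    return 0;
}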
2761  static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
2763      __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
2765          cfs_rq->curr == se, NULL);
2767      cfs_rq->avg.load_avg = max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
2768      cfs_rq->avg.load_sum = max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0);
2769      cfs_rq->avg.util_avg = max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
2770      cfs_rq->avg.util_sum = max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0);

2773  /* Add the load generated by se into cfs_rq's load average */
2775  enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
2778      u64 now = cfs_rq_clock_task(cfs_rq);
2783          __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
2785              cfs_rq->curr == se, NULL);
2788      decayed = update_cfs_rq_load_avg(now, cfs_rq);
2790      cfs_rq->runnable_load_avg += sa->load_avg;
2791      cfs_rq->runnable_load_sum += sa->load_sum;
2794          attach_entity_load_avg(cfs_rq, se);
2797          update_tg_load_avg(cfs_rq, 0);

2800  /* Remove the runnable load generated by se from cfs_rq's runnable load average */
2802  dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
2806      cfs_rq->runnable_load_avg =
2807          max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0);
2808      cfs_rq->runnable_load_sum =
2809          max_t(s64, cfs_rq->runnable_load_sum - se->avg.load_sum, 0);

2813   * Task first catches up with cfs_rq, and then subtract
2814   * itself from the cfs_rq (task must be off the queue now).
2818      struct cfs_rq *cfs_rq = cfs_rq_of(se);
2825          last_update_time_copy = cfs_rq->load_last_update_time_copy;
2827          last_update_time = cfs_rq->avg.last_update_time;
2830      last_update_time = cfs_rq->avg.last_update_time;
2833      __update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL);
2834      atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg);
2835      atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg);

2856  static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq)
2858      return cfs_rq->runnable_load_avg;

2861  static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
2863      return cfs_rq->avg.load_avg;

2872  enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
2874  dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
2878  attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
2880  detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}

2889  static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
2898          u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
2915          u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;

2951  static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
2954      s64 d = se->vruntime - cfs_rq->min_vruntime;
2960          schedstat_inc(cfs_rq, nr_spread_over);
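remove_entity_load_avg() above reads load_last_update_time_copy, and update_min_vruntime()/task_waking_fair() use min_vruntime_copy the same way: on configurations where a 64-bit value cannot be loaded atomically, the writer publishes the value and then a copy, and the reader retries until both agree. Below is a sketch of that pattern using C11 atomics as a stand-in for the kernel's smp_wmb()/smp_rmb(); the type and function names are invented.

#include <stdatomic.h>
#include <stdint.h>

struct clock64 {
    _Atomic uint64_t val;   /* e.g. avg.last_update_time or min_vruntime */
    _Atomic uint64_t copy;  /* the *_copy field published second */
};

static void clock64_write(struct clock64 *c, uint64_t v)
{
    atomic_store_explicit(&c->val, v, memory_order_relaxed);
    /* make the value visible before the copy, like smp_wmb() */
    atomic_store_explicit(&c->copy, v, memory_order_release);
}

static uint64_t clock64_read(struct clock64 *c)
{
    uint64_t copy, val;

    do {
        copy = atomic_load_explicit(&c->copy, memory_order_acquire);
        val  = atomic_load_explicit(&c->val, memory_order_relaxed);
    } while (val != copy);  /* a writer raced with us: retry */

    return val;
}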
2965  place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
2967      u64 vruntime = cfs_rq->min_vruntime;
2976          vruntime += sched_vslice(cfs_rq, se);

2996  static void check_enqueue_throttle(struct cfs_rq *cfs_rq);

2999  enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
3006          se->vruntime += cfs_rq->min_vruntime;
3011      update_curr(cfs_rq);
3012      enqueue_entity_load_avg(cfs_rq, se);
3013      account_entity_enqueue(cfs_rq, se);
3014      update_cfs_shares(cfs_rq);
3017          place_entity(cfs_rq, se, 0);
3018          enqueue_sleeper(cfs_rq, se);
3021      update_stats_enqueue(cfs_rq, se);
3022      check_spread(cfs_rq, se);
3023      if (se != cfs_rq->curr)
3024          __enqueue_entity(cfs_rq, se);
3027      if (cfs_rq->nr_running == 1) {
3028          list_add_leaf_cfs_rq(cfs_rq);
3029          check_enqueue_throttle(cfs_rq);

3036      struct cfs_rq *cfs_rq = cfs_rq_of(se);
3037      if (cfs_rq->last != se)
3040      cfs_rq->last = NULL;

3047      struct cfs_rq *cfs_rq = cfs_rq_of(se);
3048      if (cfs_rq->next != se)
3051      cfs_rq->next = NULL;

3058      struct cfs_rq *cfs_rq = cfs_rq_of(se);
3059      if (cfs_rq->skip != se)
3062      cfs_rq->skip = NULL;

3066  static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
3068      if (cfs_rq->last == se)
3071      if (cfs_rq->next == se)
3074      if (cfs_rq->skip == se)

3078  static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);

3081  dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
3086      update_curr(cfs_rq);
3087      dequeue_entity_load_avg(cfs_rq, se);
3089      update_stats_dequeue(cfs_rq, se);
3096              se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
3098              se->statistics.block_start = rq_clock(rq_of(cfs_rq));
3103      clear_buddies(cfs_rq, se);
3105      if (se != cfs_rq->curr)
3106          __dequeue_entity(cfs_rq, se);
3108      account_entity_dequeue(cfs_rq, se);
3116          se->vruntime -= cfs_rq->min_vruntime;
3119      return_cfs_rq_runtime(cfs_rq);
3121      update_min_vruntime(cfs_rq);
3122      update_cfs_shares(cfs_rq);

3129  check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
3135      ideal_runtime = sched_slice(cfs_rq, curr);
3138          resched_curr(rq_of(cfs_rq));
3143          clear_buddies(cfs_rq, curr);
3155      se = __pick_first_entity(cfs_rq);
3162          resched_curr(rq_of(cfs_rq));
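check_preempt_tick() above decides when the running entity must give way: after it has consumed its ideal slice, or once it has pulled more than a slice ahead of the leftmost waiter in virtual time, but never before the minimum granularity has elapsed. Below is a compact sketch of that decision with the scheduler state passed in as parameters; all names here are illustrative, not the kernel's.

#include <stdbool.h>
#include <stdint.h>

static bool should_preempt(uint64_t delta_exec,     /* runtime since it was picked   */
                           uint64_t ideal_runtime,  /* sched_slice() result          */
                           int64_t  vdiff,          /* curr->vruntime - leftmost->vruntime */
                           uint64_t min_granularity)
{
    if (delta_exec > ideal_runtime)
        return true;                /* slice fully consumed */

    if (delta_exec < min_granularity)
        return false;               /* avoid over-scheduling on tiny runs */

    /* curr is more than a full slice ahead of the leftmost entity */
    return vdiff > (int64_t)ideal_runtime;
}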
3166  set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
3175          update_stats_wait_end(cfs_rq, se);
3176          __dequeue_entity(cfs_rq, se);
3180      update_stats_curr_start(cfs_rq, se);
3181      cfs_rq->curr = se;
3188      if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {

3207  pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
3209      struct sched_entity *left = __pick_first_entity(cfs_rq);
3225      if (cfs_rq->skip == se) {
3229              second = __pick_first_entity(cfs_rq);
3243      if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
3244          se = cfs_rq->last;
3249      if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
3250          se = cfs_rq->next;
3252      clear_buddies(cfs_rq, se);

3257  static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);

3259  static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
3266          update_curr(cfs_rq);
3269      check_cfs_rq_runtime(cfs_rq);
3271      check_spread(cfs_rq, prev);
3273          update_stats_wait_start(cfs_rq, prev);
3275          __enqueue_entity(cfs_rq, prev);
3279      cfs_rq->curr = NULL;

3283  entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
3288      update_curr(cfs_rq);
3294      update_cfs_shares(cfs_rq);
3302          resched_curr(rq_of(cfs_rq));
3309          hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
3313      if (cfs_rq->nr_running > 1)
3314          check_preempt_tick(cfs_rq, curr);

3389  /* rq->task_clock normalized against any time this cfs_rq has spent throttled */
3390  static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3392      if (unlikely(cfs_rq->throttle_count))
3393          return cfs_rq->throttled_clock_task;
3395      return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;

3399  static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3401      struct task_group *tg = cfs_rq->tg;
3406      min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
3423      cfs_rq->runtime_remaining += amount;
3429      if ((s64)(expires - cfs_rq->runtime_expires) > 0)
3430          cfs_rq->runtime_expires = expires;
3432      return cfs_rq->runtime_remaining > 0;

3439  static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3441      struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3444      if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
3447      if (cfs_rq->runtime_remaining < 0)
3461      if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
3463          cfs_rq->runtime_expires += TICK_NSEC;
3466          cfs_rq->runtime_remaining = 0;

3470  static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
3473      cfs_rq->runtime_remaining -= delta_exec;
3474      expire_cfs_rq_runtime(cfs_rq);
3476      if (likely(cfs_rq->runtime_remaining > 0))
3483      if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
3484          resched_curr(rq_of(cfs_rq));
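assign_cfs_rq_runtime() above refills the per-cfs_rq pool from the task group's global quota, one bandwidth slice at a time (5 ms by default, treat that value as an assumption here). The user-space sketch below shows the refill arithmetic, ignoring the infinite-quota and expiry handling; the names are illustrative.

#include <stdio.h>
#include <stdint.h>

#define BANDWIDTH_SLICE_NS 5000000LL    /* assumed 5 ms default slice */

struct pool { int64_t runtime; };       /* per-tg global quota remaining */

static int64_t assign_runtime(struct pool *p, int64_t local_remaining)
{
    int64_t min_amount = BANDWIDTH_SLICE_NS - local_remaining;
    int64_t amount = min_amount;

    if (amount > p->runtime)            /* pool cannot cover a full slice */
        amount = p->runtime;
    p->runtime -= amount;

    return local_remaining + amount;    /* new cfs_rq->runtime_remaining */
}

int main(void)
{
    struct pool p = { .runtime = 7000000 }; /* 7 ms of quota left globally */
    int64_t local = -200000;                /* this cfs_rq overran by 0.2 ms */

    local = assign_runtime(&p, local);
    printf("local=%lld pool=%lld\n", (long long)local, (long long)p.runtime);
    local = assign_runtime(&p, 0);
    printf("local=%lld pool=%lld\n", (long long)local, (long long)p.runtime);
    return 0;
}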
3488  void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
3490      if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
3493      __account_cfs_rq_runtime(cfs_rq, delta_exec);

3496  static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
3498      return cfs_bandwidth_used() && cfs_rq->throttled;

3501  /* check whether cfs_rq, or any parent, is throttled */
3502  static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
3504      return cfs_bandwidth_used() && cfs_rq->throttle_count;

3515      struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
3517      src_cfs_rq = tg->cfs_rq[src_cpu];
3518      dest_cfs_rq = tg->cfs_rq[dest_cpu];

3528      struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3530      cfs_rq->throttle_count--;
3532      if (!cfs_rq->throttle_count) {
3534          cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
3535                          cfs_rq->throttled_clock_task;

3545      struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3548      if (!cfs_rq->throttle_count)
3549          cfs_rq->throttled_clock_task = rq_clock_task(rq);
3550      cfs_rq->throttle_count++;

3555  static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
3557      struct rq *rq = rq_of(cfs_rq);
3558      struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3563      se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
3567      walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
3570      task_delta = cfs_rq->h_nr_running;
3572          struct cfs_rq *qcfs_rq = cfs_rq_of(se);
3588      cfs_rq->throttled = 1;
3589      cfs_rq->throttled_clock = rq_clock(rq);
3597      list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);

3609  void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
3611      struct rq *rq = rq_of(cfs_rq);
3612      struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3617      se = cfs_rq->tg->se[cpu_of(rq)];
3619      cfs_rq->throttled = 0;
3624      cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
3625      list_del_rcu(&cfs_rq->throttled_list);
3629      walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
3631      if (!cfs_rq->load.weight)
3634      task_delta = cfs_rq->h_nr_running;
3639          cfs_rq = cfs_rq_of(se);
3641              enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
3642          cfs_rq->h_nr_running += task_delta;
3644          if (cfs_rq_throttled(cfs_rq))
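tg_throttle_down()/tg_unthrottle_up() above, together with cfs_rq_clock_task(), make the task clock stand still while a cfs_rq is throttled and permanently subtract the throttled span afterwards, so PELT and runtime accounting do not charge throttled time. A minimal sketch of that clock bookkeeping follows; the struct and function names are invented for illustration.

#include <stdint.h>

struct tclock {
    int      throttled;
    uint64_t frozen_at;     /* plays the role of throttled_clock_task      */
    uint64_t stolen;        /* plays the role of throttled_clock_task_time */
};

static void tclock_throttle(struct tclock *c, uint64_t now)
{
    c->throttled = 1;
    c->frozen_at = now;             /* remember when time stopped */
}

static void tclock_unthrottle(struct tclock *c, uint64_t now)
{
    c->stolen += now - c->frozen_at;    /* accumulate the throttled span */
    c->throttled = 0;
}

static uint64_t tclock_read(const struct tclock *c, uint64_t now)
{
    if (c->throttled)
        return c->frozen_at;        /* clock is frozen while throttled */
    return now - c->stolen;         /* throttled time excluded forever */
}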
3659      struct cfs_rq *cfs_rq;
3664      list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
3666          struct rq *rq = rq_of(cfs_rq);
3669          if (!cfs_rq_throttled(cfs_rq))
3672          runtime = -cfs_rq->runtime_remaining + 1;
3677          cfs_rq->runtime_remaining += runtime;
3678          cfs_rq->runtime_expires = expires;
3681          if (cfs_rq->runtime_remaining > 0)
3682              unthrottle_cfs_rq(cfs_rq);

3767  /* a cfs_rq won't donate quota below this amount */

3812  static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3814      struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3815      s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
3822          cfs_rq->runtime_expires == cfs_b->runtime_expires) {
3833      cfs_rq->runtime_remaining -= slack_runtime;

3836  static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3841      if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
3844      __return_cfs_rq_runtime(cfs_rq);

3885  static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
3891      if (!cfs_rq->runtime_enabled || cfs_rq->curr)
3895      if (cfs_rq_throttled(cfs_rq))
3899      account_cfs_rq_runtime(cfs_rq, 0);
3900      if (cfs_rq->runtime_remaining <= 0)
3901          throttle_cfs_rq(cfs_rq);

3904  /* conditionally throttle active cfs_rq's from put_prev_entity() */
3905  static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3910      if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
3917      if (cfs_rq_throttled(cfs_rq))
3920      throttle_cfs_rq(cfs_rq);

3970  static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3972      cfs_rq->runtime_enabled = 0;
3973      INIT_LIST_HEAD(&cfs_rq->throttled_list);

3999      struct cfs_rq *cfs_rq;
4001      for_each_leaf_cfs_rq(rq, cfs_rq) {
4002          struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth;
4005          cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;

4012      struct cfs_rq *cfs_rq;
4014      for_each_leaf_cfs_rq(rq, cfs_rq) {
4015          if (!cfs_rq->runtime_enabled)
4022          cfs_rq->runtime_remaining = 1;
4027          cfs_rq->runtime_enabled = 0;
4029          if (cfs_rq_throttled(cfs_rq))
4030              unthrottle_cfs_rq(cfs_rq);

4035  static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
4037      return rq_clock_task(rq_of(cfs_rq));

4040  static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
4041  static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
4042  static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
4043  static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}

4045  static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)

4050  static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)

4064  static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
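__return_cfs_rq_runtime() above hands quota back to the global pool when a cfs_rq goes idle, keeping only a small reserve (min_cfs_rq_runtime, normally 1 ms). Below is a sketch of that slack return with the locking, expiry and RUNTIME_INF checks left out; the values and names are assumptions for illustration.

#include <stdio.h>
#include <stdint.h>

#define MIN_CFS_RQ_RUNTIME 1000000LL    /* assumed 1 ms reserve */

struct pool { int64_t runtime; };       /* per-tg global quota */

static void return_slack(struct pool *p, int64_t *local_remaining)
{
    int64_t slack = *local_remaining - MIN_CFS_RQ_RUNTIME;

    if (slack <= 0)
        return;                 /* nothing worth returning */

    p->runtime += slack;        /* give it back to the task-group pool */
    *local_remaining -= slack;  /* keep only the small reserve locally */
}

int main(void)
{
    struct pool p = { .runtime = 0 };
    int64_t local = 3500000;    /* 3.5 ms left when the cfs_rq goes idle */

    return_slack(&p, &local);
    printf("local=%lld pool=%lld\n", (long long)local, (long long)p.runtime);
    return 0;
}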
4085      struct cfs_rq *cfs_rq = cfs_rq_of(se);
4089      if (cfs_rq->nr_running > 1) {
4090          u64 slice = sched_slice(cfs_rq, se);

4137      struct cfs_rq *cfs_rq;
4143          cfs_rq = cfs_rq_of(se);
4144          enqueue_entity(cfs_rq, se, flags);
4147           * end evaluation on encountering a throttled cfs_rq
4149           * note: in the case of encountering a throttled cfs_rq we will
4152          if (cfs_rq_throttled(cfs_rq))
4154          cfs_rq->h_nr_running++;
4160          cfs_rq = cfs_rq_of(se);
4161          cfs_rq->h_nr_running++;
4163          if (cfs_rq_throttled(cfs_rq))
4167          update_cfs_shares(cfs_rq);

4185      struct cfs_rq *cfs_rq;
4190          cfs_rq = cfs_rq_of(se);
4191          dequeue_entity(cfs_rq, se, flags);
4194           * end evaluation on encountering a throttled cfs_rq
4196           * note: in the case of encountering a throttled cfs_rq we will
4199          if (cfs_rq_throttled(cfs_rq))
4201          cfs_rq->h_nr_running--;
4204          if (cfs_rq->load.weight) {
4206               * Bias pick_next to pick a task from this cfs_rq, as
4220          cfs_rq = cfs_rq_of(se);
4221          cfs_rq->h_nr_running--;
4223          if (cfs_rq_throttled(cfs_rq))
4227          update_cfs_shares(cfs_rq);

4502      struct cfs_rq *cfs_rq = cfs_rq_of(se);
4509          min_vruntime_copy = cfs_rq->min_vruntime_copy;
4511          min_vruntime = cfs_rq->min_vruntime;
4514      min_vruntime = cfs_rq->min_vruntime;

4885   * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
4894   * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even

5017       * and ready to go to new CPU/cfs_rq. But we have difficulty in getting

5118      struct cfs_rq *cfs_rq = task_cfs_rq(curr);
5119      int scale = cfs_rq->nr_running >= sched_nr_latency;
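enqueue_task_fair()/dequeue_task_fair() above propagate h_nr_running up the group hierarchy and stop at a throttled ancestor, since the levels above it do not see the task as runnable (that is what the "end evaluation on encountering a throttled cfs_rq" notes in the excerpt refer to). The toy model below reproduces only that net effect; the array-of-levels representation is purely illustrative.

#include <stdio.h>

struct glevel {
    int throttled;
    unsigned int h_nr_running;
};

/* returns 1 if the new task became visible at the root level */
static int enqueue_one(struct glevel *up, int nlevels)
{
    int i;

    for (i = 0; i < nlevels; i++) {
        up[i].h_nr_running++;       /* one more task below this level */
        if (up[i].throttled)
            return 0;               /* hidden from the levels above   */
    }
    return 1;
}

int main(void)
{
    /* leaf .. root, with the middle group currently throttled */
    struct glevel path[3] = { {0, 4}, {1, 7}, {0, 12} };

    printf("visible at root: %d\n", enqueue_one(path, 3));
    printf("counts: %u %u %u\n", path[0].h_nr_running,
           path[1].h_nr_running, path[2].h_nr_running);
    return 0;
}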
5200      struct cfs_rq *cfs_rq = &rq->cfs;
5207      if (!cfs_rq->nr_running)
5222          struct sched_entity *curr = cfs_rq->curr;
5226           * have to consider cfs_rq->curr. If it is still a runnable
5232              update_curr(cfs_rq);
5242          if (unlikely(check_cfs_rq_runtime(cfs_rq)))
5246          se = pick_next_entity(cfs_rq, curr);
5247          cfs_rq = group_cfs_rq(se);
5248      } while (cfs_rq);
5260          while (!(cfs_rq = is_same_group(se, pse))) {
5274          put_prev_entity(cfs_rq, pse);
5275          set_next_entity(cfs_rq, se);
5283      cfs_rq = &rq->cfs;
5286      if (!cfs_rq->nr_running)
5292          se = pick_next_entity(cfs_rq, NULL);
5293          set_next_entity(cfs_rq, se);
5294          cfs_rq = group_cfs_rq(se);
5295      } while (cfs_rq);

5334      struct cfs_rq *cfs_rq;
5337          cfs_rq = cfs_rq_of(se);
5338          put_prev_entity(cfs_rq, se);

5350      struct cfs_rq *cfs_rq = task_cfs_rq(curr);
5359      clear_buddies(cfs_rq, se);
5366      update_curr(cfs_rq);

5899      struct cfs_rq *cfs_rq;
5909      for_each_leaf_cfs_rq(rq, cfs_rq) {
5911          if (throttled_hierarchy(cfs_rq))
5914          if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq))
5915              update_tg_load_avg(cfs_rq, 0);

5921   * Compute the hierarchical load factor for cfs_rq and all its ascendants.
5925  static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
5927      struct rq *rq = rq_of(cfs_rq);
5928      struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
5932      if (cfs_rq->last_h_load_update == now)
5935      cfs_rq->h_load_next = NULL;
5937          cfs_rq = cfs_rq_of(se);
5938          cfs_rq->h_load_next = se;
5939          if (cfs_rq->last_h_load_update == now)
5944          cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
5945          cfs_rq->last_h_load_update = now;
5948      while ((se = cfs_rq->h_load_next) != NULL) {
5949          load = cfs_rq->h_load;
5951              cfs_rq_load_avg(cfs_rq) + 1);
5952          cfs_rq = group_cfs_rq(se);
5953          cfs_rq->h_load = load;
5954          cfs_rq->last_h_load_update = now;

5960      struct cfs_rq *cfs_rq = task_cfs_rq(p);
5962      update_cfs_rq_h_load(cfs_rq);
5963      return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
5964              cfs_rq_load_avg(cfs_rq) + 1);

5970      struct cfs_rq *cfs_rq = &rq->cfs;
5975      update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);

7877      struct cfs_rq *cfs_rq;
7881          cfs_rq = cfs_rq_of(se);
7882          entity_tick(cfs_rq, se, queued);
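update_cfs_rq_h_load() and task_h_load() above compute a task's contribution to the root runqueue load: starting from the root's load, each level is scaled by the group entity's share of its parent, and the task finally takes its own share of the leaf's h_load. Below is a worked example of the same arithmetic with invented numbers.

#include <stdio.h>

int main(void)
{
    /* root cfs_rq load and one nested group path: root -> A -> A/B */
    unsigned long root_load = 2048;
    unsigned long se_A_load = 1024, cfs_A_load = 512;  /* group A entity / queue  */
    unsigned long se_B_load = 256,  cfs_B_load = 300;  /* group A/B entity / queue */
    unsigned long task_load = 150;                     /* p->se.avg.load_avg       */
    unsigned long h_load;

    h_load = root_load;                                /* rq->cfs.h_load           */
    h_load = h_load * se_A_load / (root_load + 1);     /* group A's h_load         */
    h_load = h_load * se_B_load / (cfs_A_load + 1);    /* group A/B's h_load       */

    /* task_h_load(): the task's share of the leaf's hierarchical load */
    printf("task_h_load = %lu\n", task_load * h_load / (cfs_B_load + 1));
    return 0;
}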
7896      struct cfs_rq *cfs_rq;
7906      cfs_rq = task_cfs_rq(current);
7907      curr = cfs_rq->curr;
7911       * been changed after parent->se.parent,cfs_rq were copied to
7912       * child->se.parent,cfs_rq. So call __set_task_cpu() to make those
7919      update_curr(cfs_rq);
7923      place_entity(cfs_rq, se, 1);
7934          se->vruntime -= cfs_rq->min_vruntime;

7991      struct cfs_rq *cfs_rq = cfs_rq_of(se);
7998          place_entity(cfs_rq, se, 0);
7999          se->vruntime -= cfs_rq->min_vruntime;
8002      /* Catch up with the cfs_rq and remove our load when we leave */
8003      detach_entity_load_avg(cfs_rq, se);

8009      struct cfs_rq *cfs_rq = cfs_rq_of(se);
8019      /* Synchronize task with its cfs_rq */
8020      attach_entity_load_avg(cfs_rq, se);
8023          se->vruntime += cfs_rq->min_vruntime;

8050   * This routine is mostly called to set cfs_rq->curr field when a task
8058      struct cfs_rq *cfs_rq = cfs_rq_of(se);
8060      set_next_entity(cfs_rq, se);
8061      /* ensure bandwidth has been allocated on our new cfs_rq */
8062      account_cfs_rq_runtime(cfs_rq, 0);

8066  void init_cfs_rq(struct cfs_rq *cfs_rq)
8068      cfs_rq->tasks_timeline = RB_ROOT;
8069      cfs_rq->min_vruntime = (u64)(-(1LL << 20));
8071      cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
8074      atomic_long_set(&cfs_rq->removed_load_avg, 0);
8075      atomic_long_set(&cfs_rq->removed_util_avg, 0);

8086  /* Tell se's cfs_rq has been changed -- migrated */

8099          if (tg->cfs_rq)
8100              kfree(tg->cfs_rq[i]);
8108      kfree(tg->cfs_rq);

8114      struct cfs_rq *cfs_rq;
8118      tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
8119      if (!tg->cfs_rq)
8130          cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
8132          if (!cfs_rq)
8140          init_cfs_rq(cfs_rq);
8141          init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
8148      kfree(cfs_rq);

8162      if (!tg->cfs_rq[cpu]->on_list)
8166      list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);

8170  void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
8176      cfs_rq->tg = tg;
8177      cfs_rq->rq = rq;
8178      init_cfs_rq_runtime(cfs_rq);
8180      tg->cfs_rq[cpu] = cfs_rq;
8188          se->cfs_rq = &rq->cfs;
8191          se->cfs_rq = parent->my_q;
8195      se->my_q = cfs_rq;

8316      struct cfs_rq *cfs_rq;
8319      for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
8320          print_cfs_rq(m, cpu, cfs_rq);
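detach_task_cfs_rq()/attach_task_cfs_rq() above, like the fork and sleep paths earlier, renormalize vruntime by subtracting the old queue's min_vruntime and adding the new one's, so a task keeps its relative lead or lag even when it moves between cfs_rq's whose virtual clocks differ wildly. A tiny numeric illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t old_min = 1000000, new_min = 500000000;
    uint64_t vruntime = 1002500;    /* 2500 ns behind of fair on the old queue */

    vruntime -= old_min;            /* detach: make the value queue-relative   */
    vruntime += new_min;            /* attach: rebase on the new queue's clock */

    printf("lag preserved: %llu ns\n",
           (unsigned long long)(vruntime - new_min));
    return 0;
}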