Lines matching refs:se — references to the scheduling-entity pointer se throughout the CFS (fair scheduling class) code. Each entry gives the source line number, the matching line, the enclosing function, and whether se is an argument or a local there.

255 #define entity_is_task(se)	(!se->my_q)  argument
257 static inline struct task_struct *task_of(struct sched_entity *se) in task_of() argument
260 WARN_ON_ONCE(!entity_is_task(se)); in task_of()
262 return container_of(se, struct task_struct, se); in task_of()
266 #define for_each_sched_entity(se) \ argument
267 for (; se; se = se->parent)
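
The two macros above (lines 255 and 266) are the CONFIG_FAIR_GROUP_SCHED variants; compare lines 384-387 further down for the flat case. An entity is a task exactly when it owns no child runqueue (my_q is NULL), and for_each_sched_entity() climbs from a task's entity through its enclosing group entities via parent. The stand-alone sketch below models that walk in user space; the struct layout, demo_walk() and main() are illustrative assumptions, not the kernel definitions.

#include <stdio.h>

/* Minimal user-space model of the two macros; this is not the kernel's
 * struct sched_entity, only the fields the macros rely on. */
struct cfs_rq { int unused; };

struct sched_entity {
	struct cfs_rq *my_q;		/* runqueue this entity owns, NULL for a task */
	struct sched_entity *parent;	/* enclosing group entity, NULL at the top   */
	const char *name;		/* demo-only label                            */
};

#define entity_is_task(se)	(!(se)->my_q)
#define for_each_sched_entity(se) \
	for (; se; se = se->parent)

static void demo_walk(struct sched_entity *se)
{
	for_each_sched_entity(se)
		printf("%s: %s\n", se->name,
		       entity_is_task(se) ? "task" : "group");
}

int main(void)
{
	struct cfs_rq group_q = { 0 };
	struct sched_entity group = { .my_q = &group_q, .parent = NULL,   .name = "group A" };
	struct sched_entity task  = { .my_q = NULL,     .parent = &group, .name = "task t1" };

	demo_walk(&task);	/* prints the task, then its enclosing group */
	return 0;
}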
271 return p->se.cfs_rq; in task_cfs_rq()
275 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) in cfs_rq_of() argument
277 return se->cfs_rq; in cfs_rq_of()
327 is_same_group(struct sched_entity *se, struct sched_entity *pse) in is_same_group() argument
329 if (se->cfs_rq == pse->cfs_rq) in is_same_group()
330 return se->cfs_rq; in is_same_group()
335 static inline struct sched_entity *parent_entity(struct sched_entity *se) in parent_entity() argument
337 return se->parent; in parent_entity()
341 find_matching_se(struct sched_entity **se, struct sched_entity **pse) in find_matching_se() argument
353 se_depth = (*se)->depth; in find_matching_se()
358 *se = parent_entity(*se); in find_matching_se()
366 while (!is_same_group(*se, *pse)) { in find_matching_se()
367 *se = parent_entity(*se); in find_matching_se()
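
find_matching_se() brings two entities from possibly different cgroup depths to a pair of siblings on the same cfs_rq: the deeper one is first walked up until the depths match, then both are walked up in lockstep until is_same_group() succeeds, so the caller can compare their vruntimes meaningfully. Below is a user-space model of that control flow; struct demo_se, same_group() and find_matching() are simplified stand-ins, not the kernel types.

#include <stddef.h>

/* Illustrative model of the depth-equalizing walk in find_matching_se(). */
struct demo_se {
	int depth;			/* nesting depth in the group hierarchy */
	void *cfs_rq;			/* runqueue this entity is queued on    */
	struct demo_se *parent;
};

static void *same_group(struct demo_se *se, struct demo_se *pse)
{
	return se->cfs_rq == pse->cfs_rq ? se->cfs_rq : NULL;
}

static void find_matching(struct demo_se **se, struct demo_se **pse)
{
	int se_depth = (*se)->depth;
	int pse_depth = (*pse)->depth;

	while (se_depth > pse_depth) {		/* bring the deeper entity up */
		se_depth--;
		*se = (*se)->parent;
	}
	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = (*pse)->parent;
	}
	while (!same_group(*se, *pse)) {	/* walk both until they are siblings */
		*se = (*se)->parent;
		*pse = (*pse)->parent;
	}
}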
374 static inline struct task_struct *task_of(struct sched_entity *se) in task_of() argument
376 return container_of(se, struct task_struct, se); in task_of()
384 #define entity_is_task(se) 1 argument
386 #define for_each_sched_entity(se) \ argument
387 for (; se; se = NULL)
394 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) in cfs_rq_of() argument
396 struct task_struct *p = task_of(se); in cfs_rq_of()
419 static inline struct sched_entity *parent_entity(struct sched_entity *se) in parent_entity() argument
425 find_matching_se(struct sched_entity **se, struct sched_entity **pse) in find_matching_se() argument
470 struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost, in update_min_vruntime() local
475 vruntime = se->vruntime; in update_min_vruntime()
477 vruntime = min_vruntime(vruntime, se->vruntime); in update_min_vruntime()
491 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __enqueue_entity() argument
508 if (entity_before(se, entry)) { in __enqueue_entity()
521 cfs_rq->rb_leftmost = &se->run_node; in __enqueue_entity()
523 rb_link_node(&se->run_node, parent, link); in __enqueue_entity()
524 rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline); in __enqueue_entity()
527 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __dequeue_entity() argument
529 if (cfs_rq->rb_leftmost == &se->run_node) { in __dequeue_entity()
532 next_node = rb_next(&se->run_node); in __dequeue_entity()
536 rb_erase(&se->run_node, &cfs_rq->tasks_timeline); in __dequeue_entity()
549 static struct sched_entity *__pick_next_entity(struct sched_entity *se) in __pick_next_entity() argument
551 struct rb_node *next = rb_next(&se->run_node); in __pick_next_entity()
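
__enqueue_entity()/__dequeue_entity() keep runnable entities in a red-black tree ordered by vruntime, with rb_leftmost caching the entity that __pick_first_entity()/__pick_next_entity() will hand out next. The sketch below models the same ordering invariant (smallest vruntime first, wrap-safe comparison) with a sorted singly-linked list instead of an rbtree; it illustrates the invariant, not the kernel data structure, and the toy_* names are invented.

#include <stdint.h>

struct toy_entity {
	uint64_t vruntime;
	struct toy_entity *next;
};

struct toy_timeline {
	struct toy_entity *leftmost;	/* plays the role of rb_leftmost */
};

static int entity_before(const struct toy_entity *a, const struct toy_entity *b)
{
	return (int64_t)(a->vruntime - b->vruntime) < 0;	/* wrap-safe compare */
}

static void toy_enqueue(struct toy_timeline *tl, struct toy_entity *se)
{
	struct toy_entity **link = &tl->leftmost;

	while (*link && !entity_before(se, *link))	/* find the insertion point */
		link = &(*link)->next;
	se->next = *link;
	*link = se;
}

static struct toy_entity *toy_pick_first(struct toy_timeline *tl)
{
	return tl->leftmost;	/* next entity to run: smallest vruntime */
}

static void toy_dequeue(struct toy_timeline *tl, struct toy_entity *se)
{
	struct toy_entity **link = &tl->leftmost;

	while (*link && *link != se)
		link = &(*link)->next;
	if (*link)
		*link = se->next;
}

The rbtree gives the kernel O(log n) insert/remove while the cached leftmost pointer keeps picking the next entity O(1); the list above trades that for brevity.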
601 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se) in calc_delta_fair() argument
603 if (unlikely(se->load.weight != NICE_0_LOAD)) in calc_delta_fair()
604 delta = __calc_delta(delta, NICE_0_LOAD, &se->load); in calc_delta_fair()
636 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) in sched_slice() argument
638 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq); in sched_slice()
640 for_each_sched_entity(se) { in sched_slice()
644 cfs_rq = cfs_rq_of(se); in sched_slice()
647 if (unlikely(!se->on_rq)) { in sched_slice()
650 update_load_add(&lw, se->load.weight); in sched_slice()
653 slice = __calc_delta(slice, se->load.weight, load); in sched_slice()
663 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se) in sched_vslice() argument
665 return calc_delta_fair(sched_slice(cfs_rq, se), se); in sched_vslice()
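
calc_delta_fair(), sched_slice() and sched_vslice() carry the central CFS arithmetic: a runnable entity gets a wall-clock slice equal to the scheduling period scaled by its weight over the runqueue's total weight, and the virtual slice rescales that through NICE_0_LOAD so every entity advances in virtual time at the same rate. The kernel does this with the fixed-point helper __calc_delta() and per-hierarchy loads; the sketch below redoes the arithmetic with plain 64-bit division, illustrative latency/granularity defaults, and two weights from the nice table, so treat it as a worked example rather than the kernel computation.

#include <stdio.h>
#include <stdint.h>

#define NICE_0_LOAD		1024ULL
#define SYSCTL_LATENCY_NS	6000000ULL	/* 6 ms target latency, illustrative  */
#define SYSCTL_MIN_GRAN_NS	750000ULL	/* 0.75 ms minimum slice, illustrative */
#define NR_LATENCY		(SYSCTL_LATENCY_NS / SYSCTL_MIN_GRAN_NS)

/* Period stretches once more tasks than fit in the latency target are runnable. */
static uint64_t sched_period(unsigned int nr_running)
{
	if (nr_running > NR_LATENCY)
		return nr_running * SYSCTL_MIN_GRAN_NS;
	return SYSCTL_LATENCY_NS;
}

/* Wall-clock slice: share of the period proportional to this entity's weight. */
static uint64_t slice_ns(uint64_t weight, uint64_t total_weight,
			 unsigned int nr_running)
{
	return sched_period(nr_running) * weight / total_weight;
}

/* Virtual slice: the wall-clock slice rescaled as if the entity had nice-0 weight. */
static uint64_t vslice_ns(uint64_t weight, uint64_t total_weight,
			  unsigned int nr_running)
{
	return slice_ns(weight, total_weight, nr_running) * NICE_0_LOAD / weight;
}

int main(void)
{
	/* Two runnable tasks: nice 0 (weight 1024) and nice -5 (weight 3121). */
	uint64_t total = 1024 + 3121;

	printf("nice 0  slice: %llu ns\n",
	       (unsigned long long)slice_ns(1024, total, 2));
	printf("nice -5 slice: %llu ns\n",
	       (unsigned long long)slice_ns(3121, total, 2));
	printf("vslices nearly equal: %llu vs %llu\n",
	       (unsigned long long)vslice_ns(1024, total, 2),
	       (unsigned long long)vslice_ns(3121, total, 2));
	return 0;
}

The heavier task gets a proportionally longer wall-clock slice, yet both consume (almost exactly, up to integer truncation) the same amount of virtual time per slice, which is what keeps the timeline fair.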
672 static inline void __update_task_entity_contrib(struct sched_entity *se);
673 static inline void __update_task_entity_utilization(struct sched_entity *se);
680 slice = sched_slice(task_cfs_rq(p), &p->se) >> 10; in init_task_runnable_average()
681 p->se.avg.runnable_avg_sum = p->se.avg.running_avg_sum = slice; in init_task_runnable_average()
682 p->se.avg.avg_period = slice; in init_task_runnable_average()
683 __update_task_entity_contrib(&p->se); in init_task_runnable_average()
684 __update_task_entity_utilization(&p->se); in init_task_runnable_average()
732 update_curr(cfs_rq_of(&rq->curr->se)); in update_curr_fair()
736 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_start() argument
738 schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq))); in update_stats_wait_start()
744 static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_enqueue() argument
750 if (se != cfs_rq->curr) in update_stats_enqueue()
751 update_stats_wait_start(cfs_rq, se); in update_stats_enqueue()
755 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_end() argument
757 schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max, in update_stats_wait_end()
758 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start)); in update_stats_wait_end()
759 schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1); in update_stats_wait_end()
760 schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum + in update_stats_wait_end()
761 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start); in update_stats_wait_end()
763 if (entity_is_task(se)) { in update_stats_wait_end()
764 trace_sched_stat_wait(task_of(se), in update_stats_wait_end()
765 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start); in update_stats_wait_end()
768 schedstat_set(se->statistics.wait_start, 0); in update_stats_wait_end()
772 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_dequeue() argument
778 if (se != cfs_rq->curr) in update_stats_dequeue()
779 update_stats_wait_end(cfs_rq, se); in update_stats_dequeue()
786 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_curr_start() argument
791 se->exec_start = rq_clock_task(rq_of(cfs_rq)); in update_stats_curr_start()
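
The update_stats_wait_start()/update_stats_wait_end() pair (lines 736-779) measures how long an entity sat runnable but not running: the start timestamp is taken when the entity is enqueued behind the current one, and the delta is folded into wait_max/wait_count/wait_sum when it is picked or dequeued. A compact user-space model of that bookkeeping follows; the struct and the fake clock standing in for rq_clock() are assumptions for illustration.

#include <stdint.h>

/* Names mirror the kernel's se->statistics members; the clock is a stub. */
struct wait_stats {
	uint64_t wait_start;
	uint64_t wait_max;
	uint64_t wait_count;
	uint64_t wait_sum;
};

static uint64_t fake_clock;	/* stands in for rq_clock(rq_of(cfs_rq)) */

static void wait_start(struct wait_stats *st)
{
	st->wait_start = fake_clock;	/* entity becomes runnable but not running */
}

static void wait_end(struct wait_stats *st)
{
	uint64_t delta = fake_clock - st->wait_start;

	if (delta > st->wait_max)
		st->wait_max = delta;
	st->wait_count++;
	st->wait_sum += delta;
	st->wait_start = 0;	/* cleared, as update_stats_wait_end() does */
}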
1683 now = p->se.exec_start; in numa_get_avg_runtime()
1684 runtime = p->se.sum_exec_runtime; in numa_get_avg_runtime()
1690 delta = p->se.avg.runnable_avg_sum; in numa_get_avg_runtime()
1691 *period = p->se.avg.avg_period; in numa_get_avg_runtime()
2261 now = curr->se.sum_exec_runtime; in task_tick_numa()
2290 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_enqueue() argument
2292 update_load_add(&cfs_rq->load, se->load.weight); in account_entity_enqueue()
2293 if (!parent_entity(se)) in account_entity_enqueue()
2294 update_load_add(&rq_of(cfs_rq)->load, se->load.weight); in account_entity_enqueue()
2296 if (entity_is_task(se)) { in account_entity_enqueue()
2299 account_numa_enqueue(rq, task_of(se)); in account_entity_enqueue()
2300 list_add(&se->group_node, &rq->cfs_tasks); in account_entity_enqueue()
2307 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_dequeue() argument
2309 update_load_sub(&cfs_rq->load, se->load.weight); in account_entity_dequeue()
2310 if (!parent_entity(se)) in account_entity_dequeue()
2311 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight); in account_entity_dequeue()
2312 if (entity_is_task(se)) { in account_entity_dequeue()
2313 account_numa_dequeue(rq_of(cfs_rq), task_of(se)); in account_entity_dequeue()
2314 list_del_init(&se->group_node); in account_entity_dequeue()
2361 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, in reweight_entity() argument
2364 if (se->on_rq) { in reweight_entity()
2366 if (cfs_rq->curr == se) in reweight_entity()
2368 account_entity_dequeue(cfs_rq, se); in reweight_entity()
2371 update_load_set(&se->load, weight); in reweight_entity()
2373 if (se->on_rq) in reweight_entity()
2374 account_entity_enqueue(cfs_rq, se); in reweight_entity()
2382 struct sched_entity *se; in update_cfs_shares() local
2386 se = tg->se[cpu_of(rq_of(cfs_rq))]; in update_cfs_shares()
2387 if (!se || throttled_hierarchy(cfs_rq)) in update_cfs_shares()
2390 if (likely(se->load.weight == tg->shares)) in update_cfs_shares()
2395 reweight_entity(cfs_rq_of(se), se, shares); in update_cfs_shares()
2604 static inline u64 __synchronize_entity_decay(struct sched_entity *se) in __synchronize_entity_decay() argument
2606 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __synchronize_entity_decay()
2609 decays -= se->avg.decay_count; in __synchronize_entity_decay()
2610 se->avg.decay_count = 0; in __synchronize_entity_decay()
2614 se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays); in __synchronize_entity_decay()
2615 se->avg.utilization_avg_contrib = in __synchronize_entity_decay()
2616 decay_load(se->avg.utilization_avg_contrib, decays); in __synchronize_entity_decay()
2661 static inline void __update_group_entity_contrib(struct sched_entity *se) in __update_group_entity_contrib() argument
2663 struct cfs_rq *cfs_rq = group_cfs_rq(se); in __update_group_entity_contrib()
2670 se->avg.load_avg_contrib = div_u64(contrib, in __update_group_entity_contrib()
2698 se->avg.load_avg_contrib *= runnable_avg; in __update_group_entity_contrib()
2699 se->avg.load_avg_contrib >>= NICE_0_SHIFT; in __update_group_entity_contrib()
2714 static inline void __update_group_entity_contrib(struct sched_entity *se) {} in __update_group_entity_contrib() argument
2718 static inline void __update_task_entity_contrib(struct sched_entity *se) in __update_task_entity_contrib() argument
2723 contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight); in __update_task_entity_contrib()
2724 contrib /= (se->avg.avg_period + 1); in __update_task_entity_contrib()
2725 se->avg.load_avg_contrib = scale_load(contrib); in __update_task_entity_contrib()
2729 static long __update_entity_load_avg_contrib(struct sched_entity *se) in __update_entity_load_avg_contrib() argument
2731 long old_contrib = se->avg.load_avg_contrib; in __update_entity_load_avg_contrib()
2733 if (entity_is_task(se)) { in __update_entity_load_avg_contrib()
2734 __update_task_entity_contrib(se); in __update_entity_load_avg_contrib()
2736 __update_tg_runnable_avg(&se->avg, group_cfs_rq(se)); in __update_entity_load_avg_contrib()
2737 __update_group_entity_contrib(se); in __update_entity_load_avg_contrib()
2740 return se->avg.load_avg_contrib - old_contrib; in __update_entity_load_avg_contrib()
2744 static inline void __update_task_entity_utilization(struct sched_entity *se) in __update_task_entity_utilization() argument
2749 contrib = se->avg.running_avg_sum * scale_load_down(SCHED_LOAD_SCALE); in __update_task_entity_utilization()
2750 contrib /= (se->avg.avg_period + 1); in __update_task_entity_utilization()
2751 se->avg.utilization_avg_contrib = scale_load(contrib); in __update_task_entity_utilization()
2754 static long __update_entity_utilization_avg_contrib(struct sched_entity *se) in __update_entity_utilization_avg_contrib() argument
2756 long old_contrib = se->avg.utilization_avg_contrib; in __update_entity_utilization_avg_contrib()
2758 if (entity_is_task(se)) in __update_entity_utilization_avg_contrib()
2759 __update_task_entity_utilization(se); in __update_entity_utilization_avg_contrib()
2761 se->avg.utilization_avg_contrib = in __update_entity_utilization_avg_contrib()
2762 group_cfs_rq(se)->utilization_load_avg; in __update_entity_utilization_avg_contrib()
2764 return se->avg.utilization_avg_contrib - old_contrib; in __update_entity_utilization_avg_contrib()
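
__update_task_entity_contrib() and __update_task_entity_utilization() turn the per-entity running sums into the contributions that feed runnable_load_avg and utilization_load_avg: the load contribution scales runnable_avg_sum by the entity's weight, the utilization contribution scales running_avg_sum by SCHED_LOAD_SCALE, and both divide by avg_period + 1. The worked example below uses invented sums and ignores the scale_load()/scale_load_down() shifts, so it illustrates the formula only.

#include <stdio.h>
#include <stdint.h>

#define SCHED_LOAD_SCALE	1024ULL

/* contrib     = runnable_avg_sum * weight           / (avg_period + 1)
 * utilization = running_avg_sum  * SCHED_LOAD_SCALE / (avg_period + 1)
 * In the kernel the sums are the decayed geometric series maintained by
 * __update_entity_runnable_avg(); here they are made-up numbers.
 */
int main(void)
{
	uint64_t runnable_avg_sum = 23000;	/* decayed time spent runnable   */
	uint64_t running_avg_sum  = 11000;	/* decayed time spent on the CPU */
	uint64_t avg_period       = 47000;	/* decayed total period          */
	uint64_t weight           = 1024;	/* nice-0 task                   */

	uint64_t load_contrib = runnable_avg_sum * weight / (avg_period + 1);
	uint64_t util_contrib = running_avg_sum * SCHED_LOAD_SCALE / (avg_period + 1);

	printf("load_avg_contrib        = %llu\n", (unsigned long long)load_contrib);
	printf("utilization_avg_contrib = %llu\n", (unsigned long long)util_contrib);
	return 0;
}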
2779 static inline void update_entity_load_avg(struct sched_entity *se, in update_entity_load_avg() argument
2782 struct cfs_rq *cfs_rq = cfs_rq_of(se); in update_entity_load_avg()
2791 if (entity_is_task(se)) in update_entity_load_avg()
2794 now = cfs_rq_clock_task(group_cfs_rq(se)); in update_entity_load_avg()
2796 if (!__update_entity_runnable_avg(now, cpu, &se->avg, se->on_rq, in update_entity_load_avg()
2797 cfs_rq->curr == se)) in update_entity_load_avg()
2800 contrib_delta = __update_entity_load_avg_contrib(se); in update_entity_load_avg()
2801 utilization_delta = __update_entity_utilization_avg_contrib(se); in update_entity_load_avg()
2806 if (se->on_rq) { in update_entity_load_avg()
2845 struct sched_entity *se, in enqueue_entity_load_avg() argument
2857 if (unlikely(se->avg.decay_count <= 0)) { in enqueue_entity_load_avg()
2858 se->avg.last_runnable_update = rq_clock_task(rq_of(cfs_rq)); in enqueue_entity_load_avg()
2859 if (se->avg.decay_count) { in enqueue_entity_load_avg()
2868 se->avg.last_runnable_update -= (-se->avg.decay_count) in enqueue_entity_load_avg()
2870 update_entity_load_avg(se, 0); in enqueue_entity_load_avg()
2872 se->avg.decay_count = 0; in enqueue_entity_load_avg()
2876 __synchronize_entity_decay(se); in enqueue_entity_load_avg()
2881 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib); in enqueue_entity_load_avg()
2882 update_entity_load_avg(se, 0); in enqueue_entity_load_avg()
2885 cfs_rq->runnable_load_avg += se->avg.load_avg_contrib; in enqueue_entity_load_avg()
2886 cfs_rq->utilization_load_avg += se->avg.utilization_avg_contrib; in enqueue_entity_load_avg()
2897 struct sched_entity *se, in dequeue_entity_load_avg() argument
2900 update_entity_load_avg(se, 1); in dequeue_entity_load_avg()
2904 cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib; in dequeue_entity_load_avg()
2905 cfs_rq->utilization_load_avg -= se->avg.utilization_avg_contrib; in dequeue_entity_load_avg()
2907 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib; in dequeue_entity_load_avg()
2908 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter); in dequeue_entity_load_avg()
2936 static inline void update_entity_load_avg(struct sched_entity *se, in update_entity_load_avg() argument
2940 struct sched_entity *se, in enqueue_entity_load_avg() argument
2943 struct sched_entity *se, in dequeue_entity_load_avg() argument
2955 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) in enqueue_sleeper() argument
2960 if (entity_is_task(se)) in enqueue_sleeper()
2961 tsk = task_of(se); in enqueue_sleeper()
2963 if (se->statistics.sleep_start) { in enqueue_sleeper()
2964 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start; in enqueue_sleeper()
2969 if (unlikely(delta > se->statistics.sleep_max)) in enqueue_sleeper()
2970 se->statistics.sleep_max = delta; in enqueue_sleeper()
2972 se->statistics.sleep_start = 0; in enqueue_sleeper()
2973 se->statistics.sum_sleep_runtime += delta; in enqueue_sleeper()
2980 if (se->statistics.block_start) { in enqueue_sleeper()
2981 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start; in enqueue_sleeper()
2986 if (unlikely(delta > se->statistics.block_max)) in enqueue_sleeper()
2987 se->statistics.block_max = delta; in enqueue_sleeper()
2989 se->statistics.block_start = 0; in enqueue_sleeper()
2990 se->statistics.sum_sleep_runtime += delta; in enqueue_sleeper()
2994 se->statistics.iowait_sum += delta; in enqueue_sleeper()
2995 se->statistics.iowait_count++; in enqueue_sleeper()
3017 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) in check_spread() argument
3020 s64 d = se->vruntime - cfs_rq->min_vruntime; in check_spread()
3031 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) in place_entity() argument
3042 vruntime += sched_vslice(cfs_rq, se); in place_entity()
3059 se->vruntime = max_vruntime(se->vruntime, vruntime); in place_entity()
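
place_entity() decides where a newly enqueued entity lands on the virtual timeline: a freshly forked task (initial = 1) starts at min_vruntime plus one virtual slice so it does not instantly preempt everyone, a waking task gets a bounded credit below min_vruntime, and in both cases line 3059 only ever moves the entity forward in virtual time. The sketch below is a simplified model of that placement under the START_DEBIT and GENTLE_FAIR_SLEEPERS features; place(), start_debit and gentle_sleepers are stand-in names, not the kernel API.

#include <stdint.h>

#define SYSCTL_LATENCY_NS	6000000ULL	/* illustrative 6 ms latency target */
static int start_debit = 1;			/* models the START_DEBIT feature       */
static int gentle_sleepers = 1;			/* models GENTLE_FAIR_SLEEPERS          */

static uint64_t max_vruntime(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) > 0 ? a : b;	/* wrap-safe maximum */
}

static uint64_t place(uint64_t cur_vruntime, uint64_t min_vruntime,
		      uint64_t vslice, int initial)
{
	uint64_t vruntime = min_vruntime;

	if (initial && start_debit)
		vruntime += vslice;		/* new task: debit one virtual slice */
	if (!initial) {
		uint64_t thresh = SYSCTL_LATENCY_NS;

		if (gentle_sleepers)
			thresh >>= 1;		/* halve the sleeper bonus */
		vruntime -= thresh;		/* waking task: bounded credit */
	}
	/* never move an entity backwards in virtual time */
	return max_vruntime(cur_vruntime, vruntime);
}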
3065 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in enqueue_entity() argument
3072 se->vruntime += cfs_rq->min_vruntime; in enqueue_entity()
3078 enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP); in enqueue_entity()
3079 account_entity_enqueue(cfs_rq, se); in enqueue_entity()
3083 place_entity(cfs_rq, se, 0); in enqueue_entity()
3084 enqueue_sleeper(cfs_rq, se); in enqueue_entity()
3087 update_stats_enqueue(cfs_rq, se); in enqueue_entity()
3088 check_spread(cfs_rq, se); in enqueue_entity()
3089 if (se != cfs_rq->curr) in enqueue_entity()
3090 __enqueue_entity(cfs_rq, se); in enqueue_entity()
3091 se->on_rq = 1; in enqueue_entity()
3099 static void __clear_buddies_last(struct sched_entity *se) in __clear_buddies_last() argument
3101 for_each_sched_entity(se) { in __clear_buddies_last()
3102 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_last()
3103 if (cfs_rq->last != se) in __clear_buddies_last()
3110 static void __clear_buddies_next(struct sched_entity *se) in __clear_buddies_next() argument
3112 for_each_sched_entity(se) { in __clear_buddies_next()
3113 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_next()
3114 if (cfs_rq->next != se) in __clear_buddies_next()
3121 static void __clear_buddies_skip(struct sched_entity *se) in __clear_buddies_skip() argument
3123 for_each_sched_entity(se) { in __clear_buddies_skip()
3124 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_skip()
3125 if (cfs_rq->skip != se) in __clear_buddies_skip()
3132 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) in clear_buddies() argument
3134 if (cfs_rq->last == se) in clear_buddies()
3135 __clear_buddies_last(se); in clear_buddies()
3137 if (cfs_rq->next == se) in clear_buddies()
3138 __clear_buddies_next(se); in clear_buddies()
3140 if (cfs_rq->skip == se) in clear_buddies()
3141 __clear_buddies_skip(se); in clear_buddies()
3147 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in dequeue_entity() argument
3153 dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP); in dequeue_entity()
3155 update_stats_dequeue(cfs_rq, se); in dequeue_entity()
3158 if (entity_is_task(se)) { in dequeue_entity()
3159 struct task_struct *tsk = task_of(se); in dequeue_entity()
3162 se->statistics.sleep_start = rq_clock(rq_of(cfs_rq)); in dequeue_entity()
3164 se->statistics.block_start = rq_clock(rq_of(cfs_rq)); in dequeue_entity()
3169 clear_buddies(cfs_rq, se); in dequeue_entity()
3171 if (se != cfs_rq->curr) in dequeue_entity()
3172 __dequeue_entity(cfs_rq, se); in dequeue_entity()
3173 se->on_rq = 0; in dequeue_entity()
3174 account_entity_dequeue(cfs_rq, se); in dequeue_entity()
3182 se->vruntime -= cfs_rq->min_vruntime; in dequeue_entity()
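
Lines 3072 (enqueue_entity) and 3182 (dequeue_entity) are the two halves of one convention: an entity that leaves a runqueue for migration has the source cfs_rq's min_vruntime subtracted, making its vruntime relative, and the destination's min_vruntime is added back on enqueue, so an absolute vruntime from one CPU is never compared against another CPU's timeline. A minimal sketch of that renormalization, with toy stand-in structs:

#include <stdint.h>

struct toy_rq {
	uint64_t min_vruntime;
};

struct toy_se {
	uint64_t vruntime;
};

static void dequeue_for_migration(struct toy_rq *src, struct toy_se *se)
{
	se->vruntime -= src->min_vruntime;	/* now relative, CPU-agnostic */
}

static void enqueue_after_migration(struct toy_rq *dst, struct toy_se *se)
{
	se->vruntime += dst->min_vruntime;	/* absolute again on the new runqueue */
}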
3198 struct sched_entity *se; in check_preempt_tick() local
3221 se = __pick_first_entity(cfs_rq); in check_preempt_tick()
3222 delta = curr->vruntime - se->vruntime; in check_preempt_tick()
3232 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in set_next_entity() argument
3235 if (se->on_rq) { in set_next_entity()
3241 update_stats_wait_end(cfs_rq, se); in set_next_entity()
3242 __dequeue_entity(cfs_rq, se); in set_next_entity()
3243 update_entity_load_avg(se, 1); in set_next_entity()
3246 update_stats_curr_start(cfs_rq, se); in set_next_entity()
3247 cfs_rq->curr = se; in set_next_entity()
3254 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) { in set_next_entity()
3255 se->statistics.slice_max = max(se->statistics.slice_max, in set_next_entity()
3256 se->sum_exec_runtime - se->prev_sum_exec_runtime); in set_next_entity()
3259 se->prev_sum_exec_runtime = se->sum_exec_runtime; in set_next_entity()
3263 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
3276 struct sched_entity *se; in pick_next_entity() local
3285 se = left; /* ideally we run the leftmost entity */ in pick_next_entity()
3291 if (cfs_rq->skip == se) { in pick_next_entity()
3294 if (se == curr) { in pick_next_entity()
3297 second = __pick_next_entity(se); in pick_next_entity()
3303 se = second; in pick_next_entity()
3310 se = cfs_rq->last; in pick_next_entity()
3316 se = cfs_rq->next; in pick_next_entity()
3318 clear_buddies(cfs_rq, se); in pick_next_entity()
3320 return se; in pick_next_entity()
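
pick_next_entity() starts from the leftmost (smallest-vruntime) entity and then lets three buddy hints override it: the skip buddy set by sched_yield() is avoided in favour of the second-leftmost, and the last/next buddies are taken only if wakeup_preempt_entity() says they have not fallen unfairly far behind. Below is a condensed model of that decision order; the pick_* structs and close_enough() are crude stand-ins for the rbtree lookups and for wakeup_preempt_entity() returning less than 1.

#include <stdint.h>

struct pick_se {
	uint64_t vruntime;
};

struct pick_rq {
	struct pick_se *leftmost;	/* smallest vruntime on the timeline */
	struct pick_se *second;		/* next-leftmost, may be NULL        */
	struct pick_se *skip, *last, *next;
	uint64_t wakeup_gran;		/* assumed per-rq granularity, in virtual time */
};

/* True when 'cand' trails 'best' in vruntime by less than the granularity. */
static int close_enough(const struct pick_se *best, const struct pick_se *cand,
			uint64_t gran)
{
	return (int64_t)(cand->vruntime - best->vruntime) < (int64_t)gran;
}

static struct pick_se *pick_next(const struct pick_rq *rq)
{
	const struct pick_se *left = rq->leftmost;
	struct pick_se *se = rq->leftmost;	/* ideally run the leftmost */

	if (se == rq->skip && rq->second)	/* honour sched_yield(): avoid the skip buddy */
		se = rq->second;

	/* prefer the last-run entity (return the CPU to a preempted task) if it is not too far behind */
	if (rq->last && close_enough(left, rq->last, rq->wakeup_gran))
		se = rq->last;

	/* an explicitly set next buddy wins under the same fairness condition */
	if (rq->next && close_enough(left, rq->next, rq->wakeup_gran))
		se = rq->next;

	return se;
}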
3635 struct sched_entity *se; in throttle_cfs_rq() local
3638 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; in throttle_cfs_rq()
3646 for_each_sched_entity(se) { in throttle_cfs_rq()
3647 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in throttle_cfs_rq()
3649 if (!se->on_rq) in throttle_cfs_rq()
3653 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); in throttle_cfs_rq()
3660 if (!se) in throttle_cfs_rq()
3680 struct sched_entity *se; in unthrottle_cfs_rq() local
3684 se = cfs_rq->tg->se[cpu_of(rq)]; in unthrottle_cfs_rq()
3702 for_each_sched_entity(se) { in unthrottle_cfs_rq()
3703 if (se->on_rq) in unthrottle_cfs_rq()
3706 cfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
3708 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP); in unthrottle_cfs_rq()
3715 if (!se) in unthrottle_cfs_rq()
4171 struct sched_entity *se = &p->se; in hrtick_start_fair() local
4172 struct cfs_rq *cfs_rq = cfs_rq_of(se); in hrtick_start_fair()
4177 u64 slice = sched_slice(cfs_rq, se); in hrtick_start_fair()
4178 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; in hrtick_start_fair()
4202 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) in hrtick_update()
4225 struct sched_entity *se = &p->se; in enqueue_task_fair() local
4227 for_each_sched_entity(se) { in enqueue_task_fair()
4228 if (se->on_rq) in enqueue_task_fair()
4230 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
4231 enqueue_entity(cfs_rq, se, flags); in enqueue_task_fair()
4246 for_each_sched_entity(se) { in enqueue_task_fair()
4247 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
4254 update_entity_load_avg(se, 1); in enqueue_task_fair()
4257 if (!se) { in enqueue_task_fair()
4264 static void set_next_buddy(struct sched_entity *se);
4274 struct sched_entity *se = &p->se; in dequeue_task_fair() local
4277 for_each_sched_entity(se) { in dequeue_task_fair()
4278 cfs_rq = cfs_rq_of(se); in dequeue_task_fair()
4279 dequeue_entity(cfs_rq, se, flags); in dequeue_task_fair()
4297 if (task_sleep && parent_entity(se)) in dequeue_task_fair()
4298 set_next_buddy(parent_entity(se)); in dequeue_task_fair()
4301 se = parent_entity(se); in dequeue_task_fair()
4307 for_each_sched_entity(se) { in dequeue_task_fair()
4308 cfs_rq = cfs_rq_of(se); in dequeue_task_fair()
4315 update_entity_load_avg(se, 1); in dequeue_task_fair()
4318 if (!se) { in dequeue_task_fair()
4407 struct sched_entity *se = &p->se; in task_waking_fair() local
4408 struct cfs_rq *cfs_rq = cfs_rq_of(se); in task_waking_fair()
4423 se->vruntime -= min_vruntime; in task_waking_fair()
4480 struct sched_entity *se = tg->se[cpu]; in effective_load() local
4485 for_each_sched_entity(se) { in effective_load()
4488 tg = se->my_q->tg; in effective_load()
4493 W = wg + calc_tg_weight(tg, se->my_q); in effective_load()
4498 w = se->my_q->load.weight + wl; in effective_load()
4519 wl -= se->load.weight; in effective_load()
4593 weight = current->se.load.weight; in wake_affine()
4600 weight = p->se.load.weight; in wake_affine()
4626 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts); in wake_affine()
4632 schedstat_inc(p, se.statistics.nr_wakeups_affine); in wake_affine()
4916 struct sched_entity *se = &p->se; in migrate_task_rq_fair() local
4917 struct cfs_rq *cfs_rq = cfs_rq_of(se); in migrate_task_rq_fair()
4925 if (se->avg.decay_count) { in migrate_task_rq_fair()
4926 se->avg.decay_count = -__synchronize_entity_decay(se); in migrate_task_rq_fair()
4927 atomic_long_add(se->avg.load_avg_contrib, in migrate_task_rq_fair()
4932 se->exec_start = 0; in migrate_task_rq_fair()
4937 wakeup_gran(struct sched_entity *curr, struct sched_entity *se) in wakeup_gran() argument
4954 return calc_delta_fair(gran, se); in wakeup_gran()
4972 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) in wakeup_preempt_entity() argument
4974 s64 gran, vdiff = curr->vruntime - se->vruntime; in wakeup_preempt_entity()
4979 gran = wakeup_gran(curr, se); in wakeup_preempt_entity()
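
wakeup_preempt_entity() gives the three-way verdict that check_preempt_wakeup() and the buddy checks in pick_next_entity() rely on: -1 when the candidate se has at least as much vruntime as curr (no claim to the CPU), 0 when se trails curr but by less than the wakeup granularity, and 1 only when the lag exceeds the granularity, which is the case that actually triggers preemption. Restated in stand-alone form below, with the granularity passed in directly instead of being derived by wakeup_gran()/calc_delta_fair():

#include <stdint.h>

/* Three-way verdict in the style of wakeup_preempt_entity():
 *  -1: se has at least as much vruntime as curr  -> no claim to preempt
 *   0: se trails curr, but within the granularity -> leave curr alone
 *   1: se trails curr by more than the granularity -> preempt curr
 */
static int should_preempt(uint64_t curr_vruntime, uint64_t se_vruntime,
			  uint64_t gran)
{
	int64_t vdiff = (int64_t)(curr_vruntime - se_vruntime);

	if (vdiff <= 0)
		return -1;
	if (vdiff > (int64_t)gran)
		return 1;
	return 0;
}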
4986 static void set_last_buddy(struct sched_entity *se) in set_last_buddy() argument
4988 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE)) in set_last_buddy()
4991 for_each_sched_entity(se) in set_last_buddy()
4992 cfs_rq_of(se)->last = se; in set_last_buddy()
4995 static void set_next_buddy(struct sched_entity *se) in set_next_buddy() argument
4997 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE)) in set_next_buddy()
5000 for_each_sched_entity(se) in set_next_buddy()
5001 cfs_rq_of(se)->next = se; in set_next_buddy()
5004 static void set_skip_buddy(struct sched_entity *se) in set_skip_buddy() argument
5006 for_each_sched_entity(se) in set_skip_buddy()
5007 cfs_rq_of(se)->skip = se; in set_skip_buddy()
5016 struct sched_entity *se = &curr->se, *pse = &p->se; in check_preempt_wakeup() local
5021 if (unlikely(se == pse)) in check_preempt_wakeup()
5063 find_matching_se(&se, &pse); in check_preempt_wakeup()
5064 update_curr(cfs_rq_of(se)); in check_preempt_wakeup()
5066 if (wakeup_preempt_entity(se, pse) == 1) { in check_preempt_wakeup()
5089 if (unlikely(!se->on_rq || curr == rq->idle)) in check_preempt_wakeup()
5092 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se)) in check_preempt_wakeup()
5093 set_last_buddy(se); in check_preempt_wakeup()
5100 struct sched_entity *se; in pick_next_task_fair() local
5145 se = pick_next_entity(cfs_rq, curr); in pick_next_task_fair()
5146 cfs_rq = group_cfs_rq(se); in pick_next_task_fair()
5149 p = task_of(se); in pick_next_task_fair()
5157 struct sched_entity *pse = &prev->se; in pick_next_task_fair()
5159 while (!(cfs_rq = is_same_group(se, pse))) { in pick_next_task_fair()
5160 int se_depth = se->depth; in pick_next_task_fair()
5168 set_next_entity(cfs_rq_of(se), se); in pick_next_task_fair()
5169 se = parent_entity(se); in pick_next_task_fair()
5174 set_next_entity(cfs_rq, se); in pick_next_task_fair()
5191 se = pick_next_entity(cfs_rq, NULL); in pick_next_task_fair()
5192 set_next_entity(cfs_rq, se); in pick_next_task_fair()
5193 cfs_rq = group_cfs_rq(se); in pick_next_task_fair()
5196 p = task_of(se); in pick_next_task_fair()
5224 struct sched_entity *se = &prev->se; in put_prev_task_fair() local
5227 for_each_sched_entity(se) { in put_prev_task_fair()
5228 cfs_rq = cfs_rq_of(se); in put_prev_task_fair()
5229 put_prev_entity(cfs_rq, se); in put_prev_task_fair()
5242 struct sched_entity *se = &curr->se; in yield_task_fair() local
5250 clear_buddies(cfs_rq, se); in yield_task_fair()
5266 set_skip_buddy(se); in yield_task_fair()
5271 struct sched_entity *se = &p->se; in yield_to_task_fair() local
5274 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se))) in yield_to_task_fair()
5278 set_next_buddy(se); in yield_to_task_fair()
5458 (&p->se == cfs_rq_of(&p->se)->next || in task_hot()
5459 &p->se == cfs_rq_of(&p->se)->last)) in task_hot()
5467 delta = rq_clock_task(env->src_rq) - p->se.exec_start; in task_hot()
5583 schedstat_inc(p, se.statistics.nr_failed_migrations_affine); in can_migrate_task()
5614 schedstat_inc(p, se.statistics.nr_failed_migrations_running); in can_migrate_task()
5632 schedstat_inc(p, se.statistics.nr_forced_migrations); in can_migrate_task()
5637 schedstat_inc(p, se.statistics.nr_failed_migrations_hot); in can_migrate_task()
5665 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) { in detach_one_task()
5704 p = list_first_entry(tasks, struct task_struct, se.group_node); in detach_tasks()
5730 list_add(&p->se.group_node, &env->tasks); in detach_tasks()
5754 list_move_tail(&p->se.group_node, tasks); in detach_tasks()
5803 p = list_first_entry(tasks, struct task_struct, se.group_node); in attach_tasks()
5804 list_del_init(&p->se.group_node); in attach_tasks()
5818 struct sched_entity *se = tg->se[cpu]; in __update_blocked_averages_cpu() local
5827 if (se) { in __update_blocked_averages_cpu()
5828 update_entity_load_avg(se, 1); in __update_blocked_averages_cpu()
5838 if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running) in __update_blocked_averages_cpu()
5878 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; in update_cfs_rq_h_load() local
5886 for_each_sched_entity(se) { in update_cfs_rq_h_load()
5887 cfs_rq = cfs_rq_of(se); in update_cfs_rq_h_load()
5888 cfs_rq->h_load_next = se; in update_cfs_rq_h_load()
5893 if (!se) { in update_cfs_rq_h_load()
5898 while ((se = cfs_rq->h_load_next) != NULL) { in update_cfs_rq_h_load()
5900 load = div64_ul(load * se->avg.load_avg_contrib, in update_cfs_rq_h_load()
5902 cfs_rq = group_cfs_rq(se); in update_cfs_rq_h_load()
5913 return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load, in task_h_load()
5923 return p->se.avg.load_avg_contrib; in task_h_load()
7817 struct sched_entity *se = &curr->se; in task_tick_fair() local
7819 for_each_sched_entity(se) { in task_tick_fair()
7820 cfs_rq = cfs_rq_of(se); in task_tick_fair()
7821 entity_tick(cfs_rq, se, queued); in task_tick_fair()
7838 struct sched_entity *se = &p->se, *curr; in task_fork_fair() local
7863 se->vruntime = curr->vruntime; in task_fork_fair()
7864 place_entity(cfs_rq, se, 1); in task_fork_fair()
7866 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) { in task_fork_fair()
7871 swap(curr->vruntime, se->vruntime); in task_fork_fair()
7875 se->vruntime -= cfs_rq->min_vruntime; in task_fork_fair()
7904 struct sched_entity *se = &p->se; in switched_from_fair() local
7905 struct cfs_rq *cfs_rq = cfs_rq_of(se); in switched_from_fair()
7921 place_entity(cfs_rq, se, 0); in switched_from_fair()
7922 se->vruntime -= cfs_rq->min_vruntime; in switched_from_fair()
7931 if (se->avg.decay_count) { in switched_from_fair()
7932 __synchronize_entity_decay(se); in switched_from_fair()
7933 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib); in switched_from_fair()
7944 struct sched_entity *se = &p->se; in switched_to_fair() local
7949 se->depth = se->parent ? se->parent->depth + 1 : 0; in switched_to_fair()
7972 struct sched_entity *se = &rq->curr->se; in set_curr_task_fair() local
7974 for_each_sched_entity(se) { in set_curr_task_fair()
7975 struct cfs_rq *cfs_rq = cfs_rq_of(se); in set_curr_task_fair()
7977 set_next_entity(cfs_rq, se); in set_curr_task_fair()
7999 struct sched_entity *se = &p->se; in task_move_group_fair() local
8027 if (!queued && (!se->sum_exec_runtime || p->state == TASK_WAKING)) in task_move_group_fair()
8031 se->vruntime -= cfs_rq_of(se)->min_vruntime; in task_move_group_fair()
8033 se->depth = se->parent ? se->parent->depth + 1 : 0; in task_move_group_fair()
8035 cfs_rq = cfs_rq_of(se); in task_move_group_fair()
8036 se->vruntime += cfs_rq->min_vruntime; in task_move_group_fair()
8043 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter); in task_move_group_fair()
8044 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib; in task_move_group_fair()
8058 if (tg->se) in free_fair_sched_group()
8059 kfree(tg->se[i]); in free_fair_sched_group()
8063 kfree(tg->se); in free_fair_sched_group()
8069 struct sched_entity *se; in alloc_fair_sched_group() local
8075 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL); in alloc_fair_sched_group()
8076 if (!tg->se) in alloc_fair_sched_group()
8089 se = kzalloc_node(sizeof(struct sched_entity), in alloc_fair_sched_group()
8091 if (!se) in alloc_fair_sched_group()
8095 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); in alloc_fair_sched_group()
8124 struct sched_entity *se, int cpu, in init_tg_cfs_entry() argument
8134 tg->se[cpu] = se; in init_tg_cfs_entry()
8137 if (!se) in init_tg_cfs_entry()
8141 se->cfs_rq = &rq->cfs; in init_tg_cfs_entry()
8142 se->depth = 0; in init_tg_cfs_entry()
8144 se->cfs_rq = parent->my_q; in init_tg_cfs_entry()
8145 se->depth = parent->depth + 1; in init_tg_cfs_entry()
8148 se->my_q = cfs_rq; in init_tg_cfs_entry()
8150 update_load_set(&se->load, NICE_0_LOAD); in init_tg_cfs_entry()
8151 se->parent = parent; in init_tg_cfs_entry()
8164 if (!tg->se[0]) in sched_group_set_shares()
8176 struct sched_entity *se; in sched_group_set_shares() local
8178 se = tg->se[i]; in sched_group_set_shares()
8184 for_each_sched_entity(se) in sched_group_set_shares()
8185 update_cfs_shares(group_cfs_rq(se)); in sched_group_set_shares()
8209 struct sched_entity *se = &task->se; in get_rr_interval_fair() local
8217 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se)); in get_rr_interval_fair()