Lines matching refs: se (cross-references to the scheduling entity se in the CFS fair scheduler; each entry gives the source line number, the matching line, and the enclosing function, with "argument" or "local" marking how se is used at that point).

255 #define entity_is_task(se)	(!se->my_q)  argument
257 static inline struct task_struct *task_of(struct sched_entity *se) in task_of() argument
260 WARN_ON_ONCE(!entity_is_task(se)); in task_of()
262 return container_of(se, struct task_struct, se); in task_of()
266 #define for_each_sched_entity(se) \ argument
267 for (; se; se = se->parent)
271 return p->se.cfs_rq; in task_cfs_rq()
275 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) in cfs_rq_of() argument
277 return se->cfs_rq; in cfs_rq_of()
322 is_same_group(struct sched_entity *se, struct sched_entity *pse) in is_same_group() argument
324 if (se->cfs_rq == pse->cfs_rq) in is_same_group()
325 return se->cfs_rq; in is_same_group()
330 static inline struct sched_entity *parent_entity(struct sched_entity *se) in parent_entity() argument
332 return se->parent; in parent_entity()
336 find_matching_se(struct sched_entity **se, struct sched_entity **pse) in find_matching_se() argument
348 se_depth = (*se)->depth; in find_matching_se()
353 *se = parent_entity(*se); in find_matching_se()
361 while (!is_same_group(*se, *pse)) { in find_matching_se()
362 *se = parent_entity(*se); in find_matching_se()
369 static inline struct task_struct *task_of(struct sched_entity *se) in task_of() argument
371 return container_of(se, struct task_struct, se); in task_of()
379 #define entity_is_task(se) 1 argument
381 #define for_each_sched_entity(se) \ argument
382 for (; se; se = NULL)
389 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) in cfs_rq_of() argument
391 struct task_struct *p = task_of(se); in cfs_rq_of()
414 static inline struct sched_entity *parent_entity(struct sched_entity *se) in parent_entity() argument
420 find_matching_se(struct sched_entity **se, struct sched_entity **pse) in find_matching_se() argument
465 struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost, in update_min_vruntime() local
470 vruntime = se->vruntime; in update_min_vruntime()
472 vruntime = min_vruntime(vruntime, se->vruntime); in update_min_vruntime()
486 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __enqueue_entity() argument
503 if (entity_before(se, entry)) { in __enqueue_entity()
516 cfs_rq->rb_leftmost = &se->run_node; in __enqueue_entity()
518 rb_link_node(&se->run_node, parent, link); in __enqueue_entity()
519 rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline); in __enqueue_entity()
522 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __dequeue_entity() argument
524 if (cfs_rq->rb_leftmost == &se->run_node) { in __dequeue_entity()
527 next_node = rb_next(&se->run_node); in __dequeue_entity()
531 rb_erase(&se->run_node, &cfs_rq->tasks_timeline); in __dequeue_entity()
544 static struct sched_entity *__pick_next_entity(struct sched_entity *se) in __pick_next_entity() argument
546 struct rb_node *next = rb_next(&se->run_node); in __pick_next_entity()
596 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se) in calc_delta_fair() argument
598 if (unlikely(se->load.weight != NICE_0_LOAD)) in calc_delta_fair()
599 delta = __calc_delta(delta, NICE_0_LOAD, &se->load); in calc_delta_fair()
626 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) in sched_slice() argument
628 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq); in sched_slice()
630 for_each_sched_entity(se) { in sched_slice()
634 cfs_rq = cfs_rq_of(se); in sched_slice()
637 if (unlikely(!se->on_rq)) { in sched_slice()
640 update_load_add(&lw, se->load.weight); in sched_slice()
643 slice = __calc_delta(slice, se->load.weight, load); in sched_slice()
653 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se) in sched_vslice() argument
655 return calc_delta_fair(sched_slice(cfs_rq, se), se); in sched_vslice()
672 void init_entity_runnable_average(struct sched_entity *se) in init_entity_runnable_average() argument
674 struct sched_avg *sa = &se->avg; in init_entity_runnable_average()
683 sa->load_avg = scale_load_down(se->load.weight); in init_entity_runnable_average()
693 void init_entity_runnable_average(struct sched_entity *se) in init_entity_runnable_average() argument
738 update_curr(cfs_rq_of(&rq->curr->se)); in update_curr_fair()
742 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_start() argument
744 schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq))); in update_stats_wait_start()
750 static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_enqueue() argument
756 if (se != cfs_rq->curr) in update_stats_enqueue()
757 update_stats_wait_start(cfs_rq, se); in update_stats_enqueue()
761 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_end() argument
763 schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max, in update_stats_wait_end()
764 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start)); in update_stats_wait_end()
765 schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1); in update_stats_wait_end()
766 schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum + in update_stats_wait_end()
767 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start); in update_stats_wait_end()
769 if (entity_is_task(se)) { in update_stats_wait_end()
770 trace_sched_stat_wait(task_of(se), in update_stats_wait_end()
771 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start); in update_stats_wait_end()
774 schedstat_set(se->statistics.wait_start, 0); in update_stats_wait_end()
778 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_dequeue() argument
784 if (se != cfs_rq->curr) in update_stats_dequeue()
785 update_stats_wait_end(cfs_rq, se); in update_stats_dequeue()
792 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_curr_start() argument
797 se->exec_start = rq_clock_task(rq_of(cfs_rq)); in update_stats_curr_start()
1705 now = p->se.exec_start; in numa_get_avg_runtime()
1706 runtime = p->se.sum_exec_runtime; in numa_get_avg_runtime()
1712 delta = p->se.avg.load_sum / p->se.load.weight; in numa_get_avg_runtime()
2302 now = curr->se.sum_exec_runtime; in task_tick_numa()
2331 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_enqueue() argument
2333 update_load_add(&cfs_rq->load, se->load.weight); in account_entity_enqueue()
2334 if (!parent_entity(se)) in account_entity_enqueue()
2335 update_load_add(&rq_of(cfs_rq)->load, se->load.weight); in account_entity_enqueue()
2337 if (entity_is_task(se)) { in account_entity_enqueue()
2340 account_numa_enqueue(rq, task_of(se)); in account_entity_enqueue()
2341 list_add(&se->group_node, &rq->cfs_tasks); in account_entity_enqueue()
2348 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_dequeue() argument
2350 update_load_sub(&cfs_rq->load, se->load.weight); in account_entity_dequeue()
2351 if (!parent_entity(se)) in account_entity_dequeue()
2352 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight); in account_entity_dequeue()
2353 if (entity_is_task(se)) { in account_entity_dequeue()
2354 account_numa_dequeue(rq_of(cfs_rq), task_of(se)); in account_entity_dequeue()
2355 list_del_init(&se->group_node); in account_entity_dequeue()
2402 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, in reweight_entity() argument
2405 if (se->on_rq) { in reweight_entity()
2407 if (cfs_rq->curr == se) in reweight_entity()
2409 account_entity_dequeue(cfs_rq, se); in reweight_entity()
2412 update_load_set(&se->load, weight); in reweight_entity()
2414 if (se->on_rq) in reweight_entity()
2415 account_entity_enqueue(cfs_rq, se); in reweight_entity()
2423 struct sched_entity *se; in update_cfs_shares() local
2427 se = tg->se[cpu_of(rq_of(cfs_rq))]; in update_cfs_shares()
2428 if (!se || throttled_hierarchy(cfs_rq)) in update_cfs_shares()
2431 if (likely(se->load.weight == tg->shares)) in update_cfs_shares()
2436 reweight_entity(cfs_rq_of(se), se, shares); in update_cfs_shares()
2716 static inline void update_load_avg(struct sched_entity *se, int update_tg) in update_load_avg() argument
2718 struct cfs_rq *cfs_rq = cfs_rq_of(se); in update_load_avg()
2726 __update_load_avg(now, cpu, &se->avg, in update_load_avg()
2727 se->on_rq * scale_load_down(se->load.weight), in update_load_avg()
2728 cfs_rq->curr == se, NULL); in update_load_avg()
2734 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in attach_entity_load_avg() argument
2743 if (se->avg.last_update_time) { in attach_entity_load_avg()
2745 &se->avg, 0, 0, NULL); in attach_entity_load_avg()
2754 se->avg.last_update_time = cfs_rq->avg.last_update_time; in attach_entity_load_avg()
2755 cfs_rq->avg.load_avg += se->avg.load_avg; in attach_entity_load_avg()
2756 cfs_rq->avg.load_sum += se->avg.load_sum; in attach_entity_load_avg()
2757 cfs_rq->avg.util_avg += se->avg.util_avg; in attach_entity_load_avg()
2758 cfs_rq->avg.util_sum += se->avg.util_sum; in attach_entity_load_avg()
2761 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in detach_entity_load_avg() argument
2764 &se->avg, se->on_rq * scale_load_down(se->load.weight), in detach_entity_load_avg()
2765 cfs_rq->curr == se, NULL); in detach_entity_load_avg()
2767 cfs_rq->avg.load_avg = max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0); in detach_entity_load_avg()
2768 cfs_rq->avg.load_sum = max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0); in detach_entity_load_avg()
2769 cfs_rq->avg.util_avg = max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0); in detach_entity_load_avg()
2770 cfs_rq->avg.util_sum = max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0); in detach_entity_load_avg()
2775 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in enqueue_entity_load_avg() argument
2777 struct sched_avg *sa = &se->avg; in enqueue_entity_load_avg()
2784 se->on_rq * scale_load_down(se->load.weight), in enqueue_entity_load_avg()
2785 cfs_rq->curr == se, NULL); in enqueue_entity_load_avg()
2794 attach_entity_load_avg(cfs_rq, se); in enqueue_entity_load_avg()
2802 dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in dequeue_entity_load_avg() argument
2804 update_load_avg(se, 1); in dequeue_entity_load_avg()
2807 max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0); in dequeue_entity_load_avg()
2809 max_t(s64, cfs_rq->runnable_load_sum - se->avg.load_sum, 0); in dequeue_entity_load_avg()
2816 void remove_entity_load_avg(struct sched_entity *se) in remove_entity_load_avg() argument
2818 struct cfs_rq *cfs_rq = cfs_rq_of(se); in remove_entity_load_avg()
2833 __update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL); in remove_entity_load_avg()
2834 atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg); in remove_entity_load_avg()
2835 atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg); in remove_entity_load_avg()
2870 static inline void update_load_avg(struct sched_entity *se, int update_tg) {} in update_load_avg() argument
2872 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in enqueue_entity_load_avg() argument
2874 dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in dequeue_entity_load_avg() argument
2875 static inline void remove_entity_load_avg(struct sched_entity *se) {} in remove_entity_load_avg() argument
2878 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in attach_entity_load_avg() argument
2880 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in detach_entity_load_avg() argument
2889 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) in enqueue_sleeper() argument
2894 if (entity_is_task(se)) in enqueue_sleeper()
2895 tsk = task_of(se); in enqueue_sleeper()
2897 if (se->statistics.sleep_start) { in enqueue_sleeper()
2898 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start; in enqueue_sleeper()
2903 if (unlikely(delta > se->statistics.sleep_max)) in enqueue_sleeper()
2904 se->statistics.sleep_max = delta; in enqueue_sleeper()
2906 se->statistics.sleep_start = 0; in enqueue_sleeper()
2907 se->statistics.sum_sleep_runtime += delta; in enqueue_sleeper()
2914 if (se->statistics.block_start) { in enqueue_sleeper()
2915 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start; in enqueue_sleeper()
2920 if (unlikely(delta > se->statistics.block_max)) in enqueue_sleeper()
2921 se->statistics.block_max = delta; in enqueue_sleeper()
2923 se->statistics.block_start = 0; in enqueue_sleeper()
2924 se->statistics.sum_sleep_runtime += delta; in enqueue_sleeper()
2928 se->statistics.iowait_sum += delta; in enqueue_sleeper()
2929 se->statistics.iowait_count++; in enqueue_sleeper()
2951 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) in check_spread() argument
2954 s64 d = se->vruntime - cfs_rq->min_vruntime; in check_spread()
2965 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) in place_entity() argument
2976 vruntime += sched_vslice(cfs_rq, se); in place_entity()
2993 se->vruntime = max_vruntime(se->vruntime, vruntime); in place_entity()
2999 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in enqueue_entity() argument
3006 se->vruntime += cfs_rq->min_vruntime; in enqueue_entity()
3012 enqueue_entity_load_avg(cfs_rq, se); in enqueue_entity()
3013 account_entity_enqueue(cfs_rq, se); in enqueue_entity()
3017 place_entity(cfs_rq, se, 0); in enqueue_entity()
3018 enqueue_sleeper(cfs_rq, se); in enqueue_entity()
3021 update_stats_enqueue(cfs_rq, se); in enqueue_entity()
3022 check_spread(cfs_rq, se); in enqueue_entity()
3023 if (se != cfs_rq->curr) in enqueue_entity()
3024 __enqueue_entity(cfs_rq, se); in enqueue_entity()
3025 se->on_rq = 1; in enqueue_entity()
3033 static void __clear_buddies_last(struct sched_entity *se) in __clear_buddies_last() argument
3035 for_each_sched_entity(se) { in __clear_buddies_last()
3036 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_last()
3037 if (cfs_rq->last != se) in __clear_buddies_last()
3044 static void __clear_buddies_next(struct sched_entity *se) in __clear_buddies_next() argument
3046 for_each_sched_entity(se) { in __clear_buddies_next()
3047 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_next()
3048 if (cfs_rq->next != se) in __clear_buddies_next()
3055 static void __clear_buddies_skip(struct sched_entity *se) in __clear_buddies_skip() argument
3057 for_each_sched_entity(se) { in __clear_buddies_skip()
3058 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_skip()
3059 if (cfs_rq->skip != se) in __clear_buddies_skip()
3066 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) in clear_buddies() argument
3068 if (cfs_rq->last == se) in clear_buddies()
3069 __clear_buddies_last(se); in clear_buddies()
3071 if (cfs_rq->next == se) in clear_buddies()
3072 __clear_buddies_next(se); in clear_buddies()
3074 if (cfs_rq->skip == se) in clear_buddies()
3075 __clear_buddies_skip(se); in clear_buddies()
3081 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in dequeue_entity() argument
3087 dequeue_entity_load_avg(cfs_rq, se); in dequeue_entity()
3089 update_stats_dequeue(cfs_rq, se); in dequeue_entity()
3092 if (entity_is_task(se)) { in dequeue_entity()
3093 struct task_struct *tsk = task_of(se); in dequeue_entity()
3096 se->statistics.sleep_start = rq_clock(rq_of(cfs_rq)); in dequeue_entity()
3098 se->statistics.block_start = rq_clock(rq_of(cfs_rq)); in dequeue_entity()
3103 clear_buddies(cfs_rq, se); in dequeue_entity()
3105 if (se != cfs_rq->curr) in dequeue_entity()
3106 __dequeue_entity(cfs_rq, se); in dequeue_entity()
3107 se->on_rq = 0; in dequeue_entity()
3108 account_entity_dequeue(cfs_rq, se); in dequeue_entity()
3116 se->vruntime -= cfs_rq->min_vruntime; in dequeue_entity()
3132 struct sched_entity *se; in check_preempt_tick() local
3155 se = __pick_first_entity(cfs_rq); in check_preempt_tick()
3156 delta = curr->vruntime - se->vruntime; in check_preempt_tick()
3166 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in set_next_entity() argument
3169 if (se->on_rq) { in set_next_entity()
3175 update_stats_wait_end(cfs_rq, se); in set_next_entity()
3176 __dequeue_entity(cfs_rq, se); in set_next_entity()
3177 update_load_avg(se, 1); in set_next_entity()
3180 update_stats_curr_start(cfs_rq, se); in set_next_entity()
3181 cfs_rq->curr = se; in set_next_entity()
3188 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) { in set_next_entity()
3189 se->statistics.slice_max = max(se->statistics.slice_max, in set_next_entity()
3190 se->sum_exec_runtime - se->prev_sum_exec_runtime); in set_next_entity()
3193 se->prev_sum_exec_runtime = se->sum_exec_runtime; in set_next_entity()
3197 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
3210 struct sched_entity *se; in pick_next_entity() local
3219 se = left; /* ideally we run the leftmost entity */ in pick_next_entity()
3225 if (cfs_rq->skip == se) { in pick_next_entity()
3228 if (se == curr) { in pick_next_entity()
3231 second = __pick_next_entity(se); in pick_next_entity()
3237 se = second; in pick_next_entity()
3244 se = cfs_rq->last; in pick_next_entity()
3250 se = cfs_rq->next; in pick_next_entity()
3252 clear_buddies(cfs_rq, se); in pick_next_entity()
3254 return se; in pick_next_entity()
3559 struct sched_entity *se; in throttle_cfs_rq() local
3563 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; in throttle_cfs_rq()
3571 for_each_sched_entity(se) { in throttle_cfs_rq()
3572 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in throttle_cfs_rq()
3574 if (!se->on_rq) in throttle_cfs_rq()
3578 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); in throttle_cfs_rq()
3585 if (!se) in throttle_cfs_rq()
3613 struct sched_entity *se; in unthrottle_cfs_rq() local
3617 se = cfs_rq->tg->se[cpu_of(rq)]; in unthrottle_cfs_rq()
3635 for_each_sched_entity(se) { in unthrottle_cfs_rq()
3636 if (se->on_rq) in unthrottle_cfs_rq()
3639 cfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
3641 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP); in unthrottle_cfs_rq()
3648 if (!se) in unthrottle_cfs_rq()
4084 struct sched_entity *se = &p->se; in hrtick_start_fair() local
4085 struct cfs_rq *cfs_rq = cfs_rq_of(se); in hrtick_start_fair()
4090 u64 slice = sched_slice(cfs_rq, se); in hrtick_start_fair()
4091 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; in hrtick_start_fair()
4115 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) in hrtick_update()
4138 struct sched_entity *se = &p->se; in enqueue_task_fair() local
4140 for_each_sched_entity(se) { in enqueue_task_fair()
4141 if (se->on_rq) in enqueue_task_fair()
4143 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
4144 enqueue_entity(cfs_rq, se, flags); in enqueue_task_fair()
4159 for_each_sched_entity(se) { in enqueue_task_fair()
4160 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
4166 update_load_avg(se, 1); in enqueue_task_fair()
4170 if (!se) in enqueue_task_fair()
4176 static void set_next_buddy(struct sched_entity *se);
4186 struct sched_entity *se = &p->se; in dequeue_task_fair() local
4189 for_each_sched_entity(se) { in dequeue_task_fair()
4190 cfs_rq = cfs_rq_of(se); in dequeue_task_fair()
4191 dequeue_entity(cfs_rq, se, flags); in dequeue_task_fair()
4209 if (task_sleep && parent_entity(se)) in dequeue_task_fair()
4210 set_next_buddy(parent_entity(se)); in dequeue_task_fair()
4213 se = parent_entity(se); in dequeue_task_fair()
4219 for_each_sched_entity(se) { in dequeue_task_fair()
4220 cfs_rq = cfs_rq_of(se); in dequeue_task_fair()
4226 update_load_avg(se, 1); in dequeue_task_fair()
4230 if (!se) in dequeue_task_fair()
4501 struct sched_entity *se = &p->se; in task_waking_fair() local
4502 struct cfs_rq *cfs_rq = cfs_rq_of(se); in task_waking_fair()
4517 se->vruntime -= min_vruntime; in task_waking_fair()
4574 struct sched_entity *se = tg->se[cpu]; in effective_load() local
4579 for_each_sched_entity(se) { in effective_load()
4582 tg = se->my_q->tg; in effective_load()
4587 W = wg + calc_tg_weight(tg, se->my_q); in effective_load()
4592 w = cfs_rq_load_avg(se->my_q) + wl; in effective_load()
4613 wl -= se->avg.load_avg; in effective_load()
4683 weight = current->se.avg.load_avg; in wake_affine()
4690 weight = p->se.avg.load_avg; in wake_affine()
4716 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts); in wake_affine()
4722 schedstat_inc(p, se.statistics.nr_wakeups_affine); in wake_affine()
5022 remove_entity_load_avg(&p->se); in migrate_task_rq_fair()
5025 p->se.avg.last_update_time = 0; in migrate_task_rq_fair()
5028 p->se.exec_start = 0; in migrate_task_rq_fair()
5033 remove_entity_load_avg(&p->se); in task_dead_fair()
5038 wakeup_gran(struct sched_entity *curr, struct sched_entity *se) in wakeup_gran() argument
5055 return calc_delta_fair(gran, se); in wakeup_gran()
5073 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) in wakeup_preempt_entity() argument
5075 s64 gran, vdiff = curr->vruntime - se->vruntime; in wakeup_preempt_entity()
5080 gran = wakeup_gran(curr, se); in wakeup_preempt_entity()
5087 static void set_last_buddy(struct sched_entity *se) in set_last_buddy() argument
5089 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE)) in set_last_buddy()
5092 for_each_sched_entity(se) in set_last_buddy()
5093 cfs_rq_of(se)->last = se; in set_last_buddy()
5096 static void set_next_buddy(struct sched_entity *se) in set_next_buddy() argument
5098 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE)) in set_next_buddy()
5101 for_each_sched_entity(se) in set_next_buddy()
5102 cfs_rq_of(se)->next = se; in set_next_buddy()
5105 static void set_skip_buddy(struct sched_entity *se) in set_skip_buddy() argument
5107 for_each_sched_entity(se) in set_skip_buddy()
5108 cfs_rq_of(se)->skip = se; in set_skip_buddy()
5117 struct sched_entity *se = &curr->se, *pse = &p->se; in check_preempt_wakeup() local
5122 if (unlikely(se == pse)) in check_preempt_wakeup()
5164 find_matching_se(&se, &pse); in check_preempt_wakeup()
5165 update_curr(cfs_rq_of(se)); in check_preempt_wakeup()
5167 if (wakeup_preempt_entity(se, pse) == 1) { in check_preempt_wakeup()
5190 if (unlikely(!se->on_rq || curr == rq->idle)) in check_preempt_wakeup()
5193 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se)) in check_preempt_wakeup()
5194 set_last_buddy(se); in check_preempt_wakeup()
5201 struct sched_entity *se; in pick_next_task_fair() local
5246 se = pick_next_entity(cfs_rq, curr); in pick_next_task_fair()
5247 cfs_rq = group_cfs_rq(se); in pick_next_task_fair()
5250 p = task_of(se); in pick_next_task_fair()
5258 struct sched_entity *pse = &prev->se; in pick_next_task_fair()
5260 while (!(cfs_rq = is_same_group(se, pse))) { in pick_next_task_fair()
5261 int se_depth = se->depth; in pick_next_task_fair()
5269 set_next_entity(cfs_rq_of(se), se); in pick_next_task_fair()
5270 se = parent_entity(se); in pick_next_task_fair()
5275 set_next_entity(cfs_rq, se); in pick_next_task_fair()
5292 se = pick_next_entity(cfs_rq, NULL); in pick_next_task_fair()
5293 set_next_entity(cfs_rq, se); in pick_next_task_fair()
5294 cfs_rq = group_cfs_rq(se); in pick_next_task_fair()
5297 p = task_of(se); in pick_next_task_fair()
5333 struct sched_entity *se = &prev->se; in put_prev_task_fair() local
5336 for_each_sched_entity(se) { in put_prev_task_fair()
5337 cfs_rq = cfs_rq_of(se); in put_prev_task_fair()
5338 put_prev_entity(cfs_rq, se); in put_prev_task_fair()
5351 struct sched_entity *se = &curr->se; in yield_task_fair() local
5359 clear_buddies(cfs_rq, se); in yield_task_fair()
5375 set_skip_buddy(se); in yield_task_fair()
5380 struct sched_entity *se = &p->se; in yield_to_task_fair() local
5383 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se))) in yield_to_task_fair()
5387 set_next_buddy(se); in yield_to_task_fair()
5567 (&p->se == cfs_rq_of(&p->se)->next || in task_hot()
5568 &p->se == cfs_rq_of(&p->se)->last)) in task_hot()
5576 delta = rq_clock_task(env->src_rq) - p->se.exec_start; in task_hot()
5659 schedstat_inc(p, se.statistics.nr_failed_migrations_affine); in can_migrate_task()
5690 schedstat_inc(p, se.statistics.nr_failed_migrations_running); in can_migrate_task()
5708 schedstat_inc(p, se.statistics.nr_forced_migrations); in can_migrate_task()
5713 schedstat_inc(p, se.statistics.nr_failed_migrations_hot); in can_migrate_task()
5741 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) { in detach_one_task()
5787 p = list_first_entry(tasks, struct task_struct, se.group_node); in detach_tasks()
5813 list_add(&p->se.group_node, &env->tasks); in detach_tasks()
5837 list_move_tail(&p->se.group_node, tasks); in detach_tasks()
5886 p = list_first_entry(tasks, struct task_struct, se.group_node); in attach_tasks()
5887 list_del_init(&p->se.group_node); in attach_tasks()
5928 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; in update_cfs_rq_h_load() local
5936 for_each_sched_entity(se) { in update_cfs_rq_h_load()
5937 cfs_rq = cfs_rq_of(se); in update_cfs_rq_h_load()
5938 cfs_rq->h_load_next = se; in update_cfs_rq_h_load()
5943 if (!se) { in update_cfs_rq_h_load()
5948 while ((se = cfs_rq->h_load_next) != NULL) { in update_cfs_rq_h_load()
5950 load = div64_ul(load * se->avg.load_avg, in update_cfs_rq_h_load()
5952 cfs_rq = group_cfs_rq(se); in update_cfs_rq_h_load()
5963 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, in task_h_load()
5981 return p->se.avg.load_avg; in task_h_load()
7878 struct sched_entity *se = &curr->se; in task_tick_fair() local
7880 for_each_sched_entity(se) { in task_tick_fair()
7881 cfs_rq = cfs_rq_of(se); in task_tick_fair()
7882 entity_tick(cfs_rq, se, queued); in task_tick_fair()
7897 struct sched_entity *se = &p->se, *curr; in task_fork_fair() local
7922 se->vruntime = curr->vruntime; in task_fork_fair()
7923 place_entity(cfs_rq, se, 1); in task_fork_fair()
7925 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) { in task_fork_fair()
7930 swap(curr->vruntime, se->vruntime); in task_fork_fair()
7934 se->vruntime -= cfs_rq->min_vruntime; in task_fork_fair()
7963 struct sched_entity *se = &p->se; in vruntime_normalized() local
7982 if (!se->sum_exec_runtime || p->state == TASK_WAKING) in vruntime_normalized()
7990 struct sched_entity *se = &p->se; in detach_task_cfs_rq() local
7991 struct cfs_rq *cfs_rq = cfs_rq_of(se); in detach_task_cfs_rq()
7998 place_entity(cfs_rq, se, 0); in detach_task_cfs_rq()
7999 se->vruntime -= cfs_rq->min_vruntime; in detach_task_cfs_rq()
8003 detach_entity_load_avg(cfs_rq, se); in detach_task_cfs_rq()
8008 struct sched_entity *se = &p->se; in attach_task_cfs_rq() local
8009 struct cfs_rq *cfs_rq = cfs_rq_of(se); in attach_task_cfs_rq()
8016 se->depth = se->parent ? se->parent->depth + 1 : 0; in attach_task_cfs_rq()
8020 attach_entity_load_avg(cfs_rq, se); in attach_task_cfs_rq()
8023 se->vruntime += cfs_rq->min_vruntime; in attach_task_cfs_rq()
8055 struct sched_entity *se = &rq->curr->se; in set_curr_task_fair() local
8057 for_each_sched_entity(se) { in set_curr_task_fair()
8058 struct cfs_rq *cfs_rq = cfs_rq_of(se); in set_curr_task_fair()
8060 set_next_entity(cfs_rq, se); in set_curr_task_fair()
8087 p->se.avg.last_update_time = 0; in task_move_group_fair()
8101 if (tg->se) { in free_fair_sched_group()
8102 if (tg->se[i]) in free_fair_sched_group()
8103 remove_entity_load_avg(tg->se[i]); in free_fair_sched_group()
8104 kfree(tg->se[i]); in free_fair_sched_group()
8109 kfree(tg->se); in free_fair_sched_group()
8115 struct sched_entity *se; in alloc_fair_sched_group() local
8121 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL); in alloc_fair_sched_group()
8122 if (!tg->se) in alloc_fair_sched_group()
8135 se = kzalloc_node(sizeof(struct sched_entity), in alloc_fair_sched_group()
8137 if (!se) in alloc_fair_sched_group()
8141 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); in alloc_fair_sched_group()
8142 init_entity_runnable_average(se); in alloc_fair_sched_group()
8171 struct sched_entity *se, int cpu, in init_tg_cfs_entry() argument
8181 tg->se[cpu] = se; in init_tg_cfs_entry()
8184 if (!se) in init_tg_cfs_entry()
8188 se->cfs_rq = &rq->cfs; in init_tg_cfs_entry()
8189 se->depth = 0; in init_tg_cfs_entry()
8191 se->cfs_rq = parent->my_q; in init_tg_cfs_entry()
8192 se->depth = parent->depth + 1; in init_tg_cfs_entry()
8195 se->my_q = cfs_rq; in init_tg_cfs_entry()
8197 update_load_set(&se->load, NICE_0_LOAD); in init_tg_cfs_entry()
8198 se->parent = parent; in init_tg_cfs_entry()
8211 if (!tg->se[0]) in sched_group_set_shares()
8223 struct sched_entity *se; in sched_group_set_shares() local
8225 se = tg->se[i]; in sched_group_set_shares()
8231 for_each_sched_entity(se) in sched_group_set_shares()
8232 update_cfs_shares(group_cfs_rq(se)); in sched_group_set_shares()
8256 struct sched_entity *se = &task->se; in get_rr_interval_fair() local
8264 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se)); in get_rr_interval_fair()
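The listing above is a flat cross-reference and explains nothing by itself, so as a reading aid here is a small standalone C sketch (userspace code, not kernel code) of the two entity_is_task()/for_each_sched_entity() variants that appear at listing lines 255 and 266-267 (group scheduling enabled) and at lines 379 and 381-382 (flat build). Only the two macro bodies are taken from the listing; the struct definitions, the label fields, and the FAIR_GROUP_SCHED switch are sketch-only stand-ins, not the kernel's own declarations.

/*
 * Standalone sketch of the two for_each_sched_entity()/entity_is_task()
 * variants from the listing. struct sched_entity here is a minimal
 * stand-in with only the fields the macros touch; the real kernel
 * structure is much larger.
 */
#include <stdio.h>

#define FAIR_GROUP_SCHED	/* comment this out to get the flat, non-group variant */

struct cfs_rq {
	const char *label;		/* sketch-only field */
};

struct sched_entity {
	struct sched_entity *parent;	/* enclosing group entity, NULL at the top */
	struct cfs_rq *my_q;		/* runqueue this entity owns: set for groups, NULL for tasks */
	const char *label;		/* sketch-only field */
};

#ifdef FAIR_GROUP_SCHED
/* Listing lines 255 and 266-267: a task owns no runqueue; walk the parent chain.
 * (The argument is parenthesised here; the kernel macro relies on call-site discipline.) */
#define entity_is_task(se)	(!(se)->my_q)
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)
#else
/* Listing lines 379 and 381-382: no grouping, so every entity is a task
 * and the "walk" visits exactly one entity. */
#define entity_is_task(se)	1
#define for_each_sched_entity(se) \
		for (; se; se = NULL)
#endif

int main(void)
{
	struct cfs_rq group_q = { "group runqueue" };
	struct sched_entity group = { .parent = NULL,   .my_q = &group_q, .label = "group se" };
	struct sched_entity task  = { .parent = &group, .my_q = NULL,     .label = "task se"  };

	struct sched_entity *se = &task;

	/* Mirrors callers such as enqueue_task_fair() (listing line 4140):
	 * handle the task's own entity first, then each ancestor group entity. */
	for_each_sched_entity(se)
		printf("%-8s entity_is_task=%d\n", se->label, entity_is_task(se));

	return 0;
}

With the group variant, this same pointer walk is what lets callers in the listing such as enqueue_task_fair() (line 4140) or sched_slice() (line 630) apply a per-entity operation to the task and then to every enclosing group entity; in the flat build the loop body runs exactly once.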