cfs_rq             41 include/linux/sched.h struct cfs_rq;
cfs_rq            465 include/linux/sched.h 	struct cfs_rq			*cfs_rq;
cfs_rq            467 include/linux/sched.h 	struct cfs_rq			*my_q;
cfs_rq           1988 include/linux/sched.h const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq);
cfs_rq           1989 include/linux/sched.h char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len);
cfs_rq           1990 include/linux/sched.h int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq);
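
The include/linux/sched.h hits above are the two per-entity queue pointers (se->cfs_rq, the queue an entity is enqueued on, and se->my_q, non-NULL only for group entities and pointing at the group's own per-CPU queue) plus the exported trace helpers. A small userspace model of that linkage, mirroring what init_tg_cfs_entry() sets up further down in this listing; field names follow the kernel, everything else is illustrative only:

/*
 * Userspace model of the se->cfs_rq / se->my_q linkage.  Not kernel
 * code: only the two pointer fields are real, the rest is a stand-in.
 */
#include <assert.h>
#include <stddef.h>

struct cfs_rq;

struct sched_entity {
	struct cfs_rq *cfs_rq;	/* queue this entity is enqueued on */
	struct cfs_rq *my_q;	/* group entities only: the group's own queue */
};

struct cfs_rq {
	struct sched_entity *curr;	/* currently running entity, if any */
};

int main(void)
{
	struct cfs_rq root = { 0 };	/* rq->cfs: the per-CPU root queue */
	struct cfs_rq group_q = { 0 };	/* a task group's per-CPU queue    */
	struct sched_entity group_se;	/* represents the group on root    */
	struct sched_entity task_se;	/* an ordinary task entity         */

	/* mirrors init_tg_cfs_entry(): the group entity sits on the root
	 * queue and owns group_q through my_q */
	group_se.cfs_rq = &root;
	group_se.my_q   = &group_q;

	/* a task inside that group is enqueued on the group's queue and
	 * owns no queue of its own */
	task_se.cfs_rq = &group_q;
	task_se.my_q   = NULL;

	/* group_cfs_rq(se) in fair.c returns se->my_q; it is NULL exactly
	 * when the entity represents a task */
	assert(group_se.my_q != NULL);
	assert(task_se.my_q == NULL);
	return 0;
}
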
cfs_rq            605 include/trace/events/sched.h 	TP_PROTO(struct cfs_rq *cfs_rq),
cfs_rq            606 include/trace/events/sched.h 	TP_ARGS(cfs_rq));
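
The trace-header hit is the bare pelt_cfs_tp tracepoint, which hands a raw struct cfs_rq pointer to whoever registers a probe; the sched_trace_cfs_rq_*() helpers above exist so such a probe need not know the struct layout. A sketch of an out-of-tree module wiring the two together, assuming the kernel exports pelt_cfs_tp to modules (the kernels that carry these helpers do); module name and message format are illustrative:

/*
 * Illustrative module: attach to pelt_cfs_tp and dump per-cfs_rq
 * utilisation through the exported sched_trace_cfs_rq_*() helpers.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <trace/events/sched.h>

static void pelt_cfs_probe(void *data, struct cfs_rq *cfs_rq)
{
	const struct sched_avg *avg = sched_trace_cfs_rq_avg(cfs_rq);
	char path[64];

	sched_trace_cfs_rq_path(cfs_rq, path, sizeof(path));
	pr_debug("cpu%d %s util_avg=%lu\n",
		 sched_trace_cfs_rq_cpu(cfs_rq), path,
		 avg ? avg->util_avg : 0);
}

static int __init pelt_probe_init(void)
{
	return register_trace_pelt_cfs_tp(pelt_cfs_probe, NULL);
}

static void __exit pelt_probe_exit(void)
{
	unregister_trace_pelt_cfs_tp(pelt_cfs_probe, NULL);
	tracepoint_synchronize_unregister();
}

module_init(pelt_probe_init);
module_exit(pelt_probe_exit);
MODULE_LICENSE("GPL");
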
cfs_rq           2691 kernel/sched/core.c 	p->se.cfs_rq			= NULL;
cfs_rq           3529 kernel/sched/core.c 	struct sched_entity *curr = (&p->se)->cfs_rq->curr;
cfs_rq           6576 kernel/sched/core.c 		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
cfs_rq           7434 kernel/sched/core.c 		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
cfs_rq           7435 kernel/sched/core.c 		struct rq *rq = cfs_rq->rq;
cfs_rq           7439 kernel/sched/core.c 		cfs_rq->runtime_enabled = runtime_enabled;
cfs_rq           7440 kernel/sched/core.c 		cfs_rq->runtime_remaining = 0;
cfs_rq           7442 kernel/sched/core.c 		if (cfs_rq->throttled)
cfs_rq           7443 kernel/sched/core.c 			unthrottle_cfs_rq(cfs_rq);
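
The kernel/sched/core.c hits come from tg_set_cfs_bandwidth()'s per-CPU loop: when a group's quota changes, every per-CPU cfs_rq gets runtime_enabled updated, its local runtime pool cleared, and an unthrottle if it was throttled. That path is reached from userspace through the cgroup cpu controller; a minimal sketch, assuming a cgroup-v1 cpu hierarchy mounted at /sys/fs/cgroup/cpu and an existing group named "batch" (both assumptions; cgroup v2 exposes a single cpu.max file instead):

/* Illustrative userspace snippet: set a 20% CPU cap on one group. */
#include <stdio.h>

static int write_val(const char *path, long val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%ld\n", val);
	return fclose(f);
}

int main(void)
{
	/* 20 ms of runtime every 100 ms period: roughly 20% of one CPU */
	write_val("/sys/fs/cgroup/cpu/batch/cpu.cfs_period_us", 100000);
	write_val("/sys/fs/cgroup/cpu/batch/cpu.cfs_quota_us", 20000);
	return 0;
}
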
cfs_rq            483 kernel/sched/debug.c void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
cfs_rq            493 kernel/sched/debug.c 	SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
cfs_rq            499 kernel/sched/debug.c 			SPLIT_NS(cfs_rq->exec_clock));
cfs_rq            502 kernel/sched/debug.c 	if (rb_first_cached(&cfs_rq->tasks_timeline))
cfs_rq            503 kernel/sched/debug.c 		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
cfs_rq            504 kernel/sched/debug.c 	last = __pick_last_entity(cfs_rq);
cfs_rq            507 kernel/sched/debug.c 	min_vruntime = cfs_rq->min_vruntime;
cfs_rq            523 kernel/sched/debug.c 			cfs_rq->nr_spread_over);
cfs_rq            524 kernel/sched/debug.c 	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
cfs_rq            525 kernel/sched/debug.c 	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
cfs_rq            527 kernel/sched/debug.c 	SEQ_printf(m, "  .%-30s: %ld\n", "runnable_weight", cfs_rq->runnable_weight);
cfs_rq            529 kernel/sched/debug.c 			cfs_rq->avg.load_avg);
cfs_rq            531 kernel/sched/debug.c 			cfs_rq->avg.runnable_load_avg);
cfs_rq            533 kernel/sched/debug.c 			cfs_rq->avg.util_avg);
cfs_rq            535 kernel/sched/debug.c 			cfs_rq->avg.util_est.enqueued);
cfs_rq            537 kernel/sched/debug.c 			cfs_rq->removed.load_avg);
cfs_rq            539 kernel/sched/debug.c 			cfs_rq->removed.util_avg);
cfs_rq            541 kernel/sched/debug.c 			cfs_rq->removed.runnable_sum);
cfs_rq            544 kernel/sched/debug.c 			cfs_rq->tg_load_avg_contrib);
cfs_rq            546 kernel/sched/debug.c 			atomic_long_read(&cfs_rq->tg->load_avg));
cfs_rq            551 kernel/sched/debug.c 			cfs_rq->throttled);
cfs_rq            553 kernel/sched/debug.c 			cfs_rq->throttle_count);
cfs_rq            557 kernel/sched/debug.c 	print_cfs_group_stats(m, cpu, cfs_rq->tg);
cfs_rq            261 kernel/sched/fair.c static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
cfs_rq            263 kernel/sched/fair.c 	return p->se.cfs_rq;
cfs_rq            267 kernel/sched/fair.c static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
cfs_rq            269 kernel/sched/fair.c 	return se->cfs_rq;
cfs_rq            273 kernel/sched/fair.c static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
cfs_rq            278 kernel/sched/fair.c static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len)
cfs_rq            283 kernel/sched/fair.c 	if (cfs_rq && task_group_is_autogroup(cfs_rq->tg))
cfs_rq            284 kernel/sched/fair.c 		autogroup_path(cfs_rq->tg, path, len);
cfs_rq            285 kernel/sched/fair.c 	else if (cfs_rq && cfs_rq->tg->css.cgroup)
cfs_rq            286 kernel/sched/fair.c 		cgroup_path(cfs_rq->tg->css.cgroup, path, len);
cfs_rq            291 kernel/sched/fair.c static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
cfs_rq            293 kernel/sched/fair.c 	struct rq *rq = rq_of(cfs_rq);
cfs_rq            296 kernel/sched/fair.c 	if (cfs_rq->on_list)
cfs_rq            299 kernel/sched/fair.c 	cfs_rq->on_list = 1;
cfs_rq            310 kernel/sched/fair.c 	if (cfs_rq->tg->parent &&
cfs_rq            311 kernel/sched/fair.c 	    cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
cfs_rq            318 kernel/sched/fair.c 		list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
cfs_rq            319 kernel/sched/fair.c 			&(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
cfs_rq            329 kernel/sched/fair.c 	if (!cfs_rq->tg->parent) {
cfs_rq            334 kernel/sched/fair.c 		list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
cfs_rq            350 kernel/sched/fair.c 	list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch);
cfs_rq            355 kernel/sched/fair.c 	rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
cfs_rq            359 kernel/sched/fair.c static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
cfs_rq            361 kernel/sched/fair.c 	if (cfs_rq->on_list) {
cfs_rq            362 kernel/sched/fair.c 		struct rq *rq = rq_of(cfs_rq);
cfs_rq            371 kernel/sched/fair.c 		if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list)
cfs_rq            372 kernel/sched/fair.c 			rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;
cfs_rq            374 kernel/sched/fair.c 		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
cfs_rq            375 kernel/sched/fair.c 		cfs_rq->on_list = 0;
cfs_rq            385 kernel/sched/fair.c #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)			\
cfs_rq            386 kernel/sched/fair.c 	list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list,	\
cfs_rq            390 kernel/sched/fair.c static inline struct cfs_rq *
cfs_rq            393 kernel/sched/fair.c 	if (se->cfs_rq == pse->cfs_rq)
cfs_rq            394 kernel/sched/fair.c 		return se->cfs_rq;
cfs_rq            446 kernel/sched/fair.c static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
cfs_rq            451 kernel/sched/fair.c static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
cfs_rq            460 kernel/sched/fair.c static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
cfs_rq            465 kernel/sched/fair.c static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len)
cfs_rq            471 kernel/sched/fair.c static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
cfs_rq            476 kernel/sched/fair.c static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
cfs_rq            484 kernel/sched/fair.c #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)	\
cfs_rq            485 kernel/sched/fair.c 		for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
cfs_rq            500 kernel/sched/fair.c void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
cfs_rq            530 kernel/sched/fair.c static void update_min_vruntime(struct cfs_rq *cfs_rq)
cfs_rq            532 kernel/sched/fair.c 	struct sched_entity *curr = cfs_rq->curr;
cfs_rq            533 kernel/sched/fair.c 	struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline);
cfs_rq            535 kernel/sched/fair.c 	u64 vruntime = cfs_rq->min_vruntime;
cfs_rq            555 kernel/sched/fair.c 	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
cfs_rq            558 kernel/sched/fair.c 	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
cfs_rq            565 kernel/sched/fair.c static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
cfs_rq            567 kernel/sched/fair.c 	struct rb_node **link = &cfs_rq->tasks_timeline.rb_root.rb_node;
cfs_rq            592 kernel/sched/fair.c 			       &cfs_rq->tasks_timeline, leftmost);
cfs_rq            595 kernel/sched/fair.c static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
cfs_rq            597 kernel/sched/fair.c 	rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline);
cfs_rq            600 kernel/sched/fair.c struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
cfs_rq            602 kernel/sched/fair.c 	struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline);
cfs_rq            621 kernel/sched/fair.c struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
cfs_rq            623 kernel/sched/fair.c 	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
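
The rbtree hits above (__enqueue_entity() through __pick_last_entity()) implement the CFS timeline: entities are keyed by vruntime in a leftmost-cached rbtree, and the scheduler keeps running whichever entity has the smallest vruntime, advancing that vruntime inversely to the entity's weight in update_curr(). A userspace model of that rule, using a linear scan in place of the rbtree; illustrative only:

/* Model of weighted vruntime progress; not kernel code. */
#include <stdio.h>

#define NICE_0_WEIGHT 1024

struct entity { const char *name; unsigned long weight; double vruntime; };

static struct entity *pick_leftmost(struct entity *e, int nr)
{
	struct entity *best = &e[0];
	int i;

	for (i = 1; i < nr; i++)
		if (e[i].vruntime < best->vruntime)
			best = &e[i];
	return best;
}

int main(void)
{
	struct entity se[] = {
		{ "nice0",  1024, 0.0 },
		{ "nice-5", 3121, 0.0 },	/* heavier weight, slower vruntime */
	};
	double wall[2] = { 0.0, 0.0 };
	int tick;

	for (tick = 0; tick < 4000; tick++) {
		struct entity *curr = pick_leftmost(se, 2);

		/* run curr for 1 ms: vruntime grows inversely to weight */
		curr->vruntime += 1.0 * NICE_0_WEIGHT / curr->weight;
		wall[curr - se] += 1.0;
	}
	/* CPU time splits roughly 1024:3121, i.e. ~25% vs ~75% */
	printf("nice0 ran %.0f ms, nice-5 ran %.0f ms\n", wall[0], wall[1]);
	return 0;
}
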
cfs_rq            692 kernel/sched/fair.c static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
cfs_rq            694 kernel/sched/fair.c 	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
cfs_rq            700 kernel/sched/fair.c 		cfs_rq = cfs_rq_of(se);
cfs_rq            701 kernel/sched/fair.c 		load = &cfs_rq->load;
cfs_rq            704 kernel/sched/fair.c 			lw = cfs_rq->load;
cfs_rq            719 kernel/sched/fair.c static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
cfs_rq            721 kernel/sched/fair.c 	return calc_delta_fair(sched_slice(cfs_rq, se), se);
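
sched_slice() above hands each entity a share of the scheduling period proportional to its weight within its cfs_rq, where the period is sysctl_sched_latency unless more than sched_nr_latency entities are runnable, in which case it stretches to nr_running times the minimum granularity. A userspace sketch of that arithmetic for a flat runqueue, using the unscaled base tunables (the kernel multiplies them by 1 + ilog2(nr_cpus) under the default scaling) and weights taken from the nice-0 and nice -5 rows of sched_prio_to_weight:

/* Model of __sched_period()/sched_slice() for a flat runqueue. */
#include <stdio.h>

#define SCHED_LATENCY_NS	6000000ULL	/* sysctl_sched_latency base        */
#define MIN_GRANULARITY_NS	750000ULL	/* sysctl_sched_min_granularity base */
#define NR_LATENCY		8		/* sched_nr_latency                  */

static unsigned long long sched_period(unsigned int nr_running)
{
	if (nr_running > NR_LATENCY)
		return nr_running * MIN_GRANULARITY_NS;
	return SCHED_LATENCY_NS;
}

int main(void)
{
	unsigned long weight[] = { 1024, 1024, 3121 };	/* two nice-0, one nice -5 */
	unsigned long total = 0;
	unsigned long long period;
	unsigned int i, nr = sizeof(weight) / sizeof(weight[0]);

	for (i = 0; i < nr; i++)
		total += weight[i];

	period = sched_period(nr);
	for (i = 0; i < nr; i++)
		printf("task %u: slice = %llu ns of a %llu ns period\n",
		       i, period * weight[i] / total, period);
	return 0;
}
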
cfs_rq            783 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq            785 kernel/sched/fair.c 	long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq)));
cfs_rq            786 kernel/sched/fair.c 	long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;
cfs_rq            789 kernel/sched/fair.c 		if (cfs_rq->avg.util_avg != 0) {
cfs_rq            790 kernel/sched/fair.c 			sa->util_avg  = cfs_rq->avg.util_avg * se->load.weight;
cfs_rq            791 kernel/sched/fair.c 			sa->util_avg /= (cfs_rq->avg.load_avg + 1);
cfs_rq            811 kernel/sched/fair.c 		se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);
cfs_rq            825 kernel/sched/fair.c static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
cfs_rq            833 kernel/sched/fair.c static void update_curr(struct cfs_rq *cfs_rq)
cfs_rq            835 kernel/sched/fair.c 	struct sched_entity *curr = cfs_rq->curr;
cfs_rq            836 kernel/sched/fair.c 	u64 now = rq_clock_task(rq_of(cfs_rq));
cfs_rq            852 kernel/sched/fair.c 	schedstat_add(cfs_rq->exec_clock, delta_exec);
cfs_rq            855 kernel/sched/fair.c 	update_min_vruntime(cfs_rq);
cfs_rq            865 kernel/sched/fair.c 	account_cfs_rq_runtime(cfs_rq, delta_exec);
cfs_rq            874 kernel/sched/fair.c update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
cfs_rq            881 kernel/sched/fair.c 	wait_start = rq_clock(rq_of(cfs_rq));
cfs_rq            892 kernel/sched/fair.c update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
cfs_rq            900 kernel/sched/fair.c 	delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start);
cfs_rq            924 kernel/sched/fair.c update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
cfs_rq            939 kernel/sched/fair.c 		u64 delta = rq_clock(rq_of(cfs_rq)) - sleep_start;
cfs_rq            956 kernel/sched/fair.c 		u64 delta = rq_clock(rq_of(cfs_rq)) - block_start;
cfs_rq            995 kernel/sched/fair.c update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
cfs_rq           1004 kernel/sched/fair.c 	if (se != cfs_rq->curr)
cfs_rq           1005 kernel/sched/fair.c 		update_stats_wait_start(cfs_rq, se);
cfs_rq           1008 kernel/sched/fair.c 		update_stats_enqueue_sleeper(cfs_rq, se);
cfs_rq           1012 kernel/sched/fair.c update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
cfs_rq           1022 kernel/sched/fair.c 	if (se != cfs_rq->curr)
cfs_rq           1023 kernel/sched/fair.c 		update_stats_wait_end(cfs_rq, se);
cfs_rq           1030 kernel/sched/fair.c 				      rq_clock(rq_of(cfs_rq)));
cfs_rq           1033 kernel/sched/fair.c 				      rq_clock(rq_of(cfs_rq)));
cfs_rq           1041 kernel/sched/fair.c update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
cfs_rq           1046 kernel/sched/fair.c 	se->exec_start = rq_clock_task(rq_of(cfs_rq));
cfs_rq           2757 kernel/sched/fair.c account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
cfs_rq           2759 kernel/sched/fair.c 	update_load_add(&cfs_rq->load, se->load.weight);
cfs_rq           2762 kernel/sched/fair.c 		struct rq *rq = rq_of(cfs_rq);
cfs_rq           2768 kernel/sched/fair.c 	cfs_rq->nr_running++;
cfs_rq           2772 kernel/sched/fair.c account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
cfs_rq           2774 kernel/sched/fair.c 	update_load_sub(&cfs_rq->load, se->load.weight);
cfs_rq           2777 kernel/sched/fair.c 		account_numa_dequeue(rq_of(cfs_rq), task_of(se));
cfs_rq           2781 kernel/sched/fair.c 	cfs_rq->nr_running--;
cfs_rq           2834 kernel/sched/fair.c enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
cfs_rq           2836 kernel/sched/fair.c 	cfs_rq->runnable_weight += se->runnable_weight;
cfs_rq           2838 kernel/sched/fair.c 	cfs_rq->avg.runnable_load_avg += se->avg.runnable_load_avg;
cfs_rq           2839 kernel/sched/fair.c 	cfs_rq->avg.runnable_load_sum += se_runnable(se) * se->avg.runnable_load_sum;
cfs_rq           2843 kernel/sched/fair.c dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
cfs_rq           2845 kernel/sched/fair.c 	cfs_rq->runnable_weight -= se->runnable_weight;
cfs_rq           2847 kernel/sched/fair.c 	sub_positive(&cfs_rq->avg.runnable_load_avg, se->avg.runnable_load_avg);
cfs_rq           2848 kernel/sched/fair.c 	sub_positive(&cfs_rq->avg.runnable_load_sum,
cfs_rq           2853 kernel/sched/fair.c enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
cfs_rq           2855 kernel/sched/fair.c 	cfs_rq->avg.load_avg += se->avg.load_avg;
cfs_rq           2856 kernel/sched/fair.c 	cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
cfs_rq           2860 kernel/sched/fair.c dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
cfs_rq           2862 kernel/sched/fair.c 	sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
cfs_rq           2863 kernel/sched/fair.c 	sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
cfs_rq           2867 kernel/sched/fair.c enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
cfs_rq           2869 kernel/sched/fair.c dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
cfs_rq           2871 kernel/sched/fair.c enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
cfs_rq           2873 kernel/sched/fair.c dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
cfs_rq           2876 kernel/sched/fair.c static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
cfs_rq           2881 kernel/sched/fair.c 		if (cfs_rq->curr == se)
cfs_rq           2882 kernel/sched/fair.c 			update_curr(cfs_rq);
cfs_rq           2883 kernel/sched/fair.c 		account_entity_dequeue(cfs_rq, se);
cfs_rq           2884 kernel/sched/fair.c 		dequeue_runnable_load_avg(cfs_rq, se);
cfs_rq           2886 kernel/sched/fair.c 	dequeue_load_avg(cfs_rq, se);
cfs_rq           2901 kernel/sched/fair.c 	enqueue_load_avg(cfs_rq, se);
cfs_rq           2903 kernel/sched/fair.c 		account_entity_enqueue(cfs_rq, se);
cfs_rq           2904 kernel/sched/fair.c 		enqueue_runnable_load_avg(cfs_rq, se);
cfs_rq           2911 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq           2915 kernel/sched/fair.c 	reweight_entity(cfs_rq, se, weight, weight);
cfs_rq           2994 kernel/sched/fair.c static long calc_group_shares(struct cfs_rq *cfs_rq)
cfs_rq           2997 kernel/sched/fair.c 	struct task_group *tg = cfs_rq->tg;
cfs_rq           3001 kernel/sched/fair.c 	load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg);
cfs_rq           3006 kernel/sched/fair.c 	tg_weight -= cfs_rq->tg_load_avg_contrib;
cfs_rq           3055 kernel/sched/fair.c static long calc_group_runnable(struct cfs_rq *cfs_rq, long shares)
cfs_rq           3059 kernel/sched/fair.c 	load_avg = max(cfs_rq->avg.load_avg,
cfs_rq           3060 kernel/sched/fair.c 		       scale_load_down(cfs_rq->load.weight));
cfs_rq           3062 kernel/sched/fair.c 	runnable = max(cfs_rq->avg.runnable_load_avg,
cfs_rq           3063 kernel/sched/fair.c 		       scale_load_down(cfs_rq->runnable_weight));
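
calc_group_shares() above sizes a group's entity on one CPU as the group's total shares scaled by that CPU's portion of the group-wide load, using the larger of the queue's weight and its load average and substituting this CPU's fresh load for its stale tg_load_avg contribution. A userspace sketch of the proportional split with invented numbers:

/* Model of the calc_group_shares() proportion; values are made up. */
#include <stdio.h>

static long calc_group_shares(long tg_shares, long tg_load_avg,
			      long this_contrib, long this_load)
{
	long tg_weight = tg_load_avg - this_contrib + this_load;
	long shares = tg_shares * this_load;

	if (tg_weight)
		shares /= tg_weight;
	return shares > 0 ? shares : 2;	/* MIN_SHARES floor */
}

int main(void)
{
	/* cpu.shares = 1024, group load split 3:1 across two CPUs */
	printf("cpu0: %ld\n", calc_group_shares(1024, 4096, 3072, 3072)); /* 768 */
	printf("cpu1: %ld\n", calc_group_shares(1024, 4096, 1024, 1024)); /* 256 */
	return 0;
}
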
cfs_rq           3073 kernel/sched/fair.c static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
cfs_rq           3081 kernel/sched/fair.c 	struct cfs_rq *gcfs_rq = group_cfs_rq(se);
cfs_rq           3109 kernel/sched/fair.c static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
cfs_rq           3111 kernel/sched/fair.c 	struct rq *rq = rq_of(cfs_rq);
cfs_rq           3113 kernel/sched/fair.c 	if (&rq->cfs == cfs_rq || (flags & SCHED_CPUFREQ_MIGRATION)) {
cfs_rq           3149 kernel/sched/fair.c static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
cfs_rq           3151 kernel/sched/fair.c 	long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
cfs_rq           3156 kernel/sched/fair.c 	if (cfs_rq->tg == &root_task_group)
cfs_rq           3159 kernel/sched/fair.c 	if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
cfs_rq           3160 kernel/sched/fair.c 		atomic_long_add(delta, &cfs_rq->tg->load_avg);
cfs_rq           3161 kernel/sched/fair.c 		cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
cfs_rq           3171 kernel/sched/fair.c 		      struct cfs_rq *prev, struct cfs_rq *next)
cfs_rq           3284 kernel/sched/fair.c update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
cfs_rq           3305 kernel/sched/fair.c 	add_positive(&cfs_rq->avg.util_avg, delta);
cfs_rq           3306 kernel/sched/fair.c 	cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * LOAD_AVG_MAX;
cfs_rq           3310 kernel/sched/fair.c update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
cfs_rq           3360 kernel/sched/fair.c 	add_positive(&cfs_rq->avg.load_avg, delta_avg);
cfs_rq           3361 kernel/sched/fair.c 	add_positive(&cfs_rq->avg.load_sum, delta_sum);
cfs_rq           3372 kernel/sched/fair.c 		add_positive(&cfs_rq->avg.runnable_load_avg, delta_avg);
cfs_rq           3373 kernel/sched/fair.c 		add_positive(&cfs_rq->avg.runnable_load_sum, delta_sum);
cfs_rq           3377 kernel/sched/fair.c static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
cfs_rq           3379 kernel/sched/fair.c 	cfs_rq->propagate = 1;
cfs_rq           3380 kernel/sched/fair.c 	cfs_rq->prop_runnable_sum += runnable_sum;
cfs_rq           3386 kernel/sched/fair.c 	struct cfs_rq *cfs_rq, *gcfs_rq;
cfs_rq           3397 kernel/sched/fair.c 	cfs_rq = cfs_rq_of(se);
cfs_rq           3399 kernel/sched/fair.c 	add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum);
cfs_rq           3401 kernel/sched/fair.c 	update_tg_cfs_util(cfs_rq, se, gcfs_rq);
cfs_rq           3402 kernel/sched/fair.c 	update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);
cfs_rq           3404 kernel/sched/fair.c 	trace_pelt_cfs_tp(cfs_rq);
cfs_rq           3416 kernel/sched/fair.c 	struct cfs_rq *gcfs_rq = group_cfs_rq(se);
cfs_rq           3442 kernel/sched/fair.c static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
cfs_rq           3449 kernel/sched/fair.c static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {}
cfs_rq           3470 kernel/sched/fair.c update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
cfs_rq           3473 kernel/sched/fair.c 	struct sched_avg *sa = &cfs_rq->avg;
cfs_rq           3476 kernel/sched/fair.c 	if (cfs_rq->removed.nr) {
cfs_rq           3480 kernel/sched/fair.c 		raw_spin_lock(&cfs_rq->removed.lock);
cfs_rq           3481 kernel/sched/fair.c 		swap(cfs_rq->removed.util_avg, removed_util);
cfs_rq           3482 kernel/sched/fair.c 		swap(cfs_rq->removed.load_avg, removed_load);
cfs_rq           3483 kernel/sched/fair.c 		swap(cfs_rq->removed.runnable_sum, removed_runnable_sum);
cfs_rq           3484 kernel/sched/fair.c 		cfs_rq->removed.nr = 0;
cfs_rq           3485 kernel/sched/fair.c 		raw_spin_unlock(&cfs_rq->removed.lock);
cfs_rq           3495 kernel/sched/fair.c 		add_tg_cfs_propagate(cfs_rq, -(long)removed_runnable_sum);
cfs_rq           3500 kernel/sched/fair.c 	decayed |= __update_load_avg_cfs_rq(now, cfs_rq);
cfs_rq           3504 kernel/sched/fair.c 	cfs_rq->load_last_update_time_copy = sa->last_update_time;
cfs_rq           3519 kernel/sched/fair.c static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
cfs_rq           3521 kernel/sched/fair.c 	u32 divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib;
cfs_rq           3530 kernel/sched/fair.c 	se->avg.last_update_time = cfs_rq->avg.last_update_time;
cfs_rq           3531 kernel/sched/fair.c 	se->avg.period_contrib = cfs_rq->avg.period_contrib;
cfs_rq           3549 kernel/sched/fair.c 	enqueue_load_avg(cfs_rq, se);
cfs_rq           3550 kernel/sched/fair.c 	cfs_rq->avg.util_avg += se->avg.util_avg;
cfs_rq           3551 kernel/sched/fair.c 	cfs_rq->avg.util_sum += se->avg.util_sum;
cfs_rq           3553 kernel/sched/fair.c 	add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
cfs_rq           3555 kernel/sched/fair.c 	cfs_rq_util_change(cfs_rq, flags);
cfs_rq           3557 kernel/sched/fair.c 	trace_pelt_cfs_tp(cfs_rq);
cfs_rq           3568 kernel/sched/fair.c static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
cfs_rq           3570 kernel/sched/fair.c 	dequeue_load_avg(cfs_rq, se);
cfs_rq           3571 kernel/sched/fair.c 	sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
cfs_rq           3572 kernel/sched/fair.c 	sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
cfs_rq           3574 kernel/sched/fair.c 	add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
cfs_rq           3576 kernel/sched/fair.c 	cfs_rq_util_change(cfs_rq, 0);
cfs_rq           3578 kernel/sched/fair.c 	trace_pelt_cfs_tp(cfs_rq);
cfs_rq           3589 kernel/sched/fair.c static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
cfs_rq           3591 kernel/sched/fair.c 	u64 now = cfs_rq_clock_pelt(cfs_rq);
cfs_rq           3599 kernel/sched/fair.c 		__update_load_avg_se(now, cfs_rq, se);
cfs_rq           3601 kernel/sched/fair.c 	decayed  = update_cfs_rq_load_avg(now, cfs_rq);
cfs_rq           3613 kernel/sched/fair.c 		attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION);
cfs_rq           3614 kernel/sched/fair.c 		update_tg_load_avg(cfs_rq, 0);
cfs_rq           3617 kernel/sched/fair.c 		cfs_rq_util_change(cfs_rq, 0);
cfs_rq           3620 kernel/sched/fair.c 			update_tg_load_avg(cfs_rq, 0);
cfs_rq           3625 kernel/sched/fair.c static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
cfs_rq           3631 kernel/sched/fair.c 		last_update_time_copy = cfs_rq->load_last_update_time_copy;
cfs_rq           3633 kernel/sched/fair.c 		last_update_time = cfs_rq->avg.last_update_time;
cfs_rq           3639 kernel/sched/fair.c static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
cfs_rq           3641 kernel/sched/fair.c 	return cfs_rq->avg.last_update_time;
cfs_rq           3651 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq           3654 kernel/sched/fair.c 	last_update_time = cfs_rq_last_update_time(cfs_rq);
cfs_rq           3664 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq           3675 kernel/sched/fair.c 	raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags);
cfs_rq           3676 kernel/sched/fair.c 	++cfs_rq->removed.nr;
cfs_rq           3677 kernel/sched/fair.c 	cfs_rq->removed.util_avg	+= se->avg.util_avg;
cfs_rq           3678 kernel/sched/fair.c 	cfs_rq->removed.load_avg	+= se->avg.load_avg;
cfs_rq           3679 kernel/sched/fair.c 	cfs_rq->removed.runnable_sum	+= se->avg.load_sum; /* == runnable_sum */
cfs_rq           3680 kernel/sched/fair.c 	raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags);
cfs_rq           3683 kernel/sched/fair.c static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq)
cfs_rq           3685 kernel/sched/fair.c 	return cfs_rq->avg.runnable_load_avg;
cfs_rq           3688 kernel/sched/fair.c static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
cfs_rq           3690 kernel/sched/fair.c 	return cfs_rq->avg.load_avg;
cfs_rq           3710 kernel/sched/fair.c static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
cfs_rq           3719 kernel/sched/fair.c 	enqueued  = cfs_rq->avg.util_est.enqueued;
cfs_rq           3721 kernel/sched/fair.c 	WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
cfs_rq           3738 kernel/sched/fair.c util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
cfs_rq           3748 kernel/sched/fair.c 	ue.enqueued  = cfs_rq->avg.util_est.enqueued;
cfs_rq           3750 kernel/sched/fair.c 	WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);
cfs_rq           3780 kernel/sched/fair.c 	cpu = cpu_of(rq_of(cfs_rq));
cfs_rq           3836 kernel/sched/fair.c static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
cfs_rq           3838 kernel/sched/fair.c 	cfs_rq_util_change(cfs_rq, 0);
cfs_rq           3844 kernel/sched/fair.c attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) {}
cfs_rq           3846 kernel/sched/fair.c detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
cfs_rq           3854 kernel/sched/fair.c util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
cfs_rq           3857 kernel/sched/fair.c util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p,
cfs_rq           3863 kernel/sched/fair.c static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
cfs_rq           3866 kernel/sched/fair.c 	s64 d = se->vruntime - cfs_rq->min_vruntime;
cfs_rq           3872 kernel/sched/fair.c 		schedstat_inc(cfs_rq->nr_spread_over);
cfs_rq           3877 kernel/sched/fair.c place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
cfs_rq           3879 kernel/sched/fair.c 	u64 vruntime = cfs_rq->min_vruntime;
cfs_rq           3888 kernel/sched/fair.c 		vruntime += sched_vslice(cfs_rq, se);
cfs_rq           3908 kernel/sched/fair.c static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
cfs_rq           3963 kernel/sched/fair.c enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
cfs_rq           3966 kernel/sched/fair.c 	bool curr = cfs_rq->curr == se;
cfs_rq           3973 kernel/sched/fair.c 		se->vruntime += cfs_rq->min_vruntime;
cfs_rq           3975 kernel/sched/fair.c 	update_curr(cfs_rq);
cfs_rq           3984 kernel/sched/fair.c 		se->vruntime += cfs_rq->min_vruntime;
cfs_rq           3994 kernel/sched/fair.c 	update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
cfs_rq           3996 kernel/sched/fair.c 	enqueue_runnable_load_avg(cfs_rq, se);
cfs_rq           3997 kernel/sched/fair.c 	account_entity_enqueue(cfs_rq, se);
cfs_rq           4000 kernel/sched/fair.c 		place_entity(cfs_rq, se, 0);
cfs_rq           4003 kernel/sched/fair.c 	update_stats_enqueue(cfs_rq, se, flags);
cfs_rq           4004 kernel/sched/fair.c 	check_spread(cfs_rq, se);
cfs_rq           4006 kernel/sched/fair.c 		__enqueue_entity(cfs_rq, se);
cfs_rq           4014 kernel/sched/fair.c 	if (cfs_rq->nr_running == 1 || cfs_bandwidth_used())
cfs_rq           4015 kernel/sched/fair.c 		list_add_leaf_cfs_rq(cfs_rq);
cfs_rq           4017 kernel/sched/fair.c 	if (cfs_rq->nr_running == 1)
cfs_rq           4018 kernel/sched/fair.c 		check_enqueue_throttle(cfs_rq);
cfs_rq           4024 kernel/sched/fair.c 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq           4025 kernel/sched/fair.c 		if (cfs_rq->last != se)
cfs_rq           4028 kernel/sched/fair.c 		cfs_rq->last = NULL;
cfs_rq           4035 kernel/sched/fair.c 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq           4036 kernel/sched/fair.c 		if (cfs_rq->next != se)
cfs_rq           4039 kernel/sched/fair.c 		cfs_rq->next = NULL;
cfs_rq           4046 kernel/sched/fair.c 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq           4047 kernel/sched/fair.c 		if (cfs_rq->skip != se)
cfs_rq           4050 kernel/sched/fair.c 		cfs_rq->skip = NULL;
cfs_rq           4054 kernel/sched/fair.c static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
cfs_rq           4056 kernel/sched/fair.c 	if (cfs_rq->last == se)
cfs_rq           4059 kernel/sched/fair.c 	if (cfs_rq->next == se)
cfs_rq           4062 kernel/sched/fair.c 	if (cfs_rq->skip == se)
cfs_rq           4066 kernel/sched/fair.c static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
cfs_rq           4069 kernel/sched/fair.c dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
cfs_rq           4074 kernel/sched/fair.c 	update_curr(cfs_rq);
cfs_rq           4084 kernel/sched/fair.c 	update_load_avg(cfs_rq, se, UPDATE_TG);
cfs_rq           4085 kernel/sched/fair.c 	dequeue_runnable_load_avg(cfs_rq, se);
cfs_rq           4087 kernel/sched/fair.c 	update_stats_dequeue(cfs_rq, se, flags);
cfs_rq           4089 kernel/sched/fair.c 	clear_buddies(cfs_rq, se);
cfs_rq           4091 kernel/sched/fair.c 	if (se != cfs_rq->curr)
cfs_rq           4092 kernel/sched/fair.c 		__dequeue_entity(cfs_rq, se);
cfs_rq           4094 kernel/sched/fair.c 	account_entity_dequeue(cfs_rq, se);
cfs_rq           4103 kernel/sched/fair.c 		se->vruntime -= cfs_rq->min_vruntime;
cfs_rq           4106 kernel/sched/fair.c 	return_cfs_rq_runtime(cfs_rq);
cfs_rq           4117 kernel/sched/fair.c 		update_min_vruntime(cfs_rq);
cfs_rq           4124 kernel/sched/fair.c check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
cfs_rq           4130 kernel/sched/fair.c 	ideal_runtime = sched_slice(cfs_rq, curr);
cfs_rq           4133 kernel/sched/fair.c 		resched_curr(rq_of(cfs_rq));
cfs_rq           4138 kernel/sched/fair.c 		clear_buddies(cfs_rq, curr);
cfs_rq           4150 kernel/sched/fair.c 	se = __pick_first_entity(cfs_rq);
cfs_rq           4157 kernel/sched/fair.c 		resched_curr(rq_of(cfs_rq));
cfs_rq           4161 kernel/sched/fair.c set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
cfs_rq           4170 kernel/sched/fair.c 		update_stats_wait_end(cfs_rq, se);
cfs_rq           4171 kernel/sched/fair.c 		__dequeue_entity(cfs_rq, se);
cfs_rq           4172 kernel/sched/fair.c 		update_load_avg(cfs_rq, se, UPDATE_TG);
cfs_rq           4175 kernel/sched/fair.c 	update_stats_curr_start(cfs_rq, se);
cfs_rq           4176 kernel/sched/fair.c 	cfs_rq->curr = se;
cfs_rq           4184 kernel/sched/fair.c 	    rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) {
cfs_rq           4204 kernel/sched/fair.c pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
cfs_rq           4206 kernel/sched/fair.c 	struct sched_entity *left = __pick_first_entity(cfs_rq);
cfs_rq           4222 kernel/sched/fair.c 	if (cfs_rq->skip == se) {
cfs_rq           4226 kernel/sched/fair.c 			second = __pick_first_entity(cfs_rq);
cfs_rq           4240 kernel/sched/fair.c 	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
cfs_rq           4241 kernel/sched/fair.c 		se = cfs_rq->last;
cfs_rq           4246 kernel/sched/fair.c 	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
cfs_rq           4247 kernel/sched/fair.c 		se = cfs_rq->next;
cfs_rq           4249 kernel/sched/fair.c 	clear_buddies(cfs_rq, se);
cfs_rq           4254 kernel/sched/fair.c static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
cfs_rq           4256 kernel/sched/fair.c static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
cfs_rq           4263 kernel/sched/fair.c 		update_curr(cfs_rq);
cfs_rq           4266 kernel/sched/fair.c 	check_cfs_rq_runtime(cfs_rq);
cfs_rq           4268 kernel/sched/fair.c 	check_spread(cfs_rq, prev);
cfs_rq           4271 kernel/sched/fair.c 		update_stats_wait_start(cfs_rq, prev);
cfs_rq           4273 kernel/sched/fair.c 		__enqueue_entity(cfs_rq, prev);
cfs_rq           4275 kernel/sched/fair.c 		update_load_avg(cfs_rq, prev, 0);
cfs_rq           4277 kernel/sched/fair.c 	cfs_rq->curr = NULL;
cfs_rq           4281 kernel/sched/fair.c entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
cfs_rq           4286 kernel/sched/fair.c 	update_curr(cfs_rq);
cfs_rq           4291 kernel/sched/fair.c 	update_load_avg(cfs_rq, curr, UPDATE_TG);
cfs_rq           4300 kernel/sched/fair.c 		resched_curr(rq_of(cfs_rq));
cfs_rq           4307 kernel/sched/fair.c 			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
cfs_rq           4311 kernel/sched/fair.c 	if (cfs_rq->nr_running > 1)
cfs_rq           4312 kernel/sched/fair.c 		check_preempt_tick(cfs_rq, curr);
cfs_rq           4382 kernel/sched/fair.c static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
cfs_rq           4384 kernel/sched/fair.c 	struct task_group *tg = cfs_rq->tg;
cfs_rq           4389 kernel/sched/fair.c 	min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
cfs_rq           4405 kernel/sched/fair.c 	cfs_rq->runtime_remaining += amount;
cfs_rq           4407 kernel/sched/fair.c 	return cfs_rq->runtime_remaining > 0;
cfs_rq           4410 kernel/sched/fair.c static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
cfs_rq           4413 kernel/sched/fair.c 	cfs_rq->runtime_remaining -= delta_exec;
cfs_rq           4415 kernel/sched/fair.c 	if (likely(cfs_rq->runtime_remaining > 0))
cfs_rq           4418 kernel/sched/fair.c 	if (cfs_rq->throttled)
cfs_rq           4424 kernel/sched/fair.c 	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
cfs_rq           4425 kernel/sched/fair.c 		resched_curr(rq_of(cfs_rq));
cfs_rq           4429 kernel/sched/fair.c void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
cfs_rq           4431 kernel/sched/fair.c 	if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
cfs_rq           4434 kernel/sched/fair.c 	__account_cfs_rq_runtime(cfs_rq, delta_exec);
cfs_rq           4437 kernel/sched/fair.c static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
cfs_rq           4439 kernel/sched/fair.c 	return cfs_bandwidth_used() && cfs_rq->throttled;
cfs_rq           4443 kernel/sched/fair.c static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
cfs_rq           4445 kernel/sched/fair.c 	return cfs_bandwidth_used() && cfs_rq->throttle_count;
cfs_rq           4456 kernel/sched/fair.c 	struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
cfs_rq           4458 kernel/sched/fair.c 	src_cfs_rq = tg->cfs_rq[src_cpu];
cfs_rq           4459 kernel/sched/fair.c 	dest_cfs_rq = tg->cfs_rq[dest_cpu];
cfs_rq           4468 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
cfs_rq           4470 kernel/sched/fair.c 	cfs_rq->throttle_count--;
cfs_rq           4471 kernel/sched/fair.c 	if (!cfs_rq->throttle_count) {
cfs_rq           4472 kernel/sched/fair.c 		cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
cfs_rq           4473 kernel/sched/fair.c 					     cfs_rq->throttled_clock_task;
cfs_rq           4476 kernel/sched/fair.c 		if (cfs_rq->nr_running >= 1)
cfs_rq           4477 kernel/sched/fair.c 			list_add_leaf_cfs_rq(cfs_rq);
cfs_rq           4486 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
cfs_rq           4489 kernel/sched/fair.c 	if (!cfs_rq->throttle_count) {
cfs_rq           4490 kernel/sched/fair.c 		cfs_rq->throttled_clock_task = rq_clock_task(rq);
cfs_rq           4491 kernel/sched/fair.c 		list_del_leaf_cfs_rq(cfs_rq);
cfs_rq           4493 kernel/sched/fair.c 	cfs_rq->throttle_count++;
cfs_rq           4498 kernel/sched/fair.c static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
cfs_rq           4500 kernel/sched/fair.c 	struct rq *rq = rq_of(cfs_rq);
cfs_rq           4501 kernel/sched/fair.c 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
cfs_rq           4506 kernel/sched/fair.c 	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
cfs_rq           4510 kernel/sched/fair.c 	walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
cfs_rq           4513 kernel/sched/fair.c 	task_delta = cfs_rq->h_nr_running;
cfs_rq           4514 kernel/sched/fair.c 	idle_task_delta = cfs_rq->idle_h_nr_running;
cfs_rq           4516 kernel/sched/fair.c 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
cfs_rq           4533 kernel/sched/fair.c 	cfs_rq->throttled = 1;
cfs_rq           4534 kernel/sched/fair.c 	cfs_rq->throttled_clock = rq_clock(rq);
cfs_rq           4544 kernel/sched/fair.c 		list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
cfs_rq           4546 kernel/sched/fair.c 		list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
cfs_rq           4558 kernel/sched/fair.c void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
cfs_rq           4560 kernel/sched/fair.c 	struct rq *rq = rq_of(cfs_rq);
cfs_rq           4561 kernel/sched/fair.c 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
cfs_rq           4566 kernel/sched/fair.c 	se = cfs_rq->tg->se[cpu_of(rq)];
cfs_rq           4568 kernel/sched/fair.c 	cfs_rq->throttled = 0;
cfs_rq           4573 kernel/sched/fair.c 	cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
cfs_rq           4574 kernel/sched/fair.c 	list_del_rcu(&cfs_rq->throttled_list);
cfs_rq           4578 kernel/sched/fair.c 	walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
cfs_rq           4580 kernel/sched/fair.c 	if (!cfs_rq->load.weight)
cfs_rq           4583 kernel/sched/fair.c 	task_delta = cfs_rq->h_nr_running;
cfs_rq           4584 kernel/sched/fair.c 	idle_task_delta = cfs_rq->idle_h_nr_running;
cfs_rq           4589 kernel/sched/fair.c 		cfs_rq = cfs_rq_of(se);
cfs_rq           4591 kernel/sched/fair.c 			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
cfs_rq           4592 kernel/sched/fair.c 		cfs_rq->h_nr_running += task_delta;
cfs_rq           4593 kernel/sched/fair.c 		cfs_rq->idle_h_nr_running += idle_task_delta;
cfs_rq           4595 kernel/sched/fair.c 		if (cfs_rq_throttled(cfs_rq))
cfs_rq           4608 kernel/sched/fair.c 		cfs_rq = cfs_rq_of(se);
cfs_rq           4610 kernel/sched/fair.c 		list_add_leaf_cfs_rq(cfs_rq);
cfs_rq           4622 kernel/sched/fair.c 	struct cfs_rq *cfs_rq;
cfs_rq           4627 kernel/sched/fair.c 	list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
cfs_rq           4629 kernel/sched/fair.c 		struct rq *rq = rq_of(cfs_rq);
cfs_rq           4633 kernel/sched/fair.c 		if (!cfs_rq_throttled(cfs_rq))
cfs_rq           4637 kernel/sched/fair.c 		SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);
cfs_rq           4639 kernel/sched/fair.c 		runtime = -cfs_rq->runtime_remaining + 1;
cfs_rq           4644 kernel/sched/fair.c 		cfs_rq->runtime_remaining += runtime;
cfs_rq           4647 kernel/sched/fair.c 		if (cfs_rq->runtime_remaining > 0)
cfs_rq           4648 kernel/sched/fair.c 			unthrottle_cfs_rq(cfs_rq);
cfs_rq           4782 kernel/sched/fair.c static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
cfs_rq           4784 kernel/sched/fair.c 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
cfs_rq           4785 kernel/sched/fair.c 	s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
cfs_rq           4802 kernel/sched/fair.c 	cfs_rq->runtime_remaining -= slack_runtime;
cfs_rq           4805 kernel/sched/fair.c static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
cfs_rq           4810 kernel/sched/fair.c 	if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
cfs_rq           4813 kernel/sched/fair.c 	__return_cfs_rq_runtime(cfs_rq);
cfs_rq           4862 kernel/sched/fair.c static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
cfs_rq           4868 kernel/sched/fair.c 	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
cfs_rq           4872 kernel/sched/fair.c 	if (cfs_rq_throttled(cfs_rq))
cfs_rq           4876 kernel/sched/fair.c 	account_cfs_rq_runtime(cfs_rq, 0);
cfs_rq           4877 kernel/sched/fair.c 	if (cfs_rq->runtime_remaining <= 0)
cfs_rq           4878 kernel/sched/fair.c 		throttle_cfs_rq(cfs_rq);
cfs_rq           4883 kernel/sched/fair.c 	struct cfs_rq *pcfs_rq, *cfs_rq;
cfs_rq           4891 kernel/sched/fair.c 	cfs_rq = tg->cfs_rq[cpu];
cfs_rq           4892 kernel/sched/fair.c 	pcfs_rq = tg->parent->cfs_rq[cpu];
cfs_rq           4894 kernel/sched/fair.c 	cfs_rq->throttle_count = pcfs_rq->throttle_count;
cfs_rq           4895 kernel/sched/fair.c 	cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
cfs_rq           4899 kernel/sched/fair.c static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
cfs_rq           4904 kernel/sched/fair.c 	if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
cfs_rq           4911 kernel/sched/fair.c 	if (cfs_rq_throttled(cfs_rq))
cfs_rq           4914 kernel/sched/fair.c 	throttle_cfs_rq(cfs_rq);
cfs_rq           5000 kernel/sched/fair.c static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
cfs_rq           5002 kernel/sched/fair.c 	cfs_rq->runtime_enabled = 0;
cfs_rq           5003 kernel/sched/fair.c 	INIT_LIST_HEAD(&cfs_rq->throttled_list);
cfs_rq           5045 kernel/sched/fair.c 		struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
cfs_rq           5048 kernel/sched/fair.c 		cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
cfs_rq           5063 kernel/sched/fair.c 		struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
cfs_rq           5065 kernel/sched/fair.c 		if (!cfs_rq->runtime_enabled)
cfs_rq           5072 kernel/sched/fair.c 		cfs_rq->runtime_remaining = 1;
cfs_rq           5077 kernel/sched/fair.c 		cfs_rq->runtime_enabled = 0;
cfs_rq           5079 kernel/sched/fair.c 		if (cfs_rq_throttled(cfs_rq))
cfs_rq           5080 kernel/sched/fair.c 			unthrottle_cfs_rq(cfs_rq);
cfs_rq           5092 kernel/sched/fair.c static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
cfs_rq           5093 kernel/sched/fair.c static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
cfs_rq           5094 kernel/sched/fair.c static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
cfs_rq           5096 kernel/sched/fair.c static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
cfs_rq           5098 kernel/sched/fair.c static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
cfs_rq           5103 kernel/sched/fair.c static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
cfs_rq           5117 kernel/sched/fair.c static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
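
The bandwidth hits from assign_cfs_rq_runtime() down to the !CONFIG_CFS_BANDWIDTH stubs implement a two-level pool: each period refills a per-group global pool, every per-CPU cfs_rq pulls slices from it (sysctl_sched_cfs_bandwidth_slice, 5 ms by default) into runtime_remaining, and the queue is throttled once both levels run dry. A toy single-period model in microseconds, illustrative only (the periodic refill, slack return, and distribute paths are not modelled):

/* Toy model of local-slice accounting against one period's quota. */
#include <stdio.h>

#define SLICE_US 5000	/* sysctl_sched_cfs_bandwidth_slice default */

struct pool { long runtime; };
struct cpu_rq { long runtime_remaining; int throttled; };

/* roughly assign_cfs_rq_runtime(): pull one slice from the global pool */
static int assign_runtime(struct pool *p, struct cpu_rq *rq)
{
	long amount = SLICE_US - rq->runtime_remaining;

	if (amount > p->runtime)
		amount = p->runtime;
	p->runtime -= amount;
	rq->runtime_remaining += amount;
	return rq->runtime_remaining > 0;
}

/* roughly __account_cfs_rq_runtime() plus the throttle decision */
static void account_runtime(struct pool *p, struct cpu_rq *rq, long delta)
{
	rq->runtime_remaining -= delta;
	if (rq->runtime_remaining > 0)
		return;
	if (!assign_runtime(p, rq))
		rq->throttled = 1;
}

int main(void)
{
	struct pool p = { .runtime = 20000 };	/* one period's 20 ms quota */
	struct cpu_rq rq = { 0 };
	int ms;

	for (ms = 1; ms <= 30 && !rq.throttled; ms++)
		account_runtime(&p, &rq, 1000);	/* burn 1 ms of runtime */
	printf("throttled after %d ms of a 20 ms quota\n", ms - 1);
	return 0;
}
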
cfs_rq           5138 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq           5143 kernel/sched/fair.c 		u64 slice = sched_slice(cfs_rq, se);
cfs_rq           5209 kernel/sched/fair.c 	struct cfs_rq *cfs_rq;
cfs_rq           5232 kernel/sched/fair.c 		cfs_rq = cfs_rq_of(se);
cfs_rq           5233 kernel/sched/fair.c 		enqueue_entity(cfs_rq, se, flags);
cfs_rq           5235 kernel/sched/fair.c 		cfs_rq->h_nr_running++;
cfs_rq           5236 kernel/sched/fair.c 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
cfs_rq           5239 kernel/sched/fair.c 		if (cfs_rq_throttled(cfs_rq))
cfs_rq           5246 kernel/sched/fair.c 		cfs_rq = cfs_rq_of(se);
cfs_rq           5248 kernel/sched/fair.c 		update_load_avg(cfs_rq, se, UPDATE_TG);
cfs_rq           5251 kernel/sched/fair.c 		cfs_rq->h_nr_running++;
cfs_rq           5252 kernel/sched/fair.c 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
cfs_rq           5255 kernel/sched/fair.c 		if (cfs_rq_throttled(cfs_rq))
cfs_rq           5262 kernel/sched/fair.c 		if (throttled_hierarchy(cfs_rq))
cfs_rq           5263 kernel/sched/fair.c 			list_add_leaf_cfs_rq(cfs_rq);
cfs_rq           5296 kernel/sched/fair.c 			cfs_rq = cfs_rq_of(se);
cfs_rq           5298 kernel/sched/fair.c 			if (list_add_leaf_cfs_rq(cfs_rq))
cfs_rq           5317 kernel/sched/fair.c 	struct cfs_rq *cfs_rq;
cfs_rq           5323 kernel/sched/fair.c 		cfs_rq = cfs_rq_of(se);
cfs_rq           5324 kernel/sched/fair.c 		dequeue_entity(cfs_rq, se, flags);
cfs_rq           5326 kernel/sched/fair.c 		cfs_rq->h_nr_running--;
cfs_rq           5327 kernel/sched/fair.c 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
cfs_rq           5330 kernel/sched/fair.c 		if (cfs_rq_throttled(cfs_rq))
cfs_rq           5334 kernel/sched/fair.c 		if (cfs_rq->load.weight) {
cfs_rq           5341 kernel/sched/fair.c 			if (task_sleep && se && !throttled_hierarchy(cfs_rq))
cfs_rq           5349 kernel/sched/fair.c 		cfs_rq = cfs_rq_of(se);
cfs_rq           5351 kernel/sched/fair.c 		update_load_avg(cfs_rq, se, UPDATE_TG);
cfs_rq           5354 kernel/sched/fair.c 		cfs_rq->h_nr_running--;
cfs_rq           5355 kernel/sched/fair.c 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
cfs_rq           5358 kernel/sched/fair.c 		if (cfs_rq_throttled(cfs_rq))
cfs_rq           6103 kernel/sched/fair.c 	struct cfs_rq *cfs_rq;
cfs_rq           6106 kernel/sched/fair.c 	cfs_rq = &cpu_rq(cpu)->cfs;
cfs_rq           6107 kernel/sched/fair.c 	util = READ_ONCE(cfs_rq->avg.util_avg);
cfs_rq           6110 kernel/sched/fair.c 		util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
cfs_rq           6130 kernel/sched/fair.c 	struct cfs_rq *cfs_rq;
cfs_rq           6137 kernel/sched/fair.c 	cfs_rq = &cpu_rq(cpu)->cfs;
cfs_rq           6138 kernel/sched/fair.c 	util = READ_ONCE(cfs_rq->avg.util_avg);
cfs_rq           6171 kernel/sched/fair.c 			READ_ONCE(cfs_rq->avg.util_est.enqueued);
cfs_rq           6237 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
cfs_rq           6238 kernel/sched/fair.c 	unsigned long util_est, util = READ_ONCE(cfs_rq->avg.util_avg);
cfs_rq           6252 kernel/sched/fair.c 		util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued);
cfs_rq           6548 kernel/sched/fair.c 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq           6555 kernel/sched/fair.c 			min_vruntime_copy = cfs_rq->min_vruntime_copy;
cfs_rq           6557 kernel/sched/fair.c 			min_vruntime = cfs_rq->min_vruntime;
cfs_rq           6560 kernel/sched/fair.c 		min_vruntime = cfs_rq->min_vruntime;
cfs_rq           6696 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
cfs_rq           6697 kernel/sched/fair.c 	int scale = cfs_rq->nr_running >= sched_nr_latency;
cfs_rq           6778 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = &rq->cfs;
cfs_rq           6800 kernel/sched/fair.c 		struct sched_entity *curr = cfs_rq->curr;
cfs_rq           6810 kernel/sched/fair.c 				update_curr(cfs_rq);
cfs_rq           6820 kernel/sched/fair.c 			if (unlikely(check_cfs_rq_runtime(cfs_rq))) {
cfs_rq           6821 kernel/sched/fair.c 				cfs_rq = &rq->cfs;
cfs_rq           6823 kernel/sched/fair.c 				if (!cfs_rq->nr_running)
cfs_rq           6830 kernel/sched/fair.c 		se = pick_next_entity(cfs_rq, curr);
cfs_rq           6831 kernel/sched/fair.c 		cfs_rq = group_cfs_rq(se);
cfs_rq           6832 kernel/sched/fair.c 	} while (cfs_rq);
cfs_rq           6844 kernel/sched/fair.c 		while (!(cfs_rq = is_same_group(se, pse))) {
cfs_rq           6858 kernel/sched/fair.c 		put_prev_entity(cfs_rq, pse);
cfs_rq           6859 kernel/sched/fair.c 		set_next_entity(cfs_rq, se);
cfs_rq           6869 kernel/sched/fair.c 		se = pick_next_entity(cfs_rq, NULL);
cfs_rq           6870 kernel/sched/fair.c 		set_next_entity(cfs_rq, se);
cfs_rq           6871 kernel/sched/fair.c 		cfs_rq = group_cfs_rq(se);
cfs_rq           6872 kernel/sched/fair.c 	} while (cfs_rq);
cfs_rq           6925 kernel/sched/fair.c 	struct cfs_rq *cfs_rq;
cfs_rq           6928 kernel/sched/fair.c 		cfs_rq = cfs_rq_of(se);
cfs_rq           6929 kernel/sched/fair.c 		put_prev_entity(cfs_rq, se);
cfs_rq           6941 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
cfs_rq           6950 kernel/sched/fair.c 	clear_buddies(cfs_rq, se);
cfs_rq           6957 kernel/sched/fair.c 		update_curr(cfs_rq);
cfs_rq           7506 kernel/sched/fair.c static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
cfs_rq           7508 kernel/sched/fair.c 	if (cfs_rq->avg.load_avg)
cfs_rq           7511 kernel/sched/fair.c 	if (cfs_rq->avg.util_avg)
cfs_rq           7541 kernel/sched/fair.c static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; }
cfs_rq           7570 kernel/sched/fair.c static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
cfs_rq           7572 kernel/sched/fair.c 	if (cfs_rq->load.weight)
cfs_rq           7575 kernel/sched/fair.c 	if (cfs_rq->avg.load_sum)
cfs_rq           7578 kernel/sched/fair.c 	if (cfs_rq->avg.util_sum)
cfs_rq           7581 kernel/sched/fair.c 	if (cfs_rq->avg.runnable_load_sum)
cfs_rq           7589 kernel/sched/fair.c 	struct cfs_rq *cfs_rq, *pos;
cfs_rq           7597 kernel/sched/fair.c 	for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
cfs_rq           7600 kernel/sched/fair.c 		if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) {
cfs_rq           7601 kernel/sched/fair.c 			update_tg_load_avg(cfs_rq, 0);
cfs_rq           7603 kernel/sched/fair.c 			if (cfs_rq == &rq->cfs)
cfs_rq           7608 kernel/sched/fair.c 		se = cfs_rq->tg->se[cpu];
cfs_rq           7616 kernel/sched/fair.c 		if (cfs_rq_is_decayed(cfs_rq))
cfs_rq           7617 kernel/sched/fair.c 			list_del_leaf_cfs_rq(cfs_rq);
cfs_rq           7620 kernel/sched/fair.c 		if (cfs_rq_has_blocked(cfs_rq))
cfs_rq           7632 kernel/sched/fair.c static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
cfs_rq           7634 kernel/sched/fair.c 	struct rq *rq = rq_of(cfs_rq);
cfs_rq           7635 kernel/sched/fair.c 	struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
cfs_rq           7639 kernel/sched/fair.c 	if (cfs_rq->last_h_load_update == now)
cfs_rq           7642 kernel/sched/fair.c 	WRITE_ONCE(cfs_rq->h_load_next, NULL);
cfs_rq           7644 kernel/sched/fair.c 		cfs_rq = cfs_rq_of(se);
cfs_rq           7645 kernel/sched/fair.c 		WRITE_ONCE(cfs_rq->h_load_next, se);
cfs_rq           7646 kernel/sched/fair.c 		if (cfs_rq->last_h_load_update == now)
cfs_rq           7651 kernel/sched/fair.c 		cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
cfs_rq           7652 kernel/sched/fair.c 		cfs_rq->last_h_load_update = now;
cfs_rq           7655 kernel/sched/fair.c 	while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
cfs_rq           7656 kernel/sched/fair.c 		load = cfs_rq->h_load;
cfs_rq           7658 kernel/sched/fair.c 			cfs_rq_load_avg(cfs_rq) + 1);
cfs_rq           7659 kernel/sched/fair.c 		cfs_rq = group_cfs_rq(se);
cfs_rq           7660 kernel/sched/fair.c 		cfs_rq->h_load = load;
cfs_rq           7661 kernel/sched/fair.c 		cfs_rq->last_h_load_update = now;
cfs_rq           7667 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
cfs_rq           7669 kernel/sched/fair.c 	update_cfs_rq_h_load(cfs_rq);
cfs_rq           7670 kernel/sched/fair.c 	return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
cfs_rq           7671 kernel/sched/fair.c 			cfs_rq_load_avg(cfs_rq) + 1);
cfs_rq           7676 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = &rq->cfs;
cfs_rq           7679 kernel/sched/fair.c 	decayed = update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
cfs_rq           7680 kernel/sched/fair.c 	if (cfs_rq_has_blocked(cfs_rq))
cfs_rq           9981 kernel/sched/fair.c 	struct cfs_rq *cfs_rq;
cfs_rq           9985 kernel/sched/fair.c 		cfs_rq = cfs_rq_of(se);
cfs_rq           9986 kernel/sched/fair.c 		entity_tick(cfs_rq, se, queued);
cfs_rq           10003 kernel/sched/fair.c 	struct cfs_rq *cfs_rq;
cfs_rq           10011 kernel/sched/fair.c 	cfs_rq = task_cfs_rq(current);
cfs_rq           10012 kernel/sched/fair.c 	curr = cfs_rq->curr;
cfs_rq           10014 kernel/sched/fair.c 		update_curr(cfs_rq);
cfs_rq           10017 kernel/sched/fair.c 	place_entity(cfs_rq, se, 1);
cfs_rq           10028 kernel/sched/fair.c 	se->vruntime -= cfs_rq->min_vruntime;
cfs_rq           10089 kernel/sched/fair.c 	struct cfs_rq *cfs_rq;
cfs_rq           10095 kernel/sched/fair.c 		cfs_rq = cfs_rq_of(se);
cfs_rq           10097 kernel/sched/fair.c 		if (cfs_rq_throttled(cfs_rq))
cfs_rq           10100 kernel/sched/fair.c 		update_load_avg(cfs_rq, se, UPDATE_TG);
cfs_rq           10109 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq           10112 kernel/sched/fair.c 	update_load_avg(cfs_rq, se, 0);
cfs_rq           10113 kernel/sched/fair.c 	detach_entity_load_avg(cfs_rq, se);
cfs_rq           10114 kernel/sched/fair.c 	update_tg_load_avg(cfs_rq, false);
cfs_rq           10120 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq           10131 kernel/sched/fair.c 	update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
cfs_rq           10132 kernel/sched/fair.c 	attach_entity_load_avg(cfs_rq, se, 0);
cfs_rq           10133 kernel/sched/fair.c 	update_tg_load_avg(cfs_rq, false);
cfs_rq           10140 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq           10147 kernel/sched/fair.c 		place_entity(cfs_rq, se, 0);
cfs_rq           10148 kernel/sched/fair.c 		se->vruntime -= cfs_rq->min_vruntime;
cfs_rq           10157 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq           10162 kernel/sched/fair.c 		se->vruntime += cfs_rq->min_vruntime;
cfs_rq           10207 kernel/sched/fair.c 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq           10209 kernel/sched/fair.c 		set_next_entity(cfs_rq, se);
cfs_rq           10211 kernel/sched/fair.c 		account_cfs_rq_runtime(cfs_rq, 0);
cfs_rq           10215 kernel/sched/fair.c void init_cfs_rq(struct cfs_rq *cfs_rq)
cfs_rq           10217 kernel/sched/fair.c 	cfs_rq->tasks_timeline = RB_ROOT_CACHED;
cfs_rq           10218 kernel/sched/fair.c 	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
cfs_rq           10220 kernel/sched/fair.c 	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
cfs_rq           10223 kernel/sched/fair.c 	raw_spin_lock_init(&cfs_rq->removed.lock);
cfs_rq           10268 kernel/sched/fair.c 		if (tg->cfs_rq)
cfs_rq           10269 kernel/sched/fair.c 			kfree(tg->cfs_rq[i]);
cfs_rq           10274 kernel/sched/fair.c 	kfree(tg->cfs_rq);
cfs_rq           10281 kernel/sched/fair.c 	struct cfs_rq *cfs_rq;
cfs_rq           10284 kernel/sched/fair.c 	tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL);
cfs_rq           10285 kernel/sched/fair.c 	if (!tg->cfs_rq)
cfs_rq           10296 kernel/sched/fair.c 		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
cfs_rq           10298 kernel/sched/fair.c 		if (!cfs_rq)
cfs_rq           10306 kernel/sched/fair.c 		init_cfs_rq(cfs_rq);
cfs_rq           10307 kernel/sched/fair.c 		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
cfs_rq           10314 kernel/sched/fair.c 	kfree(cfs_rq);
cfs_rq           10351 kernel/sched/fair.c 		if (!tg->cfs_rq[cpu]->on_list)
cfs_rq           10357 kernel/sched/fair.c 		list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
cfs_rq           10362 kernel/sched/fair.c void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
cfs_rq           10368 kernel/sched/fair.c 	cfs_rq->tg = tg;
cfs_rq           10369 kernel/sched/fair.c 	cfs_rq->rq = rq;
cfs_rq           10370 kernel/sched/fair.c 	init_cfs_rq_runtime(cfs_rq);
cfs_rq           10372 kernel/sched/fair.c 	tg->cfs_rq[cpu] = cfs_rq;
cfs_rq           10380 kernel/sched/fair.c 		se->cfs_rq = &rq->cfs;
cfs_rq           10383 kernel/sched/fair.c 		se->cfs_rq = parent->my_q;
cfs_rq           10387 kernel/sched/fair.c 	se->my_q = cfs_rq;
cfs_rq           10513 kernel/sched/fair.c 	struct cfs_rq *cfs_rq, *pos;
cfs_rq           10516 kernel/sched/fair.c 	for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
cfs_rq           10517 kernel/sched/fair.c 		print_cfs_rq(m, cpu, cfs_rq);
cfs_rq           10564 kernel/sched/fair.c const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq)
cfs_rq           10567 kernel/sched/fair.c 	return cfs_rq ? &cfs_rq->avg : NULL;
cfs_rq           10574 kernel/sched/fair.c char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len)
cfs_rq           10576 kernel/sched/fair.c 	if (!cfs_rq) {
cfs_rq           10583 kernel/sched/fair.c 	cfs_rq_tg_path(cfs_rq, str, len);
cfs_rq           10588 kernel/sched/fair.c int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq)
cfs_rq           10590 kernel/sched/fair.c 	return cfs_rq ? cpu_of(rq_of(cfs_rq)) : -1;
cfs_rq            277 kernel/sched/pelt.c int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)
cfs_rq            280 kernel/sched/pelt.c 				cfs_rq->curr == se)) {
cfs_rq            291 kernel/sched/pelt.c int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
cfs_rq            293 kernel/sched/pelt.c 	if (___update_load_sum(now, &cfs_rq->avg,
cfs_rq            294 kernel/sched/pelt.c 				scale_load_down(cfs_rq->load.weight),
cfs_rq            295 kernel/sched/pelt.c 				scale_load_down(cfs_rq->runnable_weight),
cfs_rq            296 kernel/sched/pelt.c 				cfs_rq->curr != NULL)) {
cfs_rq            298 kernel/sched/pelt.c 		___update_load_avg(&cfs_rq->avg, 1, 1);
cfs_rq            299 kernel/sched/pelt.c 		trace_pelt_cfs_tp(cfs_rq);
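
The pelt.c hits fold runnable time into a geometrically decayed sum: contributions accumulate in 1024 us segments and each elapsed segment decays the sum by y, where y^32 = 0.5 (a 32 ms half-life). A floating-point sketch of why a permanently busy cfs_rq saturates near the LOAD_AVG_MAX constant used earlier in this listing (47742; the kernel's fixed-point tables land slightly below the ideal geometric sum printed here). Link with -lm:

/* Decay model of a 100% runnable PELT signal; not kernel code. */
#include <math.h>
#include <stdio.h>

int main(void)
{
	double y = pow(0.5, 1.0 / 32.0);	/* decay per 1024 us segment */
	double sum = 0.0;
	int seg;

	for (seg = 0; seg < 1000; seg++)	/* ~1 s of fully runnable time */
		sum = sum * y + 1024.0;

	/* prints ~47788; the kernel caps at LOAD_AVG_MAX = 47742, roughly
	 * the divider ___update_load_avg() uses to turn *_sum into *_avg */
	printf("saturated load_sum ~= %.0f\n", sum);
	return 0;
}
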
cfs_rq              5 kernel/sched/pelt.h int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
cfs_rq              6 kernel/sched/pelt.h int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
cfs_rq            127 kernel/sched/pelt.h static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
cfs_rq            129 kernel/sched/pelt.h 	if (unlikely(cfs_rq->throttle_count))
cfs_rq            130 kernel/sched/pelt.h 		return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;
cfs_rq            132 kernel/sched/pelt.h 	return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
cfs_rq            135 kernel/sched/pelt.h static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
cfs_rq            137 kernel/sched/pelt.h 	return rq_clock_pelt(rq_of(cfs_rq));
cfs_rq            144 kernel/sched/pelt.h update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
cfs_rq            332 kernel/sched/sched.h struct cfs_rq;
cfs_rq            368 kernel/sched/sched.h 	struct cfs_rq		**cfs_rq;
cfs_rq            449 kernel/sched/sched.h extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
cfs_rq            456 kernel/sched/sched.h extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
cfs_rq            482 kernel/sched/sched.h 			     struct cfs_rq *prev, struct cfs_rq *next);
cfs_rq            485 kernel/sched/sched.h 			     struct cfs_rq *prev, struct cfs_rq *next) { }
cfs_rq            887 kernel/sched/sched.h 	struct cfs_rq		cfs;
cfs_rq           1013 kernel/sched/sched.h static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
cfs_rq           1015 kernel/sched/sched.h 	return cfs_rq->rq;
cfs_rq           1020 kernel/sched/sched.h static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
cfs_rq           1022 kernel/sched/sched.h 	return container_of(cfs_rq, struct rq, cfs);
cfs_rq           1509 kernel/sched/sched.h 	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
cfs_rq           1510 kernel/sched/sched.h 	p->se.cfs_rq = tg->cfs_rq[cpu];
cfs_rq           2178 kernel/sched/sched.h extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
cfs_rq           2179 kernel/sched/sched.h extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
cfs_rq           2187 kernel/sched/sched.h extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
cfs_rq           2199 kernel/sched/sched.h extern void init_cfs_rq(struct cfs_rq *cfs_rq);
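
The two rq_of() variants in sched.h differ because without CONFIG_FAIR_GROUP_SCHED the only cfs_rq on a CPU is the one embedded in struct rq, so the owning rq can be recovered positionally instead of through the ->rq back-pointer. A userspace demonstration of that container_of() idiom with stand-in types:

/* container_of() demo with simplified stand-in structs. */
#include <assert.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct cfs_rq { int nr_running; };
struct rq { int cpu; struct cfs_rq cfs; };

static struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

int main(void)
{
	struct rq rq = { .cpu = 3 };

	assert(rq_of(&rq.cfs) == &rq);	/* recovers the enclosing rq */
	assert(rq_of(&rq.cfs)->cpu == 3);
	return 0;
}
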