Searched refs:cfs_rq (Results 1 – 5 of 5) sorted by relevance
/linux-4.1.27/kernel/sched/
D  fair.c
      249  static inline struct rq *rq_of(struct cfs_rq *cfs_rq)                 in rq_of() argument
      251          return cfs_rq->rq;                                            in rq_of()
      269  static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)       in task_cfs_rq()
      271          return p->se.cfs_rq;                                          in task_cfs_rq()
      275  static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)       in cfs_rq_of()
      277          return se->cfs_rq;                                            in cfs_rq_of()
      281  static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)   in group_cfs_rq()
      286  static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
      289  static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)        in list_add_leaf_cfs_rq() argument
      291          if (!cfs_rq->on_list) {                                       in list_add_leaf_cfs_rq()
      [all …]
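The fair.c matches are the CONFIG_FAIR_GROUP_SCHED accessors: a cfs_rq keeps a back-pointer to the struct rq it belongs to, and a sched_entity keeps a pointer to the cfs_rq it is queued on. The minimal user-space sketch below mimics that back-pointer navigation; the toy_* types are made-up stand-ins for illustration, not the kernel structures.

/* Toy model of the back-pointer accessors listed above (fair.c 249-277).
 * toy_* types are illustrative stand-ins, not the kernel types. */
#include <stdio.h>

struct toy_rq;                                        /* per-CPU run queue            */
struct toy_cfs_rq { struct toy_rq *rq; };             /* CFS queue -> owning rq       */
struct toy_entity { struct toy_cfs_rq *cfs_rq; };     /* entity -> queue it is on     */
struct toy_task   { struct toy_entity se; };          /* task embeds a sched entity   */
struct toy_rq     { struct toy_cfs_rq cfs; int cpu; };

/* Mirrors rq_of(): a cfs_rq knows which rq it belongs to. */
static struct toy_rq *toy_rq_of(struct toy_cfs_rq *cfs_rq) { return cfs_rq->rq; }
/* Mirrors cfs_rq_of(): an entity knows which cfs_rq it is queued on. */
static struct toy_cfs_rq *toy_cfs_rq_of(struct toy_entity *se) { return se->cfs_rq; }
/* Mirrors task_cfs_rq(): go through the task's embedded entity. */
static struct toy_cfs_rq *toy_task_cfs_rq(struct toy_task *p) { return p->se.cfs_rq; }

int main(void)
{
        struct toy_rq rq = { .cpu = 0 };
        struct toy_task p;

        rq.cfs.rq = &rq;              /* wire the queue's back-pointer       */
        p.se.cfs_rq = &rq.cfs;        /* task queued on this CPU's cfs_rq    */

        printf("task runs on cpu %d\n", toy_rq_of(toy_task_cfs_rq(&p))->cpu);
        printf("same queue: %d\n", toy_cfs_rq_of(&p.se) == &rq.cfs);
        return 0;
}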
D  debug.c
      173  void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)            in print_cfs_rq() argument
      182          SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));    in print_cfs_rq()
      187                  SPLIT_NS(cfs_rq->exec_clock));                                   in print_cfs_rq()
      190          if (cfs_rq->rb_leftmost)                                                 in print_cfs_rq()
      191                  MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;          in print_cfs_rq()
      192          last = __pick_last_entity(cfs_rq);                                       in print_cfs_rq()
      195          min_vruntime = cfs_rq->min_vruntime;                                     in print_cfs_rq()
      211                  cfs_rq->nr_spread_over);                                         in print_cfs_rq()
      212          SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);       in print_cfs_rq()
      213          SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);           in print_cfs_rq()
      [all …]
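print_cfs_rq() is the per-queue section of /proc/sched_debug (available with CONFIG_SCHED_DEBUG=y); as the format string at line 182 shows, each section starts with a "cfs_rq[<cpu>]:<group path>" header followed by exec_clock, min_vruntime, nr_running, load and similar fields. A small reader, assuming /proc/sched_debug is present, that picks out just those header lines:

/* Print only the cfs_rq[...] header lines that print_cfs_rq() emits.
 * Assumes CONFIG_SCHED_DEBUG=y so /proc/sched_debug exists. */
#include <stdio.h>
#include <string.h>

int main(void)
{
        FILE *f = fopen("/proc/sched_debug", "r");
        char line[512];

        if (!f) {
                perror("/proc/sched_debug");
                return 1;
        }
        while (fgets(line, sizeof(line), f)) {
                /* Section headers look like "cfs_rq[0]:/some/group". */
                if (strncmp(line, "cfs_rq[", 7) == 0)
                        fputs(line, stdout);
        }
        fclose(f);
        return 0;
}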
D  sched.h
      205  struct cfs_rq;
      236          struct cfs_rq **cfs_rq;                                       member
      302  extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
      310  extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
      337  struct cfs_rq {                                                       struct
      584          struct cfs_rq cfs;
      919          p->se.cfs_rq = tg->cfs_rq[cpu];                               in set_task_rq()
     1668  extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
     1669  extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
     1674  extern void init_cfs_rq(struct cfs_rq *cfs_rq);
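Lines 236 and 919 are the group-scheduling plumbing: a task_group owns one cfs_rq per CPU (the **cfs_rq array), and set_task_rq() re-points a task's sched_entity at its group's queue for whichever CPU it lands on. A toy user-space model of that reattachment follows; the toy_* names are made up for illustration only.

/* Toy model of tg->cfs_rq[cpu] and the set_task_rq() reattachment
 * seen at sched.h line 919; the types here are illustrative only. */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct toy_cfs_rq { int cpu; };
struct toy_entity { struct toy_cfs_rq *cfs_rq; };
struct toy_task   { struct toy_entity se; };
struct toy_task_group { struct toy_cfs_rq *cfs_rq[NR_CPUS]; };  /* one queue per CPU */

/* Mirrors set_task_rq(): queue the task on its group's cfs_rq for @cpu. */
static void toy_set_task_rq(struct toy_task *p, struct toy_task_group *tg, int cpu)
{
        p->se.cfs_rq = tg->cfs_rq[cpu];
}

int main(void)
{
        struct toy_task_group tg;
        struct toy_task p;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                tg.cfs_rq[cpu] = malloc(sizeof(*tg.cfs_rq[cpu]));
                tg.cfs_rq[cpu]->cpu = cpu;
        }

        toy_set_task_rq(&p, &tg, 2);          /* task placed on CPU 2 */
        printf("task now queued on cpu %d\n", p.se.cfs_rq->cpu);

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                free(tg.cfs_rq[cpu]);
        return 0;
}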
D  core.c
     7124          root_task_group.cfs_rq = (struct cfs_rq **)ptr;               in sched_init()
     8130          struct cfs_rq *cfs_rq = tg->cfs_rq[i];                        in tg_set_cfs_bandwidth() local
     8131          struct rq *rq = cfs_rq->rq;                                   in tg_set_cfs_bandwidth()
     8134          cfs_rq->runtime_enabled = runtime_enabled;                    in tg_set_cfs_bandwidth()
     8135          cfs_rq->runtime_remaining = 0;                                in tg_set_cfs_bandwidth()
     8137          if (cfs_rq->throttled)                                        in tg_set_cfs_bandwidth()
     8138                  unthrottle_cfs_rq(cfs_rq);                            in tg_set_cfs_bandwidth()
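tg_set_cfs_bandwidth() walks every per-CPU cfs_rq of the group, updates its runtime accounting, and unthrottles it if needed; from user space the usual path into it is writing cpu.cfs_quota_us / cpu.cfs_period_us in a cgroup-v1 cpu controller directory. A sketch of that write, assuming the conventional /sys/fs/cgroup/cpu mount and an already-created group directory ("mygroup" is a placeholder name):

/* Set a CFS bandwidth limit on an existing cgroup-v1 cpu cgroup.
 * Paths assume the conventional /sys/fs/cgroup/cpu mount point and a
 * pre-created "mygroup" directory; these writes end up, via the cpu
 * controller, in tg_set_cfs_bandwidth() shown above. */
#include <stdio.h>

static int write_val(const char *path, long val)
{
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return -1;
        }
        fprintf(f, "%ld\n", val);
        fclose(f);
        return 0;
}

int main(void)
{
        /* 50 ms of CPU time every 100 ms period => half a CPU for the group. */
        write_val("/sys/fs/cgroup/cpu/mygroup/cpu.cfs_period_us", 100000);
        write_val("/sys/fs/cgroup/cpu/mygroup/cpu.cfs_quota_us",   50000);
        return 0;
}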
/linux-4.1.27/include/linux/
D  sched.h
      183  struct cfs_rq;
      189  print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
     1201          struct cfs_rq *cfs_rq;                                        member
     1203          struct cfs_rq *my_q;
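In include/linux/sched.h the two pointers inside struct sched_entity play different roles: cfs_rq is the queue this entity is queued on, while my_q is the queue a group entity owns (NULL for a plain task entity). A toy illustration of that distinction, again with made-up stand-in types:

/* Toy illustration of se->cfs_rq ("queued on") vs se->my_q ("owns").
 * A group entity owns a child queue; a task entity has my_q == NULL. */
#include <stdio.h>

struct toy_cfs_rq { const char *name; };
struct toy_entity {
        struct toy_cfs_rq *cfs_rq;    /* queue this entity is queued on       */
        struct toy_cfs_rq *my_q;      /* queue this entity owns, if a group   */
};

static void describe(const char *who, const struct toy_entity *se)
{
        printf("%s: queued on %s, owns %s\n", who, se->cfs_rq->name,
               se->my_q ? se->my_q->name : "nothing (task entity)");
}

int main(void)
{
        struct toy_cfs_rq root  = { "root cfs_rq"  };
        struct toy_cfs_rq child = { "group cfs_rq" };
        struct toy_entity group_se = { .cfs_rq = &root,  .my_q = &child };
        struct toy_entity task_se  = { .cfs_rq = &child, .my_q = NULL   };

        describe("group entity", &group_se);
        describe("task entity ", &task_se);
        return 0;
}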