Searched refs:sched_entity (Results 1 - 5 of 5) sorted by relevance

/linux-4.1.27/kernel/sched/
fair.c
257 static inline struct task_struct *task_of(struct sched_entity *se) task_of()
275 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) cfs_rq_of()
281 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) group_cfs_rq()
327 is_same_group(struct sched_entity *se, struct sched_entity *pse) is_same_group()
335 static inline struct sched_entity *parent_entity(struct sched_entity *se) parent_entity()
341 find_matching_se(struct sched_entity **se, struct sched_entity **pse) find_matching_se()
374 static inline struct task_struct *task_of(struct sched_entity *se) task_of()
394 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) cfs_rq_of()
403 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) group_cfs_rq()
419 static inline struct sched_entity *parent_entity(struct sched_entity *se) parent_entity()
425 find_matching_se(struct sched_entity **se, struct sched_entity **pse) find_matching_se()
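
The two sets of task_of()/cfs_rq_of()/parent_entity() hits above are the CONFIG_FAIR_GROUP_SCHED and non-group variants of the same navigation helpers. task_of() works because a task's sched_entity is embedded directly in its task_struct, so the owning task can be recovered with container_of(). Below is a minimal user-space sketch of that pattern; the structures and field names are simplified stand-ins, not the kernel definitions.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures; illustration only. */
struct sched_entity {
	unsigned long long vruntime;
};

struct task_struct {
	int pid;
	struct sched_entity se;	/* embedded, as in the real task_struct */
};

/* container_of(): recover the enclosing struct from a member pointer. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Models the idea behind task_of(): map an se back to its task. */
static struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

int main(void)
{
	struct task_struct t = { .pid = 42, .se = { .vruntime = 1000 } };

	printf("pid recovered from &t.se: %d\n", task_of(&t.se)->pid);
	return 0;
}
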
456 static inline int entity_before(struct sched_entity *a, entity_before()
457 struct sched_entity *b) entity_before()
470 struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost, update_min_vruntime()
471 struct sched_entity, update_min_vruntime()
491 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) __enqueue_entity()
495 struct sched_entity *entry; __enqueue_entity()
503 entry = rb_entry(parent, struct sched_entity, run_node); __enqueue_entity()
527 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) __dequeue_entity()
539 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq) __pick_first_entity()
546 return rb_entry(left, struct sched_entity, run_node); __pick_first_entity()
549 static struct sched_entity *__pick_next_entity(struct sched_entity *se) __pick_next_entity()
556 return rb_entry(next, struct sched_entity, run_node); __pick_next_entity()
560 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) __pick_last_entity()
567 return rb_entry(last, struct sched_entity, run_node); __pick_last_entity()
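
The __enqueue_entity()/__pick_first_entity() group of hits covers the red-black tree that keys runnable entities by vruntime, with the leftmost node cached (cfs_rq->rb_leftmost) so the next entity can be picked cheaply. A small self-contained sketch of the ordering predicate follows, using a signed difference as entity_before() does so that u64 vruntime wraparound still orders correctly; the struct is a simplified stand-in.

#include <stdint.h>
#include <stdio.h>

/* Minimal model of the vruntime ordering used by the CFS rbtree. */
struct sched_entity {
	uint64_t vruntime;
};

/*
 * Mirrors entity_before(): the signed difference tolerates u64
 * wraparound of vruntime, which a plain '<' comparison would not.
 */
static int entity_before(const struct sched_entity *a,
			 const struct sched_entity *b)
{
	return (int64_t)(a->vruntime - b->vruntime) < 0;
}

int main(void)
{
	struct sched_entity near_wrap = { .vruntime = UINT64_MAX - 10 };
	struct sched_entity wrapped   = { .vruntime = 5 };

	/* near_wrap still sorts before wrapped despite the wraparound. */
	printf("%d %d\n", entity_before(&near_wrap, &wrapped),
	       entity_before(&wrapped, &near_wrap));	/* prints "1 0" */
	return 0;
}
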
601 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se) calc_delta_fair()
636 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) sched_slice()
663 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se) sched_vslice()
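
calc_delta_fair(), sched_slice() and sched_vslice() provide the weighting that makes the vruntime ordering fair: runtime is scaled relative to the entity's weight before being charged to vruntime, so heavier entities advance more slowly and get more CPU. A simplified model of that scaling is shown below; the kernel uses the fixed-point inverse weights in __calc_delta(), while this sketch uses plain 64-bit division and assumes NICE_0_LOAD is 1024.

#include <stdint.h>
#include <stdio.h>

#define NICE_0_LOAD	1024ULL	/* weight of a nice-0 task in this model */

/*
 * Simplified model of calc_delta_fair(): wall-clock runtime is scaled by
 * NICE_0_LOAD / weight before being added to vruntime, so heavier
 * (higher-priority) entities accumulate vruntime more slowly and stay
 * toward the left of the tree longer.
 */
static uint64_t calc_delta_fair(uint64_t delta_exec_ns, uint64_t weight)
{
	return delta_exec_ns * NICE_0_LOAD / weight;
}

int main(void)
{
	/* 10 ms of CPU time charged to a nice-0 and to a heavier entity. */
	printf("weight 1024: +%llu ns vruntime\n",
	       (unsigned long long)calc_delta_fair(10000000ULL, 1024));
	printf("weight 2048: +%llu ns vruntime\n",
	       (unsigned long long)calc_delta_fair(10000000ULL, 2048));
	return 0;
}
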
672 static inline void __update_task_entity_contrib(struct sched_entity *se);
673 static inline void __update_task_entity_utilization(struct sched_entity *se);
697 struct sched_entity *curr = cfs_rq->curr; update_curr()
736 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_wait_start()
744 static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_enqueue()
755 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_wait_end()
772 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_dequeue()
786 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_curr_start()
2290 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) account_entity_enqueue()
2307 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) account_entity_dequeue()
2361 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, reweight_entity()
2382 struct sched_entity *se; update_cfs_shares()
2604 static inline u64 __synchronize_entity_decay(struct sched_entity *se) __synchronize_entity_decay()
2661 static inline void __update_group_entity_contrib(struct sched_entity *se) __update_group_entity_contrib()
2714 static inline void __update_group_entity_contrib(struct sched_entity *se) {} update_rq_runnable_avg()
2718 static inline void __update_task_entity_contrib(struct sched_entity *se) __update_task_entity_contrib()
2729 static long __update_entity_load_avg_contrib(struct sched_entity *se) __update_entity_load_avg_contrib()
2744 static inline void __update_task_entity_utilization(struct sched_entity *se) __update_task_entity_utilization()
2754 static long __update_entity_utilization_avg_contrib(struct sched_entity *se) __update_entity_utilization_avg_contrib()
2778 /* Update a sched_entity's runnable average */ update_entity_load_avg()
2779 static inline void update_entity_load_avg(struct sched_entity *se, update_entity_load_avg()
2845 struct sched_entity *se, enqueue_entity_load_avg()
2897 struct sched_entity *se, dequeue_entity_load_avg()
2936 static inline void update_entity_load_avg(struct sched_entity *se, update_entity_load_avg()
2940 struct sched_entity *se, enqueue_entity_load_avg()
2943 struct sched_entity *se, dequeue_entity_load_avg()
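
The __update_entity_load_avg_contrib()/update_entity_load_avg() hits (and their !CONFIG_SMP stubs just above) belong to the per-entity load tracking code: runnable time is accumulated in roughly 1 ms periods and decayed geometrically with a 32-period half-life. The toy floating-point model below shows the shape of that decayed sum; the kernel uses precomputed fixed-point tables, and only the 1024-per-period contribution and the 32-period half-life are taken from the scheme, everything else is illustrative.

#include <math.h>
#include <stdio.h>

/*
 * Toy model of the decayed sums behind runnable_avg_sum and
 * load_avg_contrib: each ~1 ms period's runnable time is added to a sum
 * that decays geometrically with a half-life of 32 periods (y^32 = 0.5).
 */
int main(void)
{
	const double y = pow(0.5, 1.0 / 32.0);	/* per-period decay factor */
	double runnable_avg_sum = 0.0;
	int period;

	/* An entity that stays runnable for 200 consecutive periods... */
	for (period = 0; period < 200; period++)
		runnable_avg_sum = runnable_avg_sum * y + 1024.0;

	/* ...approaches the geometric-series limit 1024 / (1 - y). */
	printf("sum after 200 periods: %.0f (limit %.0f)\n",
	       runnable_avg_sum, 1024.0 / (1.0 - y));
	return 0;
}
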
2955 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) enqueue_sleeper()
3017 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) check_spread()
3031 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) place_entity()
3065 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) enqueue_entity()
3099 static void __clear_buddies_last(struct sched_entity *se) __clear_buddies_last()
3110 static void __clear_buddies_next(struct sched_entity *se) __clear_buddies_next()
3121 static void __clear_buddies_skip(struct sched_entity *se) __clear_buddies_skip()
3132 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) clear_buddies()
3147 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) dequeue_entity()
3195 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) check_preempt_tick()
3198 struct sched_entity *se; check_preempt_tick()
3232 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) set_next_entity()
3263 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
3272 static struct sched_entity * pick_next_entity()
3273 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) pick_next_entity()
3275 struct sched_entity *left = __pick_first_entity(cfs_rq); pick_next_entity()
3276 struct sched_entity *se; pick_next_entity()
3292 struct sched_entity *second; pick_next_entity()
3325 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) put_prev_entity()
3349 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) entity_tick()
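
check_preempt_tick(), driven from entity_tick(), reschedules the running entity once the CPU time consumed since it was picked exceeds the slice computed by sched_slice(). A stripped-down model of that core test follows; the nanosecond values are invented for illustration, and the real function additionally checks sysctl_sched_min_granularity and the leftmost entity's vruntime before deciding.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Stripped-down model of the main test in check_preempt_tick(): the
 * running entity is preempted once the CPU time used since it was
 * picked exceeds its ideal slice for this scheduling period.
 */
static bool slice_exhausted(uint64_t sum_exec_runtime,
			    uint64_t prev_sum_exec_runtime,
			    uint64_t ideal_runtime)
{
	return sum_exec_runtime - prev_sum_exec_runtime > ideal_runtime;
}

int main(void)
{
	/* 6 ms used against a 4 ms slice -> resched; 2 ms used -> keep. */
	printf("%d %d\n",
	       slice_exhausted(16000000, 10000000, 4000000),
	       slice_exhausted(12000000, 10000000, 4000000));
	return 0;
}
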
3635 struct sched_entity *se; throttle_cfs_rq()
3680 struct sched_entity *se; unthrottle_cfs_rq()
4171 struct sched_entity *se = &p->se; hrtick_start_fair()
4225 struct sched_entity *se = &p->se; enqueue_task_fair()
4264 static void set_next_buddy(struct sched_entity *se);
4274 struct sched_entity *se = &p->se; dequeue_task_fair()
4407 struct sched_entity *se = &p->se; task_waking_fair()
4480 struct sched_entity *se = tg->se[cpu]; effective_load()
4916 struct sched_entity *se = &p->se; migrate_task_rq_fair()
4937 wakeup_gran(struct sched_entity *curr, struct sched_entity *se) wakeup_gran()
4972 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) wakeup_preempt_entity()
4986 static void set_last_buddy(struct sched_entity *se) set_last_buddy()
4995 static void set_next_buddy(struct sched_entity *se) set_next_buddy()
5004 static void set_skip_buddy(struct sched_entity *se) set_skip_buddy()
5016 struct sched_entity *se = &curr->se, *pse = &p->se; check_preempt_wakeup()
5100 struct sched_entity *se; pick_next_task_fair()
5121 struct sched_entity *curr = cfs_rq->curr; pick_next_task_fair()
5157 struct sched_entity *pse = &prev->se; pick_next_task_fair()
5224 struct sched_entity *se = &prev->se; put_prev_task_fair()
5242 struct sched_entity *se = &curr->se; yield_task_fair()
5271 struct sched_entity *se = &p->se; yield_to_task_fair()
5818 struct sched_entity *se = tg->se[cpu]; __update_blocked_averages_cpu()
5878 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; update_cfs_rq_h_load()
7817 struct sched_entity *se = &curr->se; task_tick_fair()
7838 struct sched_entity *se = &p->se, *curr; task_fork_fair()
7904 struct sched_entity *se = &p->se; switched_from_fair()
7944 struct sched_entity *se = &p->se; switched_to_fair()
7972 struct sched_entity *se = &rq->curr->se; set_curr_task_fair()
7999 struct sched_entity *se = &p->se; task_move_group_fair()
8069 struct sched_entity *se; alloc_fair_sched_group()
8089 se = kzalloc_node(sizeof(struct sched_entity), for_each_possible_cpu()
8124 struct sched_entity *se, int cpu, init_tg_cfs_entry()
8125 struct sched_entity *parent) init_tg_cfs_entry()
8176 struct sched_entity *se; for_each_possible_cpu()
8209 struct sched_entity *se = &task->se; get_rr_interval_fair()
sched.h
234 struct sched_entity **se;
303 struct sched_entity *se, int cpu,
304 struct sched_entity *parent);
354 struct sched_entity *curr, *next, *last, *skip;
391 struct sched_entity *h_load_next;
1668 extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
1669 extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
debug.c
64 struct sched_entity *se = tg->se[cpu]; print_cfs_group_stats()
178 struct sched_entity *last; print_cfs_rq()
core.c
7121 root_task_group.se = (struct sched_entity **)ptr; sched_init()
/linux-4.1.27/include/linux/
sched.h
1124 * sched_entity is running on a CPU. It is based on running_avg_sum
1126 * load_avg_contrib described the amount of time that a sched_entity
1135 * running_avg_sum reflects the time that the sched_entity is
1137 * runnable_avg_sum represents the amount of time a sched_entity is on
1180 struct sched_entity { struct
1199 struct sched_entity *parent;
1314 struct sched_entity se;
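
The last two hits show the task side of the picture: every task_struct embeds a sched_entity, and with CONFIG_FAIR_GROUP_SCHED the entity also carries a parent pointer linking it to the group entity one level up. Scheduler paths such as enqueue_task_fair() walk that chain with the fair.c macro for_each_sched_entity(); the sketch below is a simplified stand-in for that walk, with illustrative names and a hand-written macro body rather than the kernel's.

#include <stdio.h>

/*
 * Simplified stand-in for the group-scheduling hierarchy: a task's
 * entity points at the group entity one level up via ->parent (NULL at
 * the root), and operations walk the chain bottom-up.
 */
struct sched_entity {
	const char *name;
	struct sched_entity *parent;
};

#define for_each_sched_entity(se) \
	for (; (se); (se) = (se)->parent)

int main(void)
{
	struct sched_entity root_group  = { "root task group", NULL };
	struct sched_entity child_group = { "child task group", &root_group };
	struct sched_entity task_se     = { "task", &child_group };

	struct sched_entity *se = &task_se;
	for_each_sched_entity(se)
		printf("enqueue at level: %s\n", se->name);
	return 0;
}
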

Completed in 395 milliseconds