Searched refs:decay_count (Results 1 - 4 of 4) sorted by relevance

/linux-4.1.27/kernel/sched/
debug.c
   101  P(se->avg.decay_count);  [print_cfs_group_stats()]
   647  P(se.avg.decay_count);  [proc_sched_show_task()]
fair.c
  2609  decays -= se->avg.decay_count;  [__synchronize_entity_decay()]
  2610  se->avg.decay_count = 0;  [__synchronize_entity_decay()]
  2849  * We track migrations using entity decay_count <= 0, on a wake-up  [enqueue_entity_load_avg()]
  2853  * Newly forked tasks are enqueued with se->avg.decay_count == 0, they  [enqueue_entity_load_avg()]
  2857  if (unlikely(se->avg.decay_count <= 0)) {  [enqueue_entity_load_avg()]
  2859  if (se->avg.decay_count) {  [enqueue_entity_load_avg()]
  2868  se->avg.last_runnable_update -= (-se->avg.decay_count)  [enqueue_entity_load_avg()]
  2872  se->avg.decay_count = 0;  [enqueue_entity_load_avg()]
  2908  se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);  [dequeue_entity_load_avg()]
  2909  } /* migrations, e.g. sleep=0 leave decay_count == 0 */  [dequeue_entity_load_avg()]
  4925  if (se->avg.decay_count) {  [migrate_task_rq_fair()]
  4926  se->avg.decay_count = -__synchronize_entity_decay(se);  [migrate_task_rq_fair()]
  7928  * and ensure we don't carry in an old decay_count if we  [switched_from_fair()]
  7931  if (se->avg.decay_count) {  [switched_from_fair()]
  8043  se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);  [task_move_group_fair()]
core.c
  1807  p->se.avg.decay_count = 0;  [__sched_fork()]
/linux-4.1.27/include/linux/
sched.h
  1121  s64 decay_count;  [member of struct sched_avg]
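
The last hit declares decay_count as a signed 64-bit member of struct sched_avg. Below is a minimal sketch of that field and the sign convention the fair.c comments above describe; the struct name and the neighbouring fields are trimmed and paraphrased, not a verbatim copy of the 4.1 header.

#include <linux/types.h>        /* u64, s64 */

/*
 * Sketch of the per-entity load-tracking state around decay_count
 * (the real definition is struct sched_avg, include/linux/sched.h:1121).
 */
struct sched_avg_sketch {
        u64 last_runnable_update;       /* clock of the last decay/update step */

        /*
         * Sign convention (cf. the fair.c comments above):
         *   > 0  snapshot of cfs_rq->decay_counter taken at dequeue,
         *        i.e. the entity is sleeping on this cfs_rq
         *   == 0 newly forked (core.c:1807) or already synchronized
         *        and on-rq (fair.c:2872)
         *   < 0  wake-up migration; the magnitude is the number of decay
         *        periods still to be folded in at enqueue (fair.c:4926)
         */
        s64 decay_count;

        unsigned long load_avg_contrib; /* this entity's decayed load */
};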

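The fair.c hits at 2609-2610, 2908, 4925-4926 and 8043 outline the lifecycle: dequeue and group moves snapshot the per-cfs_rq decay_counter into decay_count, and the entity later folds in the decay periods it missed. A hedged sketch of that catch-up step follows; cfs_rq_of(), decay_load() and the atomic decay_counter are assumed to behave as in v4.1's fair.c, and the body is illustrative rather than a verbatim copy of __synchronize_entity_decay().

/*
 * Illustrative catch-up step (cf. fair.c:2609-2610): fold the decay
 * periods the owning cfs_rq went through since this entity's last
 * snapshot, then mark the entity as synchronized.
 */
static u64 synchronize_entity_decay_sketch(struct sched_entity *se)
{
        struct cfs_rq *cfs_rq = cfs_rq_of(se);          /* fair.c helper */
        u64 decays = atomic64_read(&cfs_rq->decay_counter);

        decays -= se->avg.decay_count;  /* periods missed while off the rq */
        se->avg.decay_count = 0;        /* synchronized again */
        if (!decays)
                return 0;

        /* decay_load(x, n): scale x by y^n, with y^32 == 1/2 (assumed helper) */
        se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);

        return decays;
}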
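On the enqueue side (fair.c:2857-2872), a non-positive decay_count is read back per that convention: zero means a fresh fork or an already-synchronized entity, negative means a wake-up migration whose missed decay is approximated by shifting the averaging window. A rough sketch of that branch; the 20-bit shift (roughly one 1 ms decay period in nanoseconds), rq_of(), rq_clock_task() and update_entity_load_avg() are assumptions about the surrounding v4.1 code, not quoted from it.

/*
 * Rough sketch of the enqueue-side handling of decay_count
 * (cf. fair.c:2857-2872); assumptions as noted above.
 */
static void enqueue_decay_sketch(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        if (unlikely(se->avg.decay_count <= 0)) {
                /* Fork or migration: restart the averaging window locally. */
                se->avg.last_runnable_update = rq_clock_task(rq_of(cfs_rq));
                if (se->avg.decay_count) {
                        /*
                         * Wake-up migration: clock_task is not comparable
                         * across CPUs, so approximate the time slept by
                         * shifting the window back by the decay periods
                         * recorded (negated) in migrate_task_rq_fair().
                         */
                        se->avg.last_runnable_update -=
                                (-se->avg.decay_count) << 20;   /* assumed shift */
                        update_entity_load_avg(se, 0);
                        se->avg.decay_count = 0;        /* synchronized, on-rq */
                }
        } else {
                /* Woke on the same cfs_rq: just catch up the missed decay. */
                synchronize_entity_decay_sketch(se);
        }
}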