on_rq             450 include/linux/sched.h 	unsigned int			on_rq;
on_rq             486 include/linux/sched.h 	unsigned short			on_rq;
on_rq             668 include/linux/sched.h 	int				on_rq;
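
The three sched.h hits above are the declarations themselves: going by the field types, the unsigned int is struct sched_entity's flag, the unsigned short is struct sched_rt_entity's, and the int is struct task_struct's state word. The per-class entity fields are plain booleans (queued on their class runqueue or not), while task_struct::on_rq takes the TASK_ON_RQ_* values. A minimal model with every other member elided:

/*
 * Abridged model of the three on_rq fields; the field types match the
 * declarations above, all other members are elided. TASK_ON_RQ_* values
 * are the ones defined in kernel/sched/sched.h.
 */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

struct sched_entity {
	unsigned int		on_rq;	/* boolean: queued on a cfs_rq? */
};

struct sched_rt_entity {
	unsigned short		on_rq;	/* boolean: queued on an rt_rq? */
};

struct task_struct {
	int			on_rq;	/* 0, QUEUED or MIGRATING */
};
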
on_rq             605 kernel/rcu/update.c 	    !READ_ONCE(t->on_rq) ||
on_rq             694 kernel/rcu/update.c 			if (t != current && READ_ONCE(t->on_rq) &&
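
The two kernel/rcu/update.c hits are the Tasks-RCU quiescence test: only tasks observed on a runqueue are tracked as grace-period holdouts, and a holdout is released once on_rq reads 0, because a task that has voluntarily blocked cannot be running a Tasks-RCU read-side section. A self-contained sketch of that predicate (the helper name tasks_rcu_may_be_in_reader() is hypothetical; READ_ONCE() is stubbed so the snippet stands alone):

#include <stdbool.h>

#define READ_ONCE(x)	(*(volatile typeof(x) *)&(x))

struct task_struct { int on_rq; };

/* True while @t could still be inside a Tasks-RCU reader; mirrors the
 * holdout-selection test at update.c:694 (skip ourselves, track only
 * tasks that are on a runqueue). */
static bool tasks_rcu_may_be_in_reader(struct task_struct *t,
				       struct task_struct *cur)
{
	return t != cur && READ_ONCE(t->on_rq);
}
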
on_rq            1320 kernel/sched/core.c 	p->on_rq = TASK_ON_RQ_QUEUED;
on_rq            1325 kernel/sched/core.c 	p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;
on_rq            1489 kernel/sched/core.c 	WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
on_rq            1499 kernel/sched/core.c 	p->on_rq = TASK_ON_RQ_QUEUED;
on_rq            1710 kernel/sched/core.c 			!p->on_rq);
on_rq            1719 kernel/sched/core.c 		     (p->on_rq && !task_on_rq_migrating(p)));
on_rq            2579 kernel/sched/core.c 	if (p->on_rq && ttwu_remote(p, wake_flags))
on_rq            2680 kernel/sched/core.c 	p->on_rq			= 0;
on_rq            2682 kernel/sched/core.c 	p->se.on_rq			= 0;
on_rq            2707 kernel/sched/core.c 	p->rt.on_rq		= 0;
on_rq            6057 kernel/sched/core.c 	idle->on_rq = TASK_ON_RQ_QUEUED;
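
The kernel/sched/core.c hits cover the whole task_struct::on_rq life cycle: activate_task() marks the task TASK_ON_RQ_QUEUED (1320); deactivate_task() drops it to 0 on sleep but to TASK_ON_RQ_MIGRATING when the task is merely being moved (1325); move_queued_task() publishes MIGRATING with WRITE_ONCE() before requeueing on the destination runqueue (1489, 1499); two CONFIG_SCHED_DEBUG assertions check the field stays consistent across a CPU change (1710, 1719); try_to_wake_up() takes its fast path when the task never left a runqueue (2579); __sched_fork() zeroes all three fields (2680-2707); and init_idle() pins the idle task as permanently queued (6057). A runnable userspace model of that state machine, with locking and the class callbacks stripped out:

#include <assert.h>

#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2
#define DEQUEUE_SLEEP		0x01

struct task { int on_rq; };

/* Mirrors the assignment at core.c:1320. */
static void activate_task(struct task *p)
{
	p->on_rq = TASK_ON_RQ_QUEUED;
}

/* Mirrors core.c:1325: a sleeper leaves the runqueue entirely, a
 * migrating task stays logically runnable while between runqueues. */
static void deactivate_task(struct task *p, int flags)
{
	p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;
}

int main(void)
{
	struct task p = { .on_rq = 0 };		/* __sched_fork() state */

	activate_task(&p);			/* first wakeup */
	assert(p.on_rq == TASK_ON_RQ_QUEUED);

	deactivate_task(&p, 0);			/* start of a migration */
	assert(p.on_rq == TASK_ON_RQ_MIGRATING);
	activate_task(&p);			/* requeued on the new rq */

	deactivate_task(&p, DEQUEUE_SLEEP);	/* going to sleep */
	assert(p.on_rq == 0);
	return 0;
}

The MIGRATING state exists so a task can be detached from one runqueue and attached to another without ever looking blocked: wakeups and load tracking can tell "off the rq because asleep" from "between rqs".
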
on_rq            1508 kernel/sched/deadline.c 	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
on_rq            1549 kernel/sched/deadline.c 	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
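
In kernel/sched/deadline.c the field steers bandwidth accounting: add_rq_bw()/add_running_bw() run on enqueue, and the matching sub_*() calls on dequeue, only when the task is migrating in from another runqueue or passing through a DEQUEUE_SAVE/ENQUEUE_RESTORE cycle, so the per-rq sums follow the task across those transitions while ordinary sleeps and wakeups are accounted by a different path. The guard conditions as standalone predicates (flag values as in kernel/sched/sched.h; the bookkeeping itself is elided):

#define TASK_ON_RQ_MIGRATING	2
#define ENQUEUE_RESTORE		0x02
#define DEQUEUE_SAVE		0x02

/* Mirrors the condition at deadline.c:1508. */
static int dl_bw_add_on_enqueue(int on_rq, int flags)
{
	return on_rq == TASK_ON_RQ_MIGRATING || (flags & ENQUEUE_RESTORE);
}

/* Mirrors the condition at deadline.c:1549. */
static int dl_bw_sub_on_dequeue(int on_rq, int flags)
{
	return on_rq == TASK_ON_RQ_MIGRATING || (flags & DEQUEUE_SAVE);
}
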
on_rq             538 kernel/sched/fair.c 		if (curr->on_rq)
on_rq             694 kernel/sched/fair.c 	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
on_rq             703 kernel/sched/fair.c 		if (unlikely(!se->on_rq)) {
on_rq            2879 kernel/sched/fair.c 	if (se->on_rq) {
on_rq            2902 kernel/sched/fair.c 	if (se->on_rq) {
on_rq            3371 kernel/sched/fair.c 	if (se->on_rq) {
on_rq            4007 kernel/sched/fair.c 	se->on_rq = 1;
on_rq            4093 kernel/sched/fair.c 	se->on_rq = 0;
on_rq            4164 kernel/sched/fair.c 	if (se->on_rq) {
on_rq            4262 kernel/sched/fair.c 	if (prev->on_rq)
on_rq            4270 kernel/sched/fair.c 	if (prev->on_rq) {
on_rq            4518 kernel/sched/fair.c 		if (!se->on_rq)
on_rq            4586 kernel/sched/fair.c 		if (se->on_rq)
on_rq            5230 kernel/sched/fair.c 		if (se->on_rq)
on_rq            6566 kernel/sched/fair.c 	if (p->on_rq == TASK_ON_RQ_MIGRATING) {
on_rq            6665 kernel/sched/fair.c 		if (SCHED_WARN_ON(!se->on_rq))
on_rq            6677 kernel/sched/fair.c 		if (SCHED_WARN_ON(!se->on_rq))
on_rq            6768 kernel/sched/fair.c 	if (unlikely(!se->on_rq || curr == rq->idle))
on_rq            6809 kernel/sched/fair.c 			if (curr->on_rq)
on_rq            6974 kernel/sched/fair.c 	if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
on_rq           10063 kernel/sched/fair.c 	if (p->on_rq)
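
The kernel/sched/fair.c hits split into three kinds: enqueue_entity() sets the per-entity boolean (4007) and dequeue_entity() clears it (4093); sched_slice() counts the entity itself when it is not yet queued, via nr_running + !se->on_rq (694, 703); and everything else is a guard that skips vruntime/statistics/buddy bookkeeping for entities that are not on a cfs_rq, plus checks of the task-level field around migration (6566, 10063). A standalone model of the toggle-plus-guard pattern:

#include <stdio.h>

struct cfs_rq { unsigned int nr_running; };
struct sched_entity { unsigned int on_rq; };

/* Mirrors the tail of enqueue_entity() (fair.c:4007). */
static void enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	cfs_rq->nr_running++;
	se->on_rq = 1;
}

/* Mirrors dequeue_entity() (fair.c:4093). */
static void dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	se->on_rq = 0;
	cfs_rq->nr_running--;
}

/* The guard pattern shared by most of the other hits above. */
static void update_queued_stats(struct sched_entity *se)
{
	if (!se->on_rq)
		return;
	puts("entity is queued: account its wait/run time");
}

int main(void)
{
	struct cfs_rq rq = { 0 };
	struct sched_entity se = { 0 };

	update_queued_stats(&se);	/* skipped: not queued */
	enqueue_entity(&rq, &se);
	update_queued_stats(&se);	/* runs */
	dequeue_entity(&rq, &se);
	return 0;
}
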
on_rq             279 kernel/sched/pelt.c 	if (___update_load_sum(now, &se->avg, !!se->on_rq, !!se->on_rq,
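
The single kernel/sched/pelt.c hit is where the flag enters the load-tracking math: ___update_load_sum() takes separate load and runnable inputs, and for a sched_entity both are simply !!se->on_rq, so a dequeued entity stops accruing and its averages decay. A reduced sketch of only that input derivation:

/* Mirrors the argument computation at pelt.c:279; the geometric-series
 * accumulation done by ___update_load_sum() itself is not reproduced. */
static void pelt_inputs(unsigned int on_rq, int *load, int *runnable)
{
	*load     = !!on_rq;	/* contributes to the load sum */
	*runnable = !!on_rq;	/* contributes to the runnable sum */
}
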
on_rq             437 kernel/sched/rt.c 	return rt_se->on_rq;
on_rq            1260 kernel/sched/rt.c 	rt_se->on_rq = 1;
on_rq            1274 kernel/sched/rt.c 	rt_se->on_rq = 0;
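
kernel/sched/rt.c mirrors the fair-class pattern with struct sched_rt_entity's boolean: on_rt_rq() reports it (437), __enqueue_rt_entity() sets it (1260), __dequeue_rt_entity() clears it (1274). A standalone mirror with the priority-array list handling elided:

struct sched_rt_entity { unsigned short on_rq; };

/* Mirrors rt.c:437. */
static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->on_rq;
}

/* Mirrors rt.c:1260; the list_add onto the priority array is elided. */
static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
{
	rt_se->on_rq = 1;
}

/* Mirrors rt.c:1274; the list_del from the priority array is elided. */
static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	rt_se->on_rq = 0;
}
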
on_rq            1639 kernel/sched/sched.h 	return p->on_rq == TASK_ON_RQ_QUEUED;
on_rq            1644 kernel/sched/sched.h 	return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
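
The two kernel/sched/sched.h hits are the canonical accessors used throughout the tree. Note the asymmetry: only the MIGRATING test goes through READ_ONCE(), since that is the state legitimately observed without holding the task's runqueue lock; it pairs with the WRITE_ONCE() in move_queued_task() seen at core.c 1489. Reproduced with a local READ_ONCE() stand-in so the snippet compiles on its own:

#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2
#define READ_ONCE(x)		(*(volatile typeof(x) *)&(x))

struct task_struct { int on_rq; };

static inline int task_on_rq_queued(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_QUEUED;
}

static inline int task_on_rq_migrating(struct task_struct *p)
{
	return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
}
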
on_rq            1117 kernel/trace/trace_selftest.c 	while (p->on_rq) {
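
The final hit is in the wakeup-latency tracer selftest, which polls until the target thread has fully left the runqueue (on_rq == 0), i.e. is genuinely asleep, before firing the wakeup it intends to measure. A userspace analog of that wait loop:

#include <unistd.h>

struct task { volatile int on_rq; };

/* Mirrors the loop at trace_selftest.c:1117; the kernel sleeps with
 * msleep(100) between polls. */
static void wait_until_dequeued(struct task *p)
{
	while (p->on_rq)
		usleep(100 * 1000);
}
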