Searched refs:rt_rq (Results 1 - 7 of 7) sorted by relevance

/linux-4.1.27/kernel/sched/
rt.c
67 void init_rt_rq(struct rt_rq *rt_rq) init_rt_rq() argument
72 array = &rt_rq->active; init_rt_rq()
81 rt_rq->highest_prio.curr = MAX_RT_PRIO; init_rt_rq()
82 rt_rq->highest_prio.next = MAX_RT_PRIO; init_rt_rq()
83 rt_rq->rt_nr_migratory = 0; init_rt_rq()
84 rt_rq->overloaded = 0; init_rt_rq()
85 plist_head_init(&rt_rq->pushable_tasks); init_rt_rq()
88 rt_rq->push_flags = 0; init_rt_rq()
89 rt_rq->push_cpu = nr_cpu_ids; init_rt_rq()
90 raw_spin_lock_init(&rt_rq->push_lock); init_rt_rq()
91 init_irq_work(&rt_rq->push_work, push_irq_work_func); init_rt_rq()
95 rt_rq->rt_queued = 0; init_rt_rq()
97 rt_rq->rt_time = 0; init_rt_rq()
98 rt_rq->rt_throttled = 0; init_rt_rq()
99 rt_rq->rt_runtime = 0; init_rt_rq()
100 raw_spin_lock_init(&rt_rq->rt_runtime_lock); init_rt_rq()
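
The matches at rt.c:67-100 come from init_rt_rq(), which resets the priority array, the SMP push/overload bookkeeping, and the bandwidth-accounting fields of a runqueue's rt_rq. Below is a minimal user-space model of that reset; the toy_* names and the field subset are illustrative stand-ins, not the kernel definitions.

#include <stdint.h>
#include <stdio.h>

#define MAX_RT_PRIO 100    /* kernel value: RT priorities are 0..99 */

/* Illustrative stand-in for the kernel's struct rt_rq, limited to the
 * fields that init_rt_rq() at rt.c:67-100 touches. */
struct toy_rt_rq {
    int      highest_prio_curr;   /* best queued RT priority on this CPU       */
    int      highest_prio_next;   /* next best, used for push decisions        */
    unsigned rt_nr_migratory;     /* queued tasks allowed on more than one CPU */
    int      overloaded;          /* advertised to other CPUs for pulling      */
    int      rt_queued;           /* already counted into rq->nr_running?      */
    uint64_t rt_time;             /* runtime consumed in the current period    */
    int      rt_throttled;        /* budget exhausted, queue parked            */
    uint64_t rt_runtime;          /* budget for one period, in nanoseconds     */
};

/* Everything starts idle, unthrottled and without budget until the
 * bandwidth code hands some out, mirroring init_rt_rq(). */
void toy_init_rt_rq(struct toy_rt_rq *rt_rq)
{
    rt_rq->highest_prio_curr = MAX_RT_PRIO;
    rt_rq->highest_prio_next = MAX_RT_PRIO;
    rt_rq->rt_nr_migratory = 0;
    rt_rq->overloaded = 0;
    rt_rq->rt_queued = 0;
    rt_rq->rt_time = 0;
    rt_rq->rt_throttled = 0;
    rt_rq->rt_runtime = 0;
}

int main(void)
{
    struct toy_rt_rq rq;
    toy_init_rt_rq(&rq);
    printf("prio=%d throttled=%d\n", rq.highest_prio_curr, rq.rt_throttled);
    return 0;
}
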
119 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) rq_of_rt_rq() argument
121 return rt_rq->rq; rq_of_rt_rq()
124 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) rt_rq_of_se()
126 return rt_se->rt_rq; rt_rq_of_se()
131 struct rt_rq *rt_rq = rt_se->rt_rq; rq_of_rt_se() local
133 return rt_rq->rq; rq_of_rt_se()
144 if (tg->rt_rq) for_each_possible_cpu()
145 kfree(tg->rt_rq[i]); for_each_possible_cpu()
150 kfree(tg->rt_rq);
154 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, init_tg_rt_entry() argument
160 rt_rq->highest_prio.curr = MAX_RT_PRIO; init_tg_rt_entry()
161 rt_rq->rt_nr_boosted = 0; init_tg_rt_entry()
162 rt_rq->rq = rq; init_tg_rt_entry()
163 rt_rq->tg = tg; init_tg_rt_entry()
165 tg->rt_rq[cpu] = rt_rq; init_tg_rt_entry()
172 rt_se->rt_rq = &rq->rt; init_tg_rt_entry()
174 rt_se->rt_rq = parent->my_q; init_tg_rt_entry()
176 rt_se->my_q = rt_rq; init_tg_rt_entry()
183 struct rt_rq *rt_rq; alloc_rt_sched_group() local
187 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL); alloc_rt_sched_group()
188 if (!tg->rt_rq) alloc_rt_sched_group()
198 rt_rq = kzalloc_node(sizeof(struct rt_rq), for_each_possible_cpu()
200 if (!rt_rq) for_each_possible_cpu()
208 init_rt_rq(rt_rq); for_each_possible_cpu()
209 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime; for_each_possible_cpu()
210 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]); for_each_possible_cpu()
216 kfree(rt_rq);
230 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) rq_of_rt_rq() argument
232 return container_of(rt_rq, struct rq, rt); rq_of_rt_rq()
242 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) rt_rq_of_se()
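
Two builds of rq_of_rt_rq() show up above: with CONFIG_RT_GROUP_SCHED each rt_rq carries an explicit back-pointer to its runqueue (line 121), while without group scheduling the rt_rq is embedded in struct rq and container_of() recovers the owner (line 232). The self-contained sketch below illustrates only the container_of pattern; the toy_* types are stand-ins for the kernel structures.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_rt_rq { int rt_nr_running; };

struct toy_rq {
    int cpu;
    struct toy_rt_rq rt;    /* embedded, like rq->rt in the kernel */
};

/* Same idea as the non-group rq_of_rt_rq() at rt.c:230-232: step back from
 * the embedded member to the enclosing runqueue. */
struct toy_rq *toy_rq_of_rt_rq(struct toy_rt_rq *rt_rq)
{
    return container_of(rt_rq, struct toy_rq, rt);
}

int main(void)
{
    struct toy_rq rq = { .cpu = 3 };
    printf("cpu=%d\n", toy_rq_of_rt_rq(&rq.rt)->cpu);   /* prints cpu=3 */
    return 0;
}
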
301 static void update_rt_migration(struct rt_rq *rt_rq) update_rt_migration() argument
303 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) { update_rt_migration()
304 if (!rt_rq->overloaded) { update_rt_migration()
305 rt_set_overload(rq_of_rt_rq(rt_rq)); update_rt_migration()
306 rt_rq->overloaded = 1; update_rt_migration()
308 } else if (rt_rq->overloaded) { update_rt_migration()
309 rt_clear_overload(rq_of_rt_rq(rt_rq)); update_rt_migration()
310 rt_rq->overloaded = 0; update_rt_migration()
314 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) inc_rt_migration() argument
322 rt_rq = &rq_of_rt_rq(rt_rq)->rt; inc_rt_migration()
324 rt_rq->rt_nr_total++; inc_rt_migration()
326 rt_rq->rt_nr_migratory++; inc_rt_migration()
328 update_rt_migration(rt_rq); inc_rt_migration()
331 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) dec_rt_migration() argument
339 rt_rq = &rq_of_rt_rq(rt_rq)->rt; dec_rt_migration()
341 rt_rq->rt_nr_total--; dec_rt_migration()
343 rt_rq->rt_nr_migratory--; dec_rt_migration()
345 update_rt_migration(rt_rq); dec_rt_migration()
397 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) inc_rt_migration() argument
402 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) dec_rt_migration() argument
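
update_rt_migration() (lines 301-310) drives the root domain's RT overload state from two counters maintained by inc_rt_migration()/dec_rt_migration(): a CPU counts as overloaded once more than one RT task is queued and at least one of them may run elsewhere. A compact model of that rule follows, with rt_set_overload()/rt_clear_overload() reduced to a flag and the "only act on a state change" optimization dropped.

#include <stdio.h>

struct toy_rt_rq {
    unsigned rt_nr_total;       /* all RT tasks queued on this CPU           */
    unsigned rt_nr_migratory;   /* those allowed to run on more than one CPU */
    int      overloaded;        /* other CPUs may try to pull from here      */
};

/* Same condition as update_rt_migration() at rt.c:301-310; the kernel also
 * calls rt_set_overload()/rt_clear_overload() on the root domain. */
void toy_update_rt_migration(struct toy_rt_rq *rt_rq)
{
    if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1)
        rt_rq->overloaded = 1;
    else
        rt_rq->overloaded = 0;
}

int main(void)
{
    struct toy_rt_rq rq = { .rt_nr_total = 2, .rt_nr_migratory = 1 };
    toy_update_rt_migration(&rq);
    printf("overloaded=%d\n", rq.overloaded);   /* 1: something worth pulling */
    return 0;
}
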
421 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
422 static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
431 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq) sched_rt_runtime() argument
433 if (!rt_rq->tg) sched_rt_runtime()
436 return rt_rq->rt_runtime; sched_rt_runtime()
439 static inline u64 sched_rt_period(struct rt_rq *rt_rq) sched_rt_period() argument
441 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period); sched_rt_period()
459 #define for_each_rt_rq(rt_rq, iter, rq) \
462 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
467 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) group_rt_rq()
475 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) sched_rt_rq_enqueue() argument
477 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; sched_rt_rq_enqueue()
478 struct rq *rq = rq_of_rt_rq(rt_rq); sched_rt_rq_enqueue()
483 rt_se = rt_rq->tg->rt_se[cpu]; sched_rt_rq_enqueue()
485 if (rt_rq->rt_nr_running) { sched_rt_rq_enqueue()
487 enqueue_top_rt_rq(rt_rq); sched_rt_rq_enqueue()
491 if (rt_rq->highest_prio.curr < curr->prio) sched_rt_rq_enqueue()
496 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq) sched_rt_rq_dequeue() argument
499 int cpu = cpu_of(rq_of_rt_rq(rt_rq)); sched_rt_rq_dequeue()
501 rt_se = rt_rq->tg->rt_se[cpu]; sched_rt_rq_dequeue()
504 dequeue_top_rt_rq(rt_rq); sched_rt_rq_dequeue()
509 static inline int rt_rq_throttled(struct rt_rq *rt_rq) rt_rq_throttled() argument
511 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted; rt_rq_throttled()
516 struct rt_rq *rt_rq = group_rt_rq(rt_se); rt_se_boosted() local
519 if (rt_rq) rt_se_boosted()
520 return !!rt_rq->rt_nr_boosted; rt_se_boosted()
539 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) sched_rt_period_rt_rq()
541 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu]; sched_rt_period_rt_rq()
544 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) sched_rt_bandwidth() argument
546 return &rt_rq->tg->rt_bandwidth; sched_rt_bandwidth()
551 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq) sched_rt_runtime() argument
553 return rt_rq->rt_runtime; sched_rt_runtime()
556 static inline u64 sched_rt_period(struct rt_rq *rt_rq) sched_rt_period() argument
561 typedef struct rt_rq *rt_rq_iter_t;
563 #define for_each_rt_rq(rt_rq, iter, rq) \
564 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
569 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) group_rt_rq()
574 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq) sched_rt_rq_enqueue() argument
576 struct rq *rq = rq_of_rt_rq(rt_rq); sched_rt_rq_enqueue()
578 if (!rt_rq->rt_nr_running) sched_rt_rq_enqueue()
581 enqueue_top_rt_rq(rt_rq); sched_rt_rq_enqueue()
585 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq) sched_rt_rq_dequeue() argument
587 dequeue_top_rt_rq(rt_rq); sched_rt_rq_dequeue()
590 static inline int rt_rq_throttled(struct rt_rq *rt_rq) rt_rq_throttled() argument
592 return rt_rq->rt_throttled; rt_rq_throttled()
601 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) sched_rt_period_rt_rq()
606 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) sched_rt_bandwidth() argument
613 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq) sched_rt_bandwidth_account() argument
615 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); sched_rt_bandwidth_account()
618 rt_rq->rt_time < rt_b->rt_runtime); sched_rt_bandwidth_account()
625 static int do_balance_runtime(struct rt_rq *rt_rq) do_balance_runtime() argument
627 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); do_balance_runtime()
628 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd; do_balance_runtime()
637 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); do_balance_runtime()
640 if (iter == rt_rq) do_balance_runtime()
659 if (rt_rq->rt_runtime + diff > rt_period) do_balance_runtime()
660 diff = rt_period - rt_rq->rt_runtime; do_balance_runtime()
662 rt_rq->rt_runtime += diff; do_balance_runtime()
664 if (rt_rq->rt_runtime == rt_period) { do_balance_runtime()
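
do_balance_runtime() (lines 625-664) lets a starved rt_rq borrow unused budget from sibling CPUs in the same root domain: each donor gives up a share of its spare runtime (spare divided by the domain's CPU count), and borrowing stops once the borrower holds a full period's worth. The sketch below reproduces only that arithmetic, with fixed-size arrays in place of the per-CPU structures and with the locking and RUNTIME_INF handling omitted.

#include <stdint.h>
#include <stdio.h>

#define NCPUS 4

struct toy_rt_rq {
    uint64_t rt_runtime;   /* budget for this period (ns)  */
    uint64_t rt_time;      /* budget already consumed (ns) */
};

/* Borrow runtime for rt_rq[self] from the other CPUs, following the
 * arithmetic of do_balance_runtime() at rt.c:625-664 (no locking, no
 * RUNTIME_INF donors). Returns 1 if anything was borrowed. */
int toy_balance_runtime(struct toy_rt_rq rt_rq[NCPUS], int self,
                        uint64_t rt_period)
{
    int more = 0;

    for (int i = 0; i < NCPUS; i++) {
        if (i == self)
            continue;

        /* Spare budget the donor has not used this period, split evenly. */
        if (rt_rq[i].rt_runtime <= rt_rq[i].rt_time)
            continue;
        uint64_t diff = (rt_rq[i].rt_runtime - rt_rq[i].rt_time) / NCPUS;

        /* Never let the borrower exceed one full period of runtime. */
        if (rt_rq[self].rt_runtime + diff > rt_period)
            diff = rt_period - rt_rq[self].rt_runtime;

        rt_rq[i].rt_runtime -= diff;
        rt_rq[self].rt_runtime += diff;
        more = 1;

        if (rt_rq[self].rt_runtime == rt_period)
            break;
    }
    return more;
}

int main(void)
{
    struct toy_rt_rq rqs[NCPUS] = {
        { 950000000, 950000000 },   /* CPU 0: used everything, wants more */
        { 950000000, 100000000 },   /* CPU 1..3: mostly idle donors       */
        { 950000000, 100000000 },
        { 950000000, 100000000 },
    };
    toy_balance_runtime(rqs, 0, 1000000000ULL);
    printf("cpu0 runtime after borrowing: %llu ns\n",
           (unsigned long long)rqs[0].rt_runtime);
    return 0;
}

With these sample figures CPU 0 ends up at the full 1,000,000,000 ns period, i.e. it is capped after borrowing 50 ms from the first donor.
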
684 struct rt_rq *rt_rq; __disable_runtime() local
689 for_each_rt_rq(rt_rq, iter, rq) { for_each_rt_rq()
690 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); for_each_rt_rq()
695 raw_spin_lock(&rt_rq->rt_runtime_lock); for_each_rt_rq()
701 if (rt_rq->rt_runtime == RUNTIME_INF || for_each_rt_rq()
702 rt_rq->rt_runtime == rt_b->rt_runtime) for_each_rt_rq()
704 raw_spin_unlock(&rt_rq->rt_runtime_lock); for_each_rt_rq()
711 want = rt_b->rt_runtime - rt_rq->rt_runtime; for_each_rt_rq()
717 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); for_each_rt_rq()
723 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF) for_each_rt_rq()
741 raw_spin_lock(&rt_rq->rt_runtime_lock); for_each_rt_rq()
752 rt_rq->rt_runtime = RUNTIME_INF; for_each_rt_rq()
753 rt_rq->rt_throttled = 0; for_each_rt_rq()
754 raw_spin_unlock(&rt_rq->rt_runtime_lock); for_each_rt_rq()
757 /* Make rt_rq available for pick_next_task() */ for_each_rt_rq()
758 sched_rt_rq_enqueue(rt_rq); for_each_rt_rq()
765 struct rt_rq *rt_rq; __enable_runtime() local
773 for_each_rt_rq(rt_rq, iter, rq) { for_each_rt_rq()
774 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); for_each_rt_rq()
777 raw_spin_lock(&rt_rq->rt_runtime_lock); for_each_rt_rq()
778 rt_rq->rt_runtime = rt_b->rt_runtime; for_each_rt_rq()
779 rt_rq->rt_time = 0; for_each_rt_rq()
780 rt_rq->rt_throttled = 0; for_each_rt_rq()
781 raw_spin_unlock(&rt_rq->rt_runtime_lock); for_each_rt_rq()
786 static int balance_runtime(struct rt_rq *rt_rq) balance_runtime() argument
793 if (rt_rq->rt_time > rt_rq->rt_runtime) { balance_runtime()
794 raw_spin_unlock(&rt_rq->rt_runtime_lock); balance_runtime()
795 more = do_balance_runtime(rt_rq); balance_runtime()
796 raw_spin_lock(&rt_rq->rt_runtime_lock); balance_runtime()
802 static inline int balance_runtime(struct rt_rq *rt_rq) balance_runtime() argument
829 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); for_each_cpu() local
830 struct rq *rq = rq_of_rt_rq(rt_rq); for_each_cpu()
833 if (rt_rq->rt_time) { for_each_cpu()
836 raw_spin_lock(&rt_rq->rt_runtime_lock); for_each_cpu()
837 if (rt_rq->rt_throttled) for_each_cpu()
838 balance_runtime(rt_rq); for_each_cpu()
839 runtime = rt_rq->rt_runtime; for_each_cpu()
840 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime); for_each_cpu()
841 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) { for_each_cpu()
842 rt_rq->rt_throttled = 0; for_each_cpu()
852 if (rt_rq->rt_nr_running && rq->curr == rq->idle) for_each_cpu()
855 if (rt_rq->rt_time || rt_rq->rt_nr_running) for_each_cpu()
857 raw_spin_unlock(&rt_rq->rt_runtime_lock); for_each_cpu()
858 } else if (rt_rq->rt_nr_running) { for_each_cpu()
860 if (!rt_rq_throttled(rt_rq)) for_each_cpu()
863 if (rt_rq->rt_throttled) for_each_cpu()
867 sched_rt_rq_enqueue(rt_rq); for_each_cpu()
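
do_sched_rt_period_timer() (lines 829-867) is the periodic replenishment: once per period it knocks up to overrun*runtime off each rt_rq's accumulated rt_time, clears the throttle when the remaining debt fits under the budget again, and re-enqueues the queue. A reduced model of that per-queue step, with the locking and the idle/skip special cases left out:

#include <stdint.h>
#include <stdio.h>

struct toy_rt_rq {
    uint64_t rt_time;      /* consumed this period (ns) */
    uint64_t rt_runtime;   /* budget per period (ns)    */
    int      rt_throttled;
};

/* One period-timer pass over one queue, following the for_each_cpu() body
 * at rt.c:829-867; 'overrun' is how many periods elapsed since the timer
 * last ran. Returns 1 when the queue should be re-enqueued. */
int toy_rt_period_tick(struct toy_rt_rq *rt_rq, uint64_t overrun)
{
    uint64_t refund = overrun * rt_rq->rt_runtime;

    if (refund > rt_rq->rt_time)         /* min(rt_time, overrun*runtime) */
        refund = rt_rq->rt_time;
    rt_rq->rt_time -= refund;

    if (rt_rq->rt_throttled && rt_rq->rt_time < rt_rq->rt_runtime) {
        rt_rq->rt_throttled = 0;
        return 1;    /* the kernel calls sched_rt_rq_enqueue() here */
    }
    return 0;
}

int main(void)
{
    struct toy_rt_rq rq = { 1200000000, 950000000, 1 };
    int requeue = toy_rt_period_tick(&rq, 1);
    printf("requeue=%d rt_time=%llu\n", requeue,
           (unsigned long long)rq.rt_time);   /* requeue=1 rt_time=250000000 */
    return 0;
}
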
880 struct rt_rq *rt_rq = group_rt_rq(rt_se); rt_se_prio() local
882 if (rt_rq) rt_se_prio()
883 return rt_rq->highest_prio.curr; rt_se_prio()
889 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq) sched_rt_runtime_exceeded() argument
891 u64 runtime = sched_rt_runtime(rt_rq); sched_rt_runtime_exceeded()
893 if (rt_rq->rt_throttled) sched_rt_runtime_exceeded()
894 return rt_rq_throttled(rt_rq); sched_rt_runtime_exceeded()
896 if (runtime >= sched_rt_period(rt_rq)) sched_rt_runtime_exceeded()
899 balance_runtime(rt_rq); sched_rt_runtime_exceeded()
900 runtime = sched_rt_runtime(rt_rq); sched_rt_runtime_exceeded()
904 if (rt_rq->rt_time > runtime) { sched_rt_runtime_exceeded()
905 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); sched_rt_runtime_exceeded()
912 rt_rq->rt_throttled = 1; sched_rt_runtime_exceeded()
920 rt_rq->rt_time = 0; sched_rt_runtime_exceeded()
923 if (rt_rq_throttled(rt_rq)) { sched_rt_runtime_exceeded()
924 sched_rt_rq_dequeue(rt_rq); sched_rt_runtime_exceeded()
964 struct rt_rq *rt_rq = rt_rq_of_se(rt_se); for_each_sched_rt_entity() local
966 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) { for_each_sched_rt_entity()
967 raw_spin_lock(&rt_rq->rt_runtime_lock); for_each_sched_rt_entity()
968 rt_rq->rt_time += delta_exec; for_each_sched_rt_entity()
969 if (sched_rt_runtime_exceeded(rt_rq)) for_each_sched_rt_entity()
971 raw_spin_unlock(&rt_rq->rt_runtime_lock); for_each_sched_rt_entity()
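
update_curr_rt() (lines 964-971) charges each tick's delta_exec into rt_rq->rt_time at every level of the entity hierarchy and then asks sched_rt_runtime_exceeded() (lines 889-924) whether the queue must be throttled: a finite budget that is smaller than the period and already overrun leads to rt_throttled = 1 and a dequeue. The sketch below keeps only that decision; balance_runtime() and the "boosted by priority inheritance" exception are not modeled, and the toy_* types are stand-ins.

#include <stdint.h>
#include <stdio.h>

struct toy_rt_rq {
    uint64_t rt_time;       /* consumed this period (ns) */
    uint64_t rt_runtime;    /* allowed this period (ns)  */
    uint64_t rt_period;     /* period length (ns)        */
    int      rt_throttled;
};

/* Reduced sched_rt_runtime_exceeded() (rt.c:889-924): returns 1 when the
 * queue has to be throttled. */
int toy_rt_runtime_exceeded(struct toy_rt_rq *rt_rq)
{
    if (rt_rq->rt_throttled)
        return 1;

    /* A budget at least as large as the period can never run out. */
    if (rt_rq->rt_runtime >= rt_rq->rt_period)
        return 0;

    if (rt_rq->rt_time > rt_rq->rt_runtime) {
        rt_rq->rt_throttled = 1;   /* the kernel then dequeues the rt_rq */
        return 1;
    }
    return 0;
}

/* Reduced accounting step of update_curr_rt() (rt.c:964-971). */
void toy_charge(struct toy_rt_rq *rt_rq, uint64_t delta_exec)
{
    rt_rq->rt_time += delta_exec;
    (void)toy_rt_runtime_exceeded(rt_rq);
}

int main(void)
{
    struct toy_rt_rq rq = { 940000000, 950000000, 1000000000, 0 };
    toy_charge(&rq, 20000000);                 /* pushes rt_time past the budget */
    printf("throttled=%d\n", rq.rt_throttled); /* 1 */
    return 0;
}
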
977 dequeue_top_rt_rq(struct rt_rq *rt_rq) dequeue_top_rt_rq() argument
979 struct rq *rq = rq_of_rt_rq(rt_rq); dequeue_top_rt_rq()
981 BUG_ON(&rq->rt != rt_rq); dequeue_top_rt_rq()
983 if (!rt_rq->rt_queued) dequeue_top_rt_rq()
988 sub_nr_running(rq, rt_rq->rt_nr_running); dequeue_top_rt_rq()
989 rt_rq->rt_queued = 0; dequeue_top_rt_rq()
993 enqueue_top_rt_rq(struct rt_rq *rt_rq) enqueue_top_rt_rq() argument
995 struct rq *rq = rq_of_rt_rq(rt_rq); enqueue_top_rt_rq()
997 BUG_ON(&rq->rt != rt_rq); enqueue_top_rt_rq()
999 if (rt_rq->rt_queued) enqueue_top_rt_rq()
1001 if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running) enqueue_top_rt_rq()
1004 add_nr_running(rq, rt_rq->rt_nr_running); enqueue_top_rt_rq()
1005 rt_rq->rt_queued = 1; enqueue_top_rt_rq()
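
dequeue_top_rt_rq()/enqueue_top_rt_rq() (lines 977-1005) only ever act on the per-CPU top-level queue (&rq->rt, hence the BUG_ON) and use rt_queued as a latch so the RT task count is added to or removed from rq->nr_running exactly once. A small model of that latch, with toy_* stand-ins for the kernel types:

#include <stdio.h>

struct toy_rq { unsigned nr_running; };

struct toy_rt_rq {
    unsigned rt_nr_running;
    int      rt_queued;      /* already counted into rq->nr_running? */
    int      rt_throttled;
};

/* Like enqueue_top_rt_rq() (rt.c:993-1005): count the class's tasks into the
 * runqueue once, and only if it is neither empty nor throttled. */
void toy_enqueue_top(struct toy_rq *rq, struct toy_rt_rq *rt_rq)
{
    if (rt_rq->rt_queued)
        return;
    if (rt_rq->rt_throttled || !rt_rq->rt_nr_running)
        return;
    rq->nr_running += rt_rq->rt_nr_running;
    rt_rq->rt_queued = 1;
}

/* Like dequeue_top_rt_rq() (rt.c:977-989): the reverse, guarded by the latch. */
void toy_dequeue_top(struct toy_rq *rq, struct toy_rt_rq *rt_rq)
{
    if (!rt_rq->rt_queued)
        return;
    rq->nr_running -= rt_rq->rt_nr_running;
    rt_rq->rt_queued = 0;
}

int main(void)
{
    struct toy_rq rq = { 0 };
    struct toy_rt_rq rt = { .rt_nr_running = 3 };

    toy_enqueue_top(&rq, &rt);
    toy_enqueue_top(&rq, &rt);                  /* second call is a no-op */
    printf("nr_running=%u\n", rq.nr_running);   /* 3, not 6 */
    return 0;
}
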
1011 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) inc_rt_prio_smp() argument
1013 struct rq *rq = rq_of_rt_rq(rt_rq); inc_rt_prio_smp()
1017 * Change rq's cpupri only if rt_rq is the top queue. inc_rt_prio_smp()
1019 if (&rq->rt != rt_rq) inc_rt_prio_smp()
1027 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) dec_rt_prio_smp() argument
1029 struct rq *rq = rq_of_rt_rq(rt_rq); dec_rt_prio_smp()
1033 * Change rq's cpupri only if rt_rq is the top queue. dec_rt_prio_smp()
1035 if (&rq->rt != rt_rq) dec_rt_prio_smp()
1038 if (rq->online && rt_rq->highest_prio.curr != prev_prio) dec_rt_prio_smp()
1039 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); dec_rt_prio_smp()
1045 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {} inc_rt_prio_smp() argument
1047 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {} dec_rt_prio_smp() argument
1053 inc_rt_prio(struct rt_rq *rt_rq, int prio) inc_rt_prio() argument
1055 int prev_prio = rt_rq->highest_prio.curr; inc_rt_prio()
1058 rt_rq->highest_prio.curr = prio; inc_rt_prio()
1060 inc_rt_prio_smp(rt_rq, prio, prev_prio); inc_rt_prio()
1064 dec_rt_prio(struct rt_rq *rt_rq, int prio) dec_rt_prio() argument
1066 int prev_prio = rt_rq->highest_prio.curr; dec_rt_prio()
1068 if (rt_rq->rt_nr_running) { dec_rt_prio()
1077 struct rt_prio_array *array = &rt_rq->active; dec_rt_prio()
1079 rt_rq->highest_prio.curr = dec_rt_prio()
1084 rt_rq->highest_prio.curr = MAX_RT_PRIO; dec_rt_prio()
1086 dec_rt_prio_smp(rt_rq, prio, prev_prio); dec_rt_prio()
1091 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {} dec_rt_prio() argument
1092 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {} dec_rt_prio() argument
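
inc_rt_prio()/dec_rt_prio() (lines 1053-1086) keep highest_prio.curr in sync with the priority array: an enqueue can only raise it (a simple min), while a dequeue may require rescanning the bitmap of non-empty lists, which the kernel does with sched_find_first_bit(). The model below uses a plain per-priority count array instead of the bitmap; the field names are illustrative.

#include <stdio.h>

#define MAX_RT_PRIO 100   /* 0 is the highest RT priority */

struct toy_rt_rq {
    unsigned nr_at_prio[MAX_RT_PRIO];  /* stand-in for the priority array */
    unsigned rt_nr_running;
    int highest_prio;                  /* MAX_RT_PRIO means "none queued" */
};

/* Lowest-numbered non-empty priority, playing the role of
 * sched_find_first_bit() over array->bitmap in dec_rt_prio(). */
static int toy_first_prio(struct toy_rt_rq *rt_rq)
{
    for (int p = 0; p < MAX_RT_PRIO; p++)
        if (rt_rq->nr_at_prio[p])
            return p;
    return MAX_RT_PRIO;
}

void toy_inc_rt_prio(struct toy_rt_rq *rt_rq, int prio)
{
    rt_rq->nr_at_prio[prio]++;
    rt_rq->rt_nr_running++;
    if (prio < rt_rq->highest_prio)    /* an enqueue can only raise it */
        rt_rq->highest_prio = prio;
}

void toy_dec_rt_prio(struct toy_rt_rq *rt_rq, int prio)
{
    rt_rq->nr_at_prio[prio]--;
    rt_rq->rt_nr_running--;
    rt_rq->highest_prio = rt_rq->rt_nr_running ?
        toy_first_prio(rt_rq) : MAX_RT_PRIO;   /* a dequeue may force a rescan */
}

int main(void)
{
    struct toy_rt_rq rq = { .highest_prio = MAX_RT_PRIO };

    toy_inc_rt_prio(&rq, 40);
    toy_inc_rt_prio(&rq, 10);
    toy_dec_rt_prio(&rq, 10);
    printf("highest=%d\n", rq.highest_prio);   /* back to 40 */
    return 0;
}
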
1099 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) inc_rt_group() argument
1102 rt_rq->rt_nr_boosted++; inc_rt_group()
1104 if (rt_rq->tg) inc_rt_group()
1105 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth); inc_rt_group()
1109 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) dec_rt_group() argument
1112 rt_rq->rt_nr_boosted--; dec_rt_group()
1114 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted); dec_rt_group()
1120 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) inc_rt_group() argument
1126 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {} dec_rt_group() argument
1133 struct rt_rq *group_rq = group_rt_rq(rt_se); rt_se_nr_running()
1142 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) inc_rt_tasks() argument
1147 rt_rq->rt_nr_running += rt_se_nr_running(rt_se); inc_rt_tasks()
1149 inc_rt_prio(rt_rq, prio); inc_rt_tasks()
1150 inc_rt_migration(rt_se, rt_rq); inc_rt_tasks()
1151 inc_rt_group(rt_se, rt_rq); inc_rt_tasks()
1155 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) dec_rt_tasks() argument
1158 WARN_ON(!rt_rq->rt_nr_running); dec_rt_tasks()
1159 rt_rq->rt_nr_running -= rt_se_nr_running(rt_se); dec_rt_tasks()
1161 dec_rt_prio(rt_rq, rt_se_prio(rt_se)); dec_rt_tasks()
1162 dec_rt_migration(rt_se, rt_rq); dec_rt_tasks()
1163 dec_rt_group(rt_se, rt_rq); dec_rt_tasks()
1168 struct rt_rq *rt_rq = rt_rq_of_se(rt_se); __enqueue_rt_entity() local
1169 struct rt_prio_array *array = &rt_rq->active; __enqueue_rt_entity()
1170 struct rt_rq *group_rq = group_rt_rq(rt_se); __enqueue_rt_entity()
1188 inc_rt_tasks(rt_se, rt_rq); __enqueue_rt_entity()
1193 struct rt_rq *rt_rq = rt_rq_of_se(rt_se); __dequeue_rt_entity() local
1194 struct rt_prio_array *array = &rt_rq->active; __dequeue_rt_entity()
1200 dec_rt_tasks(rt_se, rt_rq); __dequeue_rt_entity()
1241 struct rt_rq *rt_rq = group_rt_rq(rt_se); for_each_sched_rt_entity() local
1243 if (rt_rq && rt_rq->rt_nr_running) for_each_sched_rt_entity()
1281 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head) requeue_rt_entity() argument
1284 struct rt_prio_array *array = &rt_rq->active; requeue_rt_entity()
1297 struct rt_rq *rt_rq; requeue_task_rt() local
1300 rt_rq = rt_rq_of_se(rt_se); for_each_sched_rt_entity()
1301 requeue_rt_entity(rt_rq, rt_se, head); for_each_sched_rt_entity()
1427 struct rt_rq *rt_rq) pick_next_rt_entity()
1429 struct rt_prio_array *array = &rt_rq->active; pick_next_rt_entity()
1447 struct rt_rq *rt_rq = &rq->rt; _pick_next_task_rt() local
1450 rt_se = pick_next_rt_entity(rq, rt_rq); _pick_next_task_rt()
1452 rt_rq = group_rt_rq(rt_se); _pick_next_task_rt()
1453 } while (rt_rq); _pick_next_task_rt()
1465 struct rt_rq *rt_rq = &rq->rt; pick_next_task_rt() local
1480 * We may dequeue prev's rt_rq in put_prev_task(). pick_next_task_rt()
1486 if (!rt_rq->rt_queued) pick_next_task_rt()
1891 struct rt_rq *rt_rq = arg; try_to_push_tasks() local
1896 this_cpu = rt_rq->push_cpu; try_to_push_tasks()
1902 src_rq = rq_of_rt_rq(rt_rq); try_to_push_tasks()
1912 raw_spin_lock(&rt_rq->push_lock); try_to_push_tasks()
1917 if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) { try_to_push_tasks()
1918 rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART; try_to_push_tasks()
1919 rt_rq->push_cpu = src_rq->cpu; try_to_push_tasks()
1925 rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING; try_to_push_tasks()
1926 raw_spin_unlock(&rt_rq->push_lock); try_to_push_tasks()
1940 irq_work_queue_on(&rt_rq->push_work, cpu); try_to_push_tasks()
1945 struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work); push_irq_work_func() local
1947 try_to_push_tasks(rt_rq); push_irq_work_func()
2334 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2339 struct rt_rq *rt_rq; print_rt_stats() local
2342 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu)) print_rt_stats()
2343 print_rt_rq(m, cpu, rt_rq); print_rt_stats()
1426 pick_next_rt_entity(struct rq *rq, struct rt_rq *rt_rq) pick_next_rt_entity() argument
auto_group.c
36 ag->tg->rt_rq = NULL; autogroup_destroy()
94 tg->rt_rq = root_task_group.rt_rq; autogroup_create()
debug.c
246 void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq) print_rt_rq() argument
249 SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg)); print_rt_rq()
255 SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rt_rq->x)) print_rt_rq()
257 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x)) print_rt_rq()
sched.h
206 struct rt_rq;
247 struct rt_rq **rt_rq; member in struct:task_group
314 extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
434 struct rt_rq { struct
585 struct rt_rq rt;
667 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
924 p->rt.rt_rq = tg->rt_rq[cpu]; set_task_rq()
1675 extern void init_rt_rq(struct rt_rq *rt_rq);
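
The sched.h hits show the group-scheduling wiring: struct task_group holds a per-CPU array of rt_rq pointers (line 247), and set_task_rq() (line 924) points a task's RT entity at its group's rt_rq for the CPU it lands on. A bare-bones sketch of that wiring follows; the toy_* names and the fixed CPU count are assumptions made for illustration.

#define NCPUS 4

struct toy_task_group;

struct toy_rt_rq {
    struct toy_task_group *tg;   /* owning group, as set by init_tg_rt_entry() */
};

struct toy_task_group {
    struct toy_rt_rq *rt_rq[NCPUS];   /* one RT runqueue per CPU per group */
};

struct toy_sched_rt_entity {
    struct toy_rt_rq *rt_rq;   /* queue this entity is queued on               */
    struct toy_rt_rq *my_q;    /* queue this entity owns (group entities only) */
};

struct toy_task {
    struct toy_sched_rt_entity rt;
};

/* Like set_task_rq() at sched.h:924: point the task's RT entity at its
 * group's rt_rq for the CPU it is being placed on. */
void toy_set_task_rq(struct toy_task *p, struct toy_task_group *tg, int cpu)
{
    p->rt.rt_rq = tg->rt_rq[cpu];
}

int main(void)
{
    struct toy_rt_rq per_cpu_q[NCPUS];
    struct toy_task_group tg;
    struct toy_task p;

    for (int i = 0; i < NCPUS; i++) {
        per_cpu_q[i].tg = &tg;        /* init_tg_rt_entry(): rt_rq->tg = tg  */
        tg.rt_rq[i] = &per_cpu_q[i];  /*                     tg->rt_rq[cpu]  */
    }
    toy_set_task_rq(&p, &tg, 2);
    return p.rt.rt_rq == &per_cpu_q[2] ? 0 : 1;
}
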
deadline.c
648 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
709 struct rt_rq *rt_rq = &rq->rt; update_curr_dl() local
711 raw_spin_lock(&rt_rq->rt_runtime_lock); update_curr_dl()
717 if (sched_rt_bandwidth_account(rt_rq)) update_curr_dl()
718 rt_rq->rt_time += delta_exec; update_curr_dl()
719 raw_spin_unlock(&rt_rq->rt_runtime_lock); update_curr_dl()
core.c
7132 root_task_group.rt_rq = (struct rt_rq **)ptr; sched_init()
7714 struct rt_rq *rt_rq = tg->rt_rq[i]; for_each_possible_cpu() local
7716 raw_spin_lock(&rt_rq->rt_runtime_lock); for_each_possible_cpu()
7717 rt_rq->rt_runtime = rt_runtime; for_each_possible_cpu()
7718 raw_spin_unlock(&rt_rq->rt_runtime_lock); for_each_possible_cpu()
7803 struct rt_rq *rt_rq = &cpu_rq(i)->rt; for_each_possible_cpu() local
7805 raw_spin_lock(&rt_rq->rt_runtime_lock); for_each_possible_cpu()
7806 rt_rq->rt_runtime = global_rt_runtime(); for_each_possible_cpu()
7807 raw_spin_unlock(&rt_rq->rt_runtime_lock); for_each_possible_cpu()
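
The core.c hits are the admin paths that distribute a new rt_runtime to every CPU's rt_rq under rt_runtime_lock, both per task group (lines 7714-7718) and globally (lines 7803-7807). The global budget is exposed through the standard sysctls /proc/sys/kernel/sched_rt_runtime_us and sched_rt_period_us; a small sketch that simply reads them back (defaults are 950000 and 1000000, i.e. RT tasks may use at most 95% of each CPU, and -1 disables the limit):

#include <stdio.h>

/* Read back the global RT bandwidth knobs that the core.c code above pushes
 * into every CPU's rt_rq. */
int main(void)
{
    long runtime_us = 0, period_us = 0;
    FILE *f;

    if ((f = fopen("/proc/sys/kernel/sched_rt_runtime_us", "r")) != NULL) {
        if (fscanf(f, "%ld", &runtime_us) != 1)
            runtime_us = 0;
        fclose(f);
    }
    if ((f = fopen("/proc/sys/kernel/sched_rt_period_us", "r")) != NULL) {
        if (fscanf(f, "%ld", &period_us) != 1)
            period_us = 0;
        fclose(f);
    }

    printf("RT tasks may run %ld us out of every %ld us\n",
           runtime_us, period_us);
    return 0;
}
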
/linux-4.1.27/include/linux/
sched.h
1222 struct rt_rq *rt_rq; member in struct:sched_rt_entity
1224 struct rt_rq *my_q;

Completed in 249 milliseconds