rt_rq 493 include/linux/sched.h struct rt_rq *rt_rq;
rt_rq 495 include/linux/sched.h struct rt_rq *my_q;
rt_rq 32 kernel/sched/autogroup.c ag->tg->rt_rq = NULL;
rt_rq 89 kernel/sched/autogroup.c tg->rt_rq = root_task_group.rt_rq;
rt_rq 6584 kernel/sched/core.c root_task_group.rt_rq = (struct rt_rq **)ptr;
rt_rq 1132 kernel/sched/deadline.c extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
rt_rq 1268 kernel/sched/deadline.c struct rt_rq *rt_rq = &rq->rt;
rt_rq 1270 kernel/sched/deadline.c raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq 1276 kernel/sched/deadline.c if (sched_rt_bandwidth_account(rt_rq))
rt_rq 1277 kernel/sched/deadline.c rt_rq->rt_time += delta_exec;
rt_rq 1278 kernel/sched/deadline.c raw_spin_unlock(&rt_rq->rt_runtime_lock);
rt_rq 561 kernel/sched/debug.c void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
rt_rq 565 kernel/sched/debug.c SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
rt_rq 572 kernel/sched/debug.c SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
rt_rq 574 kernel/sched/debug.c SEQ_printf(m, " .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
rt_rq 576 kernel/sched/debug.c SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))
rt_rq 76 kernel/sched/rt.c void init_rt_rq(struct rt_rq *rt_rq)
rt_rq 81 kernel/sched/rt.c array = &rt_rq->active;
rt_rq 90 kernel/sched/rt.c rt_rq->highest_prio.curr = MAX_RT_PRIO;
rt_rq 91 kernel/sched/rt.c rt_rq->highest_prio.next = MAX_RT_PRIO;
rt_rq 92 kernel/sched/rt.c rt_rq->rt_nr_migratory = 0;
rt_rq 93 kernel/sched/rt.c rt_rq->overloaded = 0;
rt_rq 94 kernel/sched/rt.c plist_head_init(&rt_rq->pushable_tasks);
rt_rq 97 kernel/sched/rt.c rt_rq->rt_queued = 0;
rt_rq 99 kernel/sched/rt.c rt_rq->rt_time = 0;
rt_rq 100 kernel/sched/rt.c rt_rq->rt_throttled = 0;
rt_rq 101 kernel/sched/rt.c rt_rq->rt_runtime = 0;
rt_rq 102 kernel/sched/rt.c raw_spin_lock_init(&rt_rq->rt_runtime_lock);
rt_rq 121 kernel/sched/rt.c static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
rt_rq 123 kernel/sched/rt.c return rt_rq->rq;
rt_rq 126 kernel/sched/rt.c static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
rt_rq 128 kernel/sched/rt.c return rt_se->rt_rq;
rt_rq 133 kernel/sched/rt.c struct rt_rq *rt_rq = rt_se->rt_rq;
rt_rq 135 kernel/sched/rt.c return rt_rq->rq;
rt_rq 146 kernel/sched/rt.c if (tg->rt_rq)
rt_rq 147 kernel/sched/rt.c kfree(tg->rt_rq[i]);
rt_rq 152 kernel/sched/rt.c kfree(tg->rt_rq);
rt_rq 156 kernel/sched/rt.c void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
rt_rq 162 kernel/sched/rt.c rt_rq->highest_prio.curr = MAX_RT_PRIO;
rt_rq 163 kernel/sched/rt.c rt_rq->rt_nr_boosted = 0;
rt_rq 164 kernel/sched/rt.c rt_rq->rq = rq;
rt_rq 165 kernel/sched/rt.c rt_rq->tg = tg;
rt_rq 167 kernel/sched/rt.c tg->rt_rq[cpu] = rt_rq;
rt_rq 174 kernel/sched/rt.c rt_se->rt_rq = &rq->rt;
rt_rq 176 kernel/sched/rt.c rt_se->rt_rq = parent->my_q;
rt_rq 178 kernel/sched/rt.c rt_se->my_q = rt_rq;
rt_rq 185 kernel/sched/rt.c struct rt_rq *rt_rq;
rt_rq 189 kernel/sched/rt.c tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
rt_rq 190 kernel/sched/rt.c if (!tg->rt_rq)
rt_rq 200 kernel/sched/rt.c rt_rq = kzalloc_node(sizeof(struct rt_rq),
rt_rq 202 kernel/sched/rt.c if (!rt_rq)
rt_rq 210 kernel/sched/rt.c init_rt_rq(rt_rq);
rt_rq 211 kernel/sched/rt.c rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
rt_rq 212 kernel/sched/rt.c init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
rt_rq 218 kernel/sched/rt.c kfree(rt_rq);
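The kernel/sched/rt.c hits around lines 185-218 above come from alloc_rt_sched_group(), which allocates one rt_rq per CPU for a task group and wires each back to the group via init_tg_rt_entry(). The sketch below is a simplified user-space model of that allocation pattern only (calloc stands in for kcalloc/kzalloc_node, the structs are trimmed to the fields visible in this listing, and error cleanup is omitted); it is illustrative, not the kernel implementation.

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

/* Trimmed stand-ins for the kernel structures (illustrative only). */
struct rt_rq {
	unsigned long rt_runtime;	/* runtime budget for this queue */
	int cpu;
	struct task_group *tg;		/* owning group */
};

struct task_group {
	struct rt_rq **rt_rq;		/* one rt_rq pointer per CPU */
	unsigned long rt_runtime;	/* group-wide budget copied per CPU */
};

/* Mirrors the shape of alloc_rt_sched_group(): allocate the per-CPU
 * pointer array first, then one rt_rq per CPU, initialise it and link
 * it back into the group (cleanup on failure omitted for brevity). */
static int alloc_group_rt_rqs(struct task_group *tg)
{
	tg->rt_rq = calloc(NR_CPUS, sizeof(struct rt_rq *));
	if (!tg->rt_rq)
		return -1;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		struct rt_rq *rt_rq = calloc(1, sizeof(*rt_rq));

		if (!rt_rq)
			return -1;
		rt_rq->cpu = cpu;
		rt_rq->tg = tg;
		rt_rq->rt_runtime = tg->rt_runtime;
		tg->rt_rq[cpu] = rt_rq;
	}
	return 0;
}

int main(void)
{
	struct task_group tg = { .rt_runtime = 950000 };

	if (alloc_group_rt_rqs(&tg))
		return 1;
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d: rt_runtime=%lu\n", cpu, tg.rt_rq[cpu]->rt_runtime);
	return 0;
}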
rt_rq 232 kernel/sched/rt.c static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
rt_rq 234 kernel/sched/rt.c return container_of(rt_rq, struct rq, rt);
rt_rq 244 kernel/sched/rt.c static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
rt_rq 303 kernel/sched/rt.c static void update_rt_migration(struct rt_rq *rt_rq)
rt_rq 305 kernel/sched/rt.c if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
rt_rq 306 kernel/sched/rt.c if (!rt_rq->overloaded) {
rt_rq 307 kernel/sched/rt.c rt_set_overload(rq_of_rt_rq(rt_rq));
rt_rq 308 kernel/sched/rt.c rt_rq->overloaded = 1;
rt_rq 310 kernel/sched/rt.c } else if (rt_rq->overloaded) {
rt_rq 311 kernel/sched/rt.c rt_clear_overload(rq_of_rt_rq(rt_rq));
rt_rq 312 kernel/sched/rt.c rt_rq->overloaded = 0;
rt_rq 316 kernel/sched/rt.c static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
rt_rq 324 kernel/sched/rt.c rt_rq = &rq_of_rt_rq(rt_rq)->rt;
rt_rq 326 kernel/sched/rt.c rt_rq->rt_nr_total++;
rt_rq 328 kernel/sched/rt.c rt_rq->rt_nr_migratory++;
rt_rq 330 kernel/sched/rt.c update_rt_migration(rt_rq);
rt_rq 333 kernel/sched/rt.c static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
rt_rq 341 kernel/sched/rt.c rt_rq = &rq_of_rt_rq(rt_rq)->rt;
rt_rq 343 kernel/sched/rt.c rt_rq->rt_nr_total--;
rt_rq 345 kernel/sched/rt.c rt_rq->rt_nr_migratory--;
rt_rq 347 kernel/sched/rt.c update_rt_migration(rt_rq);
rt_rq 409 kernel/sched/rt.c void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
rt_rq 414 kernel/sched/rt.c void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
rt_rq 432 kernel/sched/rt.c static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
rt_rq 433 kernel/sched/rt.c static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
rt_rq 442 kernel/sched/rt.c static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
rt_rq 444 kernel/sched/rt.c if (!rt_rq->tg)
rt_rq 447 kernel/sched/rt.c return rt_rq->rt_runtime;
rt_rq 450 kernel/sched/rt.c static inline u64 sched_rt_period(struct rt_rq *rt_rq)
rt_rq 452 kernel/sched/rt.c return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
rt_rq 470 kernel/sched/rt.c #define for_each_rt_rq(rt_rq, iter, rq) \
rt_rq 473 kernel/sched/rt.c (rt_rq = iter->rt_rq[cpu_of(rq)]);)
rt_rq 478 kernel/sched/rt.c static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
rt_rq 486 kernel/sched/rt.c static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
rt_rq 488 kernel/sched/rt.c struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
rt_rq 489 kernel/sched/rt.c struct rq *rq = rq_of_rt_rq(rt_rq);
rt_rq 494 kernel/sched/rt.c rt_se = rt_rq->tg->rt_se[cpu];
rt_rq 496 kernel/sched/rt.c if (rt_rq->rt_nr_running) {
rt_rq 498 kernel/sched/rt.c enqueue_top_rt_rq(rt_rq);
rt_rq 502 kernel/sched/rt.c if (rt_rq->highest_prio.curr < curr->prio)
rt_rq 507 kernel/sched/rt.c static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
rt_rq 510 kernel/sched/rt.c int cpu = cpu_of(rq_of_rt_rq(rt_rq));
rt_rq 512 kernel/sched/rt.c rt_se = rt_rq->tg->rt_se[cpu];
rt_rq 515 kernel/sched/rt.c dequeue_top_rt_rq(rt_rq);
rt_rq 517 kernel/sched/rt.c cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
rt_rq 523 kernel/sched/rt.c static inline int rt_rq_throttled(struct rt_rq *rt_rq)
rt_rq 525 kernel/sched/rt.c return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
rt_rq 530 kernel/sched/rt.c struct rt_rq *rt_rq = group_rt_rq(rt_se);
rt_rq 533 kernel/sched/rt.c if (rt_rq)
rt_rq 534 kernel/sched/rt.c return !!rt_rq->rt_nr_boosted;
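In the non-group-scheduling branch (kernel/sched/rt.c lines 232-234 above), rq_of_rt_rq() recovers the enclosing runqueue purely by pointer arithmetic, because struct rq embeds its RT queue directly (the "struct rt_rq rt;" member listed at kernel/sched/sched.h line 888). Below is a minimal user-space sketch of that container_of idiom with stand-in structs and a simplified macro; it only illustrates the pattern, not the kernel types.

#include <stdio.h>
#include <stddef.h>

/* Simplified version of the kernel's container_of(): walk back from a
 * pointer to an embedded member to the start of the enclosing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rt_rq {
	unsigned int rt_nr_running;
};

/* Stand-in for the per-CPU runqueue, which embeds its RT queue. */
struct rq {
	int cpu;
	struct rt_rq rt;	/* embedded, so no back-pointer is needed */
};

static struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

int main(void)
{
	struct rq rq = { .cpu = 2 };
	struct rt_rq *rt_rq = &rq.rt;

	printf("rt_rq belongs to CPU %d\n", rq_of_rt_rq(rt_rq)->cpu);
	return 0;
}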
rt_rq 553 kernel/sched/rt.c struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
rt_rq 555 kernel/sched/rt.c return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
rt_rq 558 kernel/sched/rt.c static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
rt_rq 560 kernel/sched/rt.c return &rt_rq->tg->rt_bandwidth;
rt_rq 565 kernel/sched/rt.c static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
rt_rq 567 kernel/sched/rt.c return rt_rq->rt_runtime;
rt_rq 570 kernel/sched/rt.c static inline u64 sched_rt_period(struct rt_rq *rt_rq)
rt_rq 575 kernel/sched/rt.c typedef struct rt_rq *rt_rq_iter_t;
rt_rq 577 kernel/sched/rt.c #define for_each_rt_rq(rt_rq, iter, rq) \
rt_rq 578 kernel/sched/rt.c for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
rt_rq 583 kernel/sched/rt.c static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
rt_rq 588 kernel/sched/rt.c static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
rt_rq 590 kernel/sched/rt.c struct rq *rq = rq_of_rt_rq(rt_rq);
rt_rq 592 kernel/sched/rt.c if (!rt_rq->rt_nr_running)
rt_rq 595 kernel/sched/rt.c enqueue_top_rt_rq(rt_rq);
rt_rq 599 kernel/sched/rt.c static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
rt_rq 601 kernel/sched/rt.c dequeue_top_rt_rq(rt_rq);
rt_rq 604 kernel/sched/rt.c static inline int rt_rq_throttled(struct rt_rq *rt_rq)
rt_rq 606 kernel/sched/rt.c return rt_rq->rt_throttled;
rt_rq 615 kernel/sched/rt.c struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
rt_rq 620 kernel/sched/rt.c static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
rt_rq 627 kernel/sched/rt.c bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
rt_rq 629 kernel/sched/rt.c struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
rt_rq 632 kernel/sched/rt.c rt_rq->rt_time < rt_b->rt_runtime);
rt_rq 639 kernel/sched/rt.c static void do_balance_runtime(struct rt_rq *rt_rq)
rt_rq 641 kernel/sched/rt.c struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
rt_rq 642 kernel/sched/rt.c struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
rt_rq 651 kernel/sched/rt.c struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
rt_rq 654 kernel/sched/rt.c if (iter == rt_rq)
rt_rq 673 kernel/sched/rt.c if (rt_rq->rt_runtime + diff > rt_period)
rt_rq 674 kernel/sched/rt.c diff = rt_period - rt_rq->rt_runtime;
rt_rq 676 kernel/sched/rt.c rt_rq->rt_runtime += diff;
rt_rq 677 kernel/sched/rt.c if (rt_rq->rt_runtime == rt_period) {
rt_rq 695 kernel/sched/rt.c struct rt_rq *rt_rq;
rt_rq 700 kernel/sched/rt.c for_each_rt_rq(rt_rq, iter, rq) {
rt_rq 701 kernel/sched/rt.c struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
rt_rq 706 kernel/sched/rt.c raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq 712 kernel/sched/rt.c if (rt_rq->rt_runtime == RUNTIME_INF ||
rt_rq 713 kernel/sched/rt.c rt_rq->rt_runtime == rt_b->rt_runtime)
rt_rq 715 kernel/sched/rt.c raw_spin_unlock(&rt_rq->rt_runtime_lock);
rt_rq 722 kernel/sched/rt.c want = rt_b->rt_runtime - rt_rq->rt_runtime;
rt_rq 728 kernel/sched/rt.c struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
rt_rq 734 kernel/sched/rt.c if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
rt_rq 752 kernel/sched/rt.c raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq 763 kernel/sched/rt.c rt_rq->rt_runtime = RUNTIME_INF;
rt_rq 764 kernel/sched/rt.c rt_rq->rt_throttled = 0;
rt_rq 765 kernel/sched/rt.c raw_spin_unlock(&rt_rq->rt_runtime_lock);
rt_rq 769 kernel/sched/rt.c sched_rt_rq_enqueue(rt_rq);
rt_rq 776 kernel/sched/rt.c struct rt_rq *rt_rq;
rt_rq 784 kernel/sched/rt.c for_each_rt_rq(rt_rq, iter, rq) {
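The do_balance_runtime() hits (kernel/sched/rt.c lines 639-677 above) implement RT_RUNTIME_SHARE borrowing: an rt_rq that is running out of budget pulls a share of the unused budget from the sibling rt_rqs attached to the same rt_bandwidth on other CPUs, never exceeding the period. The following is a loose, single-threaded user-space model of that redistribution loop, with no locking and simplified arithmetic; it illustrates the idea only, not the exact kernel algorithm.

#include <stdio.h>

#define NR_CPUS 4
#define RT_PERIOD 1000000ULL	/* ns, stand-in for sched_rt_period() */

struct rt_rq {
	unsigned long long rt_runtime;	/* budget this CPU may spend */
	unsigned long long rt_time;	/* budget already consumed */
};

/* Loose model of do_balance_runtime(): borrow a share of each sibling's
 * spare budget until the starved rt_rq reaches the full period. */
static void balance_runtime(struct rt_rq *rt_rq, struct rt_rq rqs[])
{
	int weight = NR_CPUS;	/* the kernel uses the root-domain span weight */

	for (int i = 0; i < NR_CPUS; i++) {
		struct rt_rq *iter = &rqs[i];
		unsigned long long diff;

		if (iter == rt_rq || iter->rt_runtime <= iter->rt_time)
			continue;

		diff = (iter->rt_runtime - iter->rt_time) / weight;
		if (rt_rq->rt_runtime + diff > RT_PERIOD)
			diff = RT_PERIOD - rt_rq->rt_runtime;
		iter->rt_runtime -= diff;
		rt_rq->rt_runtime += diff;
		if (rt_rq->rt_runtime == RT_PERIOD)
			break;
	}
}

int main(void)
{
	struct rt_rq rqs[NR_CPUS] = {
		{ .rt_runtime = 950000, .rt_time = 960000 },	/* overran its budget */
		{ .rt_runtime = 950000, .rt_time = 100000 },
		{ .rt_runtime = 950000, .rt_time = 0 },
		{ .rt_runtime = 950000, .rt_time = 500000 },
	};

	balance_runtime(&rqs[0], rqs);
	printf("cpu0 runtime after borrowing: %llu\n", rqs[0].rt_runtime);
	return 0;
}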
rt_rq 785 kernel/sched/rt.c struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
rt_rq 788 kernel/sched/rt.c raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq 789 kernel/sched/rt.c rt_rq->rt_runtime = rt_b->rt_runtime;
rt_rq 790 kernel/sched/rt.c rt_rq->rt_time = 0;
rt_rq 791 kernel/sched/rt.c rt_rq->rt_throttled = 0;
rt_rq 792 kernel/sched/rt.c raw_spin_unlock(&rt_rq->rt_runtime_lock);
rt_rq 797 kernel/sched/rt.c static void balance_runtime(struct rt_rq *rt_rq)
rt_rq 802 kernel/sched/rt.c if (rt_rq->rt_time > rt_rq->rt_runtime) {
rt_rq 803 kernel/sched/rt.c raw_spin_unlock(&rt_rq->rt_runtime_lock);
rt_rq 804 kernel/sched/rt.c do_balance_runtime(rt_rq);
rt_rq 805 kernel/sched/rt.c raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq 809 kernel/sched/rt.c static inline void balance_runtime(struct rt_rq *rt_rq) {}
rt_rq 833 kernel/sched/rt.c struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
rt_rq 834 kernel/sched/rt.c struct rq *rq = rq_of_rt_rq(rt_rq);
rt_rq 841 kernel/sched/rt.c raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq 842 kernel/sched/rt.c if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
rt_rq 843 kernel/sched/rt.c rt_rq->rt_runtime = rt_b->rt_runtime;
rt_rq 844 kernel/sched/rt.c skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
rt_rq 845 kernel/sched/rt.c raw_spin_unlock(&rt_rq->rt_runtime_lock);
rt_rq 852 kernel/sched/rt.c if (rt_rq->rt_time) {
rt_rq 855 kernel/sched/rt.c raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq 856 kernel/sched/rt.c if (rt_rq->rt_throttled)
rt_rq 857 kernel/sched/rt.c balance_runtime(rt_rq);
rt_rq 858 kernel/sched/rt.c runtime = rt_rq->rt_runtime;
rt_rq 859 kernel/sched/rt.c rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
rt_rq 860 kernel/sched/rt.c if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
rt_rq 861 kernel/sched/rt.c rt_rq->rt_throttled = 0;
rt_rq 871 kernel/sched/rt.c if (rt_rq->rt_nr_running && rq->curr == rq->idle)
rt_rq 874 kernel/sched/rt.c if (rt_rq->rt_time || rt_rq->rt_nr_running)
rt_rq 876 kernel/sched/rt.c raw_spin_unlock(&rt_rq->rt_runtime_lock);
rt_rq 877 kernel/sched/rt.c } else if (rt_rq->rt_nr_running) {
rt_rq 879 kernel/sched/rt.c if (!rt_rq_throttled(rt_rq))
rt_rq 882 kernel/sched/rt.c if (rt_rq->rt_throttled)
rt_rq 886 kernel/sched/rt.c sched_rt_rq_enqueue(rt_rq);
rt_rq 899 kernel/sched/rt.c struct rt_rq *rt_rq = group_rt_rq(rt_se);
rt_rq 901 kernel/sched/rt.c if (rt_rq)
rt_rq 902 kernel/sched/rt.c return rt_rq->highest_prio.curr;
rt_rq 908 kernel/sched/rt.c static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
rt_rq 910 kernel/sched/rt.c u64 runtime = sched_rt_runtime(rt_rq);
rt_rq 912 kernel/sched/rt.c if (rt_rq->rt_throttled)
rt_rq 913 kernel/sched/rt.c return rt_rq_throttled(rt_rq);
rt_rq 915 kernel/sched/rt.c if (runtime >= sched_rt_period(rt_rq))
rt_rq 918 kernel/sched/rt.c balance_runtime(rt_rq);
rt_rq 919 kernel/sched/rt.c runtime = sched_rt_runtime(rt_rq);
rt_rq 923 kernel/sched/rt.c if (rt_rq->rt_time > runtime) {
rt_rq 924 kernel/sched/rt.c struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
rt_rq 931 kernel/sched/rt.c rt_rq->rt_throttled = 1;
rt_rq 939 kernel/sched/rt.c rt_rq->rt_time = 0;
rt_rq 942 kernel/sched/rt.c if (rt_rq_throttled(rt_rq)) {
rt_rq 943 kernel/sched/rt.c sched_rt_rq_dequeue(rt_rq);
rt_rq 983 kernel/sched/rt.c struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
rt_rq 985 kernel/sched/rt.c if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
rt_rq 986 kernel/sched/rt.c raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq 987 kernel/sched/rt.c rt_rq->rt_time += delta_exec;
rt_rq 988 kernel/sched/rt.c if (sched_rt_runtime_exceeded(rt_rq))
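The sched_rt_runtime_exceeded() and update_curr_rt() hits (kernel/sched/rt.c lines 908-988 above) show the RT throttling mechanism: consumed runtime is charged to rt_rq->rt_time, the queue is throttled once that exceeds rt_rq->rt_runtime within a period, and the period timer later refunds budget and unthrottles it. Below is a minimal user-space model of that accounting with made-up numbers, no locking and no group hierarchy; the real refund in do_sched_rt_period_timer() also accounts for multiple overrun periods.

#include <stdio.h>

struct rt_rq {
	unsigned long long rt_time;	/* ns consumed in the current period */
	unsigned long long rt_runtime;	/* ns allowed per period */
	int rt_throttled;
};

/* Model of the check done from update_curr_rt(): charge the runtime and
 * throttle the queue once the budget for this period is used up. */
static int rt_runtime_exceeded(struct rt_rq *rt_rq, unsigned long long delta_exec)
{
	rt_rq->rt_time += delta_exec;
	if (!rt_rq->rt_throttled && rt_rq->rt_time > rt_rq->rt_runtime)
		rt_rq->rt_throttled = 1;
	return rt_rq->rt_throttled;
}

/* Model of the period timer: refund one period's worth of budget and
 * unthrottle once the backlog fits inside the budget again. */
static void rt_period_timer(struct rt_rq *rt_rq)
{
	unsigned long long refund = rt_rq->rt_runtime;

	rt_rq->rt_time -= rt_rq->rt_time < refund ? rt_rq->rt_time : refund;
	if (rt_rq->rt_throttled && rt_rq->rt_time < rt_rq->rt_runtime)
		rt_rq->rt_throttled = 0;
}

int main(void)
{
	/* Corresponds to the usual default of 950ms RT time per 1s period. */
	struct rt_rq rq = { .rt_runtime = 950000000ULL };

	for (int tick = 0; tick < 5; tick++)
		printf("tick %d: throttled=%d\n", tick,
		       rt_runtime_exceeded(&rq, 300000000ULL));
	rt_period_timer(&rq);
	printf("after period timer: throttled=%d rt_time=%llu\n",
	       rq.rt_throttled, rq.rt_time);
	return 0;
}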
rt_rq 990 kernel/sched/rt.c raw_spin_unlock(&rt_rq->rt_runtime_lock);
rt_rq 996 kernel/sched/rt.c dequeue_top_rt_rq(struct rt_rq *rt_rq)
rt_rq 998 kernel/sched/rt.c struct rq *rq = rq_of_rt_rq(rt_rq);
rt_rq 1000 kernel/sched/rt.c BUG_ON(&rq->rt != rt_rq);
rt_rq 1002 kernel/sched/rt.c if (!rt_rq->rt_queued)
rt_rq 1007 kernel/sched/rt.c sub_nr_running(rq, rt_rq->rt_nr_running);
rt_rq 1008 kernel/sched/rt.c rt_rq->rt_queued = 0;
rt_rq 1013 kernel/sched/rt.c enqueue_top_rt_rq(struct rt_rq *rt_rq)
rt_rq 1015 kernel/sched/rt.c struct rq *rq = rq_of_rt_rq(rt_rq);
rt_rq 1017 kernel/sched/rt.c BUG_ON(&rq->rt != rt_rq);
rt_rq 1019 kernel/sched/rt.c if (rt_rq->rt_queued)
rt_rq 1022 kernel/sched/rt.c if (rt_rq_throttled(rt_rq))
rt_rq 1025 kernel/sched/rt.c if (rt_rq->rt_nr_running) {
rt_rq 1026 kernel/sched/rt.c add_nr_running(rq, rt_rq->rt_nr_running);
rt_rq 1027 kernel/sched/rt.c rt_rq->rt_queued = 1;
rt_rq 1037 kernel/sched/rt.c inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
rt_rq 1039 kernel/sched/rt.c struct rq *rq = rq_of_rt_rq(rt_rq);
rt_rq 1045 kernel/sched/rt.c if (&rq->rt != rt_rq)
rt_rq 1053 kernel/sched/rt.c dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
rt_rq 1055 kernel/sched/rt.c struct rq *rq = rq_of_rt_rq(rt_rq);
rt_rq 1061 kernel/sched/rt.c if (&rq->rt != rt_rq)
rt_rq 1064 kernel/sched/rt.c if (rq->online && rt_rq->highest_prio.curr != prev_prio)
rt_rq 1065 kernel/sched/rt.c cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
rt_rq 1071 kernel/sched/rt.c void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
rt_rq 1073 kernel/sched/rt.c void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
rt_rq 1079 kernel/sched/rt.c inc_rt_prio(struct rt_rq *rt_rq, int prio)
rt_rq 1081 kernel/sched/rt.c int prev_prio = rt_rq->highest_prio.curr;
rt_rq 1084 kernel/sched/rt.c rt_rq->highest_prio.curr = prio;
rt_rq 1086 kernel/sched/rt.c inc_rt_prio_smp(rt_rq, prio, prev_prio);
rt_rq 1090 kernel/sched/rt.c dec_rt_prio(struct rt_rq *rt_rq, int prio)
rt_rq 1092 kernel/sched/rt.c int prev_prio = rt_rq->highest_prio.curr;
rt_rq 1094 kernel/sched/rt.c if (rt_rq->rt_nr_running) {
rt_rq 1103 kernel/sched/rt.c struct rt_prio_array *array = &rt_rq->active;
rt_rq 1105 kernel/sched/rt.c rt_rq->highest_prio.curr =
rt_rq 1110 kernel/sched/rt.c rt_rq->highest_prio.curr = MAX_RT_PRIO;
rt_rq 1112 kernel/sched/rt.c dec_rt_prio_smp(rt_rq, prio, prev_prio);
rt_rq 1117 kernel/sched/rt.c static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
rt_rq 1118 kernel/sched/rt.c static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
rt_rq 1125 kernel/sched/rt.c inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
rt_rq 1128 kernel/sched/rt.c rt_rq->rt_nr_boosted++;
rt_rq 1130 kernel/sched/rt.c if (rt_rq->tg)
rt_rq 1131 kernel/sched/rt.c start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
rt_rq 1135 kernel/sched/rt.c dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
rt_rq 1138 kernel/sched/rt.c rt_rq->rt_nr_boosted--;
rt_rq 1140 kernel/sched/rt.c WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
rt_rq 1146 kernel/sched/rt.c inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
rt_rq 1152 kernel/sched/rt.c void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
rt_rq 1159 kernel/sched/rt.c struct rt_rq *group_rq = group_rt_rq(rt_se);
rt_rq 1170 kernel/sched/rt.c struct rt_rq *group_rq = group_rt_rq(rt_se);
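The inc_rt_prio()/dec_rt_prio() hits (kernel/sched/rt.c lines 1079-1112 above) maintain rt_rq->highest_prio.curr as tasks come and go; when the departing task was at the current maximum, the new maximum is recovered by scanning the bitmap of the active priority array (sched_find_first_bit() in the kernel). The following is a small user-space model of that bookkeeping, using a plain per-priority array and a linear scan as stand-ins; the field names beyond highest_prio are invented for the sketch.

#include <stdio.h>

#define MAX_RT_PRIO 100

struct rt_rq {
	unsigned char bitmap[MAX_RT_PRIO];	/* set where tasks are queued */
	unsigned int nr_queued[MAX_RT_PRIO];
	int highest_prio;			/* lower value == higher priority */
};

/* Linear stand-in for sched_find_first_bit(). */
static int find_first_set(struct rt_rq *rq)
{
	for (int p = 0; p < MAX_RT_PRIO; p++)
		if (rq->bitmap[p])
			return p;
	return MAX_RT_PRIO;
}

/* Mirrors inc_rt_prio(): a newly queued task can only raise the maximum. */
static void inc_rt_prio(struct rt_rq *rq, int prio)
{
	rq->nr_queued[prio]++;
	rq->bitmap[prio] = 1;
	if (prio < rq->highest_prio)
		rq->highest_prio = prio;
}

/* Mirrors dec_rt_prio(): rescan the bitmap only when the departing task
 * was at the current maximum priority. */
static void dec_rt_prio(struct rt_rq *rq, int prio)
{
	if (--rq->nr_queued[prio] == 0)
		rq->bitmap[prio] = 0;
	if (prio <= rq->highest_prio)
		rq->highest_prio = find_first_set(rq);
}

int main(void)
{
	struct rt_rq rq = { .highest_prio = MAX_RT_PRIO };

	inc_rt_prio(&rq, 30);
	inc_rt_prio(&rq, 10);
	dec_rt_prio(&rq, 10);
	printf("highest_prio = %d\n", rq.highest_prio);	/* back to 30 */
	return 0;
}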
rt_rq 1182 kernel/sched/rt.c void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
rt_rq 1187 kernel/sched/rt.c rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
rt_rq 1188 kernel/sched/rt.c rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
rt_rq 1190 kernel/sched/rt.c inc_rt_prio(rt_rq, prio);
rt_rq 1191 kernel/sched/rt.c inc_rt_migration(rt_se, rt_rq);
rt_rq 1192 kernel/sched/rt.c inc_rt_group(rt_se, rt_rq);
rt_rq 1196 kernel/sched/rt.c void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
rt_rq 1199 kernel/sched/rt.c WARN_ON(!rt_rq->rt_nr_running);
rt_rq 1200 kernel/sched/rt.c rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
rt_rq 1201 kernel/sched/rt.c rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
rt_rq 1203 kernel/sched/rt.c dec_rt_prio(rt_rq, rt_se_prio(rt_se));
rt_rq 1204 kernel/sched/rt.c dec_rt_migration(rt_se, rt_rq);
rt_rq 1205 kernel/sched/rt.c dec_rt_group(rt_se, rt_rq);
rt_rq 1233 kernel/sched/rt.c struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
rt_rq 1234 kernel/sched/rt.c struct rt_prio_array *array = &rt_rq->active;
rt_rq 1235 kernel/sched/rt.c struct rt_rq *group_rq = group_rt_rq(rt_se);
rt_rq 1262 kernel/sched/rt.c inc_rt_tasks(rt_se, rt_rq);
rt_rq 1267 kernel/sched/rt.c struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
rt_rq 1268 kernel/sched/rt.c struct rt_prio_array *array = &rt_rq->active;
rt_rq 1276 kernel/sched/rt.c dec_rt_tasks(rt_se, rt_rq);
rt_rq 1317 kernel/sched/rt.c struct rt_rq *rt_rq = group_rt_rq(rt_se);
rt_rq 1319 kernel/sched/rt.c if (rt_rq && rt_rq->rt_nr_running)
rt_rq 1357 kernel/sched/rt.c requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
rt_rq 1360 kernel/sched/rt.c struct rt_prio_array *array = &rt_rq->active;
rt_rq 1373 kernel/sched/rt.c struct rt_rq *rt_rq;
rt_rq 1376 kernel/sched/rt.c rt_rq = rt_rq_of_se(rt_se);
rt_rq 1377 kernel/sched/rt.c requeue_rt_entity(rt_rq, rt_se, head);
rt_rq 1540 kernel/sched/rt.c struct rt_rq *rt_rq)
rt_rq 1542 kernel/sched/rt.c struct rt_prio_array *array = &rt_rq->active;
rt_rq 1559 kernel/sched/rt.c struct rt_rq *rt_rq = &rq->rt;
rt_rq 1562 kernel/sched/rt.c rt_se = pick_next_rt_entity(rq, rt_rq);
rt_rq 1564 kernel/sched/rt.c rt_rq = group_rt_rq(rt_se);
rt_rq 1565 kernel/sched/rt.c } while (rt_rq);
rt_rq 2527 kernel/sched/rt.c struct rt_rq *rt_rq = tg->rt_rq[i];
rt_rq 2529 kernel/sched/rt.c raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq 2530 kernel/sched/rt.c rt_rq->rt_runtime = rt_runtime;
rt_rq 2531 kernel/sched/rt.c raw_spin_unlock(&rt_rq->rt_runtime_lock);
rt_rq 2619 kernel/sched/rt.c struct rt_rq *rt_rq = &cpu_rq(i)->rt;
rt_rq 2621 kernel/sched/rt.c raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq 2622 kernel/sched/rt.c rt_rq->rt_runtime = global_rt_runtime();
rt_rq 2623 kernel/sched/rt.c raw_spin_unlock(&rt_rq->rt_runtime_lock);
rt_rq 2716 kernel/sched/rt.c struct rt_rq *rt_rq;
rt_rq 2719 kernel/sched/rt.c for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
rt_rq 2720 kernel/sched/rt.c print_rt_rq(m, cpu, rt_rq);
rt_rq 333 kernel/sched/sched.h struct rt_rq;
rt_rq 383 kernel/sched/sched.h struct rt_rq **rt_rq;
rt_rq 460 kernel/sched/sched.h extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
rt_rq 632 kernel/sched/sched.h static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
rt_rq 634 kernel/sched/sched.h return rt_rq->rt_queued && rt_rq->rt_nr_running;
rt_rq 888 kernel/sched/sched.h struct rt_rq rt;
rt_rq 1515 kernel/sched/sched.h p->rt.rt_rq = tg->rt_rq[cpu];
rt_rq 2188 kernel/sched/sched.h extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
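The pick-path hits (kernel/sched/rt.c lines 1559-1565 above) walk the group hierarchy: starting from the root rt_rq embedded in the runqueue, the scheduler repeatedly picks the best sched_rt_entity; when that entity represents a group (its my_q is non-NULL, as group_rt_rq() reports) the walk descends into the group's rt_rq, and it stops at a task-level entity. Below is a toy user-space model of that descent over a hand-built two-level hierarchy; the "best" field is a stand-in for the real priority arrays.

#include <stdio.h>
#include <stddef.h>

struct rt_rq;

struct sched_rt_entity {
	const char *name;
	struct rt_rq *my_q;	/* non-NULL only for group entities */
};

struct rt_rq {
	struct sched_rt_entity *best;	/* stand-in for the priority arrays */
};

/* Stand-in for pick_next_rt_entity(): the real code scans the priority
 * bitmap; here each queue simply knows its best entity. */
static struct sched_rt_entity *pick_next_rt_entity(struct rt_rq *rt_rq)
{
	return rt_rq->best;
}

/* Mirrors the do/while loop shown above: descend through group entities
 * until a task-level entity (my_q == NULL) is reached. */
static struct sched_rt_entity *pick_task(struct rt_rq *root)
{
	struct rt_rq *rt_rq = root;
	struct sched_rt_entity *rt_se;

	do {
		rt_se = pick_next_rt_entity(rt_rq);
		rt_rq = rt_se->my_q;	/* group_rt_rq(rt_se) in the kernel */
	} while (rt_rq);

	return rt_se;
}

int main(void)
{
	struct sched_rt_entity task = { .name = "rt-task", .my_q = NULL };
	struct rt_rq group_q = { .best = &task };
	struct sched_rt_entity group = { .name = "group", .my_q = &group_q };
	struct rt_rq root = { .best = &group };

	printf("picked: %s\n", pick_task(&root)->name);
	return 0;
}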
rt_rq 2200 kernel/sched/sched.h extern void init_rt_rq(struct rt_rq *rt_rq);