rt_b               13 kernel/sched/rt.c static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
rt_b               19 kernel/sched/rt.c 	struct rt_bandwidth *rt_b =
rt_b               24 kernel/sched/rt.c 	raw_spin_lock(&rt_b->rt_runtime_lock);
rt_b               26 kernel/sched/rt.c 		overrun = hrtimer_forward_now(timer, rt_b->rt_period);
rt_b               30 kernel/sched/rt.c 		raw_spin_unlock(&rt_b->rt_runtime_lock);
rt_b               31 kernel/sched/rt.c 		idle = do_sched_rt_period_timer(rt_b, overrun);
rt_b               32 kernel/sched/rt.c 		raw_spin_lock(&rt_b->rt_runtime_lock);
rt_b               35 kernel/sched/rt.c 		rt_b->rt_period_active = 0;
rt_b               36 kernel/sched/rt.c 	raw_spin_unlock(&rt_b->rt_runtime_lock);
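
The fragments above (rt.c lines 19-36) all fall inside sched_rt_period_timer(), the hrtimer callback that drives RT bandwidth replenishment. A sketch of the whole callback, reconstructed from those fragments plus mainline context (exact details shift between kernel versions):

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	int idle = 0;
	int overrun;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	for (;;) {
		/* Advance the timer by whole rt_period steps; overrun is
		 * how many periods have elapsed since the last expiry. */
		overrun = hrtimer_forward_now(timer, rt_b->rt_period);
		if (!overrun)
			break;

		/* Refill per-CPU runtime with the bandwidth lock dropped. */
		raw_spin_unlock(&rt_b->rt_runtime_lock);
		idle = do_sched_rt_period_timer(rt_b, overrun);
		raw_spin_lock(&rt_b->rt_runtime_lock);
	}
	if (idle)
		rt_b->rt_period_active = 0;
	raw_spin_unlock(&rt_b->rt_runtime_lock);

	/* Re-arm the timer only while some rt_rq still needs refills. */
	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}
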
rt_b               41 kernel/sched/rt.c void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
rt_b               43 kernel/sched/rt.c 	rt_b->rt_period = ns_to_ktime(period);
rt_b               44 kernel/sched/rt.c 	rt_b->rt_runtime = runtime;
rt_b               46 kernel/sched/rt.c 	raw_spin_lock_init(&rt_b->rt_runtime_lock);
rt_b               48 kernel/sched/rt.c 	hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
rt_b               50 kernel/sched/rt.c 	rt_b->rt_period_timer.function = sched_rt_period_timer;
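
Lines 43-50 are most of the constructor already; filled out it looks roughly like this (the hrtimer mode flag is HRTIMER_MODE_REL_HARD in recent mainline, plain HRTIMER_MODE_REL in older trees):

void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);	/* replenishment period (ns -> ktime) */
	rt_b->rt_runtime = runtime;		/* runtime budget per period, in ns */

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_HARD);
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}
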
rt_b               53 kernel/sched/rt.c static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
rt_b               55 kernel/sched/rt.c 	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
rt_b               58 kernel/sched/rt.c 	raw_spin_lock(&rt_b->rt_runtime_lock);
rt_b               59 kernel/sched/rt.c 	if (!rt_b->rt_period_active) {
rt_b               60 kernel/sched/rt.c 		rt_b->rt_period_active = 1;
rt_b               69 kernel/sched/rt.c 		hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
rt_b               70 kernel/sched/rt.c 		hrtimer_start_expires(&rt_b->rt_period_timer,
rt_b               73 kernel/sched/rt.c 	raw_spin_unlock(&rt_b->rt_runtime_lock);
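
start_rt_bandwidth() (lines 53-73) arms that timer lazily: only when throttling is actually in effect and only if the period is not already running. A sketch, with the same version caveat on the hrtimer mode flag:

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	/* No timer needed if enforcement is off or runtime is unlimited. */
	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	if (!rt_b->rt_period_active) {
		rt_b->rt_period_active = 1;
		/* Align the first expiry to now, then start at that expiry. */
		hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
		hrtimer_start_expires(&rt_b->rt_period_timer,
				      HRTIMER_MODE_ABS_PINNED_HARD);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}
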
rt_b              106 kernel/sched/rt.c static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
rt_b              108 kernel/sched/rt.c 	hrtimer_cancel(&rt_b->rt_period_timer);
rt_b              553 kernel/sched/rt.c struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
rt_b              555 kernel/sched/rt.c 	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
rt_b              615 kernel/sched/rt.c struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
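
There are two definitions of sched_rt_period_rt_rq() (lines 553 and 615), selected by CONFIG_RT_GROUP_SCHED. With group scheduling the rt_bandwidth is embedded in a task_group, so container_of() recovers the group and the per-CPU rt_rq comes from its array; without it, a single def_rt_bandwidth covers the system and the mapping is just the CPU's root rt_rq. Roughly:

#ifdef CONFIG_RT_GROUP_SCHED
static inline struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}
#else /* !CONFIG_RT_GROUP_SCHED */
static inline struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}
#endif
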
rt_b              629 kernel/sched/rt.c 	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
rt_b              631 kernel/sched/rt.c 	return (hrtimer_active(&rt_b->rt_period_timer) ||
rt_b              632 kernel/sched/rt.c 		rt_rq->rt_time < rt_b->rt_runtime);
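
Lines 629-632 are essentially the whole body of sched_rt_bandwidth_account(), which the deadline class uses to decide whether RT runtime should still be charged against the bandwidth: it should, as long as the period timer is live or the rt_rq has not yet used up its budget.

bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

	/* Keep accounting while the period timer runs or budget remains. */
	return (hrtimer_active(&rt_b->rt_period_timer) ||
		rt_rq->rt_time < rt_b->rt_runtime);
}
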
rt_b              641 kernel/sched/rt.c 	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
rt_b              648 kernel/sched/rt.c 	raw_spin_lock(&rt_b->rt_runtime_lock);
rt_b              649 kernel/sched/rt.c 	rt_period = ktime_to_ns(rt_b->rt_period);
rt_b              651 kernel/sched/rt.c 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
rt_b              685 kernel/sched/rt.c 	raw_spin_unlock(&rt_b->rt_runtime_lock);
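
Lines 641-685 come from do_balance_runtime(), which lets an rt_rq that is about to be throttled borrow unused runtime from sibling CPUs in the same root domain: a 1/weight share of each sibling's spare time, capped so the borrower never holds more than one full period. A sketch close to recent mainline (older kernels return an int instead of void):

static void do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
	int i, weight = cpumask_weight(rd->span);
	u64 rt_period;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		raw_spin_lock(&iter->rt_runtime_lock);
		/* RUNTIME_INF marks a queue whose runtime was disabled (or
		 * never limited); nothing can be borrowed from it. */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/* Take a 1/weight share of the sibling's spare runtime, but
		 * never let our own budget grow past one full period. */
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			if (rt_rq->rt_runtime == rt_period) {
				raw_spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		raw_spin_unlock(&iter->rt_runtime_lock);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}
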
rt_b              701 kernel/sched/rt.c 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
rt_b              705 kernel/sched/rt.c 		raw_spin_lock(&rt_b->rt_runtime_lock);
rt_b              713 kernel/sched/rt.c 				rt_rq->rt_runtime == rt_b->rt_runtime)
rt_b              722 kernel/sched/rt.c 		want = rt_b->rt_runtime - rt_rq->rt_runtime;
rt_b              728 kernel/sched/rt.c 			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
rt_b              766 kernel/sched/rt.c 		raw_spin_unlock(&rt_b->rt_runtime_lock);
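
The hits at lines 701-766 are in __disable_runtime(), run when a CPU leaves its root domain: each of its rt_rqs reclaims the runtime it previously lent out ('want'), then opts out of the borrowing protocol by setting its runtime to RUNTIME_INF. A condensed sketch:

static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/* Nothing to reclaim if runtime is unlimited or untouched. */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
		    rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);

		/* 'want' is what this rt_rq lent out and must now take back. */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		/* Greedily pull runtime back from the other CPUs in the domain. */
		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
				continue;

			raw_spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
			raw_spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		BUG_ON(want);	/* runtime must never leak out of the system */
balanced:
		/* Pretend unlimited runtime so nobody tries to borrow from us. */
		rt_rq->rt_runtime = RUNTIME_INF;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}
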
rt_b              785 kernel/sched/rt.c 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
rt_b              787 kernel/sched/rt.c 		raw_spin_lock(&rt_b->rt_runtime_lock);
rt_b              789 kernel/sched/rt.c 		rt_rq->rt_runtime = rt_b->rt_runtime;
rt_b              793 kernel/sched/rt.c 		raw_spin_unlock(&rt_b->rt_runtime_lock);
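
Lines 785-793 are the counterpart, __enable_runtime(), run when the CPU rejoins: every rt_rq has its runtime reset to the group default and its throttle state cleared. Sketch:

static void __enable_runtime(struct rq *rq)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	/* Reset each runqueue's bandwidth settings to the group defaults. */
	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}
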
rt_b              812 kernel/sched/rt.c static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
rt_b              828 kernel/sched/rt.c 	if (rt_b == &root_task_group.rt_bandwidth)
rt_b              833 kernel/sched/rt.c 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
rt_b              843 kernel/sched/rt.c 			rt_rq->rt_runtime = rt_b->rt_runtime;
rt_b              890 kernel/sched/rt.c 	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
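
do_sched_rt_period_timer() (lines 812-890) is where the period timer's work actually happens: for every CPU served by this bandwidth group it knocks up to 'overrun' periods worth of runtime off the accumulated rt_time, unthrottles and re-enqueues the rt_rq once it fits inside its budget again, and tells the caller whether everything has gone idle so the timer can be allowed to die. Heavily abridged (the rq lock/clock handling, the runtime reset seen at line 843, and the skip optimisation are left out):

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1, throttled = 0;
	const struct cpumask *span = sched_rt_period_mask();

#ifdef CONFIG_RT_GROUP_SCHED
	/* The root group's timer must cover every online CPU. */
	if (rt_b == &root_task_group.rt_bandwidth)
		span = cpu_online_mask;
#endif
	for_each_cpu(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);

		if (rt_rq->rt_time) {
			u64 runtime;

			raw_spin_lock(&rt_rq->rt_runtime_lock);
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);
			runtime = rt_rq->rt_runtime;
			/* Forgive up to 'overrun' periods worth of runtime. */
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running) {
			idle = 0;
			if (!rt_rq_throttled(rt_rq))
				enqueue = 1;
		}
		if (rt_rq->rt_throttled)
			throttled = 1;

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
	}

	/* The timer may stop if nothing is throttled and enforcement is moot,
	 * or if every rt_rq has gone idle. */
	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
		return 1;

	return idle;
}
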
rt_b              924 kernel/sched/rt.c 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
rt_b              930 kernel/sched/rt.c 		if (likely(rt_b->rt_runtime)) {
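
The last rt.c hits (lines 924 and 930) sit in sched_rt_runtime_exceeded(), called from update_curr_rt() as runtime is charged: when an rt_rq overruns its budget it gets throttled and dequeued, unless its rt_b->rt_runtime is zero and the time was only accrued through priority boosting. Roughly:

static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (runtime >= sched_rt_period(rt_rq))
		return 0;

	/* Try to borrow runtime from other CPUs before throttling. */
	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_time > runtime) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		/* Groups with no runtime of their own only accrue rt_time via
		 * PI boosting; don't throttle them, just drop the charge. */
		if (likely(rt_b->rt_runtime)) {
			rt_rq->rt_throttled = 1;
			printk_deferred_once("sched: RT throttling activated\n");
		} else {
			rt_rq->rt_time = 0;
		}

		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}
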
rt_b             1881 kernel/sched/sched.h extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);