this_rq           295 kernel/sched/core.c 	if (rq == this_rq()) {
this_rq          1541 kernel/sched/core.c 	struct rq *rq = this_rq();
this_rq          2178 kernel/sched/core.c 	rq = this_rq();
this_rq          2291 kernel/sched/core.c 	struct rq *rq = this_rq();
this_rq          2317 kernel/sched/core.c 	if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
this_rq          2340 kernel/sched/core.c 		this_rq()->idle_balance = 1;
this_rq          3182 kernel/sched/core.c 	struct rq *rq = this_rq();
this_rq          3897 kernel/sched/core.c 	schedstat_inc(this_rq()->sched_count);
this_rq          5700 kernel/sched/core.c 	rq = this_rq();
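
The kernel/sched/core.c hits above, and every other hit in this listing, resolve through the same per-CPU accessor: this_rq() is not a function but a per-CPU pointer lookup. In trees of this vintage it is defined in kernel/sched/sched.h roughly as the simplified excerpt below (shown for orientation; verify against the exact tree being indexed):

    DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

    /* Runqueue of a given CPU, and shorthand for the current CPU's runqueue. */
    #define cpu_rq(cpu)     (&per_cpu(runqueues, (cpu)))
    #define this_rq()       this_cpu_ptr(&runqueues)
    #define task_rq(p)      cpu_rq(task_cpu(p))
    #define raw_rq()        raw_cpu_ptr(&runqueues)

Because the result depends on which CPU the caller happens to be running on, nearly every use below sits in a context where migration is excluded (the rq lock held, preemption or interrupts disabled), which is why the functions in this listing can safely cache the pointer in a local struct rq *rq.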
this_rq           221 kernel/sched/cputime.c 	struct rq *rq = this_rq();
this_rq           241 kernel/sched/cputime.c 		steal -= this_rq()->prev_steal_time;
this_rq           244 kernel/sched/cputime.c 		this_rq()->prev_steal_time += steal;
this_rq           395 kernel/sched/cputime.c 	struct rq *rq = this_rq();
this_rq           478 kernel/sched/cputime.c 	struct rq *rq = this_rq();
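
The prev_steal_time hits at cputime.c:241 and :244 implement a delta-against-a-monotonic-counter pattern: the hypervisor's steal clock only ever grows, so the runqueue remembers how much has already been accounted and each reading charges only the difference. A minimal userspace model of that bookkeeping follows; names such as read_steal_clock() and the globals are stand-ins for illustration, not kernel APIs.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the hypervisor's monotonically increasing steal clock. */
    static uint64_t fake_steal_clock;

    static uint64_t read_steal_clock(void)
    {
        return fake_steal_clock;
    }

    /* Models rq->prev_steal_time: how much steal time was already accounted. */
    static uint64_t prev_steal_time;

    /* Returns the newly observed steal time, mirroring the delta logic
     * around kernel/sched/cputime.c:241-244. */
    static uint64_t account_steal_delta(void)
    {
        uint64_t steal = read_steal_clock();

        steal -= prev_steal_time;      /* only what was not yet accounted */
        prev_steal_time += steal;      /* remember it for the next tick   */
        return steal;
    }

    int main(void)
    {
        fake_steal_clock = 100;
        printf("tick 1: +%llu\n", (unsigned long long)account_steal_delta());
        fake_steal_clock = 130;
        printf("tick 2: +%llu\n", (unsigned long long)account_steal_delta());
        return 0;
    }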
this_rq          2134 kernel/sched/deadline.c static void pull_dl_task(struct rq *this_rq)
this_rq          2136 kernel/sched/deadline.c 	int this_cpu = this_rq->cpu, cpu;
this_rq          2142 kernel/sched/deadline.c 	if (likely(!dl_overloaded(this_rq)))
this_rq          2151 kernel/sched/deadline.c 	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
this_rq          2161 kernel/sched/deadline.c 		if (this_rq->dl.dl_nr_running &&
this_rq          2162 kernel/sched/deadline.c 		    dl_time_before(this_rq->dl.earliest_dl.curr,
this_rq          2167 kernel/sched/deadline.c 		double_lock_balance(this_rq, src_rq);
this_rq          2184 kernel/sched/deadline.c 		    (!this_rq->dl.dl_nr_running ||
this_rq          2186 kernel/sched/deadline.c 				    this_rq->dl.earliest_dl.curr))) {
this_rq          2202 kernel/sched/deadline.c 			activate_task(this_rq, p, 0);
this_rq          2208 kernel/sched/deadline.c 		double_unlock_balance(this_rq, src_rq);
this_rq          2212 kernel/sched/deadline.c 		resched_curr(this_rq);
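
The pull_dl_task() hits above outline the deadline pull path: bail out unless some CPU is DL-overloaded (line 2142), walk rd->dlo_mask (2151), and only bother taking a source runqueue's lock if pulling from it could give this_rq something more urgent to run (2161-2162). The comparison is the wrap-safe dl_time_before(); in the kernel it is made against the source CPU's earliest *pushable* deadline, which the toy model below collapses into a single field. The struct here is a stand-in, not the kernel's struct rq.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Wrap-safe "a is earlier than b", as in include/linux/sched/deadline.h. */
    static inline bool dl_time_before(uint64_t a, uint64_t b)
    {
        return (int64_t)(a - b) < 0;
    }

    /* Stand-in for the few struct rq fields the pull decision looks at. */
    struct toy_rq {
        unsigned int dl_nr_running;   /* models rq->dl.dl_nr_running    */
        uint64_t earliest_dl;         /* models rq->dl.earliest_dl.curr */
    };

    /*
     * Mirrors the skip test around kernel/sched/deadline.c:2161-2162: a
     * source CPU whose earliest deadline is no earlier than ours cannot
     * give us anything that would preempt what is already here.
     */
    static bool worth_pulling_from(const struct toy_rq *this_rq,
                                   const struct toy_rq *src_rq)
    {
        if (this_rq->dl_nr_running &&
            dl_time_before(this_rq->earliest_dl, src_rq->earliest_dl))
            return false;
        return true;
    }

    int main(void)
    {
        struct toy_rq me  = { .dl_nr_running = 1, .earliest_dl = 1000 };
        struct toy_rq src = { .dl_nr_running = 2, .earliest_dl = 2000 };

        /* src's earliest deadline is later than ours: not worth pulling. */
        printf("pull from src? %s\n",
               worth_pulling_from(&me, &src) ? "yes" : "no");
        return 0;
    }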
this_rq          5976 kernel/sched/fair.c 	avg_idle = this_rq()->avg_idle / 512;
this_rq          8829 kernel/sched/fair.c static int load_balance(int this_cpu, struct rq *this_rq,
this_rq          8843 kernel/sched/fair.c 		.dst_rq		= this_rq,
this_rq          9529 kernel/sched/fair.c 	SCHED_WARN_ON(rq != this_rq());
this_rq          9624 kernel/sched/fair.c static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
this_rq          9632 kernel/sched/fair.c 	int this_cpu = this_rq->cpu;
this_rq          9697 kernel/sched/fair.c 		has_blocked_load |= this_rq->has_blocked_load;
this_rq          9701 kernel/sched/fair.c 		rebalance_domains(this_rq, CPU_IDLE);
this_rq          9729 kernel/sched/fair.c static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
this_rq          9731 kernel/sched/fair.c 	int this_cpu = this_rq->cpu;
this_rq          9747 kernel/sched/fair.c 	_nohz_idle_balance(this_rq, flags, idle);
this_rq          9752 kernel/sched/fair.c static void nohz_newidle_balance(struct rq *this_rq)
this_rq          9754 kernel/sched/fair.c 	int this_cpu = this_rq->cpu;
this_rq          9764 kernel/sched/fair.c 	if (this_rq->avg_idle < sysctl_sched_migration_cost)
this_rq          9772 kernel/sched/fair.c 	raw_spin_unlock(&this_rq->lock);
this_rq          9779 kernel/sched/fair.c 	if (!_nohz_idle_balance(this_rq, NOHZ_STATS_KICK, CPU_NEWLY_IDLE))
this_rq          9781 kernel/sched/fair.c 	raw_spin_lock(&this_rq->lock);
this_rq          9787 kernel/sched/fair.c static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
this_rq          9792 kernel/sched/fair.c static inline void nohz_newidle_balance(struct rq *this_rq) { }
this_rq          9799 kernel/sched/fair.c int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
this_rq          9802 kernel/sched/fair.c 	int this_cpu = this_rq->cpu;
this_rq          9807 kernel/sched/fair.c 	update_misfit_status(NULL, this_rq);
this_rq          9812 kernel/sched/fair.c 	this_rq->idle_stamp = rq_clock(this_rq);
this_rq          9826 kernel/sched/fair.c 	rq_unpin_lock(this_rq, rf);
this_rq          9828 kernel/sched/fair.c 	if (this_rq->avg_idle < sysctl_sched_migration_cost ||
this_rq          9829 kernel/sched/fair.c 	    !READ_ONCE(this_rq->rd->overload)) {
this_rq          9832 kernel/sched/fair.c 		sd = rcu_dereference_check_sched_domain(this_rq->sd);
this_rq          9837 kernel/sched/fair.c 		nohz_newidle_balance(this_rq);
this_rq          9842 kernel/sched/fair.c 	raw_spin_unlock(&this_rq->lock);
this_rq          9853 kernel/sched/fair.c 		if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
this_rq          9861 kernel/sched/fair.c 			pulled_task = load_balance(this_cpu, this_rq,
this_rq          9878 kernel/sched/fair.c 		if (pulled_task || this_rq->nr_running > 0)
this_rq          9883 kernel/sched/fair.c 	raw_spin_lock(&this_rq->lock);
this_rq          9885 kernel/sched/fair.c 	if (curr_cost > this_rq->max_idle_balance_cost)
this_rq          9886 kernel/sched/fair.c 		this_rq->max_idle_balance_cost = curr_cost;
this_rq          9894 kernel/sched/fair.c 	if (this_rq->cfs.h_nr_running && !pulled_task)
this_rq          9898 kernel/sched/fair.c 	if (time_after(this_rq->next_balance, next_balance))
this_rq          9899 kernel/sched/fair.c 		this_rq->next_balance = next_balance;
this_rq          9902 kernel/sched/fair.c 	if (this_rq->nr_running != this_rq->cfs.h_nr_running)
this_rq          9906 kernel/sched/fair.c 		this_rq->idle_stamp = 0;
this_rq          9908 kernel/sched/fair.c 	rq_repin_lock(this_rq, rf);
this_rq          9919 kernel/sched/fair.c 	struct rq *this_rq = this_rq();
this_rq          9920 kernel/sched/fair.c 	enum cpu_idle_type idle = this_rq->idle_balance ?
this_rq          9931 kernel/sched/fair.c 	if (nohz_idle_balance(this_rq, idle))
this_rq          9935 kernel/sched/fair.c 	update_blocked_averages(this_rq->cpu);
this_rq          9936 kernel/sched/fair.c 	rebalance_domains(this_rq, idle);
this_rq          10005 kernel/sched/fair.c 	struct rq *rq = this_rq();
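
The newidle_balance() hits show the cost gating that keeps a newly idle CPU from spending its whole idle window looking for work: the walk over sched domains stops as soon as the accumulated balancing cost plus the next domain's worst observed cost exceeds the expected idle time (avg_idle), and the worst total cost seen is remembered in max_idle_balance_cost (around fair.c:9853-9886). Below is a compact model of just that budget check; the domain costs and the toy structs are illustrative stand-ins, not kernel data.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the fields newidle_balance() consults. */
    struct toy_rq {
        uint64_t avg_idle;               /* expected idle time, ns        */
        uint64_t max_idle_balance_cost;  /* worst balancing cost seen, ns */
    };

    struct toy_domain {
        const char *name;
        uint64_t max_newidle_lb_cost;    /* worst cost of balancing here, ns */
    };

    /*
     * Walk the domains the way fair.c:9853-9886 does: stop as soon as
     * balancing would cost more than the idle time we expect to have,
     * and remember the total cost for future avg_idle comparisons.
     */
    static void toy_newidle_balance(struct toy_rq *rq,
                                    const struct toy_domain *sds, int nr)
    {
        uint64_t curr_cost = 0;

        for (int i = 0; i < nr; i++) {
            if (rq->avg_idle < curr_cost + sds[i].max_newidle_lb_cost) {
                printf("stop before %s: budget exhausted\n", sds[i].name);
                break;
            }
            printf("balance at %s (cost %llu)\n", sds[i].name,
                   (unsigned long long)sds[i].max_newidle_lb_cost);
            curr_cost += sds[i].max_newidle_lb_cost;
        }

        if (curr_cost > rq->max_idle_balance_cost)
            rq->max_idle_balance_cost = curr_cost;
    }

    int main(void)
    {
        struct toy_domain doms[] = {
            { "SMT", 2000 }, { "MC", 15000 }, { "NUMA", 400000 },
        };
        struct toy_rq rq = { .avg_idle = 50000, .max_idle_balance_cost = 0 };

        toy_newidle_balance(&rq, doms, 3);
        printf("max_idle_balance_cost = %llu\n",
               (unsigned long long)rq.max_idle_balance_cost);
        return 0;
    }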
this_rq            22 kernel/sched/idle.c 	idle_set_state(this_rq(), idle_state);
this_rq            79 kernel/sched/loadavg.c long calc_load_fold_active(struct rq *this_rq, long adjust)
this_rq            83 kernel/sched/loadavg.c 	nr_active = this_rq->nr_running - adjust;
this_rq            84 kernel/sched/loadavg.c 	nr_active += (long)this_rq->nr_uninterruptible;
this_rq            86 kernel/sched/loadavg.c 	if (nr_active != this_rq->calc_load_active) {
this_rq            87 kernel/sched/loadavg.c 		delta = nr_active - this_rq->calc_load_active;
this_rq            88 kernel/sched/loadavg.c 		this_rq->calc_load_active = nr_active;
this_rq           252 kernel/sched/loadavg.c 	calc_load_nohz_fold(this_rq());
this_rq           266 kernel/sched/loadavg.c 	struct rq *this_rq = this_rq();
this_rq           271 kernel/sched/loadavg.c 	this_rq->calc_load_update = READ_ONCE(calc_load_update);
this_rq           272 kernel/sched/loadavg.c 	if (time_before(jiffies, this_rq->calc_load_update))
this_rq           280 kernel/sched/loadavg.c 	if (time_before(jiffies, this_rq->calc_load_update + 10))
this_rq           281 kernel/sched/loadavg.c 		this_rq->calc_load_update += LOAD_FREQ;
this_rq           386 kernel/sched/loadavg.c void calc_global_load_tick(struct rq *this_rq)
this_rq           390 kernel/sched/loadavg.c 	if (time_before(jiffies, this_rq->calc_load_update))
this_rq           393 kernel/sched/loadavg.c 	delta  = calc_load_fold_active(this_rq, 0);
this_rq           397 kernel/sched/loadavg.c 	this_rq->calc_load_update += LOAD_FREQ;
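
calc_load_fold_active() is almost fully visible in the hits at loadavg.c:79-88: a runqueue's contribution to the load average is nr_running plus nr_uninterruptible, and only the change since the last fold is returned, so the global calc_load_tasks counter needs just an atomic add of the delta (as calc_global_load_tick() does every LOAD_FREQ). A runnable model of the fold, with a plain long standing in for the kernel's atomic counter:

    #include <stdio.h>

    /* Stand-in for the runqueue fields the fold reads and updates. */
    struct toy_rq {
        long nr_running;
        long nr_uninterruptible;
        long calc_load_active;   /* contribution already folded in */
    };

    /* Global sum the deltas are folded into (calc_load_tasks in the kernel,
     * where the add is atomic). */
    static long calc_load_tasks;

    /* Mirrors kernel/sched/loadavg.c:79-88. */
    static long calc_load_fold_active(struct toy_rq *rq, long adjust)
    {
        long nr_active, delta = 0;

        nr_active = rq->nr_running - adjust;
        nr_active += rq->nr_uninterruptible;

        if (nr_active != rq->calc_load_active) {
            delta = nr_active - rq->calc_load_active;
            rq->calc_load_active = nr_active;
        }
        return delta;
    }

    int main(void)
    {
        struct toy_rq rq = { .nr_running = 3, .nr_uninterruptible = 1 };

        calc_load_tasks += calc_load_fold_active(&rq, 0);   /* +4 */
        rq.nr_running = 2;
        calc_load_tasks += calc_load_fold_active(&rq, 0);   /* -1 */
        printf("calc_load_tasks = %ld\n", calc_load_tasks); /* 3  */
        return 0;
    }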
this_rq           261 kernel/sched/rt.c static void pull_rt_task(struct rq *this_rq);
this_rq           423 kernel/sched/rt.c static inline void pull_rt_task(struct rq *this_rq)
this_rq           543 kernel/sched/rt.c 	return this_rq()->rd->span;
this_rq          2020 kernel/sched/rt.c 	rq = this_rq();
this_rq          2049 kernel/sched/rt.c static void pull_rt_task(struct rq *this_rq)
this_rq          2051 kernel/sched/rt.c 	int this_cpu = this_rq->cpu, cpu;
this_rq          2055 kernel/sched/rt.c 	int rt_overload_count = rt_overloaded(this_rq);
this_rq          2068 kernel/sched/rt.c 	    cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
this_rq          2073 kernel/sched/rt.c 		tell_cpu_to_push(this_rq);
this_rq          2078 kernel/sched/rt.c 	for_each_cpu(cpu, this_rq->rd->rto_mask) {
this_rq          2092 kernel/sched/rt.c 		    this_rq->rt.highest_prio.curr)
this_rq          2100 kernel/sched/rt.c 		double_lock_balance(this_rq, src_rq);
this_rq          2112 kernel/sched/rt.c 		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
this_rq          2131 kernel/sched/rt.c 			activate_task(this_rq, p, 0);
this_rq          2140 kernel/sched/rt.c 		double_unlock_balance(this_rq, src_rq);
this_rq          2144 kernel/sched/rt.c 		resched_curr(this_rq);
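
pull_rt_task() is the RT twin of pull_dl_task() above, with the deadline comparison replaced by a priority comparison: a candidate is pulled only if its prio is numerically lower, i.e. a higher RT priority, than the best this_rq already has (the tests at rt.c:2092 and :2112), and when the push-IPI path is in use the pull turns into tell_cpu_to_push() at :2073, asking overloaded CPUs to push instead. A tiny sketch of the ordering convention, with made-up priority values:

    #include <stdbool.h>
    #include <stdio.h>

    /* Lower numeric value means higher RT priority in the kernel. */
    static bool worth_pulling(int candidate_prio, int this_rq_highest_prio)
    {
        /* Mirrors the "p->prio < this_rq->rt.highest_prio.curr" test. */
        return candidate_prio < this_rq_highest_prio;
    }

    int main(void)
    {
        /* Candidate at RT prio 10 vs a runqueue whose best task is at 20. */
        printf("pull? %s\n", worth_pulling(10, 20) ? "yes" : "no");  /* yes */
        printf("pull? %s\n", worth_pulling(30, 20) ? "yes" : "no");  /* no  */
        return 0;
    }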
this_rq            96 kernel/sched/sched.h extern void calc_global_load_tick(struct rq *this_rq);
this_rq            97 kernel/sched/sched.h extern long calc_load_fold_active(struct rq *this_rq, long adjust);
this_rq          1264 kernel/sched/sched.h 	rq = this_rq();
this_rq          1468 kernel/sched/sched.h extern int newidle_balance(struct rq *this_rq, struct rq_flags *rf);
this_rq          1474 kernel/sched/sched.h static inline int newidle_balance(struct rq *this_rq, struct rq_flags *rf) { return 0; }
this_rq          1744 kernel/sched/sched.h 	void (*task_woken)(struct rq *this_rq, struct task_struct *task);
this_rq          1762 kernel/sched/sched.h 	void (*switched_from)(struct rq *this_rq, struct task_struct *task);
this_rq          1763 kernel/sched/sched.h 	void (*switched_to)  (struct rq *this_rq, struct task_struct *task);
this_rq          1764 kernel/sched/sched.h 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
this_rq          2007 kernel/sched/sched.h static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
this_rq          2008 kernel/sched/sched.h 	__releases(this_rq->lock)
this_rq          2010 kernel/sched/sched.h 	__acquires(this_rq->lock)
this_rq          2012 kernel/sched/sched.h 	raw_spin_unlock(&this_rq->lock);
this_rq          2013 kernel/sched/sched.h 	double_rq_lock(this_rq, busiest);
this_rq          2026 kernel/sched/sched.h static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
this_rq          2027 kernel/sched/sched.h 	__releases(this_rq->lock)
this_rq          2029 kernel/sched/sched.h 	__acquires(this_rq->lock)
this_rq          2034 kernel/sched/sched.h 		if (busiest < this_rq) {
this_rq          2035 kernel/sched/sched.h 			raw_spin_unlock(&this_rq->lock);
this_rq          2037 kernel/sched/sched.h 			raw_spin_lock_nested(&this_rq->lock,
this_rq          2052 kernel/sched/sched.h static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
this_rq          2056 kernel/sched/sched.h 		raw_spin_unlock(&this_rq->lock);
this_rq          2060 kernel/sched/sched.h 	return _double_lock_balance(this_rq, busiest);
this_rq          2063 kernel/sched/sched.h static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
this_rq          2067 kernel/sched/sched.h 	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
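
The two _double_lock_balance() variants close the listing. The second (sched.h:2026-2037) is the classic address-ordered double lock: the caller already holds this_rq->lock, and if the busiest runqueue's lock sorts lower it must drop its own lock and reacquire both in ascending address order, otherwise two CPUs pulling from each other could deadlock. Below is a pthread model of the same ordering rule; toy_rq and its fields are stand-ins, and the kernel's raw_spin_lock_nested()/lockdep annotations have no userspace analogue here. Build with -pthread.

    #include <pthread.h>
    #include <stdio.h>

    struct toy_rq {
        pthread_mutex_t lock;
        int cpu;
    };

    /*
     * Called with this_rq->lock held; returns with both locks held.
     * Mirrors kernel/sched/sched.h:2026-2037: always end up taking the
     * lower-addressed lock first. Returns 1 if this_rq->lock was dropped,
     * in which case the caller must revalidate its state.
     */
    static int double_lock_balance(struct toy_rq *this_rq, struct toy_rq *busiest)
    {
        int dropped = 0;

        if (pthread_mutex_trylock(&busiest->lock) != 0) {
            if (busiest < this_rq) {
                /* Wrong order: back off and retake both, lowest first. */
                pthread_mutex_unlock(&this_rq->lock);
                pthread_mutex_lock(&busiest->lock);
                pthread_mutex_lock(&this_rq->lock);
                dropped = 1;
            } else {
                pthread_mutex_lock(&busiest->lock);
            }
        }
        return dropped;
    }

    static void double_unlock_balance(struct toy_rq *this_rq, struct toy_rq *busiest)
    {
        pthread_mutex_unlock(&busiest->lock);
        /* this_rq->lock stays held, exactly as in the kernel helper. */
        (void)this_rq;
    }

    int main(void)
    {
        struct toy_rq a = { .cpu = 0 }, b = { .cpu = 1 };

        pthread_mutex_init(&a.lock, NULL);
        pthread_mutex_init(&b.lock, NULL);

        pthread_mutex_lock(&a.lock);              /* caller owns its own rq */
        int dropped = double_lock_balance(&a, &b);
        printf("had to drop and retake: %s\n", dropped ? "yes" : "no");
        double_unlock_balance(&a, &b);
        pthread_mutex_unlock(&a.lock);
        return 0;
    }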