Lines matching refs:rq (each entry: source line number, the matching line, then the enclosing function and reference kind as reported by the cross-referencer)

17 struct rq;
29 extern void calc_global_load_tick(struct rq *this_rq);
30 extern long calc_load_fold_active(struct rq *this_rq);
33 extern void update_cpu_load_active(struct rq *this_rq);
35 static inline void update_cpu_load_active(struct rq *this_rq) { } in update_cpu_load_active()
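
These entries appear to come from kernel/sched/sched.h of the Linux 4.x era. The load hooks above (calc_global_load_tick(), update_cpu_load_active(), with a no-op stub when the feature is configured out) are driven once per tick; a paraphrased, simplified sketch of the call site in kernel/sched/core.c:

    void scheduler_tick(void)
    {
        int cpu = smp_processor_id();
        struct rq *rq = cpu_rq(cpu);
        struct task_struct *curr = rq->curr;

        raw_spin_lock(&rq->lock);
        update_rq_clock(rq);
        curr->sched_class->task_tick(rq, curr, 0);
        update_cpu_load_active(rq);     /* per-CPU load averages */
        calc_global_load_tick(rq);      /* fold into the global loadavg */
        raw_spin_unlock(&rq->lock);

        trigger_load_balance(rq);       /* CONFIG_SMP only */
    }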
399 struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */ member
471 struct rq *rq; member
559 struct rq { struct
693 static inline int cpu_of(struct rq *rq) in cpu_of() argument
696 return rq->cpu; in cpu_of()
702 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
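
DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues) is the per-CPU runqueue array; the same header wraps it in accessor macros. From memory of this header (verify against your tree):

    #define cpu_rq(cpu)     (&per_cpu(runqueues, (cpu)))
    #define this_rq()       this_cpu_ptr(&runqueues)
    #define task_rq(p)      cpu_rq(task_cpu(p))
    #define cpu_curr(cpu)   (cpu_rq(cpu)->curr)
    #define raw_rq()        raw_cpu_ptr(&runqueues)

cpu_of() above is the inverse direction: it recovers the CPU number from an rq (rq->cpu on SMP, 0 otherwise).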
710 static inline u64 __rq_clock_broken(struct rq *rq) in __rq_clock_broken() argument
712 return READ_ONCE(rq->clock); in __rq_clock_broken()
715 static inline u64 rq_clock(struct rq *rq) in rq_clock() argument
717 lockdep_assert_held(&rq->lock); in rq_clock()
718 return rq->clock; in rq_clock()
721 static inline u64 rq_clock_task(struct rq *rq) in rq_clock_task() argument
723 lockdep_assert_held(&rq->lock); in rq_clock_task()
724 return rq->clock_task; in rq_clock_task()
730 static inline void rq_clock_skip_update(struct rq *rq, bool skip) in rq_clock_skip_update() argument
732 lockdep_assert_held(&rq->lock); in rq_clock_skip_update()
734 rq->clock_skip_update |= RQCF_REQ_SKIP; in rq_clock_skip_update()
736 rq->clock_skip_update &= ~RQCF_REQ_SKIP; in rq_clock_skip_update()
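
The clock accessors form a small discipline: rq->clock and rq->clock_task only advance when update_rq_clock() is called under rq->lock; __rq_clock_broken() is the deliberately named escape hatch for lockless readers; rq_clock_skip_update() requests that the next update be skipped (RQCF_REQ_SKIP). A minimal usage sketch, with a hypothetical helper name:

    static u64 read_task_clock(struct rq *rq)  /* hypothetical helper */
    {
        u64 now;

        raw_spin_lock(&rq->lock);
        update_rq_clock(rq);            /* refresh rq->clock / rq->clock_task */
        now = rq_clock_task(rq);        /* task time: excludes irq/steal time */
        raw_spin_unlock(&rq->lock);

        return now;
    }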
766 queue_balance_callback(struct rq *rq, in queue_balance_callback() argument
768 void (*func)(struct rq *rq)) in queue_balance_callback() argument
770 lockdep_assert_held(&rq->lock); in queue_balance_callback()
776 head->next = rq->balance_callback; in queue_balance_callback()
777 rq->balance_callback = head; in queue_balance_callback()
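
queue_balance_callback() chains work onto rq->balance_callback to run after rq->lock is dropped, since balancing may need to take other runqueue locks. The RT class uses it roughly like this (paraphrased from kernel/sched/rt.c):

    static DEFINE_PER_CPU(struct callback_head, rt_push_head);

    static void push_rt_tasks(struct rq *rq);  /* runs once rq->lock is released */

    static inline void queue_push_tasks(struct rq *rq)
    {
        if (!has_pushable_tasks(rq))
            return;

        queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
    }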
1025 static inline int task_current(struct rq *rq, struct task_struct *p) in task_current() argument
1027 return rq->curr == p; in task_current()
1030 static inline int task_running(struct rq *rq, struct task_struct *p) in task_running() argument
1035 return task_current(rq, p); in task_running()
1056 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) in prepare_lock_switch() argument
1068 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) in finish_lock_switch() argument
1085 rq->lock.owner = current; in finish_lock_switch()
1092 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); in finish_lock_switch()
1094 raw_spin_unlock_irq(&rq->lock); in finish_lock_switch()
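
prepare_lock_switch()/finish_lock_switch() bracket a context switch: the rq lock taken before the switch is released by the incoming task afterwards, so lock ownership has to be handed over explicitly. A simplified reconstruction of the finish side from kernels of this era:

    static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
    {
    #ifdef CONFIG_SMP
        /*
         * After ->on_cpu is cleared, prev may be migrated; the release
         * orders the store after the switch has fully completed.
         */
        smp_store_release(&prev->on_cpu, 0);
    #endif
    #ifdef CONFIG_DEBUG_SPINLOCK
        /* rq->lock was taken by prev but is released by current. */
        rq->lock.owner = current;
    #endif
        /* Re-acquire the lockdep dependency that got 'carried over'. */
        spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

        raw_spin_unlock_irq(&rq->lock);
    }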
1175 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1176 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
1177 void (*yield_task) (struct rq *rq);
1178 bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
1180 void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
1190 struct task_struct * (*pick_next_task) (struct rq *rq,
1192 void (*put_prev_task) (struct rq *rq, struct task_struct *p);
1199 void (*task_woken) (struct rq *this_rq, struct task_struct *task);
1204 void (*rq_online)(struct rq *rq);
1205 void (*rq_offline)(struct rq *rq);
1208 void (*set_curr_task) (struct rq *rq);
1209 void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
1218 void (*switched_from) (struct rq *this_rq, struct task_struct *task);
1219 void (*switched_to) (struct rq *this_rq, struct task_struct *task);
1220 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1223 unsigned int (*get_rr_interval) (struct rq *rq,
1226 void (*update_curr) (struct rq *rq);
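
These function pointers are the struct sched_class vtable; the core scheduler never calls a policy directly, it walks the classes in priority order (stop, deadline, rt, fair, idle). A paraphrased sketch of the dispatch inside pick_next_task() in kernel/sched/core.c:

    const struct sched_class *class;
    struct task_struct *p;

    again:
        for_each_class(class) {
            p = class->pick_next_task(rq, prev);
            if (p) {
                if (unlikely(p == RETRY_TASK))
                    goto again;     /* class state changed under us */
                return p;
            }
        }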
1233 static inline void put_prev_task(struct rq *rq, struct task_struct *prev) in put_prev_task() argument
1235 prev->sched_class->put_prev_task(rq, prev); in put_prev_task()
1253 extern void trigger_load_balance(struct rq *rq);
1255 extern void idle_enter_fair(struct rq *this_rq);
1256 extern void idle_exit_fair(struct rq *this_rq);
1262 static inline void idle_enter_fair(struct rq *rq) { } in idle_enter_fair() argument
1263 static inline void idle_exit_fair(struct rq *rq) { } in idle_exit_fair() argument
1268 static inline void idle_set_state(struct rq *rq, in idle_set_state() argument
1271 rq->idle_state = idle_state; in idle_set_state()
1274 static inline struct cpuidle_state *idle_get_state(struct rq *rq) in idle_get_state() argument
1277 return rq->idle_state; in idle_get_state()
1280 static inline void idle_set_state(struct rq *rq, in idle_set_state() argument
1285 static inline struct cpuidle_state *idle_get_state(struct rq *rq) in idle_get_state() argument
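
idle_set_state()/idle_get_state() let cpuidle publish on the runqueue which C-state the CPU is entering, so scheduler paths can weigh wakeup cost; the stubs compile it away without CONFIG_CPU_IDLE. Paraphrased from the idle loop in kernel/sched/idle.c:

    next_state = cpuidle_select(drv, dev);
    idle_set_state(this_rq(), &drv->states[next_state]);  /* visible to the scheduler */
    entered_state = cpuidle_enter(drv, dev, next_state);
    idle_set_state(this_rq(), NULL);                      /* back from idle */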
1299 extern void resched_curr(struct rq *rq);
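
resched_curr() is how anything asks for preemption of rq's current task: set TIF_NEED_RESCHED locally, or kick a remote CPU with an IPI unless it is already polling on the flag. A simplified reconstruction from kernel/sched/core.c:

    void resched_curr(struct rq *rq)
    {
        struct task_struct *curr = rq->curr;
        int cpu;

        lockdep_assert_held(&rq->lock);

        if (test_tsk_need_resched(curr))
            return;                     /* already pending */

        cpu = cpu_of(rq);
        if (cpu == smp_processor_id()) {
            set_tsk_need_resched(curr);
            set_preempt_need_resched();
            return;
        }

        if (set_nr_and_not_polling(curr))
            smp_send_reschedule(cpu);   /* remote CPU needs an IPI */
    }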
1313 static inline void add_nr_running(struct rq *rq, unsigned count) in add_nr_running() argument
1315 unsigned prev_nr = rq->nr_running; in add_nr_running()
1317 rq->nr_running = prev_nr + count; in add_nr_running()
1319 if (prev_nr < 2 && rq->nr_running >= 2) { in add_nr_running()
1321 if (!rq->rd->overload) in add_nr_running()
1322 rq->rd->overload = true; in add_nr_running()
1326 if (tick_nohz_full_cpu(rq->cpu)) { in add_nr_running()
1335 tick_nohz_full_kick_cpu(rq->cpu); in add_nr_running()
1341 static inline void sub_nr_running(struct rq *rq, unsigned count) in sub_nr_running() argument
1343 rq->nr_running -= count; in sub_nr_running()
1346 static inline void rq_last_tick_reset(struct rq *rq) in rq_last_tick_reset() argument
1349 rq->last_sched_tick = jiffies; in rq_last_tick_reset()
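
add_nr_running()/sub_nr_running() are called from each class's enqueue/dequeue paths; the 1-to-2 transition marks the root domain overloaded (a pull hint for idle balancing) and kicks nohz-full CPUs, which must restart the tick for SCHED_OTHER fairness. A heavily simplified sketch of the caller side, loosely after kernel/sched/fair.c (the real code walks the sched-entity hierarchy first):

    static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
    {
        /* (cfs_rq / sched_entity enqueue walk omitted) */
        add_nr_running(rq, 1);  /* one more runnable task on this rq */
    }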
1353 extern void update_rq_clock(struct rq *rq);
1355 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
1356 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
1358 extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
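
activate_task()/deactivate_task() move a task on and off the runqueue, and check_preempt_curr() then asks the task's class whether the newcomer should preempt rq->curr. Paraphrased from the wakeup path (ttwu_do_activate()/ttwu_do_wakeup() in kernel/sched/core.c):

    activate_task(rq, p, ENQUEUE_WAKEUP);
    p->on_rq = TASK_ON_RQ_QUEUED;
    check_preempt_curr(rq, p, wake_flags);  /* may set TIF_NEED_RESCHED */
    p->state = TASK_RUNNING;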
1376 static inline int hrtick_enabled(struct rq *rq) in hrtick_enabled() argument
1380 if (!cpu_active(cpu_of(rq))) in hrtick_enabled()
1382 return hrtimer_is_hres_active(&rq->hrtick_timer); in hrtick_enabled()
1385 void hrtick_start(struct rq *rq, u64 delay);
1389 static inline int hrtick_enabled(struct rq *rq) in hrtick_enabled() argument
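
hrtick_enabled()/hrtick_start() arm a high-resolution timer to preempt exactly when the current task's slice runs out, instead of waiting for the next jiffies tick; the stub variant compiles it away without CONFIG_SCHED_HRTICK. A typical caller, loosely after hrtick_start_fair() in kernel/sched/fair.c:

    if (hrtick_enabled(rq))
        hrtick_start(rq, delta);  /* delta: ns until the slice expires */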
1397 extern void sched_avg_update(struct rq *rq);
1418 static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) in sched_rt_avg_update() argument
1420 rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq)); in sched_rt_avg_update()
1421 sched_avg_update(rq); in sched_rt_avg_update()
1424 static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { } in sched_rt_avg_update() argument
1425 static inline void sched_avg_update(struct rq *rq) { } in sched_avg_update() argument
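
sched_rt_avg_update() accumulates time spent in RT/DL tasks, scaled by frequency-invariant capacity, into rq->rt_avg, which CFS uses to discount a CPU's capacity; sched_avg_update() ages it. A simplified reconstruction of the decay from kernel/sched/core.c of this era:

    void sched_avg_update(struct rq *rq)
    {
        s64 period = sched_avg_period();

        while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
            rq->age_stamp += period;
            rq->rt_avg /= 2;    /* halve once per elapsed period */
        }
    }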
1431 static inline struct rq *__task_rq_lock(struct task_struct *p) in __task_rq_lock()
1432 __acquires(rq->lock) in __task_rq_lock()
1434 struct rq *rq; in __task_rq_lock() local
1439 rq = task_rq(p); in __task_rq_lock()
1440 raw_spin_lock(&rq->lock); in __task_rq_lock()
1441 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { in __task_rq_lock()
1442 lockdep_pin_lock(&rq->lock); in __task_rq_lock()
1443 return rq; in __task_rq_lock()
1445 raw_spin_unlock(&rq->lock); in __task_rq_lock()
1455 static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) in task_rq_lock()
1457 __acquires(rq->lock) in task_rq_lock()
1459 struct rq *rq; in task_rq_lock() local
1463 rq = task_rq(p); in task_rq_lock()
1464 raw_spin_lock(&rq->lock); in task_rq_lock()
1481 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { in task_rq_lock()
1482 lockdep_pin_lock(&rq->lock); in task_rq_lock()
1483 return rq; in task_rq_lock()
1485 raw_spin_unlock(&rq->lock); in task_rq_lock()
1493 static inline void __task_rq_unlock(struct rq *rq) in __task_rq_unlock() argument
1494 __releases(rq->lock) in __task_rq_unlock()
1496 lockdep_unpin_lock(&rq->lock); in __task_rq_unlock()
1497 raw_spin_unlock(&rq->lock); in __task_rq_unlock()
1501 task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags) in task_rq_unlock() argument
1502 __releases(rq->lock) in task_rq_unlock()
1505 lockdep_unpin_lock(&rq->lock); in task_rq_unlock()
1506 raw_spin_unlock(&rq->lock); in task_rq_unlock()
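
task_rq_lock()/__task_rq_lock() must tolerate the task migrating between reading task_rq(p) and taking the lock, hence the re-check after locking (and the task_on_rq_migrating() test, which closes the window while a migration is in flight); lockdep_pin_lock() then guards against the lock being dropped behind the caller's back. The fragments above belong to a retry loop; a simplified reconstruction:

    static inline struct rq *__task_rq_lock(struct task_struct *p)
        __acquires(rq->lock)
    {
        struct rq *rq;

        lockdep_assert_held(&p->pi_lock);

        for (;;) {
            rq = task_rq(p);
            raw_spin_lock(&rq->lock);
            if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
                lockdep_pin_lock(&rq->lock);
                return rq;
            }
            raw_spin_unlock(&rq->lock);

            while (unlikely(task_on_rq_migrating(p)))
                cpu_relax();    /* wait out the migration, then retry */
        }
    }

task_rq_lock() is the same dance with p->pi_lock taken irqs-off around it, which is why it also carries the flags pointer.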
1513 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
1523 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) in _double_lock_balance()
1542 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) in _double_lock_balance()
1568 static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) in double_lock_balance()
1579 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) in double_unlock_balance()
1619 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) in double_rq_lock()
1644 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) in double_rq_unlock()
1663 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) in double_rq_lock()
1679 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) in double_rq_unlock()
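
Both double-lock helpers enforce a global ordering so that two CPUs locking the same rq pair in opposite order cannot ABBA-deadlock: _double_lock_balance() drops or retries this_rq's lock, and double_rq_lock() orders by pointer value. A simplified reconstruction of the SMP double_rq_lock():

    static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
        __acquires(rq1->lock)
        __acquires(rq2->lock)
    {
        BUG_ON(!irqs_disabled());
        if (rq1 == rq2) {
            raw_spin_lock(&rq1->lock);
            __acquire(rq2->lock);   /* fake it out for sparse */
        } else if (rq1 < rq2) {
            raw_spin_lock(&rq1->lock);
            raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
        } else {
            raw_spin_lock(&rq2->lock);
            raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
        }
    }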
1774 static inline void account_reset_rq(struct rq *rq) in account_reset_rq() argument
1777 rq->prev_irq_time = 0; in account_reset_rq()
1780 rq->prev_steal_time = 0; in account_reset_rq()
1783 rq->prev_steal_time_rq = 0; in account_reset_rq()
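
The three resets in account_reset_rq() each sit under their own config option, which is why the listing shows only the assignments. A reconstruction of the full body (each field exists only when its option is set):

    static inline void account_reset_rq(struct rq *rq)
    {
    #ifdef CONFIG_IRQ_TIME_ACCOUNTING
        rq->prev_irq_time = 0;
    #endif
    #ifdef CONFIG_PARAVIRT
        rq->prev_steal_time = 0;
    #endif
    #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
        rq->prev_steal_time_rq = 0;
    #endif
    }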