Lines Matching refs:rq — cross-reference hits for struct rq in the scheduler core (kernel/sched/core.c); each entry lists the source line number, the matching code, the enclosing function, and whether rq is an argument or a local there.
94 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
96 static void update_rq_clock_task(struct rq *rq, s64 delta);
98 void update_rq_clock(struct rq *rq) in update_rq_clock() argument
102 lockdep_assert_held(&rq->lock); in update_rq_clock()
104 if (rq->clock_skip_update & RQCF_ACT_SKIP) in update_rq_clock()
107 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; in update_rq_clock()
110 rq->clock += delta; in update_rq_clock()
111 update_rq_clock_task(rq, delta); in update_rq_clock()
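
The hits above (lines 94-111) are update_rq_clock(), which advances the per-runqueue clock by the time elapsed on that CPU since the last update, unless a skip flag asks it not to. Below is a minimal userspace sketch of that pattern, not the kernel code: the toy_* names, the flag value, and the use of CLOCK_MONOTONIC are illustrative assumptions.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define TOY_RQCF_ACT_SKIP 0x02   /* stand-in for the kernel's skip flag */

struct toy_rq {
    uint64_t clock;                  /* ns, like rq->clock */
    unsigned int clock_skip_update;
};

static uint64_t monotonic_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void toy_update_rq_clock(struct toy_rq *rq)
{
    int64_t delta;

    if (rq->clock_skip_update & TOY_RQCF_ACT_SKIP)
        return;                          /* caller asked to skip this update */

    delta = monotonic_ns() - rq->clock;  /* time elapsed since last update */
    if (delta < 0)
        return;
    rq->clock += delta;
}

int main(void)
{
    struct toy_rq rq = { .clock = monotonic_ns(), .clock_skip_update = 0 };

    toy_update_rq_clock(&rq);
    printf("rq.clock = %llu ns\n", (unsigned long long)rq.clock);
    return 0;
}
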
293 static struct rq *this_rq_lock(void) in this_rq_lock()
294 __acquires(rq->lock) in this_rq_lock()
296 struct rq *rq; in this_rq_lock() local
299 rq = this_rq(); in this_rq_lock()
300 raw_spin_lock(&rq->lock); in this_rq_lock()
302 return rq; in this_rq_lock()
310 static void hrtick_clear(struct rq *rq) in hrtick_clear() argument
312 if (hrtimer_active(&rq->hrtick_timer)) in hrtick_clear()
313 hrtimer_cancel(&rq->hrtick_timer); in hrtick_clear()
322 struct rq *rq = container_of(timer, struct rq, hrtick_timer); in hrtick() local
324 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); in hrtick()
326 raw_spin_lock(&rq->lock); in hrtick()
327 update_rq_clock(rq); in hrtick()
328 rq->curr->sched_class->task_tick(rq, rq->curr, 1); in hrtick()
329 raw_spin_unlock(&rq->lock); in hrtick()
336 static void __hrtick_restart(struct rq *rq) in __hrtick_restart() argument
338 struct hrtimer *timer = &rq->hrtick_timer; in __hrtick_restart()
348 struct rq *rq = arg; in __hrtick_start() local
350 raw_spin_lock(&rq->lock); in __hrtick_start()
351 __hrtick_restart(rq); in __hrtick_start()
352 rq->hrtick_csd_pending = 0; in __hrtick_start()
353 raw_spin_unlock(&rq->lock); in __hrtick_start()
361 void hrtick_start(struct rq *rq, u64 delay) in hrtick_start() argument
363 struct hrtimer *timer = &rq->hrtick_timer; in hrtick_start()
376 if (rq == this_rq()) { in hrtick_start()
377 __hrtick_restart(rq); in hrtick_start()
378 } else if (!rq->hrtick_csd_pending) { in hrtick_start()
379 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd); in hrtick_start()
380 rq->hrtick_csd_pending = 1; in hrtick_start()
413 void hrtick_start(struct rq *rq, u64 delay) in hrtick_start() argument
420 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), in hrtick_start()
429 static void init_rq_hrtick(struct rq *rq) in init_rq_hrtick() argument
432 rq->hrtick_csd_pending = 0; in init_rq_hrtick()
434 rq->hrtick_csd.flags = 0; in init_rq_hrtick()
435 rq->hrtick_csd.func = __hrtick_start; in init_rq_hrtick()
436 rq->hrtick_csd.info = rq; in init_rq_hrtick()
439 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in init_rq_hrtick()
440 rq->hrtick_timer.function = hrtick; in init_rq_hrtick()
443 static inline void hrtick_clear(struct rq *rq) in hrtick_clear() argument
447 static inline void init_rq_hrtick(struct rq *rq) in init_rq_hrtick() argument
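
The hrtick hits (lines 310-447) show how hrtick_start() either restarts the high-resolution tick timer locally or queues at most one asynchronous cross-CPU request, tracked by hrtick_csd_pending. The sketch below models only that dispatch decision; timers and IPIs are replaced by printouts and every name is invented.

#include <stdbool.h>
#include <stdio.h>

struct toy_rq {
    int cpu;
    bool csd_pending;   /* models rq->hrtick_csd_pending */
};

static int this_cpu = 0;

static void restart_local_timer(struct toy_rq *rq)
{
    printf("cpu%d: restart hrtick timer locally\n", rq->cpu);
}

static void queue_remote_request(struct toy_rq *rq)
{
    printf("cpu%d: queue async hrtick request for cpu%d\n", this_cpu, rq->cpu);
}

static void toy_hrtick_start(struct toy_rq *rq)
{
    if (rq->cpu == this_cpu) {
        restart_local_timer(rq);
    } else if (!rq->csd_pending) {
        queue_remote_request(rq);
        rq->csd_pending = true;   /* cleared by the remote handler */
    }
    /* else: a request is already in flight, nothing to do */
}

int main(void)
{
    struct toy_rq local = { .cpu = 0 }, remote = { .cpu = 1 };

    toy_hrtick_start(&local);    /* local path */
    toy_hrtick_start(&remote);   /* queues one request */
    toy_hrtick_start(&remote);   /* suppressed: already pending */
    return 0;
}
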
574 void resched_curr(struct rq *rq) in resched_curr() argument
576 struct task_struct *curr = rq->curr; in resched_curr()
579 lockdep_assert_held(&rq->lock); in resched_curr()
584 cpu = cpu_of(rq); in resched_curr()
600 struct rq *rq = cpu_rq(cpu); in resched_cpu() local
603 if (!raw_spin_trylock_irqsave(&rq->lock, flags)) in resched_cpu()
605 resched_curr(rq); in resched_cpu()
606 raw_spin_unlock_irqrestore(&rq->lock, flags); in resched_cpu()
655 struct rq *rq = cpu_rq(cpu); in wake_up_idle_cpu() local
660 if (set_nr_and_not_polling(rq->idle)) in wake_up_idle_cpu()
749 void sched_avg_update(struct rq *rq) in sched_avg_update() argument
753 while ((s64)(rq_clock(rq) - rq->age_stamp) > period) { in sched_avg_update()
759 asm("" : "+rm" (rq->age_stamp)); in sched_avg_update()
760 rq->age_stamp += period; in sched_avg_update()
761 rq->rt_avg /= 2; in sched_avg_update()
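
sched_avg_update() (lines 749-761) ages the accumulated RT/IRQ time: for every full averaging period that has passed since age_stamp, rt_avg is halved. A compilable sketch of just that loop, with an arbitrary period:

#include <stdint.h>
#include <stdio.h>

#define TOY_PERIOD_NS 500000000ull   /* 0.5 s, illustrative only */

struct toy_rq {
    uint64_t age_stamp;   /* start of the current averaging window */
    uint64_t rt_avg;      /* decayed RT/IRQ time accumulator */
};

static void toy_sched_avg_update(struct toy_rq *rq, uint64_t now)
{
    while ((int64_t)(now - rq->age_stamp) > (int64_t)TOY_PERIOD_NS) {
        rq->age_stamp += TOY_PERIOD_NS;
        rq->rt_avg /= 2;              /* halve once per elapsed period */
    }
}

int main(void)
{
    struct toy_rq rq = { .age_stamp = 0, .rt_avg = 800 };

    toy_sched_avg_update(&rq, 3 * TOY_PERIOD_NS + 1);
    printf("rt_avg after three periods: %llu\n",
           (unsigned long long)rq.rt_avg);   /* 800 -> 400 -> 200 -> 100 */
    return 0;
}
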
830 static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags) in enqueue_task() argument
832 update_rq_clock(rq); in enqueue_task()
834 sched_info_queued(rq, p); in enqueue_task()
835 p->sched_class->enqueue_task(rq, p, flags); in enqueue_task()
838 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) in dequeue_task() argument
840 update_rq_clock(rq); in dequeue_task()
842 sched_info_dequeued(rq, p); in dequeue_task()
843 p->sched_class->dequeue_task(rq, p, flags); in dequeue_task()
846 void activate_task(struct rq *rq, struct task_struct *p, int flags) in activate_task() argument
849 rq->nr_uninterruptible--; in activate_task()
851 enqueue_task(rq, p, flags); in activate_task()
854 void deactivate_task(struct rq *rq, struct task_struct *p, int flags) in deactivate_task() argument
857 rq->nr_uninterruptible++; in deactivate_task()
859 dequeue_task(rq, p, flags); in deactivate_task()
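
activate_task()/deactivate_task() (lines 846-859) bracket enqueue/dequeue with nr_uninterruptible bookkeeping. The excerpt hides the condition (only tasks that contribute to the load average are counted), so the sketch below reduces it to a flag; it is an illustration, not the kernel logic verbatim.

#include <stdbool.h>
#include <stdio.h>

struct toy_task {
    const char *name;
    bool uninterruptible;   /* stand-in for task_contributes_to_load() */
};

struct toy_rq {
    int nr_running;
    long nr_uninterruptible;
};

static void toy_activate_task(struct toy_rq *rq, struct toy_task *p)
{
    if (p->uninterruptible)
        rq->nr_uninterruptible--;   /* no longer in uninterruptible sleep */
    rq->nr_running++;               /* enqueue_task() equivalent */
}

static void toy_deactivate_task(struct toy_rq *rq, struct toy_task *p)
{
    if (p->uninterruptible)
        rq->nr_uninterruptible++;   /* going into uninterruptible sleep */
    rq->nr_running--;               /* dequeue_task() equivalent */
}

int main(void)
{
    struct toy_rq rq = { 0, 0 };
    struct toy_task t = { "worker", true };

    toy_activate_task(&rq, &t);
    toy_deactivate_task(&rq, &t);
    printf("nr_running=%d nr_uninterruptible=%ld\n",
           rq.nr_running, rq.nr_uninterruptible);
    return 0;
}
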
862 static void update_rq_clock_task(struct rq *rq, s64 delta) in update_rq_clock_task() argument
872 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; in update_rq_clock_task()
892 rq->prev_irq_time += irq_delta; in update_rq_clock_task()
897 steal = paravirt_steal_clock(cpu_of(rq)); in update_rq_clock_task()
898 steal -= rq->prev_steal_time_rq; in update_rq_clock_task()
903 rq->prev_steal_time_rq += steal; in update_rq_clock_task()
908 rq->clock_task += delta; in update_rq_clock_task()
912 sched_rt_avg_update(rq, irq_delta + steal); in update_rq_clock_task()
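
update_rq_clock_task() (lines 862-912) trims the raw clock delta by IRQ and paravirt steal time before charging it to clock_task, and feeds the trimmed part into the RT average. A simplified, self-contained version of that accounting (names and numbers invented):

#include <stdint.h>
#include <stdio.h>

struct toy_rq {
    uint64_t clock_task;   /* time actually available to tasks */
    uint64_t rt_avg;       /* where irq/steal time is accounted */
};

static void toy_update_rq_clock_task(struct toy_rq *rq, int64_t delta,
                                     int64_t irq_delta, int64_t steal)
{
    if (irq_delta > delta)
        irq_delta = delta;           /* never trim more than elapsed */
    delta -= irq_delta;

    if (steal > delta)
        steal = delta;
    delta -= steal;

    rq->clock_task += delta;          /* what tasks really got */
    rq->rt_avg += irq_delta + steal;  /* sched_rt_avg_update() equivalent */
}

int main(void)
{
    struct toy_rq rq = { 0, 0 };

    toy_update_rq_clock_task(&rq, 1000, 150, 50);
    printf("clock_task=%llu rt_avg=%llu\n",
           (unsigned long long)rq.clock_task,
           (unsigned long long)rq.rt_avg);   /* 800 and 200 */
    return 0;
}
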
1012 static inline void check_class_changed(struct rq *rq, struct task_struct *p, in check_class_changed() argument
1018 prev_class->switched_from(rq, p); in check_class_changed()
1020 p->sched_class->switched_to(rq, p); in check_class_changed()
1022 p->sched_class->prio_changed(rq, p, oldprio); in check_class_changed()
1025 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr() argument
1029 if (p->sched_class == rq->curr->sched_class) { in check_preempt_curr()
1030 rq->curr->sched_class->check_preempt_curr(rq, p, flags); in check_preempt_curr()
1033 if (class == rq->curr->sched_class) in check_preempt_curr()
1036 resched_curr(rq); in check_preempt_curr()
1046 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr)) in check_preempt_curr()
1047 rq_clock_skip_update(rq, true); in check_preempt_curr()
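
check_preempt_curr() (lines 1025-1047) delegates to the class hook when waker and current share a scheduling class, and otherwise walks the classes from highest priority downward: whichever of the two classes is met first decides. A sketch of that walk, with the class table and string comparison as stand-ins:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static const char *const toy_classes[] = {
    "stop", "deadline", "rt", "fair", "idle",   /* highest first */
};

/* Returns true when the waking task's class should preempt the current one. */
static bool toy_check_preempt_curr(const char *curr_class, const char *p_class)
{
    if (!strcmp(curr_class, p_class))
        return false;   /* same class: the class's own hook decides */

    for (int i = 0; i < 5; i++) {
        if (!strcmp(toy_classes[i], curr_class))
            return false;   /* current class is higher: no preempt */
        if (!strcmp(toy_classes[i], p_class))
            return true;    /* waking class is higher: resched */
    }
    return false;
}

int main(void)
{
    printf("rt wakes while fair runs: %d\n", toy_check_preempt_curr("fair", "rt"));
    printf("fair wakes while rt runs: %d\n", toy_check_preempt_curr("rt", "fair"));
    return 0;
}

In the kernel the walk is for_each_class(), which visits stop, deadline, rt, fair and idle in that order.
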
1070 static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new_cpu) in move_queued_task() argument
1072 lockdep_assert_held(&rq->lock); in move_queued_task()
1074 dequeue_task(rq, p, 0); in move_queued_task()
1077 raw_spin_unlock(&rq->lock); in move_queued_task()
1079 rq = cpu_rq(new_cpu); in move_queued_task()
1081 raw_spin_lock(&rq->lock); in move_queued_task()
1084 enqueue_task(rq, p, 0); in move_queued_task()
1085 check_preempt_curr(rq, p, 0); in move_queued_task()
1087 return rq; in move_queued_task()
1104 static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu) in __migrate_task() argument
1107 return rq; in __migrate_task()
1111 return rq; in __migrate_task()
1113 rq = move_queued_task(rq, p, dest_cpu); in __migrate_task()
1115 return rq; in __migrate_task()
1127 struct rq *rq = this_rq(); in migration_cpu_stop() local
1142 raw_spin_lock(&rq->lock); in migration_cpu_stop()
1148 if (task_rq(p) == rq && task_on_rq_queued(p)) in migration_cpu_stop()
1149 rq = __migrate_task(rq, p, arg->dest_cpu); in migration_cpu_stop()
1150 raw_spin_unlock(&rq->lock); in migration_cpu_stop()
1169 struct rq *rq = task_rq(p); in do_set_cpus_allowed() local
1175 running = task_current(rq, p); in do_set_cpus_allowed()
1182 lockdep_assert_held(&rq->lock); in do_set_cpus_allowed()
1183 dequeue_task(rq, p, DEQUEUE_SAVE); in do_set_cpus_allowed()
1186 put_prev_task(rq, p); in do_set_cpus_allowed()
1191 p->sched_class->set_curr_task(rq); in do_set_cpus_allowed()
1193 enqueue_task(rq, p, ENQUEUE_RESTORE); in do_set_cpus_allowed()
1209 struct rq *rq; in __set_cpus_allowed_ptr() local
1213 rq = task_rq_lock(p, &flags); in __set_cpus_allowed_ptr()
1239 if (task_running(rq, p) || p->state == TASK_WAKING) { in __set_cpus_allowed_ptr()
1242 task_rq_unlock(rq, p, &flags); in __set_cpus_allowed_ptr()
1243 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); in __set_cpus_allowed_ptr()
1251 lockdep_unpin_lock(&rq->lock); in __set_cpus_allowed_ptr()
1252 rq = move_queued_task(rq, p, dest_cpu); in __set_cpus_allowed_ptr()
1253 lockdep_pin_lock(&rq->lock); in __set_cpus_allowed_ptr()
1256 task_rq_unlock(rq, p, &flags); in __set_cpus_allowed_ptr()
1308 struct rq *src_rq, *dst_rq; in __migrate_swap_task()
1335 struct rq *src_rq, *dst_rq; in migrate_swap_stop()
1432 struct rq *rq; in wait_task_inactive() local
1441 rq = task_rq(p); in wait_task_inactive()
1454 while (task_running(rq, p)) { in wait_task_inactive()
1465 rq = task_rq_lock(p, &flags); in wait_task_inactive()
1467 running = task_running(rq, p); in wait_task_inactive()
1472 task_rq_unlock(rq, p, &flags); in wait_task_inactive()
1667 struct rq *rq = this_rq(); in ttwu_stat() local
1673 schedstat_inc(rq, ttwu_local); in ttwu_stat()
1694 schedstat_inc(rq, ttwu_count); in ttwu_stat()
1703 static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags) in ttwu_activate() argument
1705 activate_task(rq, p, en_flags); in ttwu_activate()
1710 wq_worker_waking_up(p, cpu_of(rq)); in ttwu_activate()
1717 ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) in ttwu_do_wakeup() argument
1719 check_preempt_curr(rq, p, wake_flags); in ttwu_do_wakeup()
1729 lockdep_unpin_lock(&rq->lock); in ttwu_do_wakeup()
1730 p->sched_class->task_woken(rq, p); in ttwu_do_wakeup()
1731 lockdep_pin_lock(&rq->lock); in ttwu_do_wakeup()
1734 if (rq->idle_stamp) { in ttwu_do_wakeup()
1735 u64 delta = rq_clock(rq) - rq->idle_stamp; in ttwu_do_wakeup()
1736 u64 max = 2*rq->max_idle_balance_cost; in ttwu_do_wakeup()
1738 update_avg(&rq->avg_idle, delta); in ttwu_do_wakeup()
1740 if (rq->avg_idle > max) in ttwu_do_wakeup()
1741 rq->avg_idle = max; in ttwu_do_wakeup()
1743 rq->idle_stamp = 0; in ttwu_do_wakeup()
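
The idle_stamp block in ttwu_do_wakeup() (lines 1734-1743) folds the just-ended idle period into avg_idle and clamps it to twice max_idle_balance_cost. The sketch below reproduces that pattern; the 1/8 weight in the toy update_avg() follows the usual EWMA form and should be treated as illustrative.

#include <stdint.h>
#include <stdio.h>

struct toy_rq {
    uint64_t idle_stamp;             /* when the CPU went idle, 0 if not idle */
    uint64_t avg_idle;               /* smoothed idle duration */
    uint64_t max_idle_balance_cost;
};

static void toy_update_avg(uint64_t *avg, uint64_t sample)
{
    int64_t diff = (int64_t)(sample - *avg);
    *avg += diff / 8;                /* 1/8 exponential moving average step */
}

static void toy_account_idle_wakeup(struct toy_rq *rq, uint64_t now)
{
    if (!rq->idle_stamp)
        return;                              /* CPU was not idle */

    uint64_t delta = now - rq->idle_stamp;
    uint64_t max = 2 * rq->max_idle_balance_cost;

    toy_update_avg(&rq->avg_idle, delta);
    if (rq->avg_idle > max)
        rq->avg_idle = max;                  /* keep the estimate bounded */
    rq->idle_stamp = 0;
}

int main(void)
{
    struct toy_rq rq = { .idle_stamp = 1000, .avg_idle = 0,
                         .max_idle_balance_cost = 500000 };

    toy_account_idle_wakeup(&rq, 81000);     /* 80 us idle */
    printf("avg_idle=%llu\n", (unsigned long long)rq.avg_idle);
    return 0;
}
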
1749 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags) in ttwu_do_activate() argument
1751 lockdep_assert_held(&rq->lock); in ttwu_do_activate()
1755 rq->nr_uninterruptible--; in ttwu_do_activate()
1758 ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING); in ttwu_do_activate()
1759 ttwu_do_wakeup(rq, p, wake_flags); in ttwu_do_activate()
1770 struct rq *rq; in ttwu_remote() local
1773 rq = __task_rq_lock(p); in ttwu_remote()
1776 update_rq_clock(rq); in ttwu_remote()
1777 ttwu_do_wakeup(rq, p, wake_flags); in ttwu_remote()
1780 __task_rq_unlock(rq); in ttwu_remote()
1788 struct rq *rq = this_rq(); in sched_ttwu_pending() local
1789 struct llist_node *llist = llist_del_all(&rq->wake_list); in sched_ttwu_pending()
1796 raw_spin_lock_irqsave(&rq->lock, flags); in sched_ttwu_pending()
1797 lockdep_pin_lock(&rq->lock); in sched_ttwu_pending()
1802 ttwu_do_activate(rq, p, 0); in sched_ttwu_pending()
1805 lockdep_unpin_lock(&rq->lock); in sched_ttwu_pending()
1806 raw_spin_unlock_irqrestore(&rq->lock, flags); in sched_ttwu_pending()
1849 struct rq *rq = cpu_rq(cpu); in ttwu_queue_remote() local
1852 if (!set_nr_if_polling(rq->idle)) in ttwu_queue_remote()
1861 struct rq *rq = cpu_rq(cpu); in wake_up_if_idle() local
1866 if (!is_idle_task(rcu_dereference(rq->curr))) in wake_up_if_idle()
1869 if (set_nr_if_polling(rq->idle)) { in wake_up_if_idle()
1872 raw_spin_lock_irqsave(&rq->lock, flags); in wake_up_if_idle()
1873 if (is_idle_task(rq->curr)) in wake_up_if_idle()
1876 raw_spin_unlock_irqrestore(&rq->lock, flags); in wake_up_if_idle()
1891 struct rq *rq = cpu_rq(cpu); in ttwu_queue() local
1901 raw_spin_lock(&rq->lock); in ttwu_queue()
1902 lockdep_pin_lock(&rq->lock); in ttwu_queue()
1903 ttwu_do_activate(rq, p, 0); in ttwu_queue()
1904 lockdep_unpin_lock(&rq->lock); in ttwu_queue()
1905 raw_spin_unlock(&rq->lock); in ttwu_queue()
2017 struct rq *rq = task_rq(p); in try_to_wake_up_local() local
2019 if (WARN_ON_ONCE(rq != this_rq()) || in try_to_wake_up_local()
2023 lockdep_assert_held(&rq->lock); in try_to_wake_up_local()
2032 lockdep_unpin_lock(&rq->lock); in try_to_wake_up_local()
2033 raw_spin_unlock(&rq->lock); in try_to_wake_up_local()
2035 raw_spin_lock(&rq->lock); in try_to_wake_up_local()
2036 lockdep_pin_lock(&rq->lock); in try_to_wake_up_local()
2045 ttwu_activate(rq, p, ENQUEUE_WAKEUP); in try_to_wake_up_local()
2047 ttwu_do_wakeup(rq, p, 0); in try_to_wake_up_local()
2375 struct rq *rq; in wake_up_new_task() local
2389 rq = __task_rq_lock(p); in wake_up_new_task()
2390 activate_task(rq, p, 0); in wake_up_new_task()
2393 check_preempt_curr(rq, p, WF_FORK); in wake_up_new_task()
2400 lockdep_unpin_lock(&rq->lock); in wake_up_new_task()
2401 p->sched_class->task_woken(rq, p); in wake_up_new_task()
2402 lockdep_pin_lock(&rq->lock); in wake_up_new_task()
2405 task_rq_unlock(rq, p, &flags); in wake_up_new_task()
2509 prepare_task_switch(struct rq *rq, struct task_struct *prev, in prepare_task_switch() argument
2512 sched_info_switch(rq, prev, next); in prepare_task_switch()
2515 prepare_lock_switch(rq, next); in prepare_task_switch()
2538 static struct rq *finish_task_switch(struct task_struct *prev) in finish_task_switch()
2539 __releases(rq->lock) in finish_task_switch()
2541 struct rq *rq = this_rq(); in finish_task_switch() local
2542 struct mm_struct *mm = rq->prev_mm; in finish_task_switch()
2561 rq->prev_mm = NULL; in finish_task_switch()
2577 finish_lock_switch(rq, prev); in finish_task_switch()
2596 return rq; in finish_task_switch()
2602 static void __balance_callback(struct rq *rq) in __balance_callback() argument
2605 void (*func)(struct rq *rq); in __balance_callback()
2608 raw_spin_lock_irqsave(&rq->lock, flags); in __balance_callback()
2609 head = rq->balance_callback; in __balance_callback()
2610 rq->balance_callback = NULL; in __balance_callback()
2612 func = (void (*)(struct rq *))head->func; in __balance_callback()
2617 func(rq); in __balance_callback()
2619 raw_spin_unlock_irqrestore(&rq->lock, flags); in __balance_callback()
2622 static inline void balance_callback(struct rq *rq) in balance_callback() argument
2624 if (unlikely(rq->balance_callback)) in balance_callback()
2625 __balance_callback(rq); in balance_callback()
2630 static inline void balance_callback(struct rq *rq) in balance_callback() argument
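
The balance_callback hits (lines 2602-2630) show per-runqueue callbacks queued as a singly linked list and drained at a later, safe point. The sketch below keeps only the queue-and-drain pattern; locking is omitted and the names are invented.

#include <stddef.h>
#include <stdio.h>

struct toy_rq;

struct toy_callback {
    struct toy_callback *next;
    void (*func)(struct toy_rq *rq);
};

struct toy_rq {
    struct toy_callback *balance_callback;   /* head of pending callbacks */
};

static void toy_queue_callback(struct toy_rq *rq, struct toy_callback *cb)
{
    cb->next = rq->balance_callback;
    rq->balance_callback = cb;
}

static void toy_balance_callback(struct toy_rq *rq)
{
    struct toy_callback *head = rq->balance_callback;

    rq->balance_callback = NULL;             /* detach the whole list */
    while (head) {
        struct toy_callback *next = head->next;

        head->next = NULL;
        head->func(rq);                      /* run the queued work */
        head = next;
    }
}

static void say_hello(struct toy_rq *rq)
{
    (void)rq;
    printf("balance callback ran\n");
}

int main(void)
{
    struct toy_rq rq = { NULL };
    struct toy_callback cb = { NULL, say_hello };

    toy_queue_callback(&rq, &cb);
    toy_balance_callback(&rq);
    return 0;
}
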
2641 __releases(rq->lock) in schedule_tail()
2643 struct rq *rq; in schedule_tail() local
2654 rq = finish_task_switch(prev); in schedule_tail()
2655 balance_callback(rq); in schedule_tail()
2665 static inline struct rq *
2666 context_switch(struct rq *rq, struct task_struct *prev, in context_switch() argument
2671 prepare_task_switch(rq, prev, next); in context_switch()
2691 rq->prev_mm = oldmm; in context_switch()
2699 lockdep_unpin_lock(&rq->lock); in context_switch()
2700 spin_release(&rq->lock.dep_map, 1, _THIS_IP_); in context_switch()
2767 struct rq *this = cpu_rq(cpu); in nr_iowait_cpu()
2773 struct rq *rq = this_rq(); in get_iowait_load() local
2774 *nr_waiters = atomic_read(&rq->nr_iowait); in get_iowait_load()
2775 *load = rq->load.weight; in get_iowait_load()
2822 struct rq *rq; in task_sched_runtime() local
2841 rq = task_rq_lock(p, &flags); in task_sched_runtime()
2847 if (task_current(rq, p) && task_on_rq_queued(p)) { in task_sched_runtime()
2848 update_rq_clock(rq); in task_sched_runtime()
2849 p->sched_class->update_curr(rq); in task_sched_runtime()
2852 task_rq_unlock(rq, p, &flags); in task_sched_runtime()
2864 struct rq *rq = cpu_rq(cpu); in scheduler_tick() local
2865 struct task_struct *curr = rq->curr; in scheduler_tick()
2869 raw_spin_lock(&rq->lock); in scheduler_tick()
2870 update_rq_clock(rq); in scheduler_tick()
2871 curr->sched_class->task_tick(rq, curr, 0); in scheduler_tick()
2872 update_cpu_load_active(rq); in scheduler_tick()
2873 calc_global_load_tick(rq); in scheduler_tick()
2874 raw_spin_unlock(&rq->lock); in scheduler_tick()
2879 rq->idle_balance = idle_cpu(cpu); in scheduler_tick()
2880 trigger_load_balance(rq); in scheduler_tick()
2882 rq_last_tick_reset(rq); in scheduler_tick()
2901 struct rq *rq = this_rq(); in scheduler_tick_max_deferment() local
2904 next = rq->last_sched_tick + HZ; in scheduler_tick_max_deferment()
3030 pick_next_task(struct rq *rq, struct task_struct *prev) in pick_next_task() argument
3040 rq->nr_running == rq->cfs.h_nr_running)) { in pick_next_task()
3041 p = fair_sched_class.pick_next_task(rq, prev); in pick_next_task()
3047 p = idle_sched_class.pick_next_task(rq, prev); in pick_next_task()
3054 p = class->pick_next_task(rq, prev); in pick_next_task()
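
pick_next_task() (lines 3030-3054) has a fast path: when every runnable task is in the fair class, the fair picker is called directly; otherwise the classes are tried in priority order. A toy version of that structure (stand-in pickers, no RETRY_TASK handling):

#include <stddef.h>
#include <stdio.h>

struct toy_rq {
    int nr_running;        /* all runnable tasks */
    int cfs_h_nr_running;  /* runnable tasks in the fair class */
};

typedef const char *(*toy_pick_fn)(struct toy_rq *rq);

static const char *pick_rt(struct toy_rq *rq)
{
    return rq->nr_running > rq->cfs_h_nr_running ? "rt task" : NULL;
}

static const char *pick_fair(struct toy_rq *rq)
{
    return rq->cfs_h_nr_running ? "fair task" : NULL;
}

static const char *pick_idle(struct toy_rq *rq)
{
    (void)rq;
    return "idle task";
}

static const toy_pick_fn toy_classes[] = { pick_rt, pick_fair, pick_idle };

static const char *toy_pick_next_task(struct toy_rq *rq)
{
    /* Fast path: nothing but fair tasks are runnable. */
    if (rq->nr_running == rq->cfs_h_nr_running) {
        const char *p = pick_fair(rq);
        return p ? p : pick_idle(rq);
    }

    /* Slow path: highest-priority class with a runnable task wins. */
    for (size_t i = 0; i < sizeof(toy_classes) / sizeof(toy_classes[0]); i++) {
        const char *p = toy_classes[i](rq);
        if (p)
            return p;
    }
    return NULL;   /* unreachable: the idle class always has a task */
}

int main(void)
{
    struct toy_rq only_fair = { .nr_running = 3, .cfs_h_nr_running = 3 };
    struct toy_rq mixed     = { .nr_running = 4, .cfs_h_nr_running = 3 };

    printf("only fair: %s\n", toy_pick_next_task(&only_fair));
    printf("mixed:     %s\n", toy_pick_next_task(&mixed));
    return 0;
}
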
3108 struct rq *rq; in __schedule() local
3112 rq = cpu_rq(cpu); in __schedule()
3114 prev = rq->curr; in __schedule()
3130 hrtick_clear(rq); in __schedule()
3138 raw_spin_lock_irq(&rq->lock); in __schedule()
3139 lockdep_pin_lock(&rq->lock); in __schedule()
3141 rq->clock_skip_update <<= 1; /* promote REQ to ACT */ in __schedule()
3148 deactivate_task(rq, prev, DEQUEUE_SLEEP); in __schedule()
3168 update_rq_clock(rq); in __schedule()
3170 next = pick_next_task(rq, prev); in __schedule()
3173 rq->clock_skip_update = 0; in __schedule()
3176 rq->nr_switches++; in __schedule()
3177 rq->curr = next; in __schedule()
3181 rq = context_switch(rq, prev, next); /* unlocks the rq */ in __schedule()
3182 cpu = cpu_of(rq); in __schedule()
3184 lockdep_unpin_lock(&rq->lock); in __schedule()
3185 raw_spin_unlock_irq(&rq->lock); in __schedule()
3188 balance_callback(rq); in __schedule()
3369 struct rq *rq; in rt_mutex_setprio() local
3374 rq = __task_rq_lock(p); in rt_mutex_setprio()
3388 if (unlikely(p == rq->idle)) { in rt_mutex_setprio()
3389 WARN_ON(p != rq->curr); in rt_mutex_setprio()
3398 running = task_current(rq, p); in rt_mutex_setprio()
3400 dequeue_task(rq, p, DEQUEUE_SAVE); in rt_mutex_setprio()
3402 put_prev_task(rq, p); in rt_mutex_setprio()
3439 p->sched_class->set_curr_task(rq); in rt_mutex_setprio()
3441 enqueue_task(rq, p, enqueue_flag); in rt_mutex_setprio()
3443 check_class_changed(rq, p, prev_class, oldprio); in rt_mutex_setprio()
3446 __task_rq_unlock(rq); in rt_mutex_setprio()
3448 balance_callback(rq); in rt_mutex_setprio()
3457 struct rq *rq; in set_user_nice() local
3465 rq = task_rq_lock(p, &flags); in set_user_nice()
3478 dequeue_task(rq, p, DEQUEUE_SAVE); in set_user_nice()
3487 enqueue_task(rq, p, ENQUEUE_RESTORE); in set_user_nice()
3492 if (delta < 0 || (delta > 0 && task_running(rq, p))) in set_user_nice()
3493 resched_curr(rq); in set_user_nice()
3496 task_rq_unlock(rq, p, &flags); in set_user_nice()
3570 struct rq *rq = cpu_rq(cpu); in idle_cpu() local
3572 if (rq->curr != rq->idle) in idle_cpu()
3575 if (rq->nr_running) in idle_cpu()
3579 if (!llist_empty(&rq->wake_list)) in idle_cpu()
3680 static void __setscheduler(struct rq *rq, struct task_struct *p, in __setscheduler() argument
3795 struct rq *rq; in __sched_setscheduler() local
3891 rq = task_rq_lock(p, &flags); in __sched_setscheduler()
3896 if (p == rq->stop) { in __sched_setscheduler()
3897 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
3914 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
3928 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
3934 cpumask_t *span = rq->rd->span; in __sched_setscheduler()
3942 rq->rd->dl_bw.bw == 0) { in __sched_setscheduler()
3943 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
3953 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
3963 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
3981 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
3987 running = task_current(rq, p); in __sched_setscheduler()
3989 dequeue_task(rq, p, DEQUEUE_SAVE); in __sched_setscheduler()
3991 put_prev_task(rq, p); in __sched_setscheduler()
3994 __setscheduler(rq, p, attr, pi); in __sched_setscheduler()
3997 p->sched_class->set_curr_task(rq); in __sched_setscheduler()
4007 enqueue_task(rq, p, enqueue_flags); in __sched_setscheduler()
4010 check_class_changed(rq, p, prev_class, oldprio); in __sched_setscheduler()
4012 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
4020 balance_callback(rq); in __sched_setscheduler()
4597 struct rq *rq = this_rq_lock(); in SYSCALL_DEFINE0() local
4599 schedstat_inc(rq, yld_count); in SYSCALL_DEFINE0()
4600 current->sched_class->yield_task(rq); in SYSCALL_DEFINE0()
4606 __release(rq->lock); in SYSCALL_DEFINE0()
4607 spin_release(&rq->lock.dep_map, 1, _THIS_IP_); in SYSCALL_DEFINE0()
4608 do_raw_spin_unlock(&rq->lock); in SYSCALL_DEFINE0()
4715 struct rq *rq, *p_rq; in yield_to() local
4720 rq = this_rq(); in yield_to()
4728 if (rq->nr_running == 1 && p_rq->nr_running == 1) { in yield_to()
4733 double_rq_lock(rq, p_rq); in yield_to()
4735 double_rq_unlock(rq, p_rq); in yield_to()
4748 yielded = curr->sched_class->yield_to_task(rq, p, preempt); in yield_to()
4750 schedstat_inc(rq, yld_count); in yield_to()
4755 if (preempt && rq != p_rq) in yield_to()
4760 double_rq_unlock(rq, p_rq); in yield_to()
4778 struct rq *rq; in io_schedule_timeout() local
4785 rq = raw_rq(); in io_schedule_timeout()
4786 atomic_inc(&rq->nr_iowait); in io_schedule_timeout()
4789 atomic_dec(&rq->nr_iowait); in io_schedule_timeout()
4866 struct rq *rq; in SYSCALL_DEFINE2() local
4883 rq = task_rq_lock(p, &flags); in SYSCALL_DEFINE2()
4886 time_slice = p->sched_class->get_rr_interval(rq, p); in SYSCALL_DEFINE2()
4887 task_rq_unlock(rq, p, &flags); in SYSCALL_DEFINE2()
4988 struct rq *rq = cpu_rq(cpu); in init_idle() local
4992 raw_spin_lock(&rq->lock); in init_idle()
5021 rq->curr = rq->idle = idle; in init_idle()
5026 raw_spin_unlock(&rq->lock); in init_idle()
5148 struct rq *rq; in sched_setnuma() local
5152 rq = task_rq_lock(p, &flags); in sched_setnuma()
5154 running = task_current(rq, p); in sched_setnuma()
5157 dequeue_task(rq, p, DEQUEUE_SAVE); in sched_setnuma()
5159 put_prev_task(rq, p); in sched_setnuma()
5164 p->sched_class->set_curr_task(rq); in sched_setnuma()
5166 enqueue_task(rq, p, ENQUEUE_RESTORE); in sched_setnuma()
5167 task_rq_unlock(rq, p, &flags); in sched_setnuma()
5196 static void calc_load_migrate(struct rq *rq) in calc_load_migrate() argument
5198 long delta = calc_load_fold_active(rq); in calc_load_migrate()
5203 static void put_prev_task_fake(struct rq *rq, struct task_struct *prev) in put_prev_task_fake() argument
5227 static void migrate_tasks(struct rq *dead_rq) in migrate_tasks()
5229 struct rq *rq = dead_rq; in migrate_tasks() local
5230 struct task_struct *next, *stop = rq->stop; in migrate_tasks()
5242 rq->stop = NULL; in migrate_tasks()
5249 update_rq_clock(rq); in migrate_tasks()
5256 if (rq->nr_running == 1) in migrate_tasks()
5262 lockdep_pin_lock(&rq->lock); in migrate_tasks()
5263 next = pick_next_task(rq, &fake_task); in migrate_tasks()
5265 next->sched_class->put_prev_task(rq, next); in migrate_tasks()
5276 lockdep_unpin_lock(&rq->lock); in migrate_tasks()
5277 raw_spin_unlock(&rq->lock); in migrate_tasks()
5279 raw_spin_lock(&rq->lock); in migrate_tasks()
5286 if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) { in migrate_tasks()
5294 rq = __migrate_task(rq, next, dest_cpu); in migrate_tasks()
5295 if (rq != dead_rq) { in migrate_tasks()
5296 raw_spin_unlock(&rq->lock); in migrate_tasks()
5297 rq = dead_rq; in migrate_tasks()
5298 raw_spin_lock(&rq->lock); in migrate_tasks()
5303 rq->stop = stop; in migrate_tasks()
5484 static void set_rq_online(struct rq *rq) in set_rq_online() argument
5486 if (!rq->online) { in set_rq_online()
5489 cpumask_set_cpu(rq->cpu, rq->rd->online); in set_rq_online()
5490 rq->online = 1; in set_rq_online()
5494 class->rq_online(rq); in set_rq_online()
5499 static void set_rq_offline(struct rq *rq) in set_rq_offline() argument
5501 if (rq->online) { in set_rq_offline()
5506 class->rq_offline(rq); in set_rq_offline()
5509 cpumask_clear_cpu(rq->cpu, rq->rd->online); in set_rq_offline()
5510 rq->online = 0; in set_rq_offline()
5523 struct rq *rq = cpu_rq(cpu); in migration_call() local
5528 rq->calc_load_update = calc_load_update; in migration_call()
5529 account_reset_rq(rq); in migration_call()
5534 raw_spin_lock_irqsave(&rq->lock, flags); in migration_call()
5535 if (rq->rd) { in migration_call()
5536 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in migration_call()
5538 set_rq_online(rq); in migration_call()
5540 raw_spin_unlock_irqrestore(&rq->lock, flags); in migration_call()
5547 raw_spin_lock_irqsave(&rq->lock, flags); in migration_call()
5548 if (rq->rd) { in migration_call()
5549 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in migration_call()
5550 set_rq_offline(rq); in migration_call()
5552 migrate_tasks(rq); in migration_call()
5553 BUG_ON(rq->nr_running != 1); /* the migration thread */ in migration_call()
5554 raw_spin_unlock_irqrestore(&rq->lock, flags); in migration_call()
5558 calc_load_migrate(rq); in migration_call()
5581 struct rq *rq = cpu_rq(cpu); in set_cpu_rq_start_time() local
5582 rq->age_stamp = sched_clock_cpu(cpu); in set_cpu_rq_start_time()
5835 static void rq_attach_root(struct rq *rq, struct root_domain *rd) in rq_attach_root() argument
5840 raw_spin_lock_irqsave(&rq->lock, flags); in rq_attach_root()
5842 if (rq->rd) { in rq_attach_root()
5843 old_rd = rq->rd; in rq_attach_root()
5845 if (cpumask_test_cpu(rq->cpu, old_rd->online)) in rq_attach_root()
5846 set_rq_offline(rq); in rq_attach_root()
5848 cpumask_clear_cpu(rq->cpu, old_rd->span); in rq_attach_root()
5860 rq->rd = rd; in rq_attach_root()
5862 cpumask_set_cpu(rq->cpu, rd->span); in rq_attach_root()
5863 if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) in rq_attach_root()
5864 set_rq_online(rq); in rq_attach_root()
5866 raw_spin_unlock_irqrestore(&rq->lock, flags); in rq_attach_root()
6030 struct rq *rq = cpu_rq(cpu); in cpu_attach_domain() local
6065 rq_attach_root(rq, rd); in cpu_attach_domain()
6066 tmp = rq->sd; in cpu_attach_domain()
6067 rcu_assign_pointer(rq->sd, sd); in cpu_attach_domain()
7425 struct rq *rq; in sched_init() local
7427 rq = cpu_rq(i); in sched_init()
7428 raw_spin_lock_init(&rq->lock); in sched_init()
7429 rq->nr_running = 0; in sched_init()
7430 rq->calc_load_active = 0; in sched_init()
7431 rq->calc_load_update = jiffies + LOAD_FREQ; in sched_init()
7432 init_cfs_rq(&rq->cfs); in sched_init()
7433 init_rt_rq(&rq->rt); in sched_init()
7434 init_dl_rq(&rq->dl); in sched_init()
7437 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); in sched_init()
7458 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); in sched_init()
7461 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; in sched_init()
7463 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); in sched_init()
7467 rq->cpu_load[j] = 0; in sched_init()
7469 rq->last_load_update_tick = jiffies; in sched_init()
7472 rq->sd = NULL; in sched_init()
7473 rq->rd = NULL; in sched_init()
7474 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE; in sched_init()
7475 rq->balance_callback = NULL; in sched_init()
7476 rq->active_balance = 0; in sched_init()
7477 rq->next_balance = jiffies; in sched_init()
7478 rq->push_cpu = 0; in sched_init()
7479 rq->cpu = i; in sched_init()
7480 rq->online = 0; in sched_init()
7481 rq->idle_stamp = 0; in sched_init()
7482 rq->avg_idle = 2*sysctl_sched_migration_cost; in sched_init()
7483 rq->max_idle_balance_cost = sysctl_sched_migration_cost; in sched_init()
7485 INIT_LIST_HEAD(&rq->cfs_tasks); in sched_init()
7487 rq_attach_root(rq, &def_root_domain); in sched_init()
7489 rq->nohz_flags = 0; in sched_init()
7492 rq->last_sched_tick = 0; in sched_init()
7495 init_rq_hrtick(rq); in sched_init()
7496 atomic_set(&rq->nr_iowait, 0); in sched_init()
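
The sched_init() loop (lines 7425-7496) initializes every CPU's runqueue: lock, counters, load-tracking timestamps, per-class runqueues, and so on. The heavily pruned sketch below shows only the shape of that per-CPU initialization, with invented constants.

#include <stdio.h>

#define TOY_NR_CPUS  4
#define TOY_LOAD_FREQ 5000   /* illustrative, in "jiffies" */

struct toy_rq {
    int cpu;
    int nr_running;
    long calc_load_active;
    unsigned long calc_load_update;
    int online;
};

static struct toy_rq toy_runqueues[TOY_NR_CPUS];   /* stand-in for the per-CPU array */

static void toy_sched_init(unsigned long jiffies_now)
{
    for (int i = 0; i < TOY_NR_CPUS; i++) {
        struct toy_rq *rq = &toy_runqueues[i];

        rq->cpu = i;
        rq->nr_running = 0;
        rq->calc_load_active = 0;
        rq->calc_load_update = jiffies_now + TOY_LOAD_FREQ;
        rq->online = 0;                  /* brought online later */
    }
}

int main(void)
{
    toy_sched_init(100000);
    printf("cpu3 next load update at %lu\n", toy_runqueues[3].calc_load_update);
    return 0;
}
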
7780 struct rq *rq; in sched_move_task() local
7782 rq = task_rq_lock(tsk, &flags); in sched_move_task()
7784 running = task_current(rq, tsk); in sched_move_task()
7788 dequeue_task(rq, tsk, DEQUEUE_SAVE); in sched_move_task()
7790 put_prev_task(rq, tsk); in sched_move_task()
7810 tsk->sched_class->set_curr_task(rq); in sched_move_task()
7812 enqueue_task(rq, tsk, ENQUEUE_RESTORE); in sched_move_task()
7814 task_rq_unlock(rq, tsk, &flags); in sched_move_task()
8344 struct rq *rq = cfs_rq->rq; in tg_set_cfs_bandwidth() local
8346 raw_spin_lock_irq(&rq->lock); in tg_set_cfs_bandwidth()
8352 raw_spin_unlock_irq(&rq->lock); in tg_set_cfs_bandwidth()