Lines matching references to: p

499 static bool set_nr_and_not_polling(struct task_struct *p)  in set_nr_and_not_polling()  argument
501 struct thread_info *ti = task_thread_info(p); in set_nr_and_not_polling()
511 static bool set_nr_if_polling(struct task_struct *p) in set_nr_if_polling() argument
513 struct thread_info *ti = task_thread_info(p); in set_nr_if_polling()
530 static bool set_nr_and_not_polling(struct task_struct *p) in set_nr_and_not_polling() argument
532 set_tsk_need_resched(p); in set_nr_and_not_polling()
537 static bool set_nr_if_polling(struct task_struct *p) in set_nr_if_polling() argument
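
The two pairs above (lines 499-537) set the need-resched flag on a remote task and report whether that task is busy-polling on its flag word, which is what decides whether a reschedule IPI must be sent. Below is a minimal user-space sketch of the same idea using C11 atomics; the toy_* names and flag values are invented for illustration and do not match any architecture's thread_info layout.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative flag bits; the real TIF_* values are architecture specific. */
#define FLAG_NEED_RESCHED	(1u << 0)
#define FLAG_POLLING		(1u << 1)

struct toy_thread_info {
	atomic_uint flags;
};

/* Set NEED_RESCHED and return true if the target was NOT polling,
 * i.e. the caller still has to send a reschedule IPI. */
static bool toy_set_nr_and_not_polling(struct toy_thread_info *ti)
{
	unsigned int old = atomic_fetch_or(&ti->flags, FLAG_NEED_RESCHED);
	return !(old & FLAG_POLLING);
}

/* Set NEED_RESCHED only if the target is polling (so no IPI is needed). */
static bool toy_set_nr_if_polling(struct toy_thread_info *ti)
{
	unsigned int old = atomic_load(&ti->flags);

	for (;;) {
		if (!(old & FLAG_POLLING))
			return false;
		if (old & FLAG_NEED_RESCHED)
			return true;
		if (atomic_compare_exchange_weak(&ti->flags, &old,
						 old | FLAG_NEED_RESCHED))
			return true;
	}
}

int main(void)
{
	struct toy_thread_info ti = { .flags = FLAG_POLLING };

	printf("IPI needed: %d\n", toy_set_nr_and_not_polling(&ti));	/* 0: was polling */
	printf("if_polling: %d\n", toy_set_nr_if_polling(&ti));	/* 1: flag already set */
	return 0;
}
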
787 static void set_load_weight(struct task_struct *p) in set_load_weight() argument
789 int prio = p->static_prio - MAX_RT_PRIO; in set_load_weight()
790 struct load_weight *load = &p->se.load; in set_load_weight()
795 if (p->policy == SCHED_IDLE) { in set_load_weight()
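
set_load_weight() (line 787) translates static_prio into the CFS load weight. The weights form roughly a 1.25x geometric series centred on 1024 at nice 0, so each nice level is worth about 10% of CPU relative to its neighbour; the kernel uses a precomputed prio_to_weight[] table rather than computing this at runtime. A stand-alone approximation follows (link with -lm); nice_to_weight() is an invented helper and its power-curve values differ slightly from the real table.

#include <math.h>
#include <stdio.h>

#define MAX_RT_PRIO	100
#define NICE_0_WEIGHT	1024

/* Approximate the kernel's prio_to_weight[] table: each nice step
 * scales the weight by about a factor of 1.25. */
static unsigned int nice_to_weight(int nice)
{
	return (unsigned int)(NICE_0_WEIGHT * pow(1.25, -nice) + 0.5);
}

int main(void)
{
	for (int nice = -20; nice <= 15; nice += 5) {
		int static_prio = nice + MAX_RT_PRIO + 20;	/* NICE_TO_PRIO(nice) */
		printf("nice %3d  static_prio %3d  weight ~%u\n",
		       nice, static_prio, nice_to_weight(nice));
	}
	return 0;
}
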
805 static void enqueue_task(struct rq *rq, struct task_struct *p, int flags) in enqueue_task() argument
808 sched_info_queued(rq, p); in enqueue_task()
809 p->sched_class->enqueue_task(rq, p, flags); in enqueue_task()
812 static void dequeue_task(struct rq *rq, struct task_struct *p, int flags) in dequeue_task() argument
815 sched_info_dequeued(rq, p); in dequeue_task()
816 p->sched_class->dequeue_task(rq, p, flags); in dequeue_task()
819 void activate_task(struct rq *rq, struct task_struct *p, int flags) in activate_task() argument
821 if (task_contributes_to_load(p)) in activate_task()
824 enqueue_task(rq, p, flags); in activate_task()
827 void deactivate_task(struct rq *rq, struct task_struct *p, int flags) in deactivate_task() argument
829 if (task_contributes_to_load(p)) in deactivate_task()
832 dequeue_task(rq, p, flags); in deactivate_task()
922 static inline int __normal_prio(struct task_struct *p) in __normal_prio() argument
924 return p->static_prio; in __normal_prio()
934 static inline int normal_prio(struct task_struct *p) in normal_prio() argument
938 if (task_has_dl_policy(p)) in normal_prio()
940 else if (task_has_rt_policy(p)) in normal_prio()
941 prio = MAX_RT_PRIO-1 - p->rt_priority; in normal_prio()
943 prio = __normal_prio(p); in normal_prio()
954 static int effective_prio(struct task_struct *p) in effective_prio() argument
956 p->normal_prio = normal_prio(p); in effective_prio()
962 if (!rt_prio(p->prio)) in effective_prio()
963 return p->normal_prio; in effective_prio()
964 return p->prio; in effective_prio()
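
__normal_prio(), normal_prio() and effective_prio() (lines 922-964) fold the scheduling classes onto a single priority scale, where a smaller number means higher priority: deadline tasks sit below 0, realtime tasks map rt_priority 1..99 onto prio 98..0, and everything else keeps its static_prio in 100..139. A toy reimplementation under those assumptions (the toy_* names are invented):

#include <stdio.h>

#define MAX_DL_PRIO	0
#define MAX_RT_PRIO	100
#define MAX_PRIO	140	/* MAX_RT_PRIO + 40 nice levels */

enum toy_policy { TOY_NORMAL, TOY_FIFO, TOY_DEADLINE };

struct toy_task {
	enum toy_policy policy;
	int static_prio;	/* 100..139, i.e. nice + 120 */
	int rt_priority;	/* 1..99 for realtime policies */
};

/* Mirrors normal_prio(): smaller number == higher priority. */
static int toy_normal_prio(const struct toy_task *p)
{
	if (p->policy == TOY_DEADLINE)
		return MAX_DL_PRIO - 1;			/* -1, above all RT */
	if (p->policy == TOY_FIFO)
		return MAX_RT_PRIO - 1 - p->rt_priority;
	return p->static_prio;				/* __normal_prio() */
}

int main(void)
{
	struct toy_task dl  = { .policy = TOY_DEADLINE };
	struct toy_task rt  = { .policy = TOY_FIFO, .rt_priority = 50 };
	struct toy_task cfs = { .policy = TOY_NORMAL, .static_prio = 120 };

	printf("deadline -> %d, fifo(50) -> %d, nice 0 -> %d\n",
	       toy_normal_prio(&dl), toy_normal_prio(&rt),
	       toy_normal_prio(&cfs));	/* -1, 49, 120 */
	return 0;
}
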
973 inline int task_curr(const struct task_struct *p) in task_curr() argument
975 return cpu_curr(task_cpu(p)) == p; in task_curr()
981 static inline void check_class_changed(struct rq *rq, struct task_struct *p, in check_class_changed() argument
985 if (prev_class != p->sched_class) { in check_class_changed()
987 prev_class->switched_from(rq, p); in check_class_changed()
989 p->sched_class->switched_to(rq, p); in check_class_changed()
990 } else if (oldprio != p->prio || dl_task(p)) in check_class_changed()
991 p->sched_class->prio_changed(rq, p, oldprio); in check_class_changed()
994 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr() argument
998 if (p->sched_class == rq->curr->sched_class) { in check_preempt_curr()
999 rq->curr->sched_class->check_preempt_curr(rq, p, flags); in check_preempt_curr()
1004 if (class == p->sched_class) { in check_preempt_curr()
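
check_preempt_curr() (line 994) lets the class decide when the woken task and the current task share a scheduling class; otherwise it walks the classes in descending priority (stop, dl, rt, fair, idle) and forces a reschedule as soon as the woken task's class outranks the running one. A sketch of that ordering with class rank reduced to an integer (toy_* names invented):

#include <stdbool.h>
#include <stdio.h>

/* Lower rank == more important class: stop > dl > rt > fair > idle. */
enum toy_class { TOY_STOP, TOY_DL, TOY_RT, TOY_FAIR, TOY_IDLE };

/* Should the newly woken task preempt the one currently running? */
static bool toy_check_preempt(enum toy_class curr, enum toy_class woken)
{
	if (curr == woken)
		return false;	/* same class: that class's own test decides */
	return woken < curr;	/* a higher-ranked class always preempts */
}

int main(void)
{
	printf("rt woken while fair runs: %d\n",
	       toy_check_preempt(TOY_FAIR, TOY_RT));	/* 1 */
	printf("fair woken while rt runs: %d\n",
	       toy_check_preempt(TOY_RT, TOY_FAIR));	/* 0 */
	return 0;
}
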
1020 void set_task_cpu(struct task_struct *p, unsigned int new_cpu) in set_task_cpu() argument
1027 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && in set_task_cpu()
1028 !p->on_rq); in set_task_cpu()
1041 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || in set_task_cpu()
1042 lockdep_is_held(&task_rq(p)->lock))); in set_task_cpu()
1046 trace_sched_migrate_task(p, new_cpu); in set_task_cpu()
1048 if (task_cpu(p) != new_cpu) { in set_task_cpu()
1049 if (p->sched_class->migrate_task_rq) in set_task_cpu()
1050 p->sched_class->migrate_task_rq(p, new_cpu); in set_task_cpu()
1051 p->se.nr_migrations++; in set_task_cpu()
1055 __set_task_cpu(p, new_cpu); in set_task_cpu()
1058 static void __migrate_swap_task(struct task_struct *p, int cpu) in __migrate_swap_task() argument
1060 if (task_on_rq_queued(p)) { in __migrate_swap_task()
1063 src_rq = task_rq(p); in __migrate_swap_task()
1066 deactivate_task(src_rq, p, 0); in __migrate_swap_task()
1067 set_task_cpu(p, cpu); in __migrate_swap_task()
1068 activate_task(dst_rq, p, 0); in __migrate_swap_task()
1069 check_preempt_curr(dst_rq, p, 0); in __migrate_swap_task()
1076 p->wake_cpu = cpu; in __migrate_swap_task()
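
__migrate_swap_task() (lines 1058-1076) shows the canonical move for a queued task: deactivate it on the source runqueue, retarget it with set_task_cpu(), activate it on the destination and test for preemption there; if the task is not queued, only its wake_cpu is updated so the next wakeup lands on the right CPU. A toy model of that sequence with runqueues reduced to counters (names invented, locking elided):

#include <stdbool.h>
#include <stdio.h>

struct toy_rq   { int cpu; int nr_running; };
struct toy_task { bool queued; int cpu; int wake_cpu; };

static void toy_migrate(struct toy_task *p, struct toy_rq *src, struct toy_rq *dst)
{
	if (p->queued) {
		src->nr_running--;	/* deactivate_task(src_rq, p, 0) */
		p->cpu = dst->cpu;	/* set_task_cpu(p, cpu)          */
		dst->nr_running++;	/* activate_task(dst_rq, p, 0)   */
		/* check_preempt_curr(dst_rq, p, 0) would run here */
	} else {
		p->wake_cpu = dst->cpu;	/* remember where to wake up next */
	}
}

int main(void)
{
	struct toy_rq rq0 = { .cpu = 0, .nr_running = 1 };
	struct toy_rq rq1 = { .cpu = 1, .nr_running = 0 };
	struct toy_task p = { .queued = true, .cpu = 0 };

	toy_migrate(&p, &rq0, &rq1);
	printf("task now on cpu %d, rq0=%d rq1=%d\n",
	       p.cpu, rq0.nr_running, rq1.nr_running);
	return 0;
}
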
1125 int migrate_swap(struct task_struct *cur, struct task_struct *p) in migrate_swap() argument
1133 .dst_task = p, in migrate_swap()
1134 .dst_cpu = task_cpu(p), in migrate_swap()
1153 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); in migrate_swap()
1183 unsigned long wait_task_inactive(struct task_struct *p, long match_state) in wait_task_inactive() argument
1197 rq = task_rq(p); in wait_task_inactive()
1210 while (task_running(rq, p)) { in wait_task_inactive()
1211 if (match_state && unlikely(p->state != match_state)) in wait_task_inactive()
1221 rq = task_rq_lock(p, &flags); in wait_task_inactive()
1222 trace_sched_wait_task(p); in wait_task_inactive()
1223 running = task_running(rq, p); in wait_task_inactive()
1224 queued = task_on_rq_queued(p); in wait_task_inactive()
1226 if (!match_state || p->state == match_state) in wait_task_inactive()
1227 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ in wait_task_inactive()
1228 task_rq_unlock(rq, p, &flags); in wait_task_inactive()
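
wait_task_inactive() (line 1183) returns 0 for "the state never matched" and a non-zero cookie otherwise, but its natural cookie, the voluntary context-switch count nvcsw, can itself be 0 for a fresh task. ORing in LONG_MIN (the "sets MSB" line at 1227) makes every valid sample non-zero while keeping the low bits comparable between calls. A small demonstration of the trick (cookie() is an invented helper):

#include <limits.h>
#include <stdio.h>

/* Turn a possibly-zero switch count into a cookie that is never 0. */
static unsigned long cookie(unsigned long nvcsw)
{
	return nvcsw | LONG_MIN;	/* sets the most significant bit */
}

int main(void)
{
	unsigned long nvcsw = 0;			/* fresh task: no switches yet */
	unsigned long before = cookie(nvcsw);
	unsigned long after  = cookie(nvcsw + 1);	/* one voluntary switch later */

	printf("before=%#lx after=%#lx changed=%d\n",
	       before, after, before != after);
	printf("0 still unambiguously means 'state never matched': %d\n",
	       before != 0);
	return 0;
}
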
1288 void kick_process(struct task_struct *p) in kick_process() argument
1293 cpu = task_cpu(p); in kick_process()
1294 if ((cpu != smp_processor_id()) && task_curr(p)) in kick_process()
1305 static int select_fallback_rq(int cpu, struct task_struct *p) in select_fallback_rq() argument
1326 if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) in select_fallback_rq()
1333 for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) { in select_fallback_rq()
1344 cpuset_cpus_allowed_fallback(p); in select_fallback_rq()
1349 do_set_cpus_allowed(p, cpu_possible_mask); in select_fallback_rq()
1366 if (p->mm && printk_ratelimit()) { in select_fallback_rq()
1368 task_pid_nr(p), p->comm, cpu); in select_fallback_rq()
1379 int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) in select_task_rq() argument
1381 if (p->nr_cpus_allowed > 1) in select_task_rq()
1382 cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); in select_task_rq()
1394 if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) || in select_task_rq()
1396 cpu = select_fallback_rq(task_cpu(p), p); in select_task_rq()
1409 ttwu_stat(struct task_struct *p, int cpu, int wake_flags) in ttwu_stat() argument
1419 schedstat_inc(p, se.statistics.nr_wakeups_local); in ttwu_stat()
1423 schedstat_inc(p, se.statistics.nr_wakeups_remote); in ttwu_stat()
1435 schedstat_inc(p, se.statistics.nr_wakeups_migrate); in ttwu_stat()
1440 schedstat_inc(p, se.statistics.nr_wakeups); in ttwu_stat()
1443 schedstat_inc(p, se.statistics.nr_wakeups_sync); in ttwu_stat()
1448 static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags) in ttwu_activate() argument
1450 activate_task(rq, p, en_flags); in ttwu_activate()
1451 p->on_rq = TASK_ON_RQ_QUEUED; in ttwu_activate()
1454 if (p->flags & PF_WQ_WORKER) in ttwu_activate()
1455 wq_worker_waking_up(p, cpu_of(rq)); in ttwu_activate()
1462 ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) in ttwu_do_wakeup() argument
1464 check_preempt_curr(rq, p, wake_flags); in ttwu_do_wakeup()
1465 trace_sched_wakeup(p, true); in ttwu_do_wakeup()
1467 p->state = TASK_RUNNING; in ttwu_do_wakeup()
1469 if (p->sched_class->task_woken) in ttwu_do_wakeup()
1470 p->sched_class->task_woken(rq, p); in ttwu_do_wakeup()
1487 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags) in ttwu_do_activate() argument
1490 if (p->sched_contributes_to_load) in ttwu_do_activate()
1494 ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING); in ttwu_do_activate()
1495 ttwu_do_wakeup(rq, p, wake_flags); in ttwu_do_activate()
1504 static int ttwu_remote(struct task_struct *p, int wake_flags) in ttwu_remote() argument
1509 rq = __task_rq_lock(p); in ttwu_remote()
1510 if (task_on_rq_queued(p)) { in ttwu_remote()
1513 ttwu_do_wakeup(rq, p, wake_flags); in ttwu_remote()
1526 struct task_struct *p; in sched_ttwu_pending() local
1535 p = llist_entry(llist, struct task_struct, wake_entry); in sched_ttwu_pending()
1537 ttwu_do_activate(rq, p, 0); in sched_ttwu_pending()
1581 static void ttwu_queue_remote(struct task_struct *p, int cpu) in ttwu_queue_remote() argument
1585 if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) { in ttwu_queue_remote()
1623 static void ttwu_queue(struct task_struct *p, int cpu) in ttwu_queue() argument
1630 ttwu_queue_remote(p, cpu); in ttwu_queue()
1636 ttwu_do_activate(rq, p, 0); in ttwu_queue()
1656 try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) in try_to_wake_up() argument
1668 raw_spin_lock_irqsave(&p->pi_lock, flags); in try_to_wake_up()
1669 if (!(p->state & state)) in try_to_wake_up()
1673 cpu = task_cpu(p); in try_to_wake_up()
1675 if (p->on_rq && ttwu_remote(p, wake_flags)) in try_to_wake_up()
1683 while (p->on_cpu) in try_to_wake_up()
1690 p->sched_contributes_to_load = !!task_contributes_to_load(p); in try_to_wake_up()
1691 p->state = TASK_WAKING; in try_to_wake_up()
1693 if (p->sched_class->task_waking) in try_to_wake_up()
1694 p->sched_class->task_waking(p); in try_to_wake_up()
1696 cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags); in try_to_wake_up()
1697 if (task_cpu(p) != cpu) { in try_to_wake_up()
1699 set_task_cpu(p, cpu); in try_to_wake_up()
1703 ttwu_queue(p, cpu); in try_to_wake_up()
1705 ttwu_stat(p, cpu, wake_flags); in try_to_wake_up()
1707 raw_spin_unlock_irqrestore(&p->pi_lock, flags); in try_to_wake_up()
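
try_to_wake_up() (line 1656) only proceeds when the task's current state intersects the caller's mask (the test at line 1669). wake_up_process() passes TASK_NORMAL, which covers both interruptible and uninterruptible sleep, while wake_up_state() lets the caller narrow the mask. A sketch of that gate using the well-known state encodings (toy_wakeup_allowed() is an invented helper):

#include <stdbool.h>
#include <stdio.h>

#define TASK_RUNNING		0x0
#define TASK_INTERRUPTIBLE	0x1
#define TASK_UNINTERRUPTIBLE	0x2
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)

/* The gate at the top of try_to_wake_up(): wake only if the task's
 * state matches the caller-supplied mask. */
static bool toy_wakeup_allowed(unsigned int task_state, unsigned int mask)
{
	return (task_state & mask) != 0;
}

int main(void)
{
	printf("interruptible sleeper, TASK_NORMAL: %d\n",
	       toy_wakeup_allowed(TASK_INTERRUPTIBLE, TASK_NORMAL));		/* 1 */
	printf("uninterruptible sleeper, TASK_INTERRUPTIBLE only: %d\n",
	       toy_wakeup_allowed(TASK_UNINTERRUPTIBLE, TASK_INTERRUPTIBLE));	/* 0 */
	printf("already running, TASK_NORMAL: %d\n",
	       toy_wakeup_allowed(TASK_RUNNING, TASK_NORMAL));			/* 0 */
	return 0;
}
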
1720 static void try_to_wake_up_local(struct task_struct *p) in try_to_wake_up_local() argument
1722 struct rq *rq = task_rq(p); in try_to_wake_up_local()
1725 WARN_ON_ONCE(p == current)) in try_to_wake_up_local()
1730 if (!raw_spin_trylock(&p->pi_lock)) { in try_to_wake_up_local()
1732 raw_spin_lock(&p->pi_lock); in try_to_wake_up_local()
1736 if (!(p->state & TASK_NORMAL)) in try_to_wake_up_local()
1739 if (!task_on_rq_queued(p)) in try_to_wake_up_local()
1740 ttwu_activate(rq, p, ENQUEUE_WAKEUP); in try_to_wake_up_local()
1742 ttwu_do_wakeup(rq, p, 0); in try_to_wake_up_local()
1743 ttwu_stat(p, smp_processor_id(), 0); in try_to_wake_up_local()
1745 raw_spin_unlock(&p->pi_lock); in try_to_wake_up_local()
1760 int wake_up_process(struct task_struct *p) in wake_up_process() argument
1762 WARN_ON(task_is_stopped_or_traced(p)); in wake_up_process()
1763 return try_to_wake_up(p, TASK_NORMAL, 0); in wake_up_process()
1767 int wake_up_state(struct task_struct *p, unsigned int state) in wake_up_state() argument
1769 return try_to_wake_up(p, state, 0); in wake_up_state()
1775 void __dl_clear_params(struct task_struct *p) in __dl_clear_params() argument
1777 struct sched_dl_entity *dl_se = &p->dl; in __dl_clear_params()
1796 static void __sched_fork(unsigned long clone_flags, struct task_struct *p) in __sched_fork() argument
1798 p->on_rq = 0; in __sched_fork()
1800 p->se.on_rq = 0; in __sched_fork()
1801 p->se.exec_start = 0; in __sched_fork()
1802 p->se.sum_exec_runtime = 0; in __sched_fork()
1803 p->se.prev_sum_exec_runtime = 0; in __sched_fork()
1804 p->se.nr_migrations = 0; in __sched_fork()
1805 p->se.vruntime = 0; in __sched_fork()
1807 p->se.avg.decay_count = 0; in __sched_fork()
1809 INIT_LIST_HEAD(&p->se.group_node); in __sched_fork()
1812 memset(&p->se.statistics, 0, sizeof(p->se.statistics)); in __sched_fork()
1815 RB_CLEAR_NODE(&p->dl.rb_node); in __sched_fork()
1816 init_dl_task_timer(&p->dl); in __sched_fork()
1817 __dl_clear_params(p); in __sched_fork()
1819 INIT_LIST_HEAD(&p->rt.run_list); in __sched_fork()
1822 INIT_HLIST_HEAD(&p->preempt_notifiers); in __sched_fork()
1826 if (p->mm && atomic_read(&p->mm->mm_users) == 1) { in __sched_fork()
1827 p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay); in __sched_fork()
1828 p->mm->numa_scan_seq = 0; in __sched_fork()
1832 p->numa_preferred_nid = current->numa_preferred_nid; in __sched_fork()
1834 p->numa_preferred_nid = -1; in __sched_fork()
1836 p->node_stamp = 0ULL; in __sched_fork()
1837 p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0; in __sched_fork()
1838 p->numa_scan_period = sysctl_numa_balancing_scan_delay; in __sched_fork()
1839 p->numa_work.next = &p->numa_work; in __sched_fork()
1840 p->numa_faults = NULL; in __sched_fork()
1841 p->last_task_numa_placement = 0; in __sched_fork()
1842 p->last_sum_exec_runtime = 0; in __sched_fork()
1844 p->numa_group = NULL; in __sched_fork()
1892 int sched_fork(unsigned long clone_flags, struct task_struct *p) in sched_fork() argument
1897 __sched_fork(clone_flags, p); in sched_fork()
1903 p->state = TASK_RUNNING; in sched_fork()
1908 p->prio = current->normal_prio; in sched_fork()
1913 if (unlikely(p->sched_reset_on_fork)) { in sched_fork()
1914 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { in sched_fork()
1915 p->policy = SCHED_NORMAL; in sched_fork()
1916 p->static_prio = NICE_TO_PRIO(0); in sched_fork()
1917 p->rt_priority = 0; in sched_fork()
1918 } else if (PRIO_TO_NICE(p->static_prio) < 0) in sched_fork()
1919 p->static_prio = NICE_TO_PRIO(0); in sched_fork()
1921 p->prio = p->normal_prio = __normal_prio(p); in sched_fork()
1922 set_load_weight(p); in sched_fork()
1928 p->sched_reset_on_fork = 0; in sched_fork()
1931 if (dl_prio(p->prio)) { in sched_fork()
1934 } else if (rt_prio(p->prio)) { in sched_fork()
1935 p->sched_class = &rt_sched_class; in sched_fork()
1937 p->sched_class = &fair_sched_class; in sched_fork()
1940 if (p->sched_class->task_fork) in sched_fork()
1941 p->sched_class->task_fork(p); in sched_fork()
1950 raw_spin_lock_irqsave(&p->pi_lock, flags); in sched_fork()
1951 set_task_cpu(p, cpu); in sched_fork()
1952 raw_spin_unlock_irqrestore(&p->pi_lock, flags); in sched_fork()
1956 memset(&p->sched_info, 0, sizeof(p->sched_info)); in sched_fork()
1959 p->on_cpu = 0; in sched_fork()
1961 init_task_preempt_count(p); in sched_fork()
1963 plist_node_init(&p->pushable_tasks, MAX_PRIO); in sched_fork()
1964 RB_CLEAR_NODE(&p->pushable_dl_tasks); in sched_fork()
2030 static int dl_overflow(struct task_struct *p, int policy, in dl_overflow() argument
2034 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); in dl_overflow()
2040 if (new_bw == p->dl.dl_bw) in dl_overflow()
2049 cpus = dl_bw_cpus(task_cpu(p)); in dl_overflow()
2050 if (dl_policy(policy) && !task_has_dl_policy(p) && in dl_overflow()
2054 } else if (dl_policy(policy) && task_has_dl_policy(p) && in dl_overflow()
2055 !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) { in dl_overflow()
2056 __dl_clear(dl_b, p->dl.dl_bw); in dl_overflow()
2059 } else if (!dl_policy(policy) && task_has_dl_policy(p)) { in dl_overflow()
2060 __dl_clear(dl_b, p->dl.dl_bw); in dl_overflow()
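
dl_overflow() (line 2030) is an admission test: a deadline task's bandwidth is runtime/period expressed as a fixed-point fraction (the kernel's to_ratio() shifts by 20 bits), and the request is rejected when the root domain's total would exceed cpus times the per-CPU limit. A simplified stand-alone check under those assumptions; toy_dl_overflow() is an invented helper and the 95% figure mirrors the usual default rather than a value read from a live system.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT	20

/* runtime/period as a 20-bit fixed-point fraction, like to_ratio(). */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	return (runtime << BW_SHIFT) / period;
}

/* Would admitting new_bw (replacing old_bw) exceed the per-CPU limit? */
static bool toy_dl_overflow(uint64_t total_bw, int cpus, uint64_t limit,
			    uint64_t old_bw, uint64_t new_bw)
{
	return total_bw - old_bw + new_bw > (uint64_t)cpus * limit;
}

int main(void)
{
	/* Two CPUs, each allowed 95% deadline bandwidth. */
	uint64_t limit = to_ratio(1000000000ULL, 950000000ULL);
	/* Two 60% tasks already admitted. */
	uint64_t total = 2 * to_ratio(1000000000ULL, 600000000ULL);
	/* New task asks for 30ms every 100ms. */
	uint64_t ask = to_ratio(100000000ULL, 30000000ULL);

	printf("overflow: %d\n", toy_dl_overflow(total, 2, limit, 0, ask));	/* 0 */
	return 0;
}
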
2077 void wake_up_new_task(struct task_struct *p) in wake_up_new_task() argument
2082 raw_spin_lock_irqsave(&p->pi_lock, flags); in wake_up_new_task()
2089 set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0)); in wake_up_new_task()
2093 init_task_runnable_average(p); in wake_up_new_task()
2094 rq = __task_rq_lock(p); in wake_up_new_task()
2095 activate_task(rq, p, 0); in wake_up_new_task()
2096 p->on_rq = TASK_ON_RQ_QUEUED; in wake_up_new_task()
2097 trace_sched_wakeup_new(p, true); in wake_up_new_task()
2098 check_preempt_curr(rq, p, WF_FORK); in wake_up_new_task()
2100 if (p->sched_class->task_woken) in wake_up_new_task()
2101 p->sched_class->task_woken(rq, p); in wake_up_new_task()
2103 task_rq_unlock(rq, p, &flags); in wake_up_new_task()
2420 struct task_struct *p = current; in sched_exec() local
2424 raw_spin_lock_irqsave(&p->pi_lock, flags); in sched_exec()
2425 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0); in sched_exec()
2430 struct migration_arg arg = { p, dest_cpu }; in sched_exec()
2432 raw_spin_unlock_irqrestore(&p->pi_lock, flags); in sched_exec()
2433 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); in sched_exec()
2437 raw_spin_unlock_irqrestore(&p->pi_lock, flags); in sched_exec()
2453 unsigned long long task_sched_runtime(struct task_struct *p) in task_sched_runtime() argument
2471 if (!p->on_cpu || !task_on_rq_queued(p)) in task_sched_runtime()
2472 return p->se.sum_exec_runtime; in task_sched_runtime()
2475 rq = task_rq_lock(p, &flags); in task_sched_runtime()
2481 if (task_current(rq, p) && task_on_rq_queued(p)) { in task_sched_runtime()
2483 p->sched_class->update_curr(rq); in task_sched_runtime()
2485 ns = p->se.sum_exec_runtime; in task_sched_runtime()
2486 task_rq_unlock(rq, p, &flags); in task_sched_runtime()
2667 struct task_struct *p; in pick_next_task() local
2675 p = fair_sched_class.pick_next_task(rq, prev); in pick_next_task()
2676 if (unlikely(p == RETRY_TASK)) in pick_next_task()
2680 if (unlikely(!p)) in pick_next_task()
2681 p = idle_sched_class.pick_next_task(rq, prev); in pick_next_task()
2683 return p; in pick_next_task()
2688 p = class->pick_next_task(rq, prev); in pick_next_task()
2689 if (p) { in pick_next_task()
2690 if (unlikely(p == RETRY_TASK)) in pick_next_task()
2692 return p; in pick_next_task()
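
pick_next_task() (from line 2667) has a fast path: when everything runnable on this runqueue belongs to the fair class, it asks CFS directly and falls back to idle; otherwise it walks the classes from highest to lowest and takes the first task offered, restarting from the top if a class returns RETRY_TASK. A toy version of the slow-path loop (RETRY_TASK handling omitted, toy_* names invented):

#include <stddef.h>
#include <stdio.h>

struct toy_task { const char *comm; };

/* A scheduling class reduced to a single callback. */
struct toy_class {
	const char *name;
	struct toy_task *(*pick_next_task)(void);
};

static struct toy_task rt_task = { "irq-thread" };

static struct toy_task *pick_none(void) { return NULL; }
static struct toy_task *pick_rt(void)   { return &rt_task; }

/* Classes in priority order: stop, dl, rt, fair, idle. */
static struct toy_class classes[] = {
	{ "stop", pick_none },
	{ "dl",   pick_none },
	{ "rt",   pick_rt   },
	{ "fair", pick_none },
	{ "idle", pick_none },
};

static struct toy_task *toy_pick_next_task(void)
{
	for (size_t i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
		struct toy_task *p = classes[i].pick_next_task();
		if (p) {
			printf("picked from the %s class\n", classes[i].name);
			return p;
		}
	}
	return NULL;	/* unreachable in the kernel: idle always has a task */
}

int main(void)
{
	struct toy_task *p = toy_pick_next_task();
	printf("next: %s\n", p ? p->comm : "(none)");
	return 0;
}
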
2998 void rt_mutex_setprio(struct task_struct *p, int prio) in rt_mutex_setprio() argument
3006 rq = __task_rq_lock(p); in rt_mutex_setprio()
3020 if (unlikely(p == rq->idle)) { in rt_mutex_setprio()
3021 WARN_ON(p != rq->curr); in rt_mutex_setprio()
3022 WARN_ON(p->pi_blocked_on); in rt_mutex_setprio()
3026 trace_sched_pi_setprio(p, prio); in rt_mutex_setprio()
3027 oldprio = p->prio; in rt_mutex_setprio()
3028 prev_class = p->sched_class; in rt_mutex_setprio()
3029 queued = task_on_rq_queued(p); in rt_mutex_setprio()
3030 running = task_current(rq, p); in rt_mutex_setprio()
3032 dequeue_task(rq, p, 0); in rt_mutex_setprio()
3034 put_prev_task(rq, p); in rt_mutex_setprio()
3046 struct task_struct *pi_task = rt_mutex_get_top_task(p); in rt_mutex_setprio()
3047 if (!dl_prio(p->normal_prio) || in rt_mutex_setprio()
3048 (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) { in rt_mutex_setprio()
3049 p->dl.dl_boosted = 1; in rt_mutex_setprio()
3050 p->dl.dl_throttled = 0; in rt_mutex_setprio()
3053 p->dl.dl_boosted = 0; in rt_mutex_setprio()
3054 p->sched_class = &dl_sched_class; in rt_mutex_setprio()
3057 p->dl.dl_boosted = 0; in rt_mutex_setprio()
3060 p->sched_class = &rt_sched_class; in rt_mutex_setprio()
3063 p->dl.dl_boosted = 0; in rt_mutex_setprio()
3065 p->rt.timeout = 0; in rt_mutex_setprio()
3066 p->sched_class = &fair_sched_class; in rt_mutex_setprio()
3069 p->prio = prio; in rt_mutex_setprio()
3072 p->sched_class->set_curr_task(rq); in rt_mutex_setprio()
3074 enqueue_task(rq, p, enqueue_flag); in rt_mutex_setprio()
3076 check_class_changed(rq, p, prev_class, oldprio); in rt_mutex_setprio()
3082 void set_user_nice(struct task_struct *p, long nice) in set_user_nice() argument
3088 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) in set_user_nice()
3094 rq = task_rq_lock(p, &flags); in set_user_nice()
3101 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { in set_user_nice()
3102 p->static_prio = NICE_TO_PRIO(nice); in set_user_nice()
3105 queued = task_on_rq_queued(p); in set_user_nice()
3107 dequeue_task(rq, p, 0); in set_user_nice()
3109 p->static_prio = NICE_TO_PRIO(nice); in set_user_nice()
3110 set_load_weight(p); in set_user_nice()
3111 old_prio = p->prio; in set_user_nice()
3112 p->prio = effective_prio(p); in set_user_nice()
3113 delta = p->prio - old_prio; in set_user_nice()
3116 enqueue_task(rq, p, 0); in set_user_nice()
3121 if (delta < 0 || (delta > 0 && task_running(rq, p))) in set_user_nice()
3125 task_rq_unlock(rq, p, &flags); in set_user_nice()
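
set_user_nice() (lines 3082-3125) follows the same shape as rt_mutex_setprio() above and __sched_setscheduler() and sched_setnuma() below: if the task is queued, dequeue it, change the attribute while it is off the runqueue, then enqueue it again and, if it was running, let the class refresh the current task and possibly reschedule. A schematic of that idiom (toy_* names invented, bookkeeping elided):

#include <stdbool.h>
#include <stdio.h>

struct toy_task { bool queued; bool running; int static_prio; };

static void toy_dequeue(struct toy_task *p) { printf("dequeue at prio %d\n", p->static_prio); }
static void toy_enqueue(struct toy_task *p) { printf("enqueue at prio %d\n", p->static_prio); }

/* The change-attributes-off-the-runqueue idiom shared by set_user_nice(),
 * rt_mutex_setprio(), __sched_setscheduler() and sched_setnuma(). */
static void toy_set_nice(struct toy_task *p, int nice)
{
	bool queued = p->queued;

	if (queued)
		toy_dequeue(p);

	p->static_prio = nice + 120;	/* NICE_TO_PRIO(nice) */
	/* set_load_weight(p), p->prio = effective_prio(p), ... */

	if (queued)
		toy_enqueue(p);
	if (p->running)
		printf("refresh current task, maybe resched\n");
}

int main(void)
{
	struct toy_task p = { .queued = true, .running = false, .static_prio = 120 };

	toy_set_nice(&p, -5);
	return 0;
}
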
3134 int can_nice(const struct task_struct *p, const int nice) in can_nice() argument
3139 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || in can_nice()
3186 int task_prio(const struct task_struct *p) in task_prio() argument
3188 return p->prio - MAX_RT_PRIO; in task_prio()
3246 __setparam_dl(struct task_struct *p, const struct sched_attr *attr) in __setparam_dl() argument
3248 struct sched_dl_entity *dl_se = &p->dl; in __setparam_dl()
3283 static void __setscheduler_params(struct task_struct *p, in __setscheduler_params() argument
3289 policy = p->policy; in __setscheduler_params()
3291 p->policy = policy; in __setscheduler_params()
3294 __setparam_dl(p, attr); in __setscheduler_params()
3296 p->static_prio = NICE_TO_PRIO(attr->sched_nice); in __setscheduler_params()
3303 p->rt_priority = attr->sched_priority; in __setscheduler_params()
3304 p->normal_prio = normal_prio(p); in __setscheduler_params()
3305 set_load_weight(p); in __setscheduler_params()
3309 static void __setscheduler(struct rq *rq, struct task_struct *p, in __setscheduler() argument
3312 __setscheduler_params(p, attr); in __setscheduler()
3319 p->prio = rt_mutex_get_effective_prio(p, normal_prio(p)); in __setscheduler()
3321 p->prio = normal_prio(p); in __setscheduler()
3323 if (dl_prio(p->prio)) in __setscheduler()
3324 p->sched_class = &dl_sched_class; in __setscheduler()
3325 else if (rt_prio(p->prio)) in __setscheduler()
3326 p->sched_class = &rt_sched_class; in __setscheduler()
3328 p->sched_class = &fair_sched_class; in __setscheduler()
3332 __getparam_dl(struct task_struct *p, struct sched_attr *attr) in __getparam_dl() argument
3334 struct sched_dl_entity *dl_se = &p->dl; in __getparam_dl()
3336 attr->sched_priority = p->rt_priority; in __getparam_dl()
3387 static bool check_same_owner(struct task_struct *p) in check_same_owner() argument
3393 pcred = __task_cred(p); in check_same_owner()
3400 static bool dl_param_changed(struct task_struct *p, in dl_param_changed() argument
3403 struct sched_dl_entity *dl_se = &p->dl; in dl_param_changed()
3414 static int __sched_setscheduler(struct task_struct *p, in __sched_setscheduler() argument
3432 reset_on_fork = p->sched_reset_on_fork; in __sched_setscheduler()
3433 policy = oldpolicy = p->policy; in __sched_setscheduler()
3452 if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) || in __sched_setscheduler()
3453 (!p->mm && attr->sched_priority > MAX_RT_PRIO-1)) in __sched_setscheduler()
3464 if (attr->sched_nice < task_nice(p) && in __sched_setscheduler()
3465 !can_nice(p, attr->sched_nice)) in __sched_setscheduler()
3471 task_rlimit(p, RLIMIT_RTPRIO); in __sched_setscheduler()
3474 if (policy != p->policy && !rlim_rtprio) in __sched_setscheduler()
3478 if (attr->sched_priority > p->rt_priority && in __sched_setscheduler()
3496 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) { in __sched_setscheduler()
3497 if (!can_nice(p, task_nice(p))) in __sched_setscheduler()
3502 if (!check_same_owner(p)) in __sched_setscheduler()
3506 if (p->sched_reset_on_fork && !reset_on_fork) in __sched_setscheduler()
3511 retval = security_task_setscheduler(p); in __sched_setscheduler()
3523 rq = task_rq_lock(p, &flags); in __sched_setscheduler()
3528 if (p == rq->stop) { in __sched_setscheduler()
3529 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
3537 if (unlikely(policy == p->policy)) { in __sched_setscheduler()
3538 if (fair_policy(policy) && attr->sched_nice != task_nice(p)) in __sched_setscheduler()
3540 if (rt_policy(policy) && attr->sched_priority != p->rt_priority) in __sched_setscheduler()
3542 if (dl_policy(policy) && dl_param_changed(p, attr)) in __sched_setscheduler()
3545 p->sched_reset_on_fork = reset_on_fork; in __sched_setscheduler()
3546 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
3558 task_group(p)->rt_bandwidth.rt_runtime == 0 && in __sched_setscheduler()
3559 !task_group_is_autogroup(task_group(p))) { in __sched_setscheduler()
3560 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
3573 if (!cpumask_subset(span, &p->cpus_allowed) || in __sched_setscheduler()
3575 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
3583 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { in __sched_setscheduler()
3585 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
3594 if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) { in __sched_setscheduler()
3595 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
3599 p->sched_reset_on_fork = reset_on_fork; in __sched_setscheduler()
3600 oldprio = p->prio; in __sched_setscheduler()
3609 new_effective_prio = rt_mutex_get_effective_prio(p, newprio); in __sched_setscheduler()
3611 __setscheduler_params(p, attr); in __sched_setscheduler()
3612 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
3616 queued = task_on_rq_queued(p); in __sched_setscheduler()
3617 running = task_current(rq, p); in __sched_setscheduler()
3619 dequeue_task(rq, p, 0); in __sched_setscheduler()
3621 put_prev_task(rq, p); in __sched_setscheduler()
3623 prev_class = p->sched_class; in __sched_setscheduler()
3624 __setscheduler(rq, p, attr, true); in __sched_setscheduler()
3627 p->sched_class->set_curr_task(rq); in __sched_setscheduler()
3633 enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0); in __sched_setscheduler()
3636 check_class_changed(rq, p, prev_class, oldprio); in __sched_setscheduler()
3637 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
3639 rt_mutex_adjust_pi(p); in __sched_setscheduler()
3644 static int _sched_setscheduler(struct task_struct *p, int policy, in _sched_setscheduler() argument
3650 .sched_nice = PRIO_TO_NICE(p->static_prio), in _sched_setscheduler()
3660 return __sched_setscheduler(p, &attr, check); in _sched_setscheduler()
3672 int sched_setscheduler(struct task_struct *p, int policy, in sched_setscheduler() argument
3675 return _sched_setscheduler(p, policy, param, true); in sched_setscheduler()
3679 int sched_setattr(struct task_struct *p, const struct sched_attr *attr) in sched_setattr() argument
3681 return __sched_setscheduler(p, attr, true); in sched_setattr()
3698 int sched_setscheduler_nocheck(struct task_struct *p, int policy, in sched_setscheduler_nocheck() argument
3701 return _sched_setscheduler(p, policy, param, false); in sched_setscheduler_nocheck()
3708 struct task_struct *p; in do_sched_setscheduler() local
3718 p = find_process_by_pid(pid); in do_sched_setscheduler()
3719 if (p != NULL) in do_sched_setscheduler()
3720 retval = sched_setscheduler(p, policy, &lparam); in do_sched_setscheduler()
3837 struct task_struct *p; in SYSCALL_DEFINE3() local
3852 p = find_process_by_pid(pid); in SYSCALL_DEFINE3()
3853 if (p != NULL) in SYSCALL_DEFINE3()
3854 retval = sched_setattr(p, &attr); in SYSCALL_DEFINE3()
3869 struct task_struct *p; in SYSCALL_DEFINE1() local
3877 p = find_process_by_pid(pid); in SYSCALL_DEFINE1()
3878 if (p) { in SYSCALL_DEFINE1()
3879 retval = security_task_getscheduler(p); in SYSCALL_DEFINE1()
3881 retval = p->policy in SYSCALL_DEFINE1()
3882 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); in SYSCALL_DEFINE1()
3899 struct task_struct *p; in SYSCALL_DEFINE2() local
3906 p = find_process_by_pid(pid); in SYSCALL_DEFINE2()
3908 if (!p) in SYSCALL_DEFINE2()
3911 retval = security_task_getscheduler(p); in SYSCALL_DEFINE2()
3915 if (task_has_rt_policy(p)) in SYSCALL_DEFINE2()
3916 lp.sched_priority = p->rt_priority; in SYSCALL_DEFINE2()
3980 struct task_struct *p; in SYSCALL_DEFINE4() local
3988 p = find_process_by_pid(pid); in SYSCALL_DEFINE4()
3990 if (!p) in SYSCALL_DEFINE4()
3993 retval = security_task_getscheduler(p); in SYSCALL_DEFINE4()
3997 attr.sched_policy = p->policy; in SYSCALL_DEFINE4()
3998 if (p->sched_reset_on_fork) in SYSCALL_DEFINE4()
4000 if (task_has_dl_policy(p)) in SYSCALL_DEFINE4()
4001 __getparam_dl(p, &attr); in SYSCALL_DEFINE4()
4002 else if (task_has_rt_policy(p)) in SYSCALL_DEFINE4()
4003 attr.sched_priority = p->rt_priority; in SYSCALL_DEFINE4()
4005 attr.sched_nice = task_nice(p); in SYSCALL_DEFINE4()
4020 struct task_struct *p; in sched_setaffinity() local
4025 p = find_process_by_pid(pid); in sched_setaffinity()
4026 if (!p) { in sched_setaffinity()
4032 get_task_struct(p); in sched_setaffinity()
4035 if (p->flags & PF_NO_SETAFFINITY) { in sched_setaffinity()
4048 if (!check_same_owner(p)) { in sched_setaffinity()
4050 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { in sched_setaffinity()
4057 retval = security_task_setscheduler(p); in sched_setaffinity()
4062 cpuset_cpus_allowed(p, cpus_allowed); in sched_setaffinity()
4072 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) { in sched_setaffinity()
4074 if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) { in sched_setaffinity()
4083 retval = set_cpus_allowed_ptr(p, new_mask); in sched_setaffinity()
4086 cpuset_cpus_allowed(p, cpus_allowed); in sched_setaffinity()
4102 put_task_struct(p); in sched_setaffinity()
4143 struct task_struct *p; in sched_getaffinity() local
4150 p = find_process_by_pid(pid); in sched_getaffinity()
4151 if (!p) in sched_getaffinity()
4154 retval = security_task_getscheduler(p); in sched_getaffinity()
4158 raw_spin_lock_irqsave(&p->pi_lock, flags); in sched_getaffinity()
4159 cpumask_and(mask, &p->cpus_allowed, cpu_active_mask); in sched_getaffinity()
4160 raw_spin_unlock_irqrestore(&p->pi_lock, flags); in sched_getaffinity()
4329 int __sched yield_to(struct task_struct *p, bool preempt) in yield_to() argument
4340 p_rq = task_rq(p); in yield_to()
4351 if (task_rq(p) != p_rq) { in yield_to()
4359 if (curr->sched_class != p->sched_class) in yield_to()
4362 if (task_running(p_rq, p) || p->state) in yield_to()
4365 yielded = curr->sched_class->yield_to_task(rq, p, preempt); in yield_to()
4480 struct task_struct *p; in SYSCALL_DEFINE2() local
4492 p = find_process_by_pid(pid); in SYSCALL_DEFINE2()
4493 if (!p) in SYSCALL_DEFINE2()
4496 retval = security_task_getscheduler(p); in SYSCALL_DEFINE2()
4500 rq = task_rq_lock(p, &flags); in SYSCALL_DEFINE2()
4502 if (p->sched_class->get_rr_interval) in SYSCALL_DEFINE2()
4503 time_slice = p->sched_class->get_rr_interval(rq, p); in SYSCALL_DEFINE2()
4504 task_rq_unlock(rq, p, &flags); in SYSCALL_DEFINE2()
4518 void sched_show_task(struct task_struct *p) in sched_show_task() argument
4522 unsigned long state = p->state; in sched_show_task()
4526 printk(KERN_INFO "%-15.15s %c", p->comm, in sched_show_task()
4532 printk(KERN_CONT " %08lx ", thread_saved_pc(p)); in sched_show_task()
4537 printk(KERN_CONT " %016lx ", thread_saved_pc(p)); in sched_show_task()
4540 free = stack_not_used(p); in sched_show_task()
4544 if (pid_alive(p)) in sched_show_task()
4545 ppid = task_pid_nr(rcu_dereference(p->real_parent)); in sched_show_task()
4548 task_pid_nr(p), ppid, in sched_show_task()
4549 (unsigned long)task_thread_info(p)->flags); in sched_show_task()
4551 print_worker_info(KERN_INFO, p); in sched_show_task()
4552 show_stack(p, NULL); in sched_show_task()
4557 struct task_struct *g, *p; in show_state_filter() local
4567 for_each_process_thread(g, p) { in show_state_filter()
4573 if (!state_filter || (p->state & state_filter)) in show_state_filter()
4574 sched_show_task(p); in show_state_filter()
4674 int task_can_attach(struct task_struct *p, in task_can_attach() argument
4688 if (p->flags & PF_NO_SETAFFINITY) { in task_can_attach()
4694 if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span, in task_can_attach()
4707 overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw); in task_can_attach()
4717 __dl_add(dl_b, p->dl.dl_bw); in task_can_attach()
4734 static struct rq *move_queued_task(struct task_struct *p, int new_cpu) in move_queued_task() argument
4736 struct rq *rq = task_rq(p); in move_queued_task()
4740 dequeue_task(rq, p, 0); in move_queued_task()
4741 p->on_rq = TASK_ON_RQ_MIGRATING; in move_queued_task()
4742 set_task_cpu(p, new_cpu); in move_queued_task()
4748 BUG_ON(task_cpu(p) != new_cpu); in move_queued_task()
4749 p->on_rq = TASK_ON_RQ_QUEUED; in move_queued_task()
4750 enqueue_task(rq, p, 0); in move_queued_task()
4751 check_preempt_curr(rq, p, 0); in move_queued_task()
4756 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) in do_set_cpus_allowed() argument
4758 if (p->sched_class->set_cpus_allowed) in do_set_cpus_allowed()
4759 p->sched_class->set_cpus_allowed(p, new_mask); in do_set_cpus_allowed()
4761 cpumask_copy(&p->cpus_allowed, new_mask); in do_set_cpus_allowed()
4762 p->nr_cpus_allowed = cpumask_weight(new_mask); in do_set_cpus_allowed()
4788 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) in set_cpus_allowed_ptr() argument
4795 rq = task_rq_lock(p, &flags); in set_cpus_allowed_ptr()
4797 if (cpumask_equal(&p->cpus_allowed, new_mask)) in set_cpus_allowed_ptr()
4805 do_set_cpus_allowed(p, new_mask); in set_cpus_allowed_ptr()
4808 if (cpumask_test_cpu(task_cpu(p), new_mask)) in set_cpus_allowed_ptr()
4812 if (task_running(rq, p) || p->state == TASK_WAKING) { in set_cpus_allowed_ptr()
4813 struct migration_arg arg = { p, dest_cpu }; in set_cpus_allowed_ptr()
4815 task_rq_unlock(rq, p, &flags); in set_cpus_allowed_ptr()
4817 tlb_migrate_finish(p->mm); in set_cpus_allowed_ptr()
4819 } else if (task_on_rq_queued(p)) in set_cpus_allowed_ptr()
4820 rq = move_queued_task(p, dest_cpu); in set_cpus_allowed_ptr()
4822 task_rq_unlock(rq, p, &flags); in set_cpus_allowed_ptr()
4839 static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) in __migrate_task() argument
4849 raw_spin_lock(&p->pi_lock); in __migrate_task()
4852 if (task_cpu(p) != src_cpu) in __migrate_task()
4856 if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) in __migrate_task()
4863 if (task_on_rq_queued(p)) in __migrate_task()
4864 rq = move_queued_task(p, dest_cpu); in __migrate_task()
4869 raw_spin_unlock(&p->pi_lock); in __migrate_task()
4875 int migrate_task_to(struct task_struct *p, int target_cpu) in migrate_task_to() argument
4877 struct migration_arg arg = { p, target_cpu }; in migrate_task_to()
4878 int curr_cpu = task_cpu(p); in migrate_task_to()
4883 if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p))) in migrate_task_to()
4888 trace_sched_move_numa(p, curr_cpu, target_cpu); in migrate_task_to()
4896 void sched_setnuma(struct task_struct *p, int nid) in sched_setnuma() argument
4902 rq = task_rq_lock(p, &flags); in sched_setnuma()
4903 queued = task_on_rq_queued(p); in sched_setnuma()
4904 running = task_current(rq, p); in sched_setnuma()
4907 dequeue_task(rq, p, 0); in sched_setnuma()
4909 put_prev_task(rq, p); in sched_setnuma()
4911 p->numa_preferred_nid = nid; in sched_setnuma()
4914 p->sched_class->set_curr_task(rq); in sched_setnuma()
4916 enqueue_task(rq, p, 0); in sched_setnuma()
4917 task_rq_unlock(rq, p, &flags); in sched_setnuma()
7347 static void normalize_task(struct rq *rq, struct task_struct *p) in normalize_task() argument
7349 const struct sched_class *prev_class = p->sched_class; in normalize_task()
7353 int old_prio = p->prio; in normalize_task()
7356 queued = task_on_rq_queued(p); in normalize_task()
7358 dequeue_task(rq, p, 0); in normalize_task()
7359 __setscheduler(rq, p, &attr, false); in normalize_task()
7361 enqueue_task(rq, p, 0); in normalize_task()
7365 check_class_changed(rq, p, prev_class, old_prio); in normalize_task()
7370 struct task_struct *g, *p; in normalize_rt_tasks() local
7375 for_each_process_thread(g, p) { in normalize_rt_tasks()
7379 if (p->flags & PF_KTHREAD) in normalize_rt_tasks()
7382 p->se.exec_start = 0; in normalize_rt_tasks()
7384 p->se.statistics.wait_start = 0; in normalize_rt_tasks()
7385 p->se.statistics.sleep_start = 0; in normalize_rt_tasks()
7386 p->se.statistics.block_start = 0; in normalize_rt_tasks()
7389 if (!dl_task(p) && !rt_task(p)) { in normalize_rt_tasks()
7394 if (task_nice(p) < 0) in normalize_rt_tasks()
7395 set_user_nice(p, 0); in normalize_rt_tasks()
7399 rq = task_rq_lock(p, &flags); in normalize_rt_tasks()
7400 normalize_task(rq, p); in normalize_rt_tasks()
7401 task_rq_unlock(rq, p, &flags); in normalize_rt_tasks()
7450 void set_curr_task(int cpu, struct task_struct *p) in set_curr_task() argument
7452 cpu_curr(cpu) = p; in set_curr_task()
7592 struct task_struct *g, *p; in tg_has_rt_tasks() local
7600 for_each_process_thread(g, p) { in tg_has_rt_tasks()
7601 if (rt_task(p) && task_group(p) == tg) in tg_has_rt_tasks()