Lines matching refs: p
476 static bool set_nr_and_not_polling(struct task_struct *p) in set_nr_and_not_polling() argument
478 struct thread_info *ti = task_thread_info(p); in set_nr_and_not_polling()
488 static bool set_nr_if_polling(struct task_struct *p) in set_nr_if_polling() argument
490 struct thread_info *ti = task_thread_info(p); in set_nr_if_polling()
507 static bool set_nr_and_not_polling(struct task_struct *p) in set_nr_and_not_polling() argument
509 set_tsk_need_resched(p); in set_nr_and_not_polling()
514 static bool set_nr_if_polling(struct task_struct *p) in set_nr_if_polling() argument
812 static void set_load_weight(struct task_struct *p) in set_load_weight() argument
814 int prio = p->static_prio - MAX_RT_PRIO; in set_load_weight()
815 struct load_weight *load = &p->se.load; in set_load_weight()
820 if (idle_policy(p->policy)) { in set_load_weight()
830 static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags) in enqueue_task() argument
834 sched_info_queued(rq, p); in enqueue_task()
835 p->sched_class->enqueue_task(rq, p, flags); in enqueue_task()
838 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) in dequeue_task() argument
842 sched_info_dequeued(rq, p); in dequeue_task()
843 p->sched_class->dequeue_task(rq, p, flags); in dequeue_task()
846 void activate_task(struct rq *rq, struct task_struct *p, int flags) in activate_task() argument
848 if (task_contributes_to_load(p)) in activate_task()
851 enqueue_task(rq, p, flags); in activate_task()
854 void deactivate_task(struct rq *rq, struct task_struct *p, int flags) in deactivate_task() argument
856 if (task_contributes_to_load(p)) in deactivate_task()
859 dequeue_task(rq, p, flags); in deactivate_task()
949 static inline int __normal_prio(struct task_struct *p) in __normal_prio() argument
951 return p->static_prio; in __normal_prio()
961 static inline int normal_prio(struct task_struct *p) in normal_prio() argument
965 if (task_has_dl_policy(p)) in normal_prio()
967 else if (task_has_rt_policy(p)) in normal_prio()
968 prio = MAX_RT_PRIO-1 - p->rt_priority; in normal_prio()
970 prio = __normal_prio(p); in normal_prio()
981 static int effective_prio(struct task_struct *p) in effective_prio() argument
983 p->normal_prio = normal_prio(p); in effective_prio()
989 if (!rt_prio(p->prio)) in effective_prio()
990 return p->normal_prio; in effective_prio()
991 return p->prio; in effective_prio()
1000 inline int task_curr(const struct task_struct *p) in task_curr() argument
1002 return cpu_curr(task_cpu(p)) == p; in task_curr()
1012 static inline void check_class_changed(struct rq *rq, struct task_struct *p, in check_class_changed() argument
1016 if (prev_class != p->sched_class) { in check_class_changed()
1018 prev_class->switched_from(rq, p); in check_class_changed()
1020 p->sched_class->switched_to(rq, p); in check_class_changed()
1021 } else if (oldprio != p->prio || dl_task(p)) in check_class_changed()
1022 p->sched_class->prio_changed(rq, p, oldprio); in check_class_changed()
1025 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr() argument
1029 if (p->sched_class == rq->curr->sched_class) { in check_preempt_curr()
1030 rq->curr->sched_class->check_preempt_curr(rq, p, flags); in check_preempt_curr()
1035 if (class == p->sched_class) { in check_preempt_curr()
1070 static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new_cpu) in move_queued_task() argument
1074 dequeue_task(rq, p, 0); in move_queued_task()
1075 p->on_rq = TASK_ON_RQ_MIGRATING; in move_queued_task()
1076 set_task_cpu(p, new_cpu); in move_queued_task()
1082 BUG_ON(task_cpu(p) != new_cpu); in move_queued_task()
1083 p->on_rq = TASK_ON_RQ_QUEUED; in move_queued_task()
1084 enqueue_task(rq, p, 0); in move_queued_task()
1085 check_preempt_curr(rq, p, 0); in move_queued_task()
1104 static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu) in __migrate_task() argument
1110 if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) in __migrate_task()
1113 rq = move_queued_task(rq, p, dest_cpu); in __migrate_task()
1126 struct task_struct *p = arg->task; in migration_cpu_stop() local
1141 raw_spin_lock(&p->pi_lock); in migration_cpu_stop()
1148 if (task_rq(p) == rq && task_on_rq_queued(p)) in migration_cpu_stop()
1149 rq = __migrate_task(rq, p, arg->dest_cpu); in migration_cpu_stop()
1151 raw_spin_unlock(&p->pi_lock); in migration_cpu_stop()
1161 void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask) in set_cpus_allowed_common() argument
1163 cpumask_copy(&p->cpus_allowed, new_mask); in set_cpus_allowed_common()
1164 p->nr_cpus_allowed = cpumask_weight(new_mask); in set_cpus_allowed_common()
1167 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) in do_set_cpus_allowed() argument
1169 struct rq *rq = task_rq(p); in do_set_cpus_allowed()
1172 lockdep_assert_held(&p->pi_lock); in do_set_cpus_allowed()
1174 queued = task_on_rq_queued(p); in do_set_cpus_allowed()
1175 running = task_current(rq, p); in do_set_cpus_allowed()
1183 dequeue_task(rq, p, DEQUEUE_SAVE); in do_set_cpus_allowed()
1186 put_prev_task(rq, p); in do_set_cpus_allowed()
1188 p->sched_class->set_cpus_allowed(p, new_mask); in do_set_cpus_allowed()
1191 p->sched_class->set_curr_task(rq); in do_set_cpus_allowed()
1193 enqueue_task(rq, p, ENQUEUE_RESTORE); in do_set_cpus_allowed()
1205 static int __set_cpus_allowed_ptr(struct task_struct *p, in __set_cpus_allowed_ptr() argument
1213 rq = task_rq_lock(p, &flags); in __set_cpus_allowed_ptr()
1219 if (check && (p->flags & PF_NO_SETAFFINITY)) { in __set_cpus_allowed_ptr()
1224 if (cpumask_equal(&p->cpus_allowed, new_mask)) in __set_cpus_allowed_ptr()
1232 do_set_cpus_allowed(p, new_mask); in __set_cpus_allowed_ptr()
1235 if (cpumask_test_cpu(task_cpu(p), new_mask)) in __set_cpus_allowed_ptr()
1239 if (task_running(rq, p) || p->state == TASK_WAKING) { in __set_cpus_allowed_ptr()
1240 struct migration_arg arg = { p, dest_cpu }; in __set_cpus_allowed_ptr()
1242 task_rq_unlock(rq, p, &flags); in __set_cpus_allowed_ptr()
1244 tlb_migrate_finish(p->mm); in __set_cpus_allowed_ptr()
1246 } else if (task_on_rq_queued(p)) { in __set_cpus_allowed_ptr()
1252 rq = move_queued_task(rq, p, dest_cpu); in __set_cpus_allowed_ptr()
1256 task_rq_unlock(rq, p, &flags); in __set_cpus_allowed_ptr()
1261 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) in set_cpus_allowed_ptr() argument
1263 return __set_cpus_allowed_ptr(p, new_mask, false); in set_cpus_allowed_ptr()
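
The affinity update path listed above is reached from kernel code through set_cpus_allowed_ptr(). A minimal sketch of a caller, assuming a hypothetical kthread function pinned_worker() whose data argument points at a target CPU number:

    #include <linux/cpumask.h>
    #include <linux/jiffies.h>
    #include <linux/kernel.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    /* Hypothetical worker that pins itself to one CPU before doing work. */
    static int pinned_worker(void *data)
    {
            int target_cpu = *(int *)data;

            /*
             * Restrict current to target_cpu: this goes through
             * __set_cpus_allowed_ptr() and, if we are currently running
             * on a CPU outside the new mask, migrates via
             * migration_cpu_stop()/stop_one_cpu().
             */
            if (set_cpus_allowed_ptr(current, cpumask_of(target_cpu)))
                    pr_warn("pinned_worker: could not move to CPU %d\n",
                            target_cpu);

            while (!kthread_should_stop())
                    schedule_timeout_interruptible(HZ);

            return 0;
    }
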
1267 void set_task_cpu(struct task_struct *p, unsigned int new_cpu) in set_task_cpu() argument
1274 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && in set_task_cpu()
1275 !p->on_rq); in set_task_cpu()
1288 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || in set_task_cpu()
1289 lockdep_is_held(&task_rq(p)->lock))); in set_task_cpu()
1293 trace_sched_migrate_task(p, new_cpu); in set_task_cpu()
1295 if (task_cpu(p) != new_cpu) { in set_task_cpu()
1296 if (p->sched_class->migrate_task_rq) in set_task_cpu()
1297 p->sched_class->migrate_task_rq(p); in set_task_cpu()
1298 p->se.nr_migrations++; in set_task_cpu()
1299 perf_event_task_migrate(p); in set_task_cpu()
1302 __set_task_cpu(p, new_cpu); in set_task_cpu()
1305 static void __migrate_swap_task(struct task_struct *p, int cpu) in __migrate_swap_task() argument
1307 if (task_on_rq_queued(p)) { in __migrate_swap_task()
1310 src_rq = task_rq(p); in __migrate_swap_task()
1313 deactivate_task(src_rq, p, 0); in __migrate_swap_task()
1314 set_task_cpu(p, cpu); in __migrate_swap_task()
1315 activate_task(dst_rq, p, 0); in __migrate_swap_task()
1316 check_preempt_curr(dst_rq, p, 0); in __migrate_swap_task()
1323 p->wake_cpu = cpu; in __migrate_swap_task()
1376 int migrate_swap(struct task_struct *cur, struct task_struct *p) in migrate_swap() argument
1384 .dst_task = p, in migrate_swap()
1385 .dst_cpu = task_cpu(p), in migrate_swap()
1404 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); in migrate_swap()
1427 unsigned long wait_task_inactive(struct task_struct *p, long match_state) in wait_task_inactive() argument
1441 rq = task_rq(p); in wait_task_inactive()
1454 while (task_running(rq, p)) { in wait_task_inactive()
1455 if (match_state && unlikely(p->state != match_state)) in wait_task_inactive()
1465 rq = task_rq_lock(p, &flags); in wait_task_inactive()
1466 trace_sched_wait_task(p); in wait_task_inactive()
1467 running = task_running(rq, p); in wait_task_inactive()
1468 queued = task_on_rq_queued(p); in wait_task_inactive()
1470 if (!match_state || p->state == match_state) in wait_task_inactive()
1471 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ in wait_task_inactive()
1472 task_rq_unlock(rq, p, &flags); in wait_task_inactive()
1532 void kick_process(struct task_struct *p) in kick_process() argument
1537 cpu = task_cpu(p); in kick_process()
1538 if ((cpu != smp_processor_id()) && task_curr(p)) in kick_process()
1547 static int select_fallback_rq(int cpu, struct task_struct *p) in select_fallback_rq() argument
1568 if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) in select_fallback_rq()
1575 for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) { in select_fallback_rq()
1587 cpuset_cpus_allowed_fallback(p); in select_fallback_rq()
1593 do_set_cpus_allowed(p, cpu_possible_mask); in select_fallback_rq()
1610 if (p->mm && printk_ratelimit()) { in select_fallback_rq()
1612 task_pid_nr(p), p->comm, cpu); in select_fallback_rq()
1623 int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) in select_task_rq() argument
1625 lockdep_assert_held(&p->pi_lock); in select_task_rq()
1627 if (p->nr_cpus_allowed > 1) in select_task_rq()
1628 cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); in select_task_rq()
1640 if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) || in select_task_rq()
1642 cpu = select_fallback_rq(task_cpu(p), p); in select_task_rq()
1655 static inline int __set_cpus_allowed_ptr(struct task_struct *p, in __set_cpus_allowed_ptr() argument
1658 return set_cpus_allowed_ptr(p, new_mask); in __set_cpus_allowed_ptr()
1664 ttwu_stat(struct task_struct *p, int cpu, int wake_flags) in ttwu_stat() argument
1674 schedstat_inc(p, se.statistics.nr_wakeups_local); in ttwu_stat()
1678 schedstat_inc(p, se.statistics.nr_wakeups_remote); in ttwu_stat()
1690 schedstat_inc(p, se.statistics.nr_wakeups_migrate); in ttwu_stat()
1695 schedstat_inc(p, se.statistics.nr_wakeups); in ttwu_stat()
1698 schedstat_inc(p, se.statistics.nr_wakeups_sync); in ttwu_stat()
1703 static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags) in ttwu_activate() argument
1705 activate_task(rq, p, en_flags); in ttwu_activate()
1706 p->on_rq = TASK_ON_RQ_QUEUED; in ttwu_activate()
1709 if (p->flags & PF_WQ_WORKER) in ttwu_activate()
1710 wq_worker_waking_up(p, cpu_of(rq)); in ttwu_activate()
1717 ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) in ttwu_do_wakeup() argument
1719 check_preempt_curr(rq, p, wake_flags); in ttwu_do_wakeup()
1720 p->state = TASK_RUNNING; in ttwu_do_wakeup()
1721 trace_sched_wakeup(p); in ttwu_do_wakeup()
1724 if (p->sched_class->task_woken) { in ttwu_do_wakeup()
1730 p->sched_class->task_woken(rq, p); in ttwu_do_wakeup()
1749 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags) in ttwu_do_activate() argument
1754 if (p->sched_contributes_to_load) in ttwu_do_activate()
1758 ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING); in ttwu_do_activate()
1759 ttwu_do_wakeup(rq, p, wake_flags); in ttwu_do_activate()
1768 static int ttwu_remote(struct task_struct *p, int wake_flags) in ttwu_remote() argument
1773 rq = __task_rq_lock(p); in ttwu_remote()
1774 if (task_on_rq_queued(p)) { in ttwu_remote()
1777 ttwu_do_wakeup(rq, p, wake_flags); in ttwu_remote()
1790 struct task_struct *p; in sched_ttwu_pending() local
1800 p = llist_entry(llist, struct task_struct, wake_entry); in sched_ttwu_pending()
1802 ttwu_do_activate(rq, p, 0); in sched_ttwu_pending()
1847 static void ttwu_queue_remote(struct task_struct *p, int cpu) in ttwu_queue_remote() argument
1851 if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) { in ttwu_queue_remote()
1889 static void ttwu_queue(struct task_struct *p, int cpu) in ttwu_queue() argument
1896 ttwu_queue_remote(p, cpu); in ttwu_queue()
1903 ttwu_do_activate(rq, p, 0); in ttwu_queue()
1924 try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) in try_to_wake_up() argument
1936 raw_spin_lock_irqsave(&p->pi_lock, flags); in try_to_wake_up()
1937 if (!(p->state & state)) in try_to_wake_up()
1940 trace_sched_waking(p); in try_to_wake_up()
1943 cpu = task_cpu(p); in try_to_wake_up()
1945 if (p->on_rq && ttwu_remote(p, wake_flags)) in try_to_wake_up()
1972 while (p->on_cpu) in try_to_wake_up()
1985 p->sched_contributes_to_load = !!task_contributes_to_load(p); in try_to_wake_up()
1986 p->state = TASK_WAKING; in try_to_wake_up()
1988 if (p->sched_class->task_waking) in try_to_wake_up()
1989 p->sched_class->task_waking(p); in try_to_wake_up()
1991 cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags); in try_to_wake_up()
1992 if (task_cpu(p) != cpu) { in try_to_wake_up()
1994 set_task_cpu(p, cpu); in try_to_wake_up()
1998 ttwu_queue(p, cpu); in try_to_wake_up()
2000 ttwu_stat(p, cpu, wake_flags); in try_to_wake_up()
2002 raw_spin_unlock_irqrestore(&p->pi_lock, flags); in try_to_wake_up()
2015 static void try_to_wake_up_local(struct task_struct *p) in try_to_wake_up_local() argument
2017 struct rq *rq = task_rq(p); in try_to_wake_up_local()
2020 WARN_ON_ONCE(p == current)) in try_to_wake_up_local()
2025 if (!raw_spin_trylock(&p->pi_lock)) { in try_to_wake_up_local()
2034 raw_spin_lock(&p->pi_lock); in try_to_wake_up_local()
2039 if (!(p->state & TASK_NORMAL)) in try_to_wake_up_local()
2042 trace_sched_waking(p); in try_to_wake_up_local()
2044 if (!task_on_rq_queued(p)) in try_to_wake_up_local()
2045 ttwu_activate(rq, p, ENQUEUE_WAKEUP); in try_to_wake_up_local()
2047 ttwu_do_wakeup(rq, p, 0); in try_to_wake_up_local()
2048 ttwu_stat(p, smp_processor_id(), 0); in try_to_wake_up_local()
2050 raw_spin_unlock(&p->pi_lock); in try_to_wake_up_local()
2065 int wake_up_process(struct task_struct *p) in wake_up_process() argument
2067 return try_to_wake_up(p, TASK_NORMAL, 0); in wake_up_process()
2071 int wake_up_state(struct task_struct *p, unsigned int state) in wake_up_state() argument
2073 return try_to_wake_up(p, state, 0); in wake_up_state()
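
wake_up_process() and wake_up_state() are thin wrappers around try_to_wake_up(). The usual pairing with a sleeper is the set_current_state()/schedule() loop sketched below; waiter_task, condition and the two helpers are illustrative names, not anything defined in this file:

    #include <linux/sched.h>

    static struct task_struct *waiter_task;  /* published by the sleeper */
    static bool condition;

    /* Sleeper side: re-check the condition after every wakeup. */
    static void wait_for_condition(void)
    {
            waiter_task = current;
            for (;;) {
                    set_current_state(TASK_INTERRUPTIBLE);
                    if (condition)
                            break;
                    schedule();
            }
            __set_current_state(TASK_RUNNING);
    }

    /* Waker side: ends up in try_to_wake_up(p, TASK_NORMAL, 0). */
    static void signal_condition(void)
    {
            condition = true;
            /*
             * try_to_wake_up() implies a write barrier before it changes
             * p->state, so the sleeper sees 'condition' once it is woken.
             */
            if (waiter_task)
                    wake_up_process(waiter_task);
    }
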
2079 void __dl_clear_params(struct task_struct *p) in __dl_clear_params() argument
2081 struct sched_dl_entity *dl_se = &p->dl; in __dl_clear_params()
2100 static void __sched_fork(unsigned long clone_flags, struct task_struct *p) in __sched_fork() argument
2102 p->on_rq = 0; in __sched_fork()
2104 p->se.on_rq = 0; in __sched_fork()
2105 p->se.exec_start = 0; in __sched_fork()
2106 p->se.sum_exec_runtime = 0; in __sched_fork()
2107 p->se.prev_sum_exec_runtime = 0; in __sched_fork()
2108 p->se.nr_migrations = 0; in __sched_fork()
2109 p->se.vruntime = 0; in __sched_fork()
2110 INIT_LIST_HEAD(&p->se.group_node); in __sched_fork()
2113 memset(&p->se.statistics, 0, sizeof(p->se.statistics)); in __sched_fork()
2116 RB_CLEAR_NODE(&p->dl.rb_node); in __sched_fork()
2117 init_dl_task_timer(&p->dl); in __sched_fork()
2118 __dl_clear_params(p); in __sched_fork()
2120 INIT_LIST_HEAD(&p->rt.run_list); in __sched_fork()
2123 INIT_HLIST_HEAD(&p->preempt_notifiers); in __sched_fork()
2127 if (p->mm && atomic_read(&p->mm->mm_users) == 1) { in __sched_fork()
2128 p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay); in __sched_fork()
2129 p->mm->numa_scan_seq = 0; in __sched_fork()
2133 p->numa_preferred_nid = current->numa_preferred_nid; in __sched_fork()
2135 p->numa_preferred_nid = -1; in __sched_fork()
2137 p->node_stamp = 0ULL; in __sched_fork()
2138 p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0; in __sched_fork()
2139 p->numa_scan_period = sysctl_numa_balancing_scan_delay; in __sched_fork()
2140 p->numa_work.next = &p->numa_work; in __sched_fork()
2141 p->numa_faults = NULL; in __sched_fork()
2142 p->last_task_numa_placement = 0; in __sched_fork()
2143 p->last_sum_exec_runtime = 0; in __sched_fork()
2145 p->numa_group = NULL; in __sched_fork()
2187 int sched_fork(unsigned long clone_flags, struct task_struct *p) in sched_fork() argument
2192 __sched_fork(clone_flags, p); in sched_fork()
2198 p->state = TASK_RUNNING; in sched_fork()
2203 p->prio = current->normal_prio; in sched_fork()
2208 if (unlikely(p->sched_reset_on_fork)) { in sched_fork()
2209 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { in sched_fork()
2210 p->policy = SCHED_NORMAL; in sched_fork()
2211 p->static_prio = NICE_TO_PRIO(0); in sched_fork()
2212 p->rt_priority = 0; in sched_fork()
2213 } else if (PRIO_TO_NICE(p->static_prio) < 0) in sched_fork()
2214 p->static_prio = NICE_TO_PRIO(0); in sched_fork()
2216 p->prio = p->normal_prio = __normal_prio(p); in sched_fork()
2217 set_load_weight(p); in sched_fork()
2223 p->sched_reset_on_fork = 0; in sched_fork()
2226 if (dl_prio(p->prio)) { in sched_fork()
2229 } else if (rt_prio(p->prio)) { in sched_fork()
2230 p->sched_class = &rt_sched_class; in sched_fork()
2232 p->sched_class = &fair_sched_class; in sched_fork()
2235 if (p->sched_class->task_fork) in sched_fork()
2236 p->sched_class->task_fork(p); in sched_fork()
2245 raw_spin_lock_irqsave(&p->pi_lock, flags); in sched_fork()
2246 set_task_cpu(p, cpu); in sched_fork()
2247 raw_spin_unlock_irqrestore(&p->pi_lock, flags); in sched_fork()
2251 memset(&p->sched_info, 0, sizeof(p->sched_info)); in sched_fork()
2254 p->on_cpu = 0; in sched_fork()
2256 init_task_preempt_count(p); in sched_fork()
2258 plist_node_init(&p->pushable_tasks, MAX_PRIO); in sched_fork()
2259 RB_CLEAR_NODE(&p->pushable_dl_tasks); in sched_fork()
2325 static int dl_overflow(struct task_struct *p, int policy, in dl_overflow() argument
2329 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); in dl_overflow()
2335 if (new_bw == p->dl.dl_bw) in dl_overflow()
2344 cpus = dl_bw_cpus(task_cpu(p)); in dl_overflow()
2345 if (dl_policy(policy) && !task_has_dl_policy(p) && in dl_overflow()
2349 } else if (dl_policy(policy) && task_has_dl_policy(p) && in dl_overflow()
2350 !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) { in dl_overflow()
2351 __dl_clear(dl_b, p->dl.dl_bw); in dl_overflow()
2354 } else if (!dl_policy(policy) && task_has_dl_policy(p)) { in dl_overflow()
2355 __dl_clear(dl_b, p->dl.dl_bw); in dl_overflow()
2372 void wake_up_new_task(struct task_struct *p) in wake_up_new_task() argument
2377 raw_spin_lock_irqsave(&p->pi_lock, flags); in wake_up_new_task()
2379 init_entity_runnable_average(&p->se); in wake_up_new_task()
2386 set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0)); in wake_up_new_task()
2389 rq = __task_rq_lock(p); in wake_up_new_task()
2390 activate_task(rq, p, 0); in wake_up_new_task()
2391 p->on_rq = TASK_ON_RQ_QUEUED; in wake_up_new_task()
2392 trace_sched_wakeup_new(p); in wake_up_new_task()
2393 check_preempt_curr(rq, p, WF_FORK); in wake_up_new_task()
2395 if (p->sched_class->task_woken) { in wake_up_new_task()
2401 p->sched_class->task_woken(rq, p); in wake_up_new_task()
2405 task_rq_unlock(rq, p, &flags); in wake_up_new_task()
2786 struct task_struct *p = current; in sched_exec() local
2790 raw_spin_lock_irqsave(&p->pi_lock, flags); in sched_exec()
2791 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0); in sched_exec()
2796 struct migration_arg arg = { p, dest_cpu }; in sched_exec()
2798 raw_spin_unlock_irqrestore(&p->pi_lock, flags); in sched_exec()
2799 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); in sched_exec()
2803 raw_spin_unlock_irqrestore(&p->pi_lock, flags); in sched_exec()
2819 unsigned long long task_sched_runtime(struct task_struct *p) in task_sched_runtime() argument
2837 if (!p->on_cpu || !task_on_rq_queued(p)) in task_sched_runtime()
2838 return p->se.sum_exec_runtime; in task_sched_runtime()
2841 rq = task_rq_lock(p, &flags); in task_sched_runtime()
2847 if (task_current(rq, p) && task_on_rq_queued(p)) { in task_sched_runtime()
2849 p->sched_class->update_curr(rq); in task_sched_runtime()
2851 ns = p->se.sum_exec_runtime; in task_sched_runtime()
2852 task_rq_unlock(rq, p, &flags); in task_sched_runtime()
3033 struct task_struct *p; in pick_next_task() local
3041 p = fair_sched_class.pick_next_task(rq, prev); in pick_next_task()
3042 if (unlikely(p == RETRY_TASK)) in pick_next_task()
3046 if (unlikely(!p)) in pick_next_task()
3047 p = idle_sched_class.pick_next_task(rq, prev); in pick_next_task()
3049 return p; in pick_next_task()
3054 p = class->pick_next_task(rq, prev); in pick_next_task()
3055 if (p) { in pick_next_task()
3056 if (unlikely(p == RETRY_TASK)) in pick_next_task()
3058 return p; in pick_next_task()
3366 void rt_mutex_setprio(struct task_struct *p, int prio) in rt_mutex_setprio() argument
3374 rq = __task_rq_lock(p); in rt_mutex_setprio()
3388 if (unlikely(p == rq->idle)) { in rt_mutex_setprio()
3389 WARN_ON(p != rq->curr); in rt_mutex_setprio()
3390 WARN_ON(p->pi_blocked_on); in rt_mutex_setprio()
3394 trace_sched_pi_setprio(p, prio); in rt_mutex_setprio()
3395 oldprio = p->prio; in rt_mutex_setprio()
3396 prev_class = p->sched_class; in rt_mutex_setprio()
3397 queued = task_on_rq_queued(p); in rt_mutex_setprio()
3398 running = task_current(rq, p); in rt_mutex_setprio()
3400 dequeue_task(rq, p, DEQUEUE_SAVE); in rt_mutex_setprio()
3402 put_prev_task(rq, p); in rt_mutex_setprio()
3414 struct task_struct *pi_task = rt_mutex_get_top_task(p); in rt_mutex_setprio()
3415 if (!dl_prio(p->normal_prio) || in rt_mutex_setprio()
3416 (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) { in rt_mutex_setprio()
3417 p->dl.dl_boosted = 1; in rt_mutex_setprio()
3420 p->dl.dl_boosted = 0; in rt_mutex_setprio()
3421 p->sched_class = &dl_sched_class; in rt_mutex_setprio()
3424 p->dl.dl_boosted = 0; in rt_mutex_setprio()
3427 p->sched_class = &rt_sched_class; in rt_mutex_setprio()
3430 p->dl.dl_boosted = 0; in rt_mutex_setprio()
3432 p->rt.timeout = 0; in rt_mutex_setprio()
3433 p->sched_class = &fair_sched_class; in rt_mutex_setprio()
3436 p->prio = prio; in rt_mutex_setprio()
3439 p->sched_class->set_curr_task(rq); in rt_mutex_setprio()
3441 enqueue_task(rq, p, enqueue_flag); in rt_mutex_setprio()
3443 check_class_changed(rq, p, prev_class, oldprio); in rt_mutex_setprio()
3453 void set_user_nice(struct task_struct *p, long nice) in set_user_nice() argument
3459 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) in set_user_nice()
3465 rq = task_rq_lock(p, &flags); in set_user_nice()
3472 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { in set_user_nice()
3473 p->static_prio = NICE_TO_PRIO(nice); in set_user_nice()
3476 queued = task_on_rq_queued(p); in set_user_nice()
3478 dequeue_task(rq, p, DEQUEUE_SAVE); in set_user_nice()
3480 p->static_prio = NICE_TO_PRIO(nice); in set_user_nice()
3481 set_load_weight(p); in set_user_nice()
3482 old_prio = p->prio; in set_user_nice()
3483 p->prio = effective_prio(p); in set_user_nice()
3484 delta = p->prio - old_prio; in set_user_nice()
3487 enqueue_task(rq, p, ENQUEUE_RESTORE); in set_user_nice()
3492 if (delta < 0 || (delta > 0 && task_running(rq, p))) in set_user_nice()
3496 task_rq_unlock(rq, p, &flags); in set_user_nice()
3505 int can_nice(const struct task_struct *p, const int nice) in can_nice() argument
3510 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || in can_nice()
3557 int task_prio(const struct task_struct *p) in task_prio() argument
3559 return p->prio - MAX_RT_PRIO; in task_prio()
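
set_user_nice() is the in-kernel way to renice a task directly; the capability and RLIMIT_NICE checks in can_nice() only apply on the syscall paths. A common use, sketched here with a hypothetical helper, is lowering the priority of a freshly created background kthread:

    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    /* Hypothetical: start a background thread at nice +10. */
    static struct task_struct *start_background_scan(int (*fn)(void *), void *arg)
    {
            struct task_struct *p = kthread_create(fn, arg, "bg_scan");

            if (IS_ERR(p))
                    return p;

            set_user_nice(p, 10);   /* static_prio = NICE_TO_PRIO(10) */
            wake_up_process(p);
            return p;
    }
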
3617 __setparam_dl(struct task_struct *p, const struct sched_attr *attr) in __setparam_dl() argument
3619 struct sched_dl_entity *dl_se = &p->dl; in __setparam_dl()
3654 static void __setscheduler_params(struct task_struct *p, in __setscheduler_params() argument
3660 policy = p->policy; in __setscheduler_params()
3662 p->policy = policy; in __setscheduler_params()
3665 __setparam_dl(p, attr); in __setscheduler_params()
3667 p->static_prio = NICE_TO_PRIO(attr->sched_nice); in __setscheduler_params()
3674 p->rt_priority = attr->sched_priority; in __setscheduler_params()
3675 p->normal_prio = normal_prio(p); in __setscheduler_params()
3676 set_load_weight(p); in __setscheduler_params()
3680 static void __setscheduler(struct rq *rq, struct task_struct *p, in __setscheduler() argument
3683 __setscheduler_params(p, attr); in __setscheduler()
3690 p->prio = rt_mutex_get_effective_prio(p, normal_prio(p)); in __setscheduler()
3692 p->prio = normal_prio(p); in __setscheduler()
3694 if (dl_prio(p->prio)) in __setscheduler()
3695 p->sched_class = &dl_sched_class; in __setscheduler()
3696 else if (rt_prio(p->prio)) in __setscheduler()
3697 p->sched_class = &rt_sched_class; in __setscheduler()
3699 p->sched_class = &fair_sched_class; in __setscheduler()
3703 __getparam_dl(struct task_struct *p, struct sched_attr *attr) in __getparam_dl() argument
3705 struct sched_dl_entity *dl_se = &p->dl; in __getparam_dl()
3707 attr->sched_priority = p->rt_priority; in __getparam_dl()
3758 static bool check_same_owner(struct task_struct *p) in check_same_owner() argument
3764 pcred = __task_cred(p); in check_same_owner()
3771 static bool dl_param_changed(struct task_struct *p, in dl_param_changed() argument
3774 struct sched_dl_entity *dl_se = &p->dl; in dl_param_changed()
3785 static int __sched_setscheduler(struct task_struct *p, in __sched_setscheduler() argument
3803 reset_on_fork = p->sched_reset_on_fork; in __sched_setscheduler()
3804 policy = oldpolicy = p->policy; in __sched_setscheduler()
3820 if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) || in __sched_setscheduler()
3821 (!p->mm && attr->sched_priority > MAX_RT_PRIO-1)) in __sched_setscheduler()
3832 if (attr->sched_nice < task_nice(p) && in __sched_setscheduler()
3833 !can_nice(p, attr->sched_nice)) in __sched_setscheduler()
3839 task_rlimit(p, RLIMIT_RTPRIO); in __sched_setscheduler()
3842 if (policy != p->policy && !rlim_rtprio) in __sched_setscheduler()
3846 if (attr->sched_priority > p->rt_priority && in __sched_setscheduler()
3864 if (idle_policy(p->policy) && !idle_policy(policy)) { in __sched_setscheduler()
3865 if (!can_nice(p, task_nice(p))) in __sched_setscheduler()
3870 if (!check_same_owner(p)) in __sched_setscheduler()
3874 if (p->sched_reset_on_fork && !reset_on_fork) in __sched_setscheduler()
3879 retval = security_task_setscheduler(p); in __sched_setscheduler()
3891 rq = task_rq_lock(p, &flags); in __sched_setscheduler()
3896 if (p == rq->stop) { in __sched_setscheduler()
3897 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
3905 if (unlikely(policy == p->policy)) { in __sched_setscheduler()
3906 if (fair_policy(policy) && attr->sched_nice != task_nice(p)) in __sched_setscheduler()
3908 if (rt_policy(policy) && attr->sched_priority != p->rt_priority) in __sched_setscheduler()
3910 if (dl_policy(policy) && dl_param_changed(p, attr)) in __sched_setscheduler()
3913 p->sched_reset_on_fork = reset_on_fork; in __sched_setscheduler()
3914 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
3926 task_group(p)->rt_bandwidth.rt_runtime == 0 && in __sched_setscheduler()
3927 !task_group_is_autogroup(task_group(p))) { in __sched_setscheduler()
3928 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
3941 if (!cpumask_subset(span, &p->cpus_allowed) || in __sched_setscheduler()
3943 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
3951 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { in __sched_setscheduler()
3953 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
3962 if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) { in __sched_setscheduler()
3963 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
3967 p->sched_reset_on_fork = reset_on_fork; in __sched_setscheduler()
3968 oldprio = p->prio; in __sched_setscheduler()
3978 new_effective_prio = rt_mutex_get_effective_prio(p, newprio); in __sched_setscheduler()
3980 __setscheduler_params(p, attr); in __sched_setscheduler()
3981 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
3986 queued = task_on_rq_queued(p); in __sched_setscheduler()
3987 running = task_current(rq, p); in __sched_setscheduler()
3989 dequeue_task(rq, p, DEQUEUE_SAVE); in __sched_setscheduler()
3991 put_prev_task(rq, p); in __sched_setscheduler()
3993 prev_class = p->sched_class; in __sched_setscheduler()
3994 __setscheduler(rq, p, attr, pi); in __sched_setscheduler()
3997 p->sched_class->set_curr_task(rq); in __sched_setscheduler()
4004 if (oldprio <= p->prio) in __sched_setscheduler()
4007 enqueue_task(rq, p, enqueue_flags); in __sched_setscheduler()
4010 check_class_changed(rq, p, prev_class, oldprio); in __sched_setscheduler()
4012 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
4015 rt_mutex_adjust_pi(p); in __sched_setscheduler()
4026 static int _sched_setscheduler(struct task_struct *p, int policy, in _sched_setscheduler() argument
4032 .sched_nice = PRIO_TO_NICE(p->static_prio), in _sched_setscheduler()
4042 return __sched_setscheduler(p, &attr, check, true); in _sched_setscheduler()
4054 int sched_setscheduler(struct task_struct *p, int policy, in sched_setscheduler() argument
4057 return _sched_setscheduler(p, policy, param, true); in sched_setscheduler()
4061 int sched_setattr(struct task_struct *p, const struct sched_attr *attr) in sched_setattr() argument
4063 return __sched_setscheduler(p, attr, true, true); in sched_setattr()
4080 int sched_setscheduler_nocheck(struct task_struct *p, int policy, in sched_setscheduler_nocheck() argument
4083 return _sched_setscheduler(p, policy, param, false); in sched_setscheduler_nocheck()
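
sched_setscheduler_nocheck() is the kernel-internal variant that passes check=false down to __sched_setscheduler(), skipping the permission and rlimit tests above. A sketch of the usual pattern for giving a kernel thread real-time priority; the priority value 50 is purely illustrative:

    #include <linux/sched.h>

    /* Hypothetical helper: promote a kernel thread to SCHED_FIFO. */
    static int make_thread_fifo(struct task_struct *p)
    {
            struct sched_param param = { .sched_priority = 50 };

            /* No capability checks: this is the check=false path above. */
            return sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
    }
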
4091 struct task_struct *p; in do_sched_setscheduler() local
4101 p = find_process_by_pid(pid); in do_sched_setscheduler()
4102 if (p != NULL) in do_sched_setscheduler()
4103 retval = sched_setscheduler(p, policy, &lparam); in do_sched_setscheduler()
4220 struct task_struct *p; in SYSCALL_DEFINE3() local
4235 p = find_process_by_pid(pid); in SYSCALL_DEFINE3()
4236 if (p != NULL) in SYSCALL_DEFINE3()
4237 retval = sched_setattr(p, &attr); in SYSCALL_DEFINE3()
4252 struct task_struct *p; in SYSCALL_DEFINE1() local
4260 p = find_process_by_pid(pid); in SYSCALL_DEFINE1()
4261 if (p) { in SYSCALL_DEFINE1()
4262 retval = security_task_getscheduler(p); in SYSCALL_DEFINE1()
4264 retval = p->policy in SYSCALL_DEFINE1()
4265 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); in SYSCALL_DEFINE1()
4282 struct task_struct *p; in SYSCALL_DEFINE2() local
4289 p = find_process_by_pid(pid); in SYSCALL_DEFINE2()
4291 if (!p) in SYSCALL_DEFINE2()
4294 retval = security_task_getscheduler(p); in SYSCALL_DEFINE2()
4298 if (task_has_rt_policy(p)) in SYSCALL_DEFINE2()
4299 lp.sched_priority = p->rt_priority; in SYSCALL_DEFINE2()
4363 struct task_struct *p; in SYSCALL_DEFINE4() local
4371 p = find_process_by_pid(pid); in SYSCALL_DEFINE4()
4373 if (!p) in SYSCALL_DEFINE4()
4376 retval = security_task_getscheduler(p); in SYSCALL_DEFINE4()
4380 attr.sched_policy = p->policy; in SYSCALL_DEFINE4()
4381 if (p->sched_reset_on_fork) in SYSCALL_DEFINE4()
4383 if (task_has_dl_policy(p)) in SYSCALL_DEFINE4()
4384 __getparam_dl(p, &attr); in SYSCALL_DEFINE4()
4385 else if (task_has_rt_policy(p)) in SYSCALL_DEFINE4()
4386 attr.sched_priority = p->rt_priority; in SYSCALL_DEFINE4()
4388 attr.sched_nice = task_nice(p); in SYSCALL_DEFINE4()
4403 struct task_struct *p; in sched_setaffinity() local
4408 p = find_process_by_pid(pid); in sched_setaffinity()
4409 if (!p) { in sched_setaffinity()
4415 get_task_struct(p); in sched_setaffinity()
4418 if (p->flags & PF_NO_SETAFFINITY) { in sched_setaffinity()
4431 if (!check_same_owner(p)) { in sched_setaffinity()
4433 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { in sched_setaffinity()
4440 retval = security_task_setscheduler(p); in sched_setaffinity()
4445 cpuset_cpus_allowed(p, cpus_allowed); in sched_setaffinity()
4455 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) { in sched_setaffinity()
4457 if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) { in sched_setaffinity()
4466 retval = __set_cpus_allowed_ptr(p, new_mask, true); in sched_setaffinity()
4469 cpuset_cpus_allowed(p, cpus_allowed); in sched_setaffinity()
4485 put_task_struct(p); in sched_setaffinity()
4526 struct task_struct *p; in sched_getaffinity() local
4533 p = find_process_by_pid(pid); in sched_getaffinity()
4534 if (!p) in sched_getaffinity()
4537 retval = security_task_getscheduler(p); in sched_getaffinity()
4541 raw_spin_lock_irqsave(&p->pi_lock, flags); in sched_getaffinity()
4542 cpumask_and(mask, &p->cpus_allowed, cpu_active_mask); in sched_getaffinity()
4543 raw_spin_unlock_irqrestore(&p->pi_lock, flags); in sched_getaffinity()
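
The sched_setaffinity() and sched_getaffinity() helpers above are the backends of the corresponding syscalls. A minimal userspace caller through the glibc wrappers, operating on the calling thread (pid 0), might look like this:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            cpu_set_t set;

            CPU_ZERO(&set);
            CPU_SET(0, &set);
            if (sched_setaffinity(0, sizeof(set), &set))  /* pid 0 == caller */
                    perror("sched_setaffinity");

            if (sched_getaffinity(0, sizeof(set), &set) == 0)
                    printf("allowed CPUs: %d\n", CPU_COUNT(&set));

            return 0;
    }
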
4712 int __sched yield_to(struct task_struct *p, bool preempt) in yield_to() argument
4723 p_rq = task_rq(p); in yield_to()
4734 if (task_rq(p) != p_rq) { in yield_to()
4742 if (curr->sched_class != p->sched_class) in yield_to()
4745 if (task_running(p_rq, p) || p->state) in yield_to()
4748 yielded = curr->sched_class->yield_to_task(rq, p, preempt); in yield_to()
4863 struct task_struct *p; in SYSCALL_DEFINE2() local
4875 p = find_process_by_pid(pid); in SYSCALL_DEFINE2()
4876 if (!p) in SYSCALL_DEFINE2()
4879 retval = security_task_getscheduler(p); in SYSCALL_DEFINE2()
4883 rq = task_rq_lock(p, &flags); in SYSCALL_DEFINE2()
4885 if (p->sched_class->get_rr_interval) in SYSCALL_DEFINE2()
4886 time_slice = p->sched_class->get_rr_interval(rq, p); in SYSCALL_DEFINE2()
4887 task_rq_unlock(rq, p, &flags); in SYSCALL_DEFINE2()
4901 void sched_show_task(struct task_struct *p) in sched_show_task() argument
4905 unsigned long state = p->state; in sched_show_task()
4909 printk(KERN_INFO "%-15.15s %c", p->comm, in sched_show_task()
4915 printk(KERN_CONT " %08lx ", thread_saved_pc(p)); in sched_show_task()
4920 printk(KERN_CONT " %016lx ", thread_saved_pc(p)); in sched_show_task()
4923 free = stack_not_used(p); in sched_show_task()
4927 if (pid_alive(p)) in sched_show_task()
4928 ppid = task_pid_nr(rcu_dereference(p->real_parent)); in sched_show_task()
4931 task_pid_nr(p), ppid, in sched_show_task()
4932 (unsigned long)task_thread_info(p)->flags); in sched_show_task()
4934 print_worker_info(KERN_INFO, p); in sched_show_task()
4935 show_stack(p, NULL); in sched_show_task()
4940 struct task_struct *g, *p; in show_state_filter() local
4950 for_each_process_thread(g, p) { in show_state_filter()
4956 if (!state_filter || (p->state & state_filter)) in show_state_filter()
4957 sched_show_task(p); in show_state_filter()
5067 int task_can_attach(struct task_struct *p, in task_can_attach() argument
5081 if (p->flags & PF_NO_SETAFFINITY) { in task_can_attach()
5087 if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span, in task_can_attach()
5100 overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw); in task_can_attach()
5110 __dl_add(dl_b, p->dl.dl_bw); in task_can_attach()
5125 int migrate_task_to(struct task_struct *p, int target_cpu) in migrate_task_to() argument
5127 struct migration_arg arg = { p, target_cpu }; in migrate_task_to()
5128 int curr_cpu = task_cpu(p); in migrate_task_to()
5133 if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p))) in migrate_task_to()
5138 trace_sched_move_numa(p, curr_cpu, target_cpu); in migrate_task_to()
5146 void sched_setnuma(struct task_struct *p, int nid) in sched_setnuma() argument
5152 rq = task_rq_lock(p, &flags); in sched_setnuma()
5153 queued = task_on_rq_queued(p); in sched_setnuma()
5154 running = task_current(rq, p); in sched_setnuma()
5157 dequeue_task(rq, p, DEQUEUE_SAVE); in sched_setnuma()
5159 put_prev_task(rq, p); in sched_setnuma()
5161 p->numa_preferred_nid = nid; in sched_setnuma()
5164 p->sched_class->set_curr_task(rq); in sched_setnuma()
5166 enqueue_task(rq, p, ENQUEUE_RESTORE); in sched_setnuma()
5167 task_rq_unlock(rq, p, &flags); in sched_setnuma()
7607 struct task_struct *g, *p; in normalize_rt_tasks() local
7613 for_each_process_thread(g, p) { in normalize_rt_tasks()
7617 if (p->flags & PF_KTHREAD) in normalize_rt_tasks()
7620 p->se.exec_start = 0; in normalize_rt_tasks()
7622 p->se.statistics.wait_start = 0; in normalize_rt_tasks()
7623 p->se.statistics.sleep_start = 0; in normalize_rt_tasks()
7624 p->se.statistics.block_start = 0; in normalize_rt_tasks()
7627 if (!dl_task(p) && !rt_task(p)) { in normalize_rt_tasks()
7632 if (task_nice(p) < 0) in normalize_rt_tasks()
7633 set_user_nice(p, 0); in normalize_rt_tasks()
7637 __sched_setscheduler(p, &attr, false, false); in normalize_rt_tasks()
7686 void set_curr_task(int cpu, struct task_struct *p) in set_curr_task() argument
7688 cpu_curr(cpu) = p; in set_curr_task()
7827 struct task_struct *g, *p; in tg_has_rt_tasks() local
7835 for_each_process_thread(g, p) { in tg_has_rt_tasks()
7836 if (rt_task(p) && task_group(p) == tg) in tg_has_rt_tasks()