Searched refs:task_cpu (Results 1 - 19 of 19) sorted by relevance

/linux-4.1.27/arch/ia64/include/asm/
switch_to.h:69 (task_cpu(current) != \
72 task_thread_info(current)->last_cpu = task_cpu(current); \
/linux-4.1.27/kernel/sched/
idle_task.c:14 return task_cpu(p); /* IDLE tasks as never migrated */ select_task_rq_idle()
stop_task.c:16 return task_cpu(p); /* stop tasks as never migrate */ select_task_rq_stop()
cpuacct.c:240 cpu = task_cpu(tsk); cpuacct_charge()
core.c:975 return cpu_curr(task_cpu(p)) == p; task_curr()
1048 if (task_cpu(p) != new_cpu) { set_task_cpu()
1097 if (task_cpu(arg->dst_task) != arg->dst_cpu) migrate_swap_stop()
1100 if (task_cpu(arg->src_task) != arg->src_cpu) migrate_swap_stop()
1132 .src_cpu = task_cpu(cur), migrate_swap()
1134 .dst_cpu = task_cpu(p), migrate_swap()
1293 cpu = task_cpu(p); kick_process()
1391 * [ this allows ->select_task() to simply return task_cpu(p) and select_task_rq()
1396 cpu = select_fallback_rq(task_cpu(p), p); select_task_rq()
1673 cpu = task_cpu(p); try_to_wake_up()
1697 if (task_cpu(p) != cpu) { try_to_wake_up()
2034 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); dl_overflow()
2049 cpus = dl_bw_cpus(task_cpu(p)); dl_overflow()
2089 set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0)); wake_up_new_task()
2425 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0); sched_exec()
2433 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); sched_exec()
4748 BUG_ON(task_cpu(p) != new_cpu); move_queued_task()
4808 if (cpumask_test_cpu(task_cpu(p), new_mask)) set_cpus_allowed_ptr()
4852 if (task_cpu(p) != src_cpu) __migrate_task()
4878 int curr_cpu = task_cpu(p); migrate_task_to()
7572 set_task_rq(tsk, task_cpu(tsk)); sched_move_task()
deadline.c:1175 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); task_dead_dl()
1240 int best_cpu, cpu = task_cpu(task); find_later_rq()
1377 BUG_ON(rq->cpu != task_cpu(p)); pick_next_pushable_dl_task()
1437 if (task_cpu(next_task) == rq->cpu && task == next_task) { push_dl_task()
debug.c:165 if (task_cpu(p) != rq_cpu) for_each_process_thread()
rt.c:1319 /* For anything but wake ups, just return the task_cpu */ select_task_rq_rt()
1553 int cpu = task_cpu(task); find_lowest_rq()
1689 BUG_ON(rq->cpu != task_cpu(p)); pick_next_pushable_task()
1749 if (task_cpu(next_task) == rq->cpu && task == next_task) { push_rt_task()
sched.h:704 #define task_rq(p) cpu_rq(task_cpu(p))
1186 int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
fair.c:1417 .src_cpu = task_cpu(p), task_numa_migrate()
1538 trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
4582 prev_cpu = task_cpu(p); wake_affine()
4751 int i = task_cpu(p); select_idle_sibling()
4908 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
8032 set_task_rq(p, task_cpu(p)); task_move_group_fair()
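
The idle_task.c:14 and stop_task.c:16 hits above both implement the ->select_task_rq() hook (prototype at sched.h:1186) by simply returning task_cpu(p), i.e. the task is never moved by wakeup placement. A minimal sketch of that pattern for a hypothetical scheduling class whose tasks never migrate; select_task_rq_pinned is an illustrative name, not a hook that exists in the tree:

#include <linux/sched.h>

static int
select_task_rq_pinned(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	/* Never load-balance: keep the task on the CPU it already occupies. */
	return task_cpu(p);
}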
/linux-4.1.27/include/trace/events/
sched.h:75 __entry->target_cpu = task_cpu(p);
176 __entry->orig_cpu = task_cpu(p);
/linux-4.1.27/kernel/trace/
trace_sched_wakeup.c:388 entry->next_cpu = task_cpu(next); tracing_sched_switch_trace()
416 entry->next_cpu = task_cpu(wakee); tracing_sched_wakeup_trace()
560 wakeup_cpu = task_cpu(p); probe_wakeup()
/linux-4.1.27/fs/proc/
array.c:515 seq_put_decimal_ll(m, ' ', task_cpu(task)); do_task_stat()
/linux-4.1.27/kernel/
cpu.c:316 if (task_cpu(p) != dead_cpu) do_each_thread()
/linux-4.1.27/arch/powerpc/kernel/
process.c:1480 unsigned long cpu = task_cpu(p); valid_irq_stack()
1484 * task_cpu(p), which is in the thread_info struct. valid_irq_stack()
/linux-4.1.27/include/linux/
sched.h:2990 static inline unsigned int task_cpu(const struct task_struct *p) task_cpu() function
2997 return cpu_to_node(task_cpu(p)); task_node()
3004 static inline unsigned int task_cpu(const struct task_struct *p) task_cpu() function
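
The two definition hits at sched.h:2990 and sched.h:3004 are the SMP and UP variants of the accessor. A sketch of how they look in a 4.1-era tree, where the owning CPU is still cached in thread_info rather than in task_struct; this is a reconstruction of that layout, not a verbatim quote:

#ifdef CONFIG_SMP
static inline unsigned int task_cpu(const struct task_struct *p)
{
	/* SMP: the CPU the task was last queued on lives in its thread_info. */
	return task_thread_info(p)->cpu;
}
#else
static inline unsigned int task_cpu(const struct task_struct *p)
{
	/* Uniprocessor: everything runs on CPU 0. */
	return 0;
}
#endif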
/linux-4.1.27/kernel/rcu/
update.c:569 cpu = task_cpu(t); check_holdout_task()
/linux-4.1.27/arch/ia64/kernel/
mca.c:1630 printk(" %d (cpu %d task 0x%p)", g->pid, task_cpu(g), g); for_each_online_cpu()
/linux-4.1.27/kernel/events/
core.c:68 if (task_cpu(p) != smp_processor_id() || !task_curr(p)) remote_function()
99 smp_call_function_single(task_cpu(p), remote_function, &data, 1); task_function_call()
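
The two kernel/events/core.c hits show the usual cross-CPU call pattern: read task_cpu(p), fire an IPI at that CPU with smp_call_function_single(), and re-check on the target CPU that the task is still current there, since it may have migrated or been preempted in the meantime. A hedged sketch of that pattern; my_remote_fn and my_task_function_call are illustrative names, not the functions in core.c:

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/smp.h>

struct my_remote_data {
	struct task_struct *p;
	int ret;
};

static void my_remote_fn(void *info)
{
	struct my_remote_data *d = info;

	/* Re-check on the target CPU: the task may have moved or been preempted. */
	if (task_cpu(d->p) != smp_processor_id() || !task_curr(d->p))
		return;

	d->ret = 0;	/* the task really is current here; do the work */
}

static int my_task_function_call(struct task_struct *p)
{
	struct my_remote_data data = { .p = p, .ret = -EAGAIN };

	/* Run my_remote_fn on the task's CPU and wait for it to finish. */
	smp_call_function_single(task_cpu(p), my_remote_fn, &data, 1);
	return data.ret;
}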

Completed in 575 milliseconds