task_cpu 199 arch/arm64/kernel/smp.c cpu = task_cpu(current);
task_cpu 70 arch/ia64/include/asm/switch_to.h (task_cpu(current) != \
task_cpu 72 arch/ia64/include/asm/switch_to.h task_thread_info(current)->last_cpu = task_cpu(current); \
task_cpu 1626 arch/ia64/kernel/mca.c printk(" %d (cpu %d task 0x%p)", g->pid, task_cpu(g), g);
task_cpu 813 arch/mips/kernel/process.c cpumask_set_cpu(task_cpu(t), &process_cpus);
task_cpu 1960 arch/powerpc/kernel/process.c unsigned long cpu = task_cpu(p);
task_cpu 3098 arch/powerpc/xmon/xmon.c state, task_cpu(tsk),
task_cpu 2205 arch/x86/kernel/cpu/resctrl/rdtgroup.c cpumask_set_cpu(task_cpu(t), mask);
task_cpu 198 arch/x86/um/ptrace_32.c int err, n, cpu = task_cpu(child);
task_cpu 215 arch/x86/um/ptrace_32.c int n, cpu = task_cpu(child);
task_cpu 228 arch/x86/um/ptrace_32.c int err, n, cpu = task_cpu(child);
task_cpu 244 arch/x86/um/ptrace_32.c int n, cpu = task_cpu(child);
task_cpu 594 fs/proc/array.c seq_put_decimal_ll(m, " ", task_cpu(task));
task_cpu 180 include/linux/kdb.h unsigned int cpu = task_cpu(p);
task_cpu 230 include/linux/sched/topology.h return cpu_to_node(task_cpu(p));
task_cpu 76 include/trace/events/sched.h __entry->target_cpu = task_cpu(p);
task_cpu 207 include/trace/events/sched.h __entry->orig_cpu = task_cpu(p);
task_cpu 73 kernel/events/core.c if (task_cpu(p) != smp_processor_id())
task_cpu 114 kernel/events/core.c ret = smp_call_function_single(task_cpu(p), remote_function,
task_cpu 569 kernel/locking/mutex.c vcpu_is_preempted(task_cpu(owner))) {
task_cpu 605 kernel/locking/mutex.c retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
task_cpu 653 kernel/locking/rwsem.c return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
task_cpu 1489 kernel/rcu/rcutorture.c wtp == NULL ? -1 : (int)task_cpu(wtp));
task_cpu 920 kernel/rcu/tree.c cpu = task_cpu(t);
task_cpu 345 kernel/rcu/tree_stall.h gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1);
task_cpu 620 kernel/rcu/update.c cpu = task_cpu(t);
task_cpu 1389 kernel/sched/core.c return cpu_curr(task_cpu(p)) == p;
task_cpu 1497 kernel/sched/core.c BUG_ON(task_cpu(p) != new_cpu);
task_cpu 1674 kernel/sched/core.c if (cpumask_test_cpu(task_cpu(p), new_mask))
task_cpu 1743 kernel/sched/core.c if (task_cpu(p) != new_cpu) {
task_cpu 1806 kernel/sched/core.c if (task_cpu(arg->dst_task) != arg->dst_cpu)
task_cpu 1809 kernel/sched/core.c if (task_cpu(arg->src_task) != arg->src_cpu)
task_cpu 1997 kernel/sched/core.c cpu = task_cpu(p);
task_cpu 2119 kernel/sched/core.c cpu = select_fallback_rq(task_cpu(p), p);
task_cpu 2534 kernel/sched/core.c cpu = task_cpu(p);
task_cpu 2556 kernel/sched/core.c cpu = task_cpu(p);
task_cpu 2624 kernel/sched/core.c if (task_cpu(p) != cpu) {
task_cpu 2955 kernel/sched/core.c p->recent_used_cpu = task_cpu(p);
task_cpu 2956 kernel/sched/core.c __set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
task_cpu 3497 kernel/sched/core.c dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
task_cpu 3505 kernel/sched/core.c stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
task_cpu 6127 kernel/sched/core.c int curr_cpu = task_cpu(p);
task_cpu 7023 kernel/sched/core.c set_task_rq(tsk, task_cpu(tsk));
task_cpu 275 kernel/sched/deadline.c struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
task_cpu 280 kernel/sched/deadline.c __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
task_cpu 1297 kernel/sched/deadline.c struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
task_cpu 1306 kernel/sched/deadline.c __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
task_cpu 1881 kernel/sched/deadline.c int cpu = task_cpu(task);
task_cpu 2031 kernel/sched/deadline.c BUG_ON(rq->cpu != task_cpu(p));
task_cpu 2257 kernel/sched/deadline.c __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
task_cpu 2555 kernel/sched/deadline.c struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
task_cpu 2574 kernel/sched/deadline.c cpus = dl_bw_cpus(task_cpu(p));
task_cpu 475 kernel/sched/debug.c if (task_cpu(p) != rq_cpu)
task_cpu 1742 kernel/sched/fair.c .src_cpu = task_cpu(p),
task_cpu 1863 kernel/sched/fair.c trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
task_cpu 2705 kernel/sched/fair.c int src_nid = cpu_to_node(task_cpu(p));
task_cpu 6134 kernel/sched/fair.c if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
task_cpu 6246 kernel/sched/fair.c if (task_cpu(p) == cpu && dst_cpu != cpu)
task_cpu 6248 kernel/sched/fair.c else if (task_cpu(p) != cpu && dst_cpu == cpu)
task_cpu 10232 kernel/sched/fair.c set_task_rq(p, task_cpu(p));
task_cpu 10239 kernel/sched/fair.c set_task_rq(p, task_cpu(p));
task_cpu 366 kernel/sched/idle.c return task_cpu(p); /* IDLE tasks as never migrated */
task_cpu 749 kernel/sched/psi.c int cpu = task_cpu(task);
task_cpu 1640 kernel/sched/rt.c int cpu = task_cpu(task);
task_cpu 1777 kernel/sched/rt.c BUG_ON(rq->cpu != task_cpu(p));
task_cpu 1053 kernel/sched/sched.h #define task_rq(p) cpu_rq(task_cpu(p))
task_cpu 1741 kernel/sched/sched.h int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
task_cpu 16 kernel/sched/stop_task.c return task_cpu(p); /* stop tasks as never migrate */
task_cpu 396 kernel/trace/trace_sched_wakeup.c entry->next_cpu = task_cpu(next);
task_cpu 424 kernel/trace/trace_sched_wakeup.c entry->next_cpu = task_cpu(wakee);
task_cpu 569 kernel/trace/trace_sched_wakeup.c wakeup_cpu = task_cpu(p);
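
The references above share one pattern: task_cpu(p) returns the CPU a task is currently assigned to, and callers use that value as a snapshot for runqueue lookups (task_rq() in kernel/sched/sched.h), tracing, or cross-CPU calls. A minimal sketch of the IPI pattern visible at kernel/events/core.c:73 and :114 follows; run_on_task_cpu(), do_remote_work(), and struct remote_arg are hypothetical names for illustration, not kernel APIs.

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/smp.h>

struct remote_arg {
	struct task_struct *p;
	int ret;
};

static void do_remote_work(void *info)
{
	struct remote_arg *arg = info;

	/* Runs in IPI context on the target CPU; bail out if @p moved. */
	arg->ret = (arg->p == current) ? 0 : -EAGAIN;
}

static int run_on_task_cpu(struct task_struct *p)
{
	struct remote_arg arg = { .p = p, .ret = -ENXIO };

	/*
	 * task_cpu(p) is only a snapshot: the task may migrate before
	 * the IPI lands, so the callback re-checks and the caller would
	 * retry on -EAGAIN, as the real perf code does.
	 */
	smp_call_function_single(task_cpu(p), do_remote_work, &arg, 1);
	return arg.ret;
}

The locking references show the other recurring use: kernel/locking/mutex.c:605 and kernel/locking/rwsem.c:653 keep optimistically spinning on a lock owner only while owner->on_cpu is set and vcpu_is_preempted(task_cpu(owner)) reports that the vCPU hosting the owner has not been preempted.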