Lines Matching defs:task
79 * sporadic time-constrained task. In such a model, a task is specified by:
85 * Very briefly, a periodic (sporadic) task asks for the execution of
95 * @sched_policy task's scheduling policy
97 * @sched_nice task's nice value (SCHED_NORMAL/BATCH)
98 * @sched_priority task's static priority (SCHED_FIFO/RR)
99 * @sched_deadline representative of the task's deadline
100 * @sched_runtime representative of the task's runtime
101 * @sched_period representative of the task's period
103 * Given this task model, there is a multiplicity of scheduling algorithms
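
These fields mirror the user-visible struct sched_attr. As an illustration (not part of the matched lines), a minimal user-space sketch of filling them for SCHED_DEADLINE follows; the struct is declared locally because no stable libc header exposes it, and the 10/30/30 ms figures are arbitrary example values:

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/sched.h>            /* SCHED_DEADLINE */

/* Declared locally for the sketch: field layout matches the uapi struct. */
struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;        /* SCHED_NORMAL/BATCH */
	uint32_t sched_priority;    /* SCHED_FIFO/RR */
	uint64_t sched_runtime;     /* ns */
	uint64_t sched_deadline;    /* ns */
	uint64_t sched_period;      /* ns */
};

int main(void)
{
	struct sched_attr attr = {
		.size           = sizeof(attr),
		.sched_policy   = SCHED_DEADLINE,
		.sched_runtime  = 10 * 1000 * 1000,   /* 10 ms budget   */
		.sched_deadline = 30 * 1000 * 1000,   /* 30 ms deadline */
		.sched_period   = 30 * 1000 * 1000,   /* 30 ms period   */
	};

	/* pid 0 means the calling thread; there is no glibc wrapper for this syscall. */
	if (syscall(SYS_sched_setattr, 0, &attr, 0) == -1)
		perror("sched_setattr");
	return 0;
}

The three time parameters are in nanoseconds, and the kernel rejects settings that do not satisfy runtime <= deadline <= period.
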
201 * We have two separate sets of flags: task->state
202 * is about runnability, while task->exit_state is
203 * about the task exiting. Confusing, but this way
245 #define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
246 #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
247 #define task_is_stopped_or_traced(task) \
248 ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
249 #define task_contributes_to_load(task) \
250 ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
251 (task->flags & PF_FROZEN) == 0 && \
252 (task->state & TASK_NOLOAD) == 0)
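
For illustration, a sketch (not from the header) of how the predicate macros above might be used from a hypothetical debug helper; for_each_process() traversal is done under rcu_read_lock(), and the counts are inherently racy snapshots:

#include <linux/sched.h>
#include <linux/rcupdate.h>
#include <linux/printk.h>

/* Hypothetical helper: count tasks that are stopped/traced and tasks that
 * still count toward the load average, using the macros quoted above.
 */
static void count_task_states(void)
{
	struct task_struct *p;
	int stopped = 0, loaded = 0;

	rcu_read_lock();                 /* for_each_process() needs RCU or tasklist_lock */
	for_each_process(p) {
		if (task_is_stopped_or_traced(p))
			stopped++;
		if (task_contributes_to_load(p))
			loaded++;
	}
	rcu_read_unlock();

	pr_info("stopped/traced: %d, contributing to load: %d\n", stopped, loaded);
}
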
366 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
367 * task), SP is the stack pointer of the first frame that should be shown in the
368 * backtrace (or NULL if the entire call-chain of the task should be shown).
370 extern void show_stack(struct task_struct *task, unsigned long *sp);
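
A common debugging use, sketched here as an assumption about typical call sites rather than a quote from the header: passing NULL for both arguments dumps the current task's whole call chain.

#include <linux/sched.h>

/* Hypothetical helper: dump call chains to the kernel log for debugging. */
static void dump_task_backtrace(struct task_struct *victim)
{
	/* NULL, NULL means "current task, entire call chain". */
	show_stack(NULL, NULL);

	/* Or a specific task that is known not to be running right now. */
	if (victim)
		show_stack(victim, NULL);
}
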
876 unsigned int flags; /* Private per-task flags */
934 * callers have already marked the task as woken internally,
939 * We hold reference to each task in the list across the wakeup,
943 * One per task suffices, because there's never a need for a task to be
944 * in two wake queues simultaneously; it is forbidden to abandon a task
945 * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is
973 struct task_struct *task);
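
The intended usage pattern is roughly the sketch below; it is an illustration under assumptions (struct my_waiter and release_waiters() are hypothetical, and the on-stack head initializer is WAKE_Q() in older trees, DEFINE_WAKE_Q() in newer ones): queue tasks while a lock is held, then wake them all after dropping it.

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/list.h>

struct my_waiter {                        /* hypothetical waiter record */
	struct list_head list;
	struct task_struct *task;
};

static void release_waiters(spinlock_t *lock, struct list_head *waiters)
{
	struct my_waiter *w;
	WAKE_Q(wake_q);                   /* on-stack wake queue head */

	spin_lock(lock);
	list_for_each_entry(w, waiters, list)
		wake_q_add(&wake_q, w->task); /* takes a reference on each task */
	spin_unlock(lock);

	wake_up_q(&wake_q);               /* must follow: wakes tasks, drops references */
}
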
985 #define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */
1309 * they are continuously updated during task execution. Note that
1320 * task has to wait for a replenishment to be performed at the
1331 * @dl_yielded tells if task gave up the cpu before consuming
1337 * Bandwidth enforcement timer. Each -deadline task has its
1338 * own bandwidth to be enforced, thus we need one timer per task.
1451 /* task state */
1505 * ptraced is the list of tasks this task is using ptrace on.
1543 const struct cred __rcu *real_cred; /* objective and real subjective task
1545 const struct cred __rcu *cred; /* effective (overridable) subjective task
1559 /* hung task detection */
1601 /* PI waiters blocked on a rt_mutex held by this task */
1728 * scan window were remote/local or failed to migrate. The task scan
1818 /* CPU-specific state of this task */
1872 static inline struct pid *task_pid(struct task_struct *task)
1874 return task->pids[PIDTYPE_PID].pid;
1877 static inline struct pid *task_tgid(struct task_struct *task)
1879 return task->group_leader->pids[PIDTYPE_PID].pid;
1884 * the result of task_pgrp/task_session even if task == current,
1887 static inline struct pid *task_pgrp(struct task_struct *task)
1889 return task->group_leader->pids[PIDTYPE_PGID].pid;
1892 static inline struct pid *task_session(struct task_struct *task)
1894 return task->group_leader->pids[PIDTYPE_SID].pid;
1900 * the helpers to get the task's different pids as they are seen
1908 * set_task_vxid() : assigns a virtual id to a task;
1912 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
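
As a sketch of how these accessors are typically consumed (the report_ids() helper is hypothetical), the *_nr()/*_vnr() wrappers built on top of them turn the struct pid pointers into numeric ids, either in the init namespace or as seen from the task's own pid namespace:

#include <linux/sched.h>
#include <linux/printk.h>

static void report_ids(struct task_struct *task)
{
	pr_info("global pid=%d tgid=%d; in own ns: pid=%d pgrp=%d sid=%d\n",
		task_pid_nr(task), task_tgid_nr(task),
		task_pid_vnr(task), task_pgrp_vnr(task), task_session_vnr(task));
}
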
1993 * pid_alive - check that a task structure is not stale
1997 * If pid_alive fails, then pointers within the task structure
2008 * is_global_init - check if a task structure is init. Since init
2012 * Check if a task structure is the first user space task the kernel created.
2014 * Return: 1 if the task structure is init. 0 otherwise.
2100 * Only the _current_ task can read/write to tsk->flags, but other
2104 * or during fork: the ptracer task is allowed to write to the
2174 * task->jobctl flags
2179 #define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */
2197 extern bool task_set_jobctl_pending(struct task_struct *task,
2199 extern void task_clear_jobctl_trapping(struct task_struct *task);
2200 extern void task_clear_jobctl_pending(struct task_struct *task,
2218 static inline void tsk_restore_flags(struct task_struct *task,
2221 task->flags &= ~flags;
2222 task->flags |= orig_flags & flags;
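
A typical use of this helper, sketched under the assumption of a hypothetical some_allocation_heavy_work() callee: temporarily set a PF_ flag on current, then restore whatever value the flag had before.

#include <linux/sched.h>

extern int some_allocation_heavy_work(void);   /* hypothetical work function */

static int do_reclaim_work(void)
{
	unsigned long pflags = current->flags;     /* remember the original flags */
	int ret;

	current->flags |= PF_MEMALLOC;             /* allow use of emergency reserves */
	ret = some_allocation_heavy_work();
	tsk_restore_flags(current, pflags, PF_MEMALLOC);   /* restore only PF_MEMALLOC */

	return ret;
}
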
2319 task_sched_runtime(struct task_struct *task);
2370 * task_nice - return the nice value of a given task.
2371 * @p: the task in question.
2390 * is_idle_task - is the specified task an idle task?
2391 * @p: the task in question.
2393 * Return: 1 if @p is an idle task. 0 otherwise.
2427 * find a task by one of its numerical ids
2430 * finds a task by its pid in the specified namespace
2432 * finds a task by its virtual pid
2587 /* Grab a reference to a task's mm, if it is not already going away */
2588 extern struct mm_struct *get_task_mm(struct task_struct *task);
2590 * Grab a reference to a task's mm, if it is not already going away
2594 extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
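
The reference discipline both helpers imply is mmput() on every successful return; a sketch with a hypothetical read_total_vm() helper:

#include <linux/sched.h>
#include <linux/mm_types.h>

static unsigned long read_total_vm(struct task_struct *task)
{
	struct mm_struct *mm;
	unsigned long total_vm = 0;

	mm = get_task_mm(task);          /* NULL for kernel threads or exiting tasks */
	if (mm) {
		total_vm = mm->total_vm; /* number of mapped pages */
		mmput(mm);               /* drop the reference taken above */
	}
	return total_vm;
}
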
2700 * all we care about is that we have a task with the appropriate
2701 * pid; we don't actually care if we have the right task.
2731 * pins the final release of task.io_context. Also protects ->cpuset and
2769 * @tsk: task causing the changes
2786 * @tsk: task causing the changes
2797 #define task_thread_info(task) ((struct thread_info *)(task)->stack)
2798 #define task_stack_page(task) ((task)->stack)
2803 task_thread_info(p)->task = p;
2825 #define task_stack_end_corrupted(task) \
2826 (*(end_of_stack(task)) != STACK_END_MAGIC)
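
A hedged illustration of the corruption check (check_task_stack() is hypothetical; the scheduler performs an equivalent test in its debug path when stack-end checking is configured):

#include <linux/sched.h>
#include <linux/kernel.h>

static inline void check_task_stack(struct task_struct *task)
{
	/* end_of_stack() points at the STACK_END_MAGIC canary; if it has been
	 * overwritten, the task overran its kernel stack. A less drastic
	 * handler could WARN() instead of panicking.
	 */
	if (task_stack_end_corrupted(task))
		panic("corrupted stack end detected\n");
}
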
2851 /* set thread flags in other task's structures
2964 * task waiting?: (technically does not depend on CONFIG_PREEMPT,
3064 * Reevaluate whether the task has signals pending delivery.
3065 * Wake the task if so.