Searched refs:task (Results 1 - 200 of 1668) sorted by relevance


/linux-4.1.27/arch/um/include/asm/
stacktrace.h
18 get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs) get_frame_pointer() argument
20 if (!task || task == current) get_frame_pointer()
22 return KSTK_EBP(task); get_frame_pointer()
26 get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs) get_frame_pointer() argument
33 *get_stack_pointer(struct task_struct *task, struct pt_regs *segv_regs) get_stack_pointer() argument
35 if (!task || task == current) get_stack_pointer()
37 return (unsigned long *)KSTK_ESP(task); get_stack_pointer()
sysrq.h
5 extern void show_trace(struct task_struct* task, unsigned long *stack);
/linux-4.1.27/net/sunrpc/
sched.c
46 static void rpc_release_task(struct rpc_task *task);
60 * Disable the timer for a given RPC task. Should be called with
65 __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task) __rpc_disable_timer() argument
67 if (task->tk_timeout == 0) __rpc_disable_timer()
69 dprintk("RPC: %5u disabling timer\n", task->tk_pid); __rpc_disable_timer()
70 task->tk_timeout = 0; __rpc_disable_timer()
71 list_del(&task->u.tk_wait.timer_list); __rpc_disable_timer()
84 * Set up a timer for the current task.
87 __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task) __rpc_add_timer() argument
89 if (!task->tk_timeout) __rpc_add_timer()
93 task->tk_pid, jiffies_to_msecs(task->tk_timeout)); __rpc_add_timer()
95 task->u.tk_wait.expires = jiffies + task->tk_timeout; __rpc_add_timer()
96 if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires)) __rpc_add_timer()
97 rpc_set_queue_timer(queue, task->u.tk_wait.expires); __rpc_add_timer()
98 list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list); __rpc_add_timer()
104 struct rpc_task *task; rpc_rotate_queue_owner() local
107 task = list_first_entry(q, struct rpc_task, u.tk_wait.list); rpc_rotate_queue_owner()
108 if (task->tk_owner == queue->owner) rpc_rotate_queue_owner()
109 list_move_tail(&task->u.tk_wait.list, q); rpc_rotate_queue_owner()
138 struct rpc_task *task, __rpc_add_wait_queue_priority()
144 INIT_LIST_HEAD(&task->u.tk_wait.links); __rpc_add_wait_queue_priority()
151 if (t->tk_owner == task->tk_owner) { list_for_each_entry()
152 list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links); list_for_each_entry()
156 list_add_tail(&task->u.tk_wait.list, q);
168 struct rpc_task *task, __rpc_add_wait_queue()
171 WARN_ON_ONCE(RPC_IS_QUEUED(task)); __rpc_add_wait_queue()
172 if (RPC_IS_QUEUED(task)) __rpc_add_wait_queue()
176 __rpc_add_wait_queue_priority(queue, task, queue_priority); __rpc_add_wait_queue()
177 else if (RPC_IS_SWAPPER(task)) __rpc_add_wait_queue()
178 list_add(&task->u.tk_wait.list, &queue->tasks[0]); __rpc_add_wait_queue()
180 list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]); __rpc_add_wait_queue()
181 task->tk_waitqueue = queue; __rpc_add_wait_queue()
185 rpc_set_queued(task); __rpc_add_wait_queue()
188 task->tk_pid, queue, rpc_qname(queue)); __rpc_add_wait_queue()
194 static void __rpc_remove_wait_queue_priority(struct rpc_task *task) __rpc_remove_wait_queue_priority() argument
198 if (!list_empty(&task->u.tk_wait.links)) { __rpc_remove_wait_queue_priority()
199 t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list); __rpc_remove_wait_queue_priority()
200 list_move(&t->u.tk_wait.list, &task->u.tk_wait.list); __rpc_remove_wait_queue_priority()
201 list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links); __rpc_remove_wait_queue_priority()
209 static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task) __rpc_remove_wait_queue() argument
211 __rpc_disable_timer(queue, task); __rpc_remove_wait_queue()
213 __rpc_remove_wait_queue_priority(task); __rpc_remove_wait_queue()
214 list_del(&task->u.tk_wait.list); __rpc_remove_wait_queue()
217 task->tk_pid, queue, rpc_qname(queue)); __rpc_remove_wait_queue()
262 static void rpc_task_set_debuginfo(struct rpc_task *task) rpc_task_set_debuginfo() argument
266 task->tk_pid = atomic_inc_return(&rpc_pid); rpc_task_set_debuginfo()
269 static inline void rpc_task_set_debuginfo(struct rpc_task *task) rpc_task_set_debuginfo() argument
274 static void rpc_set_active(struct rpc_task *task) rpc_set_active() argument
276 trace_rpc_task_begin(task->tk_client, task, NULL); rpc_set_active()
278 rpc_task_set_debuginfo(task); rpc_set_active()
279 set_bit(RPC_TASK_ACTIVE, &task->tk_runstate); rpc_set_active()
286 static int rpc_complete_task(struct rpc_task *task) rpc_complete_task() argument
288 void *m = &task->tk_runstate; rpc_complete_task()
294 trace_rpc_task_complete(task->tk_client, task, NULL); rpc_complete_task()
297 clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); rpc_complete_task()
298 ret = atomic_dec_and_test(&task->tk_count); rpc_complete_task()
312 int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action) __rpc_wait_for_completion_task() argument
316 return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE, __rpc_wait_for_completion_task()
322 * Make an RPC task runnable.
324 * Note: If the task is ASYNC, and is being made runnable after sitting on an
332 static void rpc_make_runnable(struct rpc_task *task) rpc_make_runnable() argument
334 bool need_wakeup = !rpc_test_and_set_running(task); rpc_make_runnable()
336 rpc_clear_queued(task); rpc_make_runnable()
339 if (RPC_IS_ASYNC(task)) { rpc_make_runnable()
340 INIT_WORK(&task->u.tk_work, rpc_async_schedule); rpc_make_runnable()
341 queue_work(rpciod_workqueue, &task->u.tk_work); rpc_make_runnable()
343 wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED); rpc_make_runnable()
349 * NB: An RPC task will only receive interrupt-driven events as long
353 struct rpc_task *task, __rpc_sleep_on_priority()
358 task->tk_pid, rpc_qname(q), jiffies); __rpc_sleep_on_priority()
360 trace_rpc_task_sleep(task->tk_client, task, q); __rpc_sleep_on_priority()
362 __rpc_add_wait_queue(q, task, queue_priority); __rpc_sleep_on_priority()
364 WARN_ON_ONCE(task->tk_callback != NULL); __rpc_sleep_on_priority()
365 task->tk_callback = action; __rpc_sleep_on_priority()
366 __rpc_add_timer(q, task); __rpc_sleep_on_priority()
369 void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, rpc_sleep_on() argument
372 /* We shouldn't ever put an inactive task to sleep */ rpc_sleep_on()
373 WARN_ON_ONCE(!RPC_IS_ACTIVATED(task)); rpc_sleep_on()
374 if (!RPC_IS_ACTIVATED(task)) { rpc_sleep_on()
375 task->tk_status = -EIO; rpc_sleep_on()
376 rpc_put_task_async(task); rpc_sleep_on()
384 __rpc_sleep_on_priority(q, task, action, task->tk_priority); rpc_sleep_on()
389 void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task, rpc_sleep_on_priority() argument
392 /* We shouldn't ever put an inactive task to sleep */ rpc_sleep_on_priority()
393 WARN_ON_ONCE(!RPC_IS_ACTIVATED(task)); rpc_sleep_on_priority()
394 if (!RPC_IS_ACTIVATED(task)) { rpc_sleep_on_priority()
395 task->tk_status = -EIO; rpc_sleep_on_priority()
396 rpc_put_task_async(task); rpc_sleep_on_priority()
404 __rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW); rpc_sleep_on_priority()
412 * @task: task to be woken up
414 * Caller must hold queue->lock, and have cleared the task queued flag.
416 static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task *task) __rpc_do_wake_up_task() argument
419 task->tk_pid, jiffies); __rpc_do_wake_up_task()
421 /* Has the task been executed yet? If not, we cannot wake it up! */ __rpc_do_wake_up_task()
422 if (!RPC_IS_ACTIVATED(task)) { __rpc_do_wake_up_task()
423 printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task); __rpc_do_wake_up_task()
427 trace_rpc_task_wakeup(task->tk_client, task, queue); __rpc_do_wake_up_task()
429 __rpc_remove_wait_queue(queue, task); __rpc_do_wake_up_task()
431 rpc_make_runnable(task); __rpc_do_wake_up_task()
437 * Wake up a queued task while the queue lock is being held
439 static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task) rpc_wake_up_task_queue_locked() argument
441 if (RPC_IS_QUEUED(task)) { rpc_wake_up_task_queue_locked()
443 if (task->tk_waitqueue == queue) rpc_wake_up_task_queue_locked()
444 __rpc_do_wake_up_task(queue, task); rpc_wake_up_task_queue_locked()
449 * Wake up a task on a specific queue
451 void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task) rpc_wake_up_queued_task() argument
454 rpc_wake_up_task_queue_locked(queue, task); rpc_wake_up_queued_task()
460 * Wake up the next task on a priority queue.
465 struct rpc_task *task; __rpc_find_next_queued_priority() local
472 task = list_entry(q->next, struct rpc_task, u.tk_wait.list); __rpc_find_next_queued_priority()
473 if (queue->owner == task->tk_owner) { __rpc_find_next_queued_priority()
476 list_move_tail(&task->u.tk_wait.list, q); __rpc_find_next_queued_priority()
493 task = list_entry(q->next, struct rpc_task, u.tk_wait.list); __rpc_find_next_queued_priority()
504 rpc_set_waitqueue_owner(queue, task->tk_owner); __rpc_find_next_queued_priority()
506 return task; __rpc_find_next_queued_priority()
519 * Wake up the first task on the wait queue.
524 struct rpc_task *task = NULL; rpc_wake_up_first() local
529 task = __rpc_find_next_queued(queue); rpc_wake_up_first()
530 if (task != NULL) { rpc_wake_up_first()
531 if (func(task, data)) rpc_wake_up_first()
532 rpc_wake_up_task_queue_locked(queue, task); rpc_wake_up_first()
534 task = NULL; rpc_wake_up_first()
538 return task; rpc_wake_up_first()
542 static bool rpc_wake_up_next_func(struct rpc_task *task, void *data) rpc_wake_up_next_func() argument
548 * Wake up the next task on the wait queue.
570 struct rpc_task *task; rpc_wake_up() local
571 task = list_first_entry(head, rpc_wake_up()
574 rpc_wake_up_task_queue_locked(queue, task); rpc_wake_up()
599 struct rpc_task *task; rpc_wake_up_status() local
600 task = list_first_entry(head, rpc_wake_up_status()
603 task->tk_status = status; rpc_wake_up_status()
604 rpc_wake_up_task_queue_locked(queue, task); rpc_wake_up_status()
617 struct rpc_task *task, *n; __rpc_queue_timer_fn() local
622 list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) { __rpc_queue_timer_fn()
623 timeo = task->u.tk_wait.expires; __rpc_queue_timer_fn()
625 dprintk("RPC: %5u timeout\n", task->tk_pid); __rpc_queue_timer_fn()
626 task->tk_status = -ETIMEDOUT; __rpc_queue_timer_fn()
627 rpc_wake_up_task_queue_locked(queue, task); __rpc_queue_timer_fn()
638 static void __rpc_atrun(struct rpc_task *task) __rpc_atrun() argument
640 if (task->tk_status == -ETIMEDOUT) __rpc_atrun()
641 task->tk_status = 0; __rpc_atrun()
645 * Run a task at a later time
647 void rpc_delay(struct rpc_task *task, unsigned long delay) rpc_delay() argument
649 task->tk_timeout = delay; rpc_delay()
650 rpc_sleep_on(&delay_queue, task, __rpc_atrun); rpc_delay()
655 * Helper to call task->tk_ops->rpc_call_prepare
657 void rpc_prepare_task(struct rpc_task *task) rpc_prepare_task() argument
659 task->tk_ops->rpc_call_prepare(task, task->tk_calldata); rpc_prepare_task()
663 rpc_init_task_statistics(struct rpc_task *task) rpc_init_task_statistics() argument
666 task->tk_garb_retry = 2; rpc_init_task_statistics()
667 task->tk_cred_retry = 2; rpc_init_task_statistics()
668 task->tk_rebind_retry = 2; rpc_init_task_statistics()
671 task->tk_start = ktime_get(); rpc_init_task_statistics()
675 rpc_reset_task_statistics(struct rpc_task *task) rpc_reset_task_statistics() argument
677 task->tk_timeouts = 0; rpc_reset_task_statistics()
678 task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT); rpc_reset_task_statistics()
680 rpc_init_task_statistics(task); rpc_reset_task_statistics()
684 * Helper that calls task->tk_ops->rpc_call_done if it exists
686 void rpc_exit_task(struct rpc_task *task) rpc_exit_task() argument
688 task->tk_action = NULL; rpc_exit_task()
689 if (task->tk_ops->rpc_call_done != NULL) { rpc_exit_task()
690 task->tk_ops->rpc_call_done(task, task->tk_calldata); rpc_exit_task()
691 if (task->tk_action != NULL) { rpc_exit_task()
692 WARN_ON(RPC_ASSASSINATED(task)); rpc_exit_task()
694 xprt_release(task); rpc_exit_task()
695 rpc_reset_task_statistics(task); rpc_exit_task()
700 void rpc_exit(struct rpc_task *task, int status) rpc_exit() argument
702 task->tk_status = status; rpc_exit()
703 task->tk_action = rpc_exit_task; rpc_exit()
704 if (RPC_IS_QUEUED(task)) rpc_exit()
705 rpc_wake_up_queued_task(task->tk_waitqueue, task); rpc_exit()
718 static void __rpc_execute(struct rpc_task *task) __rpc_execute() argument
721 int task_is_async = RPC_IS_ASYNC(task); __rpc_execute()
725 task->tk_pid, task->tk_flags); __rpc_execute()
727 WARN_ON_ONCE(RPC_IS_QUEUED(task)); __rpc_execute()
728 if (RPC_IS_QUEUED(task)) __rpc_execute()
737 do_action = task->tk_callback; __rpc_execute()
738 task->tk_callback = NULL; __rpc_execute()
742 * tk_action may be NULL if the task has been killed. __rpc_execute()
746 do_action = task->tk_action; __rpc_execute()
750 trace_rpc_task_run_action(task->tk_client, task, task->tk_action); __rpc_execute()
751 do_action(task); __rpc_execute()
754 * Lockless check for whether task is sleeping or not. __rpc_execute()
756 if (!RPC_IS_QUEUED(task)) __rpc_execute()
767 queue = task->tk_waitqueue; __rpc_execute()
769 if (!RPC_IS_QUEUED(task)) { __rpc_execute()
773 rpc_clear_running(task); __rpc_execute()
778 /* sync task: sleep here */ __rpc_execute()
779 dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid); __rpc_execute()
780 status = out_of_line_wait_on_bit(&task->tk_runstate, __rpc_execute()
785 * When a sync task receives a signal, it exits with __rpc_execute()
790 dprintk("RPC: %5u got signal\n", task->tk_pid); __rpc_execute()
791 task->tk_flags |= RPC_TASK_KILLED; __rpc_execute()
792 rpc_exit(task, -ERESTARTSYS); __rpc_execute()
794 dprintk("RPC: %5u sync task resuming\n", task->tk_pid); __rpc_execute()
797 dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status, __rpc_execute()
798 task->tk_status); __rpc_execute()
799 /* Release all resources associated with the task */ __rpc_execute()
800 rpc_release_task(task); __rpc_execute()
806 * This may be called recursively if e.g. an async NFS task updates
808 * NOTE: Upon exit of this function the task is guaranteed to be
810 * been called, so your task memory may have been freed.
812 void rpc_execute(struct rpc_task *task) rpc_execute() argument
814 bool is_async = RPC_IS_ASYNC(task); rpc_execute()
816 rpc_set_active(task); rpc_execute()
817 rpc_make_runnable(task); rpc_execute()
819 __rpc_execute(task); rpc_execute()
829 * @task: RPC task that will use this buffer
844 void *rpc_malloc(struct rpc_task *task, size_t size) rpc_malloc() argument
849 if (RPC_IS_SWAPPER(task)) rpc_malloc()
863 task->tk_pid, size, buf); rpc_malloc()
895 * Creation and deletion of RPC task structures
897 static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data) rpc_init_task() argument
899 memset(task, 0, sizeof(*task)); rpc_init_task()
900 atomic_set(&task->tk_count, 1); rpc_init_task()
901 task->tk_flags = task_setup_data->flags; rpc_init_task()
902 task->tk_ops = task_setup_data->callback_ops; rpc_init_task()
903 task->tk_calldata = task_setup_data->callback_data; rpc_init_task()
904 INIT_LIST_HEAD(&task->tk_task); rpc_init_task()
906 task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW; rpc_init_task()
907 task->tk_owner = current->tgid; rpc_init_task()
910 task->tk_workqueue = task_setup_data->workqueue; rpc_init_task()
912 if (task->tk_ops->rpc_call_prepare != NULL) rpc_init_task()
913 task->tk_action = rpc_prepare_task; rpc_init_task()
915 rpc_init_task_statistics(task); rpc_init_task()
917 dprintk("RPC: new task initialized, procpid %u\n", rpc_init_task()
928 * Create a new task for the specified client.
932 struct rpc_task *task = setup_data->task; rpc_new_task() local
935 if (task == NULL) { rpc_new_task()
936 task = rpc_alloc_task(); rpc_new_task()
937 if (task == NULL) { rpc_new_task()
945 rpc_init_task(task, setup_data); rpc_new_task()
946 task->tk_flags |= flags; rpc_new_task()
947 dprintk("RPC: allocated task %p\n", task); rpc_new_task()
948 return task; rpc_new_task()
952 * rpc_free_task - release rpc task and perform cleanups
970 static void rpc_free_task(struct rpc_task *task) rpc_free_task() argument
972 unsigned short tk_flags = task->tk_flags; rpc_free_task()
974 rpc_release_calldata(task->tk_ops, task->tk_calldata); rpc_free_task()
977 dprintk("RPC: %5u freeing task\n", task->tk_pid); rpc_free_task()
978 mempool_free(task, rpc_task_mempool); rpc_free_task()
987 static void rpc_release_resources_task(struct rpc_task *task) rpc_release_resources_task() argument
989 xprt_release(task); rpc_release_resources_task()
990 if (task->tk_msg.rpc_cred) { rpc_release_resources_task()
991 put_rpccred(task->tk_msg.rpc_cred); rpc_release_resources_task()
992 task->tk_msg.rpc_cred = NULL; rpc_release_resources_task()
994 rpc_task_release_client(task); rpc_release_resources_task()
997 static void rpc_final_put_task(struct rpc_task *task, rpc_final_put_task() argument
1001 INIT_WORK(&task->u.tk_work, rpc_async_release); rpc_final_put_task()
1002 queue_work(q, &task->u.tk_work); rpc_final_put_task()
1004 rpc_free_task(task); rpc_final_put_task()
1007 static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q) rpc_do_put_task() argument
1009 if (atomic_dec_and_test(&task->tk_count)) { rpc_do_put_task()
1010 rpc_release_resources_task(task); rpc_do_put_task()
1011 rpc_final_put_task(task, q); rpc_do_put_task()
1015 void rpc_put_task(struct rpc_task *task) rpc_put_task() argument
1017 rpc_do_put_task(task, NULL); rpc_put_task()
1021 void rpc_put_task_async(struct rpc_task *task) rpc_put_task_async() argument
1023 rpc_do_put_task(task, task->tk_workqueue); rpc_put_task_async()
1027 static void rpc_release_task(struct rpc_task *task) rpc_release_task() argument
1029 dprintk("RPC: %5u release task\n", task->tk_pid); rpc_release_task()
1031 WARN_ON_ONCE(RPC_IS_QUEUED(task)); rpc_release_task()
1033 rpc_release_resources_task(task); rpc_release_task()
1037 * so it should be safe to use task->tk_count as a test for whether rpc_release_task()
1040 if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) { rpc_release_task()
1041 /* Wake up anyone who may be waiting for task completion */ rpc_release_task()
1042 if (!rpc_complete_task(task)) rpc_release_task()
1045 if (!atomic_dec_and_test(&task->tk_count)) rpc_release_task()
1048 rpc_final_put_task(task, task->tk_workqueue); rpc_release_task()
137 __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task, unsigned char queue_priority) __rpc_add_wait_queue_priority() argument
167 __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task, unsigned char queue_priority) __rpc_add_wait_queue() argument
352 __rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task, rpc_action action, unsigned char queue_priority) __rpc_sleep_on_priority() argument
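
The sched.c hits above outline the basic park-and-wake contract: an action routine sets tk_timeout and calls rpc_sleep_on(), and a later event path calls rpc_wake_up_queued_task() on the same queue. Below is a minimal sketch of that pairing, assuming kernel context and the declarations from linux/sunrpc/sched.h; the demo_* names and the demo_queue wait queue are illustrative and not part of the file above.

#include <linux/sunrpc/sched.h>

/* Illustrative wait queue, initialised once at setup time (compare the
 * delay_queue used by rpc_delay() in sched.c above). */
static struct rpc_wait_queue demo_queue;

static void demo_setup(void)
{
        rpc_init_wait_queue(&demo_queue, "demoq");
}

/* State-machine action: park the task until an event or a 2-second timeout;
 * __rpc_add_timer() arms tk_timeout when the task is queued. */
static void demo_wait_for_event(struct rpc_task *task)
{
        task->tk_timeout = 2 * HZ;
        rpc_sleep_on(&demo_queue, task, NULL);
}

/* Event path: wake the parked task; the queue lock is taken internally. */
static void demo_event_arrived(struct rpc_task *task)
{
        rpc_wake_up_queued_task(&demo_queue, task);
}
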
xprt.c
69 static void xprt_connect_status(struct rpc_task *task);
174 * @task: task that is requesting access to the transport
181 int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task) xprt_reserve_xprt() argument
183 struct rpc_rqst *req = task->tk_rqstp; xprt_reserve_xprt()
187 if (task == xprt->snd_task) xprt_reserve_xprt()
191 xprt->snd_task = task; xprt_reserve_xprt()
199 task->tk_pid, xprt); xprt_reserve_xprt()
200 task->tk_timeout = 0; xprt_reserve_xprt()
201 task->tk_status = -EAGAIN; xprt_reserve_xprt()
208 rpc_sleep_on_priority(&xprt->sending, task, NULL, priority); xprt_reserve_xprt()
226 * @task: task that is requesting access to the transport
232 int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task) xprt_reserve_xprt_cong() argument
234 struct rpc_rqst *req = task->tk_rqstp; xprt_reserve_xprt_cong()
238 if (task == xprt->snd_task) xprt_reserve_xprt_cong()
243 xprt->snd_task = task; xprt_reserve_xprt_cong()
246 if (__xprt_get_cong(xprt, task)) { xprt_reserve_xprt_cong()
247 xprt->snd_task = task; xprt_reserve_xprt_cong()
253 dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt); xprt_reserve_xprt_cong()
254 task->tk_timeout = 0; xprt_reserve_xprt_cong()
255 task->tk_status = -EAGAIN; xprt_reserve_xprt_cong()
262 rpc_sleep_on_priority(&xprt->sending, task, NULL, priority); xprt_reserve_xprt_cong()
267 static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) xprt_lock_write() argument
272 retval = xprt->ops->reserve_xprt(xprt, task); xprt_lock_write()
277 static bool __xprt_lock_write_func(struct rpc_task *task, void *data) __xprt_lock_write_func() argument
282 req = task->tk_rqstp; __xprt_lock_write_func()
283 xprt->snd_task = task; __xprt_lock_write_func()
299 static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data) __xprt_lock_write_cong_func() argument
304 req = task->tk_rqstp; __xprt_lock_write_cong_func()
306 xprt->snd_task = task; __xprt_lock_write_cong_func()
309 if (__xprt_get_cong(xprt, task)) { __xprt_lock_write_cong_func()
310 xprt->snd_task = task; __xprt_lock_write_cong_func()
329 static void xprt_task_clear_bytes_sent(struct rpc_task *task) xprt_task_clear_bytes_sent() argument
331 if (task != NULL) { xprt_task_clear_bytes_sent()
332 struct rpc_rqst *req = task->tk_rqstp; xprt_task_clear_bytes_sent()
341 * @task: task that is releasing access to the transport
343 * Note that "task" can be NULL. No congestion control is provided.
345 void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task) xprt_release_xprt() argument
347 if (xprt->snd_task == task) { xprt_release_xprt()
348 xprt_task_clear_bytes_sent(task); xprt_release_xprt()
358 * @task: task that is releasing access to the transport
360 * Note that "task" can be NULL. Another task is awoken to use the
363 void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task) xprt_release_xprt_cong() argument
365 if (xprt->snd_task == task) { xprt_release_xprt_cong()
366 xprt_task_clear_bytes_sent(task); xprt_release_xprt_cong()
373 static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task) xprt_release_write() argument
376 xprt->ops->release_xprt(xprt, task); xprt_release_write()
382 * overflowed. Put the task to sleep if this is the case.
385 __xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task) __xprt_get_cong() argument
387 struct rpc_rqst *req = task->tk_rqstp; __xprt_get_cong()
392 task->tk_pid, xprt->cong, xprt->cwnd); __xprt_get_cong()
401 * Adjust the congestion window, and wake up the next task
416 * @task: RPC request that recently completed
420 void xprt_release_rqst_cong(struct rpc_task *task) xprt_release_rqst_cong() argument
422 struct rpc_rqst *req = task->tk_rqstp; xprt_release_rqst_cong()
431 * @task: recently completed RPC request used to adjust window
444 void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result) xprt_adjust_cwnd() argument
446 struct rpc_rqst *req = task->tk_rqstp; xprt_adjust_cwnd()
471 * @status: result code to plant in each task before waking it
485 * @task: task to be put to sleep
492 void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action) xprt_wait_for_buffer_space() argument
494 struct rpc_rqst *req = task->tk_rqstp; xprt_wait_for_buffer_space()
497 task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0; xprt_wait_for_buffer_space()
498 rpc_sleep_on(&xprt->pending, task, action); xprt_wait_for_buffer_space()
503 * xprt_write_space - wake the task waiting for transport output buffer space
512 dprintk("RPC: write space: waking waiting task on " xprt_write_space()
522 * @task: task whose timeout is to be set
528 void xprt_set_retrans_timeout_def(struct rpc_task *task) xprt_set_retrans_timeout_def() argument
530 task->tk_timeout = task->tk_rqstp->rq_timeout; xprt_set_retrans_timeout_def()
536 * @task: task whose timeout is to be set
540 void xprt_set_retrans_timeout_rtt(struct rpc_task *task) xprt_set_retrans_timeout_rtt() argument
542 int timer = task->tk_msg.rpc_proc->p_timer; xprt_set_retrans_timeout_rtt()
543 struct rpc_clnt *clnt = task->tk_client; xprt_set_retrans_timeout_rtt()
545 struct rpc_rqst *req = task->tk_rqstp; xprt_set_retrans_timeout_rtt()
548 task->tk_timeout = rpc_calc_rto(rtt, timer); xprt_set_retrans_timeout_rtt()
549 task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries; xprt_set_retrans_timeout_rtt()
550 if (task->tk_timeout > max_timeout || task->tk_timeout == 0) xprt_set_retrans_timeout_rtt()
551 task->tk_timeout = max_timeout; xprt_set_retrans_timeout_rtt()
695 struct rpc_task *task, xprt_lock_connect()
703 if (xprt->snd_task != task) xprt_lock_connect()
705 xprt_task_clear_bytes_sent(task); xprt_lock_connect()
729 * @task: RPC task that is requesting the connect
732 void xprt_connect(struct rpc_task *task) xprt_connect() argument
734 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; xprt_connect()
736 dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid, xprt_connect()
740 task->tk_status = -EAGAIN; xprt_connect()
743 if (!xprt_lock_write(xprt, task)) xprt_connect()
750 task->tk_rqstp->rq_bytes_sent = 0; xprt_connect()
751 task->tk_timeout = task->tk_rqstp->rq_timeout; xprt_connect()
752 rpc_sleep_on(&xprt->pending, task, xprt_connect_status); xprt_connect()
759 xprt->ops->connect(xprt, task); xprt_connect()
761 xprt_release_write(xprt, task); xprt_connect()
764 static void xprt_connect_status(struct rpc_task *task) xprt_connect_status() argument
766 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; xprt_connect_status()
768 if (task->tk_status == 0) { xprt_connect_status()
772 task->tk_pid); xprt_connect_status()
776 switch (task->tk_status) { xprt_connect_status()
784 dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid); xprt_connect_status()
788 "out\n", task->tk_pid); xprt_connect_status()
792 "server %s\n", task->tk_pid, -task->tk_status, xprt_connect_status()
794 task->tk_status = -EIO; xprt_connect_status()
822 static void xprt_update_rtt(struct rpc_task *task) xprt_update_rtt() argument
824 struct rpc_rqst *req = task->tk_rqstp; xprt_update_rtt()
825 struct rpc_rtt *rtt = task->tk_client->cl_rtt; xprt_update_rtt()
826 unsigned int timer = task->tk_msg.rpc_proc->p_timer; xprt_update_rtt()
838 * @task: RPC request that recently completed
843 void xprt_complete_rqst(struct rpc_task *task, int copied) xprt_complete_rqst() argument
845 struct rpc_rqst *req = task->tk_rqstp; xprt_complete_rqst()
849 task->tk_pid, ntohl(req->rq_xid), copied); xprt_complete_rqst()
855 xprt_update_rtt(task); xprt_complete_rqst()
863 rpc_wake_up_queued_task(&xprt->pending, task); xprt_complete_rqst()
867 static void xprt_timer(struct rpc_task *task) xprt_timer() argument
869 struct rpc_rqst *req = task->tk_rqstp; xprt_timer()
872 if (task->tk_status != -ETIMEDOUT) xprt_timer()
874 dprintk("RPC: %5u xprt_timer\n", task->tk_pid); xprt_timer()
879 xprt->ops->timer(xprt, task); xprt_timer()
881 task->tk_status = 0; xprt_timer()
892 * @task: RPC task about to send a request
895 bool xprt_prepare_transmit(struct rpc_task *task) xprt_prepare_transmit() argument
897 struct rpc_rqst *req = task->tk_rqstp; xprt_prepare_transmit()
901 dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid); xprt_prepare_transmit()
906 task->tk_status = req->rq_reply_bytes_recvd; xprt_prepare_transmit()
909 if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) xprt_prepare_transmit()
912 xprt->ops->set_retrans_timeout(task); xprt_prepare_transmit()
913 rpc_sleep_on(&xprt->pending, task, xprt_timer); xprt_prepare_transmit()
917 if (!xprt->ops->reserve_xprt(xprt, task)) { xprt_prepare_transmit()
918 task->tk_status = -EAGAIN; xprt_prepare_transmit()
927 void xprt_end_transmit(struct rpc_task *task) xprt_end_transmit() argument
929 xprt_release_write(task->tk_rqstp->rq_xprt, task); xprt_end_transmit()
934 * @task: controlling RPC task
938 void xprt_transmit(struct rpc_task *task) xprt_transmit() argument
940 struct rpc_rqst *req = task->tk_rqstp; xprt_transmit()
944 dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen); xprt_transmit()
947 if (list_empty(&req->rq_list) && rpc_reply_expected(task)) { xprt_transmit()
966 status = xprt->ops->send_request(task); xprt_transmit()
969 task->tk_status = status; xprt_transmit()
973 dprintk("RPC: %5u xmit complete\n", task->tk_pid); xprt_transmit()
974 task->tk_flags |= RPC_TASK_SENT; xprt_transmit()
977 xprt->ops->set_retrans_timeout(task); xprt_transmit()
990 task->tk_status = -ENOTCONN; xprt_transmit()
996 if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task)) xprt_transmit()
997 rpc_sleep_on(&xprt->pending, task, xprt_timer); xprt_transmit()
1003 static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task) xprt_add_backlog() argument
1006 rpc_sleep_on(&xprt->backlog, task, NULL); xprt_add_backlog()
1015 static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task) xprt_throttle_congested() argument
1023 rpc_sleep_on(&xprt->backlog, task, NULL); xprt_throttle_congested()
1055 void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task) xprt_alloc_slot() argument
1072 task->tk_status = -ENOMEM; xprt_alloc_slot()
1075 xprt_add_backlog(xprt, task); xprt_alloc_slot()
1078 task->tk_status = -EAGAIN; xprt_alloc_slot()
1083 task->tk_status = 0; xprt_alloc_slot()
1084 task->tk_rqstp = req; xprt_alloc_slot()
1085 xprt_request_init(task, xprt); xprt_alloc_slot()
1090 void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task) xprt_lock_and_alloc_slot() argument
1097 if (xprt_lock_write(xprt, task)) { xprt_lock_and_alloc_slot()
1098 xprt_alloc_slot(xprt, task); xprt_lock_and_alloc_slot()
1099 xprt_release_write(xprt, task); xprt_lock_and_alloc_slot()
1171 * @task: RPC task requesting a slot allocation
1174 * slots are available, place the task on the transport's
1177 void xprt_reserve(struct rpc_task *task) xprt_reserve() argument
1181 task->tk_status = 0; xprt_reserve()
1182 if (task->tk_rqstp != NULL) xprt_reserve()
1185 task->tk_timeout = 0; xprt_reserve()
1186 task->tk_status = -EAGAIN; xprt_reserve()
1188 xprt = rcu_dereference(task->tk_client->cl_xprt); xprt_reserve()
1189 if (!xprt_throttle_congested(xprt, task)) xprt_reserve()
1190 xprt->ops->alloc_slot(xprt, task); xprt_reserve()
1196 * @task: RPC task requesting a slot allocation
1198 * If no more slots are available, place the task on the transport's
1203 void xprt_retry_reserve(struct rpc_task *task) xprt_retry_reserve() argument
1207 task->tk_status = 0; xprt_retry_reserve()
1208 if (task->tk_rqstp != NULL) xprt_retry_reserve()
1211 task->tk_timeout = 0; xprt_retry_reserve()
1212 task->tk_status = -EAGAIN; xprt_retry_reserve()
1214 xprt = rcu_dereference(task->tk_client->cl_xprt); xprt_retry_reserve()
1215 xprt->ops->alloc_slot(xprt, task); xprt_retry_reserve()
1229 static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt) xprt_request_init() argument
1231 struct rpc_rqst *req = task->tk_rqstp; xprt_request_init()
1234 req->rq_timeout = task->tk_client->cl_timeout->to_initval; xprt_request_init()
1235 req->rq_task = task; xprt_request_init()
1247 dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid, xprt_request_init()
1253 * @task: task which is finished with the slot
1256 void xprt_release(struct rpc_task *task) xprt_release() argument
1259 struct rpc_rqst *req = task->tk_rqstp; xprt_release()
1262 if (task->tk_client) { xprt_release()
1264 xprt = rcu_dereference(task->tk_client->cl_xprt); xprt_release()
1265 if (xprt->snd_task == task) xprt_release()
1266 xprt_release_write(xprt, task); xprt_release()
1273 if (task->tk_ops->rpc_count_stats != NULL) xprt_release()
1274 task->tk_ops->rpc_count_stats(task, task->tk_calldata); xprt_release()
1275 else if (task->tk_client) xprt_release()
1276 rpc_count_iostats(task, task->tk_client->cl_metrics); xprt_release()
1278 xprt->ops->release_xprt(xprt, task); xprt_release()
1280 xprt->ops->release_request(task); xprt_release()
1292 task->tk_rqstp = NULL; xprt_release()
1296 dprintk("RPC: %5u release request %p\n", task->tk_pid, req); xprt_release()
694 xprt_lock_connect(struct rpc_xprt *xprt, struct rpc_task *task, void *cookie) xprt_lock_connect() argument
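
Several of the hits above (__xprt_lock_write_func() here, rpc_wake_up_next_func() in sched.c) are filter callbacks handed to rpc_wake_up_first(), which wakes at most the first queued task the predicate accepts. A hedged sketch of that callback pattern follows; the demo_* names and the filter condition are made up for illustration.

#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/xprt.h>

/* Illustrative predicate: only accept a sender that already owns a
 * request slot. The real locking callbacks above do more (they claim
 * xprt->snd_task and bump rq_ntrans). */
static bool demo_wake_if_has_slot(struct rpc_task *task, void *data)
{
        return task->tk_rqstp != NULL;
}

/* Wakes at most one queued sender: the first one the predicate accepts. */
static void demo_handoff_transport(struct rpc_xprt *xprt)
{
        rpc_wake_up_first(&xprt->sending, demo_wake_if_has_slot, xprt);
}
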
clnt.c
60 static void call_start(struct rpc_task *task);
61 static void call_reserve(struct rpc_task *task);
62 static void call_reserveresult(struct rpc_task *task);
63 static void call_allocate(struct rpc_task *task);
64 static void call_decode(struct rpc_task *task);
65 static void call_bind(struct rpc_task *task);
66 static void call_bind_status(struct rpc_task *task);
67 static void call_transmit(struct rpc_task *task);
69 static void call_bc_transmit(struct rpc_task *task);
71 static void call_status(struct rpc_task *task);
72 static void call_transmit_status(struct rpc_task *task);
73 static void call_refresh(struct rpc_task *task);
74 static void call_refreshresult(struct rpc_task *task);
75 static void call_timeout(struct rpc_task *task);
76 static void call_connect(struct rpc_task *task);
77 static void call_connect_status(struct rpc_task *task);
79 static __be32 *rpc_encode_header(struct rpc_task *task);
80 static __be32 *rpc_verify_header(struct rpc_task *task);
868 void rpc_task_release_client(struct rpc_task *task) rpc_task_release_client() argument
870 struct rpc_clnt *clnt = task->tk_client; rpc_task_release_client()
873 /* Remove from client task list */ rpc_task_release_client()
875 list_del(&task->tk_task); rpc_task_release_client()
877 task->tk_client = NULL; rpc_task_release_client()
884 void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt) rpc_task_set_client() argument
887 rpc_task_release_client(task); rpc_task_set_client()
888 task->tk_client = clnt; rpc_task_set_client()
891 task->tk_flags |= RPC_TASK_SOFT; rpc_task_set_client()
893 task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT; rpc_task_set_client()
900 task->tk_flags |= RPC_TASK_SWAPPER; rpc_task_set_client()
905 list_add_tail(&task->tk_task, &clnt->cl_tasks); rpc_task_set_client()
910 void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt) rpc_task_reset_client() argument
912 rpc_task_release_client(task); rpc_task_reset_client()
913 rpc_task_set_client(task, clnt); rpc_task_reset_client()
919 rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg) rpc_task_set_rpc_message() argument
922 task->tk_msg.rpc_proc = msg->rpc_proc; rpc_task_set_rpc_message()
923 task->tk_msg.rpc_argp = msg->rpc_argp; rpc_task_set_rpc_message()
924 task->tk_msg.rpc_resp = msg->rpc_resp; rpc_task_set_rpc_message()
926 task->tk_msg.rpc_cred = get_rpccred(msg->rpc_cred); rpc_task_set_rpc_message()
934 rpc_default_callback(struct rpc_task *task, void *data) rpc_default_callback() argument
943 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
944 * @task_setup_data: pointer to task initialisation data
948 struct rpc_task *task; rpc_run_task() local
950 task = rpc_new_task(task_setup_data); rpc_run_task()
951 if (IS_ERR(task)) rpc_run_task()
954 rpc_task_set_client(task, task_setup_data->rpc_client); rpc_run_task()
955 rpc_task_set_rpc_message(task, task_setup_data->rpc_message); rpc_run_task()
957 if (task->tk_action == NULL) rpc_run_task()
958 rpc_call_start(task); rpc_run_task()
960 atomic_inc(&task->tk_count); rpc_run_task()
961 rpc_execute(task); rpc_run_task()
963 return task; rpc_run_task()
975 struct rpc_task *task; rpc_call_sync() local
991 task = rpc_run_task(&task_setup_data); rpc_call_sync()
992 if (IS_ERR(task)) rpc_call_sync()
993 return PTR_ERR(task); rpc_call_sync()
994 status = task->tk_status; rpc_call_sync()
995 rpc_put_task(task); rpc_call_sync()
1012 struct rpc_task *task; rpc_call_async() local
1021 task = rpc_run_task(&task_setup_data); rpc_call_async()
1022 if (IS_ERR(task)) rpc_call_async()
1023 return PTR_ERR(task); rpc_call_async()
1024 rpc_put_task(task); rpc_call_async()
1031 * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
1039 struct rpc_task *task; rpc_run_bc_task() local
1049 task = rpc_new_task(&task_setup_data); rpc_run_bc_task()
1050 if (IS_ERR(task)) { rpc_run_bc_task()
1054 task->tk_rqstp = req; rpc_run_bc_task()
1063 task->tk_action = call_bc_transmit; rpc_run_bc_task()
1064 atomic_inc(&task->tk_count); rpc_run_bc_task()
1065 WARN_ON_ONCE(atomic_read(&task->tk_count) != 2); rpc_run_bc_task()
1066 rpc_execute(task); rpc_run_bc_task()
1069 dprintk("RPC: rpc_run_bc_task: task= %p\n", task); rpc_run_bc_task()
1070 return task; rpc_run_bc_task()
1075 rpc_call_start(struct rpc_task *task) rpc_call_start() argument
1077 task->tk_action = call_start; rpc_call_start()
1379 rpc_restart_call_prepare(struct rpc_task *task) rpc_restart_call_prepare() argument
1381 if (RPC_ASSASSINATED(task)) rpc_restart_call_prepare()
1383 task->tk_action = call_start; rpc_restart_call_prepare()
1384 task->tk_status = 0; rpc_restart_call_prepare()
1385 if (task->tk_ops->rpc_call_prepare != NULL) rpc_restart_call_prepare()
1386 task->tk_action = rpc_prepare_task; rpc_restart_call_prepare()
1396 rpc_restart_call(struct rpc_task *task) rpc_restart_call() argument
1398 if (RPC_ASSASSINATED(task)) rpc_restart_call()
1400 task->tk_action = call_start; rpc_restart_call()
1401 task->tk_status = 0; rpc_restart_call()
1408 *rpc_proc_name(const struct rpc_task *task) rpc_proc_name() argument
1410 const struct rpc_procinfo *proc = task->tk_msg.rpc_proc; rpc_proc_name()
1429 call_start(struct rpc_task *task) call_start() argument
1431 struct rpc_clnt *clnt = task->tk_client; call_start()
1433 dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task->tk_pid, call_start()
1435 rpc_proc_name(task), call_start()
1436 (RPC_IS_ASYNC(task) ? "async" : "sync")); call_start()
1439 task->tk_msg.rpc_proc->p_count++; call_start()
1441 task->tk_action = call_reserve; call_start()
1448 call_reserve(struct rpc_task *task) call_reserve() argument
1450 dprint_status(task); call_reserve()
1452 task->tk_status = 0; call_reserve()
1453 task->tk_action = call_reserveresult; call_reserve()
1454 xprt_reserve(task); call_reserve()
1457 static void call_retry_reserve(struct rpc_task *task);
1463 call_reserveresult(struct rpc_task *task) call_reserveresult() argument
1465 int status = task->tk_status; call_reserveresult()
1467 dprint_status(task); call_reserveresult()
1473 task->tk_status = 0; call_reserveresult()
1475 if (task->tk_rqstp) { call_reserveresult()
1476 task->tk_action = call_refresh; call_reserveresult()
1482 rpc_exit(task, -EIO); call_reserveresult()
1490 if (task->tk_rqstp) { call_reserveresult()
1493 xprt_release(task); call_reserveresult()
1498 rpc_delay(task, HZ >> 2); call_reserveresult()
1500 task->tk_action = call_retry_reserve; call_reserveresult()
1509 rpc_exit(task, status); call_reserveresult()
1516 call_retry_reserve(struct rpc_task *task) call_retry_reserve() argument
1518 dprint_status(task); call_retry_reserve()
1520 task->tk_status = 0; call_retry_reserve()
1521 task->tk_action = call_reserveresult; call_retry_reserve()
1522 xprt_retry_reserve(task); call_retry_reserve()
1529 call_refresh(struct rpc_task *task) call_refresh() argument
1531 dprint_status(task); call_refresh()
1533 task->tk_action = call_refreshresult; call_refresh()
1534 task->tk_status = 0; call_refresh()
1535 task->tk_client->cl_stats->rpcauthrefresh++; call_refresh()
1536 rpcauth_refreshcred(task); call_refresh()
1543 call_refreshresult(struct rpc_task *task) call_refreshresult() argument
1545 int status = task->tk_status; call_refreshresult()
1547 dprint_status(task); call_refreshresult()
1549 task->tk_status = 0; call_refreshresult()
1550 task->tk_action = call_refresh; call_refreshresult()
1553 if (rpcauth_uptodatecred(task)) { call_refreshresult()
1554 task->tk_action = call_allocate; call_refreshresult()
1561 rpc_delay(task, 3*HZ); call_refreshresult()
1565 if (!task->tk_cred_retry) call_refreshresult()
1567 task->tk_cred_retry--; call_refreshresult()
1569 task->tk_pid, __func__); call_refreshresult()
1573 task->tk_pid, __func__, status); call_refreshresult()
1574 rpc_exit(task, status); call_refreshresult()
1582 call_allocate(struct rpc_task *task) call_allocate() argument
1584 unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack; call_allocate()
1585 struct rpc_rqst *req = task->tk_rqstp; call_allocate()
1587 struct rpc_procinfo *proc = task->tk_msg.rpc_proc; call_allocate()
1589 dprint_status(task); call_allocate()
1591 task->tk_status = 0; call_allocate()
1592 task->tk_action = call_bind; call_allocate()
1613 req->rq_buffer = xprt->ops->buf_alloc(task, call_allocate()
1618 dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid); call_allocate()
1620 if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) { call_allocate()
1621 task->tk_action = call_allocate; call_allocate()
1622 rpc_delay(task, HZ>>4); call_allocate()
1626 rpc_exit(task, -ERESTARTSYS); call_allocate()
1630 rpc_task_need_encode(struct rpc_task *task) rpc_task_need_encode() argument
1632 return task->tk_rqstp->rq_snd_buf.len == 0; rpc_task_need_encode()
1636 rpc_task_force_reencode(struct rpc_task *task) rpc_task_force_reencode() argument
1638 task->tk_rqstp->rq_snd_buf.len = 0; rpc_task_force_reencode()
1639 task->tk_rqstp->rq_bytes_sent = 0; rpc_task_force_reencode()
1658 rpc_xdr_encode(struct rpc_task *task) rpc_xdr_encode() argument
1660 struct rpc_rqst *req = task->tk_rqstp; rpc_xdr_encode()
1664 dprint_status(task); rpc_xdr_encode()
1673 p = rpc_encode_header(task); rpc_xdr_encode()
1676 rpc_exit(task, -EIO); rpc_xdr_encode()
1680 encode = task->tk_msg.rpc_proc->p_encode; rpc_xdr_encode()
1684 task->tk_status = rpcauth_wrap_req(task, encode, req, p, rpc_xdr_encode()
1685 task->tk_msg.rpc_argp); rpc_xdr_encode()
1692 call_bind(struct rpc_task *task) call_bind() argument
1694 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; call_bind()
1696 dprint_status(task); call_bind()
1698 task->tk_action = call_connect; call_bind()
1700 task->tk_action = call_bind_status; call_bind()
1701 task->tk_timeout = xprt->bind_timeout; call_bind()
1702 xprt->ops->rpcbind(task); call_bind()
1710 call_bind_status(struct rpc_task *task) call_bind_status() argument
1714 if (task->tk_status >= 0) { call_bind_status()
1715 dprint_status(task); call_bind_status()
1716 task->tk_status = 0; call_bind_status()
1717 task->tk_action = call_connect; call_bind_status()
1721 trace_rpc_bind_status(task); call_bind_status()
1722 switch (task->tk_status) { call_bind_status()
1724 dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid); call_bind_status()
1725 rpc_delay(task, HZ >> 2); call_bind_status()
1729 "unavailable\n", task->tk_pid); call_bind_status()
1731 if (task->tk_msg.rpc_proc->p_proc == 0) { call_bind_status()
1735 if (task->tk_rebind_retry == 0) call_bind_status()
1737 task->tk_rebind_retry--; call_bind_status()
1738 rpc_delay(task, 3*HZ); call_bind_status()
1742 task->tk_pid); call_bind_status()
1747 task->tk_pid); call_bind_status()
1751 task->tk_pid); call_bind_status()
1763 task->tk_pid, task->tk_status); call_bind_status()
1764 if (!RPC_IS_SOFTCONN(task)) { call_bind_status()
1765 rpc_delay(task, 5*HZ); call_bind_status()
1768 status = task->tk_status; call_bind_status()
1772 task->tk_pid, -task->tk_status); call_bind_status()
1775 rpc_exit(task, status); call_bind_status()
1779 task->tk_status = 0; call_bind_status()
1780 task->tk_action = call_timeout; call_bind_status()
1787 call_connect(struct rpc_task *task) call_connect() argument
1789 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; call_connect()
1792 task->tk_pid, xprt, call_connect()
1795 task->tk_action = call_transmit; call_connect()
1797 task->tk_action = call_connect_status; call_connect()
1798 if (task->tk_status < 0) call_connect()
1800 if (task->tk_flags & RPC_TASK_NOCONNECT) { call_connect()
1801 rpc_exit(task, -ENOTCONN); call_connect()
1804 xprt_connect(task); call_connect()
1812 call_connect_status(struct rpc_task *task) call_connect_status() argument
1814 struct rpc_clnt *clnt = task->tk_client; call_connect_status()
1815 int status = task->tk_status; call_connect_status()
1817 dprint_status(task); call_connect_status()
1819 trace_rpc_connect_status(task, status); call_connect_status()
1820 task->tk_status = 0; call_connect_status()
1830 if (RPC_IS_SOFTCONN(task)) call_connect_status()
1833 rpc_delay(task, 3*HZ); call_connect_status()
1837 task->tk_action = call_timeout; call_connect_status()
1841 task->tk_action = call_transmit; call_connect_status()
1844 rpc_exit(task, status); call_connect_status()
1851 call_transmit(struct rpc_task *task) call_transmit() argument
1853 int is_retrans = RPC_WAS_SENT(task); call_transmit()
1855 dprint_status(task); call_transmit()
1857 task->tk_action = call_status; call_transmit()
1858 if (task->tk_status < 0) call_transmit()
1860 if (!xprt_prepare_transmit(task)) call_transmit()
1862 task->tk_action = call_transmit_status; call_transmit()
1864 if (rpc_task_need_encode(task)) { call_transmit()
1865 rpc_xdr_encode(task); call_transmit()
1867 if (task->tk_status != 0) { call_transmit()
1869 if (task->tk_status == -EAGAIN) call_transmit()
1870 rpc_delay(task, HZ >> 4); call_transmit()
1872 rpc_exit(task, task->tk_status); call_transmit()
1876 xprt_transmit(task); call_transmit()
1877 if (task->tk_status < 0) call_transmit()
1880 task->tk_client->cl_stats->rpcretrans++; call_transmit()
1885 call_transmit_status(task); call_transmit()
1886 if (rpc_reply_expected(task)) call_transmit()
1888 task->tk_action = rpc_exit_task; call_transmit()
1889 rpc_wake_up_queued_task(&task->tk_rqstp->rq_xprt->pending, task); call_transmit()
1896 call_transmit_status(struct rpc_task *task) call_transmit_status() argument
1898 task->tk_action = call_status; call_transmit_status()
1904 if (task->tk_status == 0) { call_transmit_status()
1905 xprt_end_transmit(task); call_transmit_status()
1906 rpc_task_force_reencode(task); call_transmit_status()
1910 switch (task->tk_status) { call_transmit_status()
1914 dprint_status(task); call_transmit_status()
1915 xprt_end_transmit(task); call_transmit_status()
1916 rpc_task_force_reencode(task); call_transmit_status()
1929 if (RPC_IS_SOFTCONN(task)) { call_transmit_status()
1930 xprt_end_transmit(task); call_transmit_status()
1931 rpc_exit(task, task->tk_status); call_transmit_status()
1940 rpc_task_force_reencode(task); call_transmit_status()
1950 call_bc_transmit(struct rpc_task *task) call_bc_transmit() argument
1952 struct rpc_rqst *req = task->tk_rqstp; call_bc_transmit()
1954 if (!xprt_prepare_transmit(task)) { call_bc_transmit()
1959 task->tk_status = 0; call_bc_transmit()
1960 task->tk_action = call_bc_transmit; call_bc_transmit()
1964 task->tk_action = rpc_exit_task; call_bc_transmit()
1965 if (task->tk_status < 0) { call_bc_transmit()
1967 "error: %d\n", task->tk_status); call_bc_transmit()
1971 xprt_transmit(task); call_bc_transmit()
1972 xprt_end_transmit(task); call_bc_transmit()
1973 dprint_status(task); call_bc_transmit()
1974 switch (task->tk_status) { call_bc_transmit()
1990 "error: %d\n", task->tk_status); call_bc_transmit()
1999 WARN_ON_ONCE(task->tk_status == -EAGAIN); call_bc_transmit()
2001 "error: %d\n", task->tk_status); call_bc_transmit()
2004 rpc_wake_up_queued_task(&req->rq_xprt->pending, task); call_bc_transmit()
2012 call_status(struct rpc_task *task) call_status() argument
2014 struct rpc_clnt *clnt = task->tk_client; call_status()
2015 struct rpc_rqst *req = task->tk_rqstp; call_status()
2019 task->tk_status = req->rq_reply_bytes_recvd; call_status()
2021 dprint_status(task); call_status()
2023 status = task->tk_status; call_status()
2025 task->tk_action = call_decode; call_status()
2029 trace_rpc_call_status(task); call_status()
2030 task->tk_status = 0; call_status()
2036 if (RPC_IS_SOFTCONN(task)) { call_status()
2037 rpc_exit(task, status); call_status()
2044 rpc_delay(task, 3*HZ); call_status()
2046 task->tk_action = call_timeout; call_status()
2047 if (!(task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) call_status()
2048 && task->tk_client->cl_discrtry) call_status()
2058 rpc_delay(task, 3*HZ); call_status()
2061 task->tk_action = call_bind; call_status()
2064 task->tk_action = call_transmit; call_status()
2068 rpc_exit(task, status); call_status()
2074 rpc_exit(task, status); call_status()
2084 call_timeout(struct rpc_task *task) call_timeout() argument
2086 struct rpc_clnt *clnt = task->tk_client; call_timeout()
2088 if (xprt_adjust_timeout(task->tk_rqstp) == 0) { call_timeout()
2089 dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid); call_timeout()
2093 dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid); call_timeout()
2094 task->tk_timeouts++; call_timeout()
2096 if (RPC_IS_SOFTCONN(task)) { call_timeout()
2097 rpc_exit(task, -ETIMEDOUT); call_timeout()
2100 if (RPC_IS_SOFT(task)) { call_timeout()
2108 if (task->tk_flags & RPC_TASK_TIMEOUT) call_timeout()
2109 rpc_exit(task, -ETIMEDOUT); call_timeout()
2111 rpc_exit(task, -EIO); call_timeout()
2115 if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) { call_timeout()
2116 task->tk_flags |= RPC_CALL_MAJORSEEN; call_timeout()
2130 rpcauth_invalcred(task); call_timeout()
2133 task->tk_action = call_bind; call_timeout()
2134 task->tk_status = 0; call_timeout()
2141 call_decode(struct rpc_task *task) call_decode() argument
2143 struct rpc_clnt *clnt = task->tk_client; call_decode()
2144 struct rpc_rqst *req = task->tk_rqstp; call_decode()
2145 kxdrdproc_t decode = task->tk_msg.rpc_proc->p_decode; call_decode()
2148 dprint_status(task); call_decode()
2150 if (task->tk_flags & RPC_CALL_MAJORSEEN) { call_decode()
2158 task->tk_flags &= ~RPC_CALL_MAJORSEEN; call_decode()
2173 if (!RPC_IS_SOFT(task)) { call_decode()
2174 task->tk_action = call_bind; call_decode()
2178 clnt->cl_program->name, task->tk_status); call_decode()
2179 task->tk_action = call_timeout; call_decode()
2183 p = rpc_verify_header(task); call_decode()
2190 task->tk_action = rpc_exit_task; call_decode()
2193 task->tk_status = rpcauth_unwrap_resp(task, decode, req, p, call_decode()
2194 task->tk_msg.rpc_resp); call_decode()
2196 dprintk("RPC: %5u call_decode result %d\n", task->tk_pid, call_decode()
2197 task->tk_status); call_decode()
2200 task->tk_status = 0; call_decode()
2202 if (task->tk_rqstp == req) { call_decode()
2204 if (task->tk_client->cl_discrtry) call_decode()
2211 rpc_encode_header(struct rpc_task *task) rpc_encode_header() argument
2213 struct rpc_clnt *clnt = task->tk_client; rpc_encode_header()
2214 struct rpc_rqst *req = task->tk_rqstp; rpc_encode_header()
2225 *p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */ rpc_encode_header()
2226 p = rpcauth_marshcred(task, p); rpc_encode_header()
2232 rpc_verify_header(struct rpc_task *task) rpc_verify_header() argument
2234 struct rpc_clnt *clnt = task->tk_client; rpc_verify_header()
2235 struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0]; rpc_verify_header()
2236 int len = task->tk_rqstp->rq_rcv_buf.len >> 2; rpc_verify_header()
2241 if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) { rpc_verify_header()
2248 " 4 bytes: 0x%x\n", task->tk_pid, __func__, rpc_verify_header()
2249 task->tk_rqstp->rq_rcv_buf.len); rpc_verify_header()
2259 task->tk_pid, __func__, n); rpc_verify_header()
2272 task->tk_pid, __func__); rpc_verify_header()
2278 task->tk_pid, __func__, n); rpc_verify_header()
2289 if (!task->tk_cred_retry) rpc_verify_header()
2291 task->tk_cred_retry--; rpc_verify_header()
2293 task->tk_pid, __func__); rpc_verify_header()
2294 rpcauth_invalcred(task); rpc_verify_header()
2296 xprt_release(task); rpc_verify_header()
2297 task->tk_action = call_reserve; rpc_verify_header()
2302 if (!task->tk_garb_retry) rpc_verify_header()
2304 task->tk_garb_retry--; rpc_verify_header()
2306 task->tk_pid, __func__); rpc_verify_header()
2307 task->tk_action = call_bind; rpc_verify_header()
2318 task->tk_pid, __func__, n); rpc_verify_header()
2322 task->tk_pid, __func__, n); rpc_verify_header()
2325 p = rpcauth_checkverf(task, p); rpc_verify_header()
2329 task->tk_pid, __func__, error); rpc_verify_header()
2340 "by server %s\n", task->tk_pid, __func__, rpc_verify_header()
2347 "by server %s\n", task->tk_pid, __func__, rpc_verify_header()
2356 task->tk_pid, __func__, rpc_verify_header()
2357 rpc_proc_name(task), rpc_verify_header()
2364 task->tk_pid, __func__); rpc_verify_header()
2368 task->tk_pid, __func__, n); rpc_verify_header()
2374 if (task->tk_garb_retry) { rpc_verify_header()
2375 task->tk_garb_retry--; rpc_verify_header()
2377 task->tk_pid, __func__); rpc_verify_header()
2378 task->tk_action = call_bind; rpc_verify_header()
2383 rpc_exit(task, error); rpc_verify_header()
2384 dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid, rpc_verify_header()
2388 dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid, rpc_verify_header()
2443 const struct rpc_task *task) rpc_show_task()
2447 if (RPC_IS_QUEUED(task)) rpc_show_task()
2448 rpc_waitq = rpc_qname(task->tk_waitqueue); rpc_show_task()
2451 task->tk_pid, task->tk_flags, task->tk_status, rpc_show_task()
2452 clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops, rpc_show_task()
2453 clnt->cl_program->name, clnt->cl_vers, rpc_proc_name(task), rpc_show_task()
2454 task->tk_action, rpc_waitq); rpc_show_task()
2460 struct rpc_task *task; rpc_show_tasks() local
2467 list_for_each_entry(task, &clnt->cl_tasks, tk_task) { rpc_show_tasks()
2472 rpc_show_task(clnt, task); rpc_show_tasks()
2442 rpc_show_task(const struct rpc_clnt *clnt, const struct rpc_task *task) rpc_show_task() argument
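
rpc_call_sync() and rpc_call_async() above are thin wrappers: they fill a struct rpc_task_setup, run it with rpc_run_task(), and drop their reference with rpc_put_task(). Here is a minimal sketch of the same pattern for an asynchronous caller, assuming a caller-supplied rpc_clnt and rpc_message; everything prefixed demo_ is illustrative.

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>

static void demo_call_done(struct rpc_task *task, void *calldata)
{
        /* Runs from rpciod when the call completes; tk_status holds the result. */
        pr_debug("demo call finished: %d\n", task->tk_status);
}

static const struct rpc_call_ops demo_call_ops = {
        .rpc_call_done = demo_call_done,
};

/* Fire an async call and drop our reference, as rpc_call_async() does above. */
static int demo_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg)
{
        struct rpc_task_setup setup = {
                .rpc_client   = clnt,
                .rpc_message  = msg,
                .callback_ops = &demo_call_ops,
                .flags        = RPC_TASK_ASYNC,
        };
        struct rpc_task *task = rpc_run_task(&setup);

        if (IS_ERR(task))
                return PTR_ERR(task);
        rpc_put_task(task);
        return 0;
}
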
bc_svc.c
48 struct rpc_task *task; bc_send() local
52 task = rpc_run_bc_task(req, &nfs41_callback_ops); bc_send()
53 if (IS_ERR(task)) bc_send()
54 ret = PTR_ERR(task); bc_send()
56 WARN_ON_ONCE(atomic_read(&task->tk_count) != 1); bc_send()
57 ret = task->tk_status; bc_send()
58 rpc_put_task(task); bc_send()
debugfs.c
25 struct rpc_task *task = v; tasks_show() local
26 struct rpc_clnt *clnt = task->tk_client; tasks_show()
29 if (RPC_IS_QUEUED(task)) tasks_show()
30 rpc_waitq = rpc_qname(task->tk_waitqueue); tasks_show()
32 if (task->tk_rqstp) tasks_show()
33 xid = be32_to_cpu(task->tk_rqstp->rq_xid); tasks_show()
36 task->tk_pid, task->tk_flags, task->tk_status, tasks_show()
37 clnt->cl_clid, xid, task->tk_timeout, task->tk_ops, tasks_show()
38 clnt->cl_program->name, clnt->cl_vers, rpc_proc_name(task), tasks_show()
39 task->tk_action, rpc_waitq); tasks_show()
50 struct rpc_task *task; variable in typeref:struct:rpc_task
54 list_for_each_entry(task, &clnt->cl_tasks, tk_task)
56 return task;
65 struct rpc_task *task = v; tasks_next() local
66 struct list_head *next = task->tk_task.next; tasks_next()
71 /* If there's another task on list, return it */ tasks_next()
auth.c
657 rpcauth_generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred, int lookupflags) rpcauth_generic_bind_cred() argument
659 dprintk("RPC: %5u holding %s cred %p\n", task->tk_pid, rpcauth_generic_bind_cred()
666 rpcauth_bind_root_cred(struct rpc_task *task, int lookupflags) rpcauth_bind_root_cred() argument
668 struct rpc_auth *auth = task->tk_client->cl_auth; rpcauth_bind_root_cred()
675 task->tk_pid, task->tk_client->cl_auth->au_ops->au_name); rpcauth_bind_root_cred()
680 rpcauth_bind_new_cred(struct rpc_task *task, int lookupflags) rpcauth_bind_new_cred() argument
682 struct rpc_auth *auth = task->tk_client->cl_auth; rpcauth_bind_new_cred()
685 task->tk_pid, auth->au_ops->au_name); rpcauth_bind_new_cred()
690 rpcauth_bindcred(struct rpc_task *task, struct rpc_cred *cred, int flags) rpcauth_bindcred() argument
692 struct rpc_rqst *req = task->tk_rqstp; rpcauth_bindcred()
699 new = cred->cr_ops->crbind(task, cred, lookupflags); rpcauth_bindcred()
701 new = rpcauth_bind_root_cred(task, lookupflags); rpcauth_bindcred()
703 new = rpcauth_bind_new_cred(task, lookupflags); rpcauth_bindcred()
749 rpcauth_marshcred(struct rpc_task *task, __be32 *p) rpcauth_marshcred() argument
751 struct rpc_cred *cred = task->tk_rqstp->rq_cred; rpcauth_marshcred()
754 task->tk_pid, cred->cr_auth->au_ops->au_name, cred); rpcauth_marshcred()
756 return cred->cr_ops->crmarshal(task, p); rpcauth_marshcred()
760 rpcauth_checkverf(struct rpc_task *task, __be32 *p) rpcauth_checkverf() argument
762 struct rpc_cred *cred = task->tk_rqstp->rq_cred; rpcauth_checkverf()
765 task->tk_pid, cred->cr_auth->au_ops->au_name, cred); rpcauth_checkverf()
767 return cred->cr_ops->crvalidate(task, p); rpcauth_checkverf()
780 rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp, rpcauth_wrap_req() argument
783 struct rpc_cred *cred = task->tk_rqstp->rq_cred; rpcauth_wrap_req()
786 task->tk_pid, cred->cr_ops->cr_name, cred); rpcauth_wrap_req()
788 return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj); rpcauth_wrap_req()
805 rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp, rpcauth_unwrap_resp() argument
808 struct rpc_cred *cred = task->tk_rqstp->rq_cred; rpcauth_unwrap_resp()
811 task->tk_pid, cred->cr_ops->cr_name, cred); rpcauth_unwrap_resp()
813 return cred->cr_ops->crunwrap_resp(task, decode, rqstp, rpcauth_unwrap_resp()
820 rpcauth_refreshcred(struct rpc_task *task) rpcauth_refreshcred() argument
825 cred = task->tk_rqstp->rq_cred; rpcauth_refreshcred()
827 err = rpcauth_bindcred(task, task->tk_msg.rpc_cred, task->tk_flags); rpcauth_refreshcred()
830 cred = task->tk_rqstp->rq_cred; rpcauth_refreshcred()
833 task->tk_pid, cred->cr_auth->au_ops->au_name, cred); rpcauth_refreshcred()
835 err = cred->cr_ops->crrefresh(task); rpcauth_refreshcred()
838 task->tk_status = err; rpcauth_refreshcred()
843 rpcauth_invalcred(struct rpc_task *task) rpcauth_invalcred() argument
845 struct rpc_cred *cred = task->tk_rqstp->rq_cred; rpcauth_invalcred()
848 task->tk_pid, cred->cr_auth->au_ops->au_name, cred); rpcauth_invalcred()
854 rpcauth_uptodatecred(struct rpc_task *task) rpcauth_uptodatecred() argument
856 struct rpc_cred *cred = task->tk_rqstp->rq_cred; rpcauth_uptodatecred()
H A Dstats.c143 * rpc_count_iostats_metrics - tally up per-task stats
144 * @task: completed rpc_task
145 * @op_metrics: stat structure for OP that will accumulate stats from @task
147 void rpc_count_iostats_metrics(const struct rpc_task *task, rpc_count_iostats_metrics() argument
150 struct rpc_rqst *req = task->tk_rqstp; rpc_count_iostats_metrics()
161 op_metrics->om_timeouts += task->tk_timeouts; rpc_count_iostats_metrics()
166 delta = ktime_sub(req->rq_xtime, task->tk_start); rpc_count_iostats_metrics()
171 delta = ktime_sub(now, task->tk_start); rpc_count_iostats_metrics()
179 * rpc_count_iostats - tally up per-task stats
180 * @task: completed rpc_task
183 * Uses the statidx from @task
185 void rpc_count_iostats(const struct rpc_task *task, struct rpc_iostats *stats) rpc_count_iostats() argument
187 rpc_count_iostats_metrics(task, rpc_count_iostats()
188 &stats[task->tk_msg.rpc_proc->p_statidx]); rpc_count_iostats()
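The stats.c hits above show rpc_count_iostats() resolving a finished task's procedure via p_statidx and handing it to rpc_count_iostats_metrics(). A minimal sketch, assuming a caller that owns a per-procedure rpc_iostats array (the array and the example_* names are invented for illustration), of feeding a completed task into those counters from an rpc_call_done-style callback:

    #include <linux/sunrpc/clnt.h>
    #include <linux/sunrpc/metrics.h>

    /* Hypothetical per-client metrics table, one slot per procedure,
     * e.g. allocated with rpc_alloc_iostats(clnt). */
    static struct rpc_iostats *example_iostats;

    /* Assumed rpc_call_done-style callback: tally the finished task into
     * the slot picked by its p_statidx, as rpc_count_iostats() does. */
    static void example_call_done(struct rpc_task *task, void *calldata)
    {
            if (example_iostats)
                    rpc_count_iostats(task, example_iostats);
    }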
/linux-4.1.27/include/trace/events/
H A Dtask.h2 #define TRACE_SYSTEM task
10 TP_PROTO(struct task_struct *task, unsigned long clone_flags),
12 TP_ARGS(task, clone_flags),
22 __entry->pid = task->pid;
23 memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
25 __entry->oom_score_adj = task->signal->oom_score_adj;
35 TP_PROTO(struct task_struct *task, const char *comm),
37 TP_ARGS(task, comm),
47 __entry->pid = task->pid;
48 memcpy(entry->oldcomm, task->comm, TASK_COMM_LEN);
50 __entry->oom_score_adj = task->signal->oom_score_adj;
H A Doom.h10 TP_PROTO(struct task_struct *task),
12 TP_ARGS(task),
21 __entry->pid = task->pid;
22 memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
23 __entry->oom_score_adj = task->signal->oom_score_adj;
H A Dsunrpc.h18 TP_PROTO(struct rpc_task *task),
20 TP_ARGS(task),
29 __entry->task_id = task->tk_pid;
30 __entry->client_id = task->tk_client->cl_clid;
31 __entry->status = task->tk_status;
34 TP_printk("task:%u@%u, status %d",
40 TP_PROTO(struct rpc_task *task),
42 TP_ARGS(task)
46 TP_PROTO(struct rpc_task *task),
48 TP_ARGS(task)
52 TP_PROTO(struct rpc_task *task, int status),
54 TP_ARGS(task, status),
63 __entry->task_id = task->tk_pid;
64 __entry->client_id = task->tk_client->cl_clid;
68 TP_printk("task:%u@%u, status %d",
75 TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
77 TP_ARGS(clnt, task, action),
90 __entry->task_id = task->tk_pid;
92 __entry->runstate = task->tk_runstate;
93 __entry->status = task->tk_status;
94 __entry->flags = task->tk_flags;
97 TP_printk("task:%u@%d flags=%4.4x state=%4.4lx status=%d action=%pf",
108 TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
110 TP_ARGS(clnt, task, action)
116 TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
118 TP_ARGS(clnt, task, action)
124 TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
126 TP_ARGS(clnt, task, action)
132 TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
134 TP_ARGS(clnt, task, q),
148 __entry->task_id = task->tk_pid;
149 __entry->timeout = task->tk_timeout;
150 __entry->runstate = task->tk_runstate;
151 __entry->status = task->tk_status;
152 __entry->flags = task->tk_flags;
156 TP_printk("task:%u@%u flags=%4.4x state=%4.4lx status=%d timeout=%lu queue=%s",
168 TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
170 TP_ARGS(clnt, task, q)
176 TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
178 TP_ARGS(clnt, task, q)
H A Dsignal.h40 * @task: pointer to struct task_struct
44 * Current process sends a 'sig' signal to 'task' process with
52 TP_PROTO(int sig, struct siginfo *info, struct task_struct *task,
55 TP_ARGS(sig, info, task, group, result),
70 memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
71 __entry->pid = task->pid;
/linux-4.1.27/arch/alpha/include/asm/
H A Dcurrent.h6 #define get_current() (current_thread_info()->task)
H A Dptrace.h13 #define task_pt_regs(task) \
14 ((struct pt_regs *) (task_stack_page(task) + 2*PAGE_SIZE) - 1)
H A Dthread_info.h17 struct task_struct *task; /* main task structure */ member in struct:thread_info
36 .task = &tsk, \
117 #define SET_UNALIGN_CTL(task,value) ({ \
118 __u32 status = task_thread_info(task)->status & ~UAC_BITMASK; \
125 task_thread_info(task)->status = status; \
128 #define GET_UNALIGN_CTL(task,value) ({ \
129 __u32 status = task_thread_info(task)->status & ~UAC_BITMASK; \
/linux-4.1.27/include/asm-generic/
H A Dcurrent.h6 #define get_current() (current_thread_info()->task)
H A Dsyscall.h15 * and only when the caller is sure that the task of interest
26 * syscall_get_nr - find what system call a task is executing
27 * @task: task of interest, must be blocked
28 * @regs: task_pt_regs() of @task
30 * If @task is executing a system call or is at system call
32 * If @task is not executing a system call, i.e. it's blocked
39 * It's only valid to call this when @task is known to be blocked.
41 int syscall_get_nr(struct task_struct *task, struct pt_regs *regs);
45 * @task: task of interest, must be in system call exit tracing
46 * @regs: task_pt_regs() of @task
48 * It's only valid to call this when @task is stopped for system
59 void syscall_rollback(struct task_struct *task, struct pt_regs *regs);
63 * @task: task of interest, must be blocked
64 * @regs: task_pt_regs() of @task
68 * It's only valid to call this when @task is stopped for tracing on exit
71 long syscall_get_error(struct task_struct *task, struct pt_regs *regs);
75 * @task: task of interest, must be blocked
76 * @regs: task_pt_regs() of @task
81 * It's only valid to call this when @task is stopped for tracing on exit
84 long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs);
88 * @task: task of interest, must be blocked
89 * @regs: task_pt_regs() of @task
98 * It's only valid to call this when @task is stopped for tracing on exit
101 void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
106 * @task: task of interest, must be blocked
107 * @regs: task_pt_regs() of @task
116 * It's only valid to call this when @task is stopped for tracing on
121 void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
126 * @task: task of interest, must be in system call entry tracing
127 * @regs: task_pt_regs() of @task
136 * It's only valid to call this when @task is stopped for tracing on
141 void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
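The syscall.h kerneldoc above keeps repeating one precondition: the task must be stopped or blocked for tracing before any of the syscall_get_* helpers are valid. A minimal sketch under that assumption (the example_* name is invented), reading the syscall number and return value of an already-stopped tracee:

    #include <linux/sched.h>
    #include <linux/printk.h>
    #include <asm/syscall.h>

    /* Only valid while @task is stopped for tracing, per the kerneldoc
     * above; syscall_get_nr() returns -1 when the task is blocked in the
     * kernel outside a system call. */
    static void example_report_syscall(struct task_struct *task)
    {
            struct pt_regs *regs = task_pt_regs(task);
            int nr = syscall_get_nr(task, regs);

            if (nr >= 0)
                    pr_info("%s[%d] in syscall %d, return value %ld\n",
                            task->comm, task->pid, nr,
                            syscall_get_return_value(task, regs));
            else
                    pr_info("%s[%d] not in a syscall\n",
                            task->comm, task->pid);
    }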
H A Dresource.h8 * boot-time rlimit defaults for the init task:
/linux-4.1.27/include/linux/
H A Dcn_proc.h23 void proc_fork_connector(struct task_struct *task);
24 void proc_exec_connector(struct task_struct *task);
25 void proc_id_connector(struct task_struct *task, int which_id);
26 void proc_sid_connector(struct task_struct *task);
27 void proc_ptrace_connector(struct task_struct *task, int which_id);
28 void proc_comm_connector(struct task_struct *task);
29 void proc_coredump_connector(struct task_struct *task);
30 void proc_exit_connector(struct task_struct *task);
32 static inline void proc_fork_connector(struct task_struct *task) proc_fork_connector() argument
35 static inline void proc_exec_connector(struct task_struct *task) proc_exec_connector() argument
38 static inline void proc_id_connector(struct task_struct *task, proc_id_connector() argument
42 static inline void proc_sid_connector(struct task_struct *task) proc_sid_connector() argument
45 static inline void proc_comm_connector(struct task_struct *task) proc_comm_connector() argument
48 static inline void proc_ptrace_connector(struct task_struct *task, proc_ptrace_connector() argument
52 static inline void proc_coredump_connector(struct task_struct *task) proc_coredump_connector() argument
55 static inline void proc_exit_connector(struct task_struct *task) proc_exit_connector() argument
H A Dtask_io_accounting.h2 * task_io_accounting: a structure which is used for recording a single task's
25 * The number of bytes which this task has caused to be read from
31 * The number of bytes which this task has caused, or shall cause to be
37 * A task can cause "negative" IO too. If this task truncates some
38 * dirty pagecache, some IO which another task has been accounted for
40 * subtract that from the truncating task's write_bytes, but there is
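The comment block describes the per-task I/O counters, including the "negative" I/O case where truncating dirty pagecache is recorded as cancelled writes rather than subtracted directly. A minimal sketch, assuming CONFIG_TASK_IO_ACCOUNTING and an invented helper name, of deriving a task's effective write total the way /proc-style reporting would:

    #include <linux/sched.h>
    #include <linux/task_io_accounting.h>

    /* Effective bytes written: raw write_bytes minus writes that were
     * cancelled by truncation, as the comment above explains. */
    static u64 example_effective_write_bytes(struct task_struct *task)
    {
    #ifdef CONFIG_TASK_IO_ACCOUNTING
            return task->ioac.write_bytes - task->ioac.cancelled_write_bytes;
    #else
            return 0;
    #endif
    }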
H A Dioprio.h53 static inline int task_nice_ioprio(struct task_struct *task) task_nice_ioprio() argument
55 return (task_nice(task) + 20) / 5; task_nice_ioprio()
59 * This is for the case where the task hasn't asked for a specific IO class.
60 * Check for idle and rt task process, and return appropriate IO class.
62 static inline int task_nice_ioclass(struct task_struct *task) task_nice_ioclass() argument
64 if (task->policy == SCHED_IDLE) task_nice_ioclass()
66 else if (task->policy == SCHED_FIFO || task->policy == SCHED_RR) task_nice_ioclass()
77 extern int set_task_ioprio(struct task_struct *task, int ioprio);
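The two inline helpers above derive a default I/O priority from scheduling state: (task_nice(task) + 20) / 5 maps the nice range -20..19 onto I/O priority levels 0..7 (nice 0 gives level 4), and the policy check picks the idle or real-time I/O class for SCHED_IDLE and SCHED_FIFO/SCHED_RR tasks. A minimal sketch (invented helper name) combining them into a full ioprio value with IOPRIO_PRIO_VALUE():

    #include <linux/ioprio.h>
    #include <linux/sched.h>

    /* Default I/O priority for a task that never called ioprio_set():
     * class from the scheduling policy, level from the nice value. */
    static int example_default_ioprio(struct task_struct *task)
    {
            return IOPRIO_PRIO_VALUE(task_nice_ioclass(task),
                                     task_nice_ioprio(task));
    }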
H A Dlatencytop.h30 void __account_scheduler_latency(struct task_struct *task, int usecs, int inter);
32 account_scheduler_latency(struct task_struct *task, int usecs, int inter) account_scheduler_latency() argument
35 __account_scheduler_latency(task, usecs, inter); account_scheduler_latency()
43 account_scheduler_latency(struct task_struct *task, int usecs, int inter) account_scheduler_latency() argument
H A Dtask_work.h15 int task_work_add(struct task_struct *task, struct callback_head *twork, bool);
19 static inline void exit_task_work(struct task_struct *task) exit_task_work() argument
H A Dshm.h22 /* The task created the shm object. NULL if the task is dead. */
56 void exit_shm(struct task_struct *task);
57 #define shm_init_task(task) INIT_LIST_HEAD(&(task)->sysvshm.shm_clist)
73 static inline void exit_shm(struct task_struct *task) exit_shm() argument
76 static inline void shm_init_task(struct task_struct *task) shm_init_task() argument
H A Dptrace.h14 * The ownership rules for task->ptrace, which holds the ptrace
15 * flags, are simple. When a task is running it owns its task->ptrace
16 * flags. When a task is stopped the ptracer owns task->ptrace.
70 * a target task.
71 * @task: target task
82 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
101 * ptrace_parent - return the task that is tracing the given task
102 * @task: task to consider
104 * Returns %NULL if no one is tracing @task, or the &struct task_struct
109 * on @task, still held from when check_unsafe_exec() was called.
111 static inline struct task_struct *ptrace_parent(struct task_struct *task) ptrace_parent() argument
113 if (unlikely(task->ptrace)) ptrace_parent()
114 return rcu_dereference(task->parent); ptrace_parent()
120 * @task: ptracee of interest
123 * Test whether @event is enabled for ptracee @task.
127 static inline bool ptrace_event_enabled(struct task_struct *task, int event) ptrace_event_enabled() argument
129 return task->ptrace & PT_EVENT_FLAG(event); ptrace_event_enabled()
187 * @child: new child task
218 * @task: task in %EXIT_DEAD state
222 static inline void ptrace_release_task(struct task_struct *task) ptrace_release_task() argument
224 BUG_ON(!list_empty(&task->ptraced)); ptrace_release_task()
225 ptrace_unlink(task); ptrace_release_task()
226 BUG_ON(!list_empty(&task->ptrace_entry)); ptrace_release_task()
274 * user_enable_single_step - single-step in user-mode task
275 * @task: either current or a task stopped in %TASK_TRACED
278 * Set @task so that when it returns to user mode, it will trap after the
282 static inline void user_enable_single_step(struct task_struct *task) user_enable_single_step() argument
289 * @task: either current or a task stopped in %TASK_TRACED
291 * Clear @task of the effects of user_enable_single_step() and
293 * of those was ever called on @task, and even if arch_has_single_step()
296 static inline void user_disable_single_step(struct task_struct *task) user_disable_single_step() argument
317 * user_enable_block_step - step until branch in user-mode task
318 * @task: either current or a task stopped in %TASK_TRACED
322 * Set @task so that when it returns to user mode, it will trap after the
325 static inline void user_enable_block_step(struct task_struct *task) user_enable_block_step() argument
360 * This is guaranteed to be invoked once before a task stops for ptrace and
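The ptrace.h kerneldoc above notes that ptrace_parent() only returns a stable pointer while rcu_read_lock() (or tasklist_lock) is held. A minimal sketch under that assumption, with an invented helper name, of reporting a task's tracer:

    #include <linux/ptrace.h>
    #include <linux/rcupdate.h>
    #include <linux/sched.h>
    #include <linux/printk.h>

    /* ptrace_parent() returns NULL for an untraced task and an
     * RCU-protected pointer otherwise, so it is only dereferenced inside
     * rcu_read_lock(). */
    static void example_show_tracer(struct task_struct *task)
    {
            struct task_struct *tracer;

            rcu_read_lock();
            tracer = ptrace_parent(task);
            if (tracer)
                    pr_info("%s[%d] is traced by %s[%d]\n",
                            task->comm, task->pid,
                            tracer->comm, tracer->pid);
            rcu_read_unlock();
    }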
H A Dnsproxy.h42 * 1. only current task is allowed to change tsk->nsproxy pointer or
46 * 2. when accessing (i.e. reading) current task's namespaces - no
49 * 3. the access to other task namespaces is performed like this
50 * task_lock(task);
51 * nsproxy = task->nsproxy;
58 * * NULL task->nsproxy means that this task is
61 * task_unlock(task);
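Rule 3 in the nsproxy.h comment spells out the protocol for reading another task's namespaces: take task_lock(), treat a NULL nsproxy as an exiting task, and pin whatever namespace is needed before unlocking. A minimal sketch of that pattern (the helper name and the choice of uts_ns are only for illustration):

    #include <linux/nsproxy.h>
    #include <linux/sched.h>
    #include <linux/utsname.h>

    /* Returns a pinned uts namespace, or NULL if @task is exiting.
     * The caller drops the reference with put_uts_ns(). */
    static struct uts_namespace *example_get_uts_ns(struct task_struct *task)
    {
            struct uts_namespace *uts = NULL;

            task_lock(task);
            if (task->nsproxy) {
                    uts = task->nsproxy->uts_ns;
                    get_uts_ns(uts);        /* pin before dropping the lock */
            }
            task_unlock(task);

            return uts;
    }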
H A Dcred.h46 * If the caller is accessing a task's credentials, they must hold the RCU read
96 * The security context of a task
100 * (1) The objective context of a task. These parts are used when some other
101 * task is attempting to affect this one.
103 * (2) The subjective context. These details are used when the task is acting
104 * upon another object, be that a file, a task, a key or whatever.
109 * A task has two security pointers. task->real_cred points to the objective
110 * context that defines that task's actual details. The objective part of this
111 * context is used whenever that task is acted upon.
113 * task->cred points to the subjective context that defines the details of how
114 * that task is going to act upon another object. This may be overridden
116 * same context as task->real_cred.
127 kuid_t uid; /* real UID of the task */
128 kgid_t gid; /* real GID of the task */
129 kuid_t suid; /* saved UID of the task */
130 kgid_t sgid; /* saved GID of the task */
131 kuid_t euid; /* effective UID of the task */
132 kgid_t egid; /* effective GID of the task */
269 * current_cred - Access the current task's subjective credentials
271 * Access the subjective credentials of the current task. RCU-safe,
278 * current_real_cred - Access the current task's objective credentials
280 * Access the objective credentials of the current task. RCU-safe,
287 * __task_cred - Access a task's objective credentials
288 * @task: The task to query
290 * Access the objective credentials of a task. The caller must hold the RCU
296 #define __task_cred(task) \
297 rcu_dereference((task)->real_cred)
300 * get_current_cred - Get the current task's subjective credentials
302 * Get the subjective credentials of the current task, pinning them so that
303 * they can't go away. Accessing the current task's credentials directly is
310 * get_current_user - Get the current task's user_struct
312 * Get the user record of the current task, pinning it so that it can't go
325 * get_current_groups - Get the current task's supplementary group list
327 * Get the supplementary group list of the current task, pinning it so that it
339 #define task_cred_xxx(task, xxx) \
343 ___val = __task_cred((task))->xxx; \
348 #define task_uid(task) (task_cred_xxx((task), uid))
349 #define task_euid(task) (task_cred_xxx((task), euid))
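The cred.h comments distinguish a task's objective (real_cred) and subjective (cred) contexts and insist that direct access goes through RCU or a pinned reference. A minimal sketch (invented helper name) of the pinned-reference route via get_task_cred(); the caller is assumed to already hold a reference on the task itself:

    #include <linux/cred.h>
    #include <linux/sched.h>
    #include <linux/printk.h>

    /* Pin @task's objective credentials, report the real UID, then drop
     * the pin.  For one-off field reads, task_uid()/task_euid() above do
     * the RCU dance internally instead. */
    static void example_report_task_uid(struct task_struct *task)
    {
            const struct cred *cred = get_task_cred(task);

            pr_info("%s[%d] real uid %u\n", task->comm, task->pid,
                    from_kuid(&init_user_ns, cred->uid));

            put_cred(cred);
    }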
H A Dprofile.h69 /* task is in do_exit() */
70 void profile_task_exit(struct task_struct * task);
72 /* task is dead, free task struct ? Returns 1 if
73 * the task was taken, 0 if the task should be freed.
75 int profile_handoff_task(struct task_struct * task);
H A Dperf_regs.h13 u64 perf_reg_abi(struct task_struct *task);
28 static inline u64 perf_reg_abi(struct task_struct *task) perf_reg_abi() argument
H A Dcgroup.h159 * and its descendants contain no task; otherwise, 1. The file also
167 * - cpuset: a task can be moved into an empty cpuset, and again it takes
239 char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
252 * @task: the loop cursor
255 #define cgroup_taskset_for_each(task, tset) \
256 for ((task) = cgroup_taskset_first((tset)); (task); \
257 (task) = cgroup_taskset_next((tset)))
264 * task_css_set_check - obtain a task's css_set with extra access conditions
265 * @task: the task to obtain css_set for
268 * A task's css_set is RCU protected, initialized and exited while holding
270 * and task_lock() while the task is alive. This macro verifies that the
271 * caller is inside proper critical section and returns @task's css_set.
279 #define task_css_set_check(task, __c) \
280 rcu_dereference_check((task)->cgroups, \
283 ((task)->flags & PF_EXITING) || (__c))
285 #define task_css_set_check(task, __c) \
286 rcu_dereference((task)->cgroups)
290 * task_css_check - obtain css for (task, subsys) w/ extra access conds
291 * @task: the target task
295 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The
298 #define task_css_check(task, subsys_id, __c) \
299 task_css_set_check((task), (__c))->subsys[(subsys_id)]
302 * task_css_set - obtain a task's css_set
303 * @task: the task to obtain css_set for
307 static inline struct css_set *task_css_set(struct task_struct *task) task_css_set() argument
309 return task_css_set_check(task, false); task_css_set()
313 * task_css - obtain css for (task, subsys)
314 * @task: the target task
319 static inline struct cgroup_subsys_state *task_css(struct task_struct *task, task_css() argument
322 return task_css_check(task, subsys_id, false); task_css()
326 * task_css_is_root - test whether a task belongs to the root css
327 * @task: the target task
330 * Test whether @task belongs to the root css on the specified subsystem.
333 static inline bool task_css_is_root(struct task_struct *task, int subsys_id) task_css_is_root() argument
335 return task_css_check(task, subsys_id, true) == task_css_is_root()
339 static inline struct cgroup *task_cgroup(struct task_struct *task, task_cgroup() argument
342 return task_css(task, subsys_id)->cgroup; task_cgroup()
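The cgroup.h hits above define both the taskset iterator used during migration and the task_css*() accessors. A minimal sketch in the shape of a controller attach callback, assuming the 4.1-era (css, tset) callback signature and an invented function name:

    #include <linux/cgroup.h>
    #include <linux/sched.h>
    #include <linux/printk.h>

    /* Walk every task being migrated in this attach operation, as
     * cgroup_taskset_for_each() above is meant to be used. */
    static void example_attach(struct cgroup_subsys_state *css,
                               struct cgroup_taskset *tset)
    {
            struct task_struct *task;

            cgroup_taskset_for_each(task, tset)
                    pr_info("migrating %s[%d]\n", task->comm, task->pid);
    }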
H A Dpid.h86 extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type);
91 extern void attach_pid(struct task_struct *task, enum pid_type);
92 extern void detach_pid(struct task_struct *task, enum pid_type);
93 extern void change_pid(struct task_struct *task, enum pid_type,
129 * ns_of_pid() is expected to be called for a process (task) that has
175 #define do_each_pid_task(pid, type, task) \
178 hlist_for_each_entry_rcu((task), \
185 #define while_each_pid_task(pid, type, task) \
191 #define do_each_pid_thread(pid, type, task) \
192 do_each_pid_task(pid, type, task) { \
193 struct task_struct *tg___ = task; \
196 #define while_each_pid_thread(pid, type, task) \
197 } while_each_thread(tg___, task); \
198 task = tg___; \
199 } while_each_pid_task(pid, type, task)
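The do_each_pid_task()/while_each_pid_task() pair above iterates every task attached to a struct pid for a given pid type, using RCU-protected hlist traversal. A minimal sketch (invented helper name) assuming the caller already holds rcu_read_lock() or tasklist_lock:

    #include <linux/pid.h>
    #include <linux/sched.h>
    #include <linux/printk.h>

    /* List every task attached to @pid as a process-group id. */
    static void example_dump_pgrp(struct pid *pid)
    {
            struct task_struct *task;

            do_each_pid_task(pid, PIDTYPE_PGID, task) {
                    pr_info("pgrp member %s[%d]\n", task->comm, task->pid);
            } while_each_pid_task(pid, PIDTYPE_PGID, task);
    }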
H A Dtracehook.h81 * tracehook_report_syscall_entry - task is about to attempt a system call
82 * @regs: user register state of current task
85 * current task has just entered the kernel for a system call.
92 * made. If @task ever returns to user mode after this, its register state
106 * tracehook_report_syscall_exit - task has just finished a system call
107 * @regs: user register state of current task
111 * current task has just finished an attempted system call. Full
153 * @task: task that will call tracehook_notify_resume()
155 * Calling this arranges that @task will call tracehook_notify_resume()
160 static inline void set_notify_resume(struct task_struct *task) set_notify_resume() argument
163 if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME)) set_notify_resume()
164 kick_process(task); set_notify_resume()
170 * @regs: user-mode registers of @current task
186 * hlist_add_head(task->task_works); tracehook_notify_resume()
H A Dposix-timers.h24 struct task_struct *task; member in struct:cpu_timer_list
128 void run_posix_cpu_timers(struct task_struct *task);
129 void posix_cpu_timers_exit(struct task_struct *task);
130 void posix_cpu_timers_exit_group(struct task_struct *task);
134 void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
139 void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new);
H A Ddebug_locks.h52 extern void debug_show_held_locks(struct task_struct *task);
60 static inline void debug_show_held_locks(struct task_struct *task) debug_show_held_locks() argument
H A Doom.h72 extern enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
87 static inline bool task_will_free_mem(struct task_struct *task) task_will_free_mem() argument
94 return (task->flags & PF_EXITING) && task_will_free_mem()
95 !(task->signal->flags & SIGNAL_GROUP_COREDUMP); task_will_free_mem()
/linux-4.1.27/scripts/gdb/linux/
H A Dtasks.py4 # task & thread tools
42 for task in task_lists():
43 if int(task['pid']) == pid:
44 return task
49 """Find Linux task by PID and return the task_struct variable.
58 task = get_task_by_pid(pid)
59 if task:
60 return task.dereference()
62 raise gdb.GdbError("No task of PID " + str(pid))
73 def get_thread_info(task):
80 thread_info_addr = task.address + ia64_task_size
83 thread_info = task['stack'].cast(thread_info_ptr_type)
88 """Calculate Linux thread_info from task variable.
96 def invoke(self, task):
97 return get_thread_info(task)
/linux-4.1.27/kernel/
H A Dtask_work.c8 * task_work_add - ask the @task to execute @work->func()
9 * @task: the task which should run the callback
13 * Queue @work for task_work_run() below and notify the @task if @notify.
14 * Fails if the @task is exiting/exited and thus it can't process this @work.
15 * Otherwise @work->func() will be called when the @task returns from kernel
19 * try to wake up the @task.
25 task_work_add(struct task_struct *task, struct callback_head *work, bool notify) task_work_add() argument
30 head = ACCESS_ONCE(task->task_works); task_work_add()
34 } while (cmpxchg(&task->task_works, head, work) != head); task_work_add()
37 set_notify_resume(task); task_work_add()
43 * @task: the task which should execute the work
53 task_work_cancel(struct task_struct *task, task_work_func_t func) task_work_cancel() argument
55 struct callback_head **pprev = &task->task_works; task_work_cancel()
64 raw_spin_lock_irqsave(&task->pi_lock, flags); task_work_cancel()
72 raw_spin_unlock_irqrestore(&task->pi_lock, flags); task_work_cancel()
81 * Called before the task returns to the user-mode or stops, or when
87 struct task_struct *task = current; task_work_run() local
96 work = ACCESS_ONCE(task->task_works); task_work_run()
97 head = !work && (task->flags & PF_EXITING) ? task_work_run()
99 } while (cmpxchg(&task->task_works, work, head) != work); task_work_run()
108 raw_spin_unlock_wait(&task->pi_lock); task_work_run()
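The task_work.c hits show the contract: task_work_add() queues a callback_head onto the target task (failing once the task is exiting), optionally kicks it via set_notify_resume(), and task_work_run() fires the callbacks when that task heads back to user mode. A minimal sketch of a producer, with invented example_* names and an assumed payload structure:

    #include <linux/kernel.h>
    #include <linux/task_work.h>
    #include <linux/sched.h>
    #include <linux/slab.h>
    #include <linux/printk.h>

    struct example_msg {
            struct callback_head work;
            int payload;
    };

    /* Runs in the target task's context on its way back to user mode. */
    static void example_msg_func(struct callback_head *head)
    {
            struct example_msg *msg = container_of(head, struct example_msg, work);

            pr_info("%s[%d] handling payload %d\n",
                    current->comm, current->pid, msg->payload);
            kfree(msg);
    }

    /* Hand @payload to @task; 'true' requests the resume notification,
     * matching the bool third argument in the 4.1 signature above. */
    static int example_send(struct task_struct *task, int payload)
    {
            struct example_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL);

            if (!msg)
                    return -ENOMEM;

            init_task_work(&msg->work, example_msg_func);
            msg->payload = payload;

            if (task_work_add(task, &msg->work, true)) {
                    kfree(msg);             /* target already exiting */
                    return -ESRCH;
            }
            return 0;
    }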
H A Dworkqueue_internal.h37 struct task_struct *task; /* I: worker task */ member in struct:worker
48 * Opaque string set with work_set_desc(). Printed out with task
71 void wq_worker_waking_up(struct task_struct *task, int cpu);
72 struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu);
H A Dcgroup_freezer.c55 static inline struct freezer *task_freezer(struct task_struct *task) task_freezer() argument
57 return css_freezer(task_css(task, freezer_cgrp_id)); task_freezer()
65 bool cgroup_freezing(struct task_struct *task) cgroup_freezing() argument
70 ret = task_freezer(task)->state & CGROUP_FREEZING; cgroup_freezing()
154 * Freezer state changes and task migration are synchronized via
162 struct task_struct *task; freezer_attach() local
169 * For simplicity, when migrating any task to a FROZEN cgroup, we freezer_attach()
177 cgroup_taskset_for_each(task, tset) { cgroup_taskset_for_each()
179 __thaw_task(task); cgroup_taskset_for_each()
181 freeze_task(task); cgroup_taskset_for_each()
198 * @task: a task which has just been forked
200 * @task has just been created and should conform to the current state of
203 * to do anything as freezer_attach() will put @task into the appropriate
206 static void freezer_fork(struct task_struct *task) freezer_fork() argument
212 * freezer. This is safe regardless of race with task migration. freezer_fork()
217 if (task_css_is_root(task, freezer_cgrp_id)) freezer_fork()
223 freezer = task_freezer(task); freezer_fork()
225 freeze_task(task); freezer_fork()
244 * migrated into or out of @css, so we can't verify task states against
252 struct task_struct *task; update_if_frozen() local
276 while ((task = css_task_iter_next(&it))) {
277 if (freezing(task)) {
279 * freezer_should_skip() indicates that the task
284 if (!frozen(task) && !freezer_should_skip(task))
324 struct task_struct *task; freeze_cgroup() local
327 while ((task = css_task_iter_next(&it))) freeze_cgroup()
328 freeze_task(task); freeze_cgroup()
335 struct task_struct *task; unfreeze_cgroup() local
338 while ((task = css_task_iter_next(&it))) unfreeze_cgroup()
339 __thaw_task(task); unfreeze_cgroup()
355 /* also synchronizes against task migration, see freezer_attach() */ freezer_apply_state()
H A Dptrace.c32 * ptrace a task: make the debugger its new parent and
65 * stopped task. However, in this direction, the intermediate RUNNING
100 * traced task running in the stopped group, set the signal __ptrace_unlink()
122 static bool ptrace_freeze_traced(struct task_struct *task) ptrace_freeze_traced() argument
127 if (task->jobctl & JOBCTL_LISTENING) ptrace_freeze_traced()
130 spin_lock_irq(&task->sighand->siglock); ptrace_freeze_traced()
131 if (task_is_traced(task) && !__fatal_signal_pending(task)) { ptrace_freeze_traced()
132 task->state = __TASK_TRACED; ptrace_freeze_traced()
135 spin_unlock_irq(&task->sighand->siglock); ptrace_freeze_traced()
140 static void ptrace_unfreeze_traced(struct task_struct *task) ptrace_unfreeze_traced() argument
142 if (task->state != __TASK_TRACED) ptrace_unfreeze_traced()
145 WARN_ON(!task->ptrace || task->parent != current); ptrace_unfreeze_traced()
147 spin_lock_irq(&task->sighand->siglock); ptrace_unfreeze_traced()
148 if (__fatal_signal_pending(task)) ptrace_unfreeze_traced()
149 wake_up_state(task, __TASK_TRACED); ptrace_unfreeze_traced()
151 task->state = TASK_TRACED; ptrace_unfreeze_traced()
152 spin_unlock_irq(&task->sighand->siglock); ptrace_unfreeze_traced()
219 static int __ptrace_may_access(struct task_struct *task, unsigned int mode) __ptrace_may_access() argument
231 /* May we inspect the given task? __ptrace_may_access()
237 * or halting the specified task is impossible. __ptrace_may_access()
241 if (same_thread_group(task, current)) __ptrace_may_access()
259 tcred = __task_cred(task); __ptrace_may_access()
274 if (task->mm) __ptrace_may_access()
275 dumpable = get_dumpable(task->mm); __ptrace_may_access()
278 !ptrace_has_cap(__task_cred(task)->user_ns, mode)) { __ptrace_may_access()
284 return security_ptrace_access_check(task, mode); __ptrace_may_access()
287 bool ptrace_may_access(struct task_struct *task, unsigned int mode) ptrace_may_access() argument
290 task_lock(task); ptrace_may_access()
291 err = __ptrace_may_access(task, mode); ptrace_may_access()
292 task_unlock(task); ptrace_may_access()
296 static int ptrace_attach(struct task_struct *task, long request, ptrace_attach() argument
314 audit_ptrace(task); ptrace_attach()
317 if (unlikely(task->flags & PF_KTHREAD)) ptrace_attach()
319 if (same_thread_group(task, current)) ptrace_attach()
328 if (mutex_lock_interruptible(&task->signal->cred_guard_mutex)) ptrace_attach()
331 task_lock(task); ptrace_attach()
332 retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS); ptrace_attach()
333 task_unlock(task); ptrace_attach()
339 if (unlikely(task->exit_state)) ptrace_attach()
341 if (task->ptrace) ptrace_attach()
347 if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE)) ptrace_attach()
350 task->ptrace = flags; ptrace_attach()
352 __ptrace_link(task, current); ptrace_attach()
356 send_sig_info(SIGSTOP, SEND_SIG_FORCED, task); ptrace_attach()
358 spin_lock(&task->sighand->siglock); ptrace_attach()
361 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and ptrace_attach()
377 if (task_is_stopped(task) && ptrace_attach()
378 task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING)) ptrace_attach()
379 signal_wake_up_state(task, __TASK_STOPPED); ptrace_attach()
381 spin_unlock(&task->sighand->siglock); ptrace_attach()
387 mutex_unlock(&task->signal->cred_guard_mutex); ptrace_attach()
390 wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, ptrace_attach()
392 proc_ptrace_connector(task, PTRACE_ATTACH); ptrace_attach()
442 * Unlink a traced task, and clean it up if it was a traced zombie.
792 static int ptrace_regset(struct task_struct *task, int req, unsigned int type, ptrace_regset() argument
795 const struct user_regset_view *view = task_user_regset_view(task); ptrace_regset()
807 return copy_regset_to_user(task, view, regset_no, 0, ptrace_regset()
810 return copy_regset_from_user(task, view, regset_no, 0, ptrace_regset()
H A Dcapability.c285 * has_ns_capability - Does a task have a capability in a specific user ns
286 * @t: The task in question
290 * Return true if the specified task has the given superior capability
293 * Note that this does not set PF_SUPERPRIV on the task.
308 * has_capability - Does a task have a capability in init_user_ns
309 * @t: The task in question
312 * Return true if the specified task has the given superior capability
315 * Note that this does not set PF_SUPERPRIV on the task.
323 * has_ns_capability_noaudit - Does a task have a capability (unaudited)
325 * @t: The task in question
329 * Return true if the specified task has the given superior capability
333 * Note that this does not set PF_SUPERPRIV on the task.
348 * has_capability_noaudit - Does a task have a capability (unaudited) in the
350 * @t: The task in question
353 * Return true if the specified task has the given superior capability
357 * Note that this does not set PF_SUPERPRIV on the task.
365 * ns_capable - Determine if the current task has a superior capability in effect
369 * Return true if the current task has the given superior capability currently
372 * This sets PF_SUPERPRIV on the task if the capability is available on the
392 * capable - Determine if the current task has a superior capability in effect
395 * Return true if the current task has the given superior capability currently
398 * This sets PF_SUPERPRIV on the task if the capability is available on the
414 * Return true if task that opened the file had a capability in effect
438 * Return true if the current task has the given capability targeted at
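The capability.c kerneldoc draws one repeated distinction: capable()/ns_capable() check the current task and set PF_SUPERPRIV on success, while has_capability() and friends only ask whether some other task holds the capability. A minimal sketch of a permission check using both; the policy and the example_* name are purely illustrative:

    #include <linux/capability.h>
    #include <linux/errno.h>
    #include <linux/sched.h>

    static int example_may_reconfigure(struct task_struct *peer)
    {
            if (capable(CAP_SYS_ADMIN))
                    return 0;       /* current task is privileged (sets PF_SUPERPRIV) */
            if (has_capability(peer, CAP_NET_ADMIN))
                    return 0;       /* illustrative: defer to a privileged peer task */
            return -EPERM;
    }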
H A Dkthread.c128 * @task: kthread task in question
130 * Return the data value specified when kthread @task was created.
131 * The caller is responsible for ensuring the validity of @task when
134 void *kthread_data(struct task_struct *task) kthread_data() argument
136 return to_kthread(task)->data; kthread_data()
141 * @task: possible kthread task in question
143 * @task could be a kthread task. Return the data value specified when it
144 * was created if accessible. If @task isn't a kthread task or its data is
146 * that @task itself is safe to dereference.
148 void *probe_kthread_data(struct task_struct *task) probe_kthread_data() argument
150 struct kthread *kthread = to_kthread(task); probe_kthread_data()
213 /* called from do_fork() to get node information for about to be created task */ tsk_fork_get_node()
273 struct task_struct *task; kthread_create_on_node() local
308 task = create->result; kthread_create_on_node()
309 if (!IS_ERR(task)) { kthread_create_on_node()
314 vsnprintf(task->comm, sizeof(task->comm), namefmt, args); kthread_create_on_node()
320 sched_setscheduler_nocheck(task, SCHED_NORMAL, &param); kthread_create_on_node()
321 set_cpus_allowed_ptr(task, cpu_all_mask); kthread_create_on_node()
324 return task; kthread_create_on_node()
335 /* It's safe because the task is inactive. */ __kthread_bind()
388 * until the task has left the park code. So if we'd __kthread_unpark()
527 worker->task = NULL; __init_kthread_worker()
551 WARN_ON(worker->task); kthread_worker_fn()
552 worker->task = current; kthread_worker_fn()
559 worker->task = NULL; kthread_worker_fn()
594 if (!worker->current_work && likely(worker->task)) insert_kthread_work()
595 wake_up_process(worker->task); insert_kthread_work()
603 * Queue @work to work processor @task for async execution. @task
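The kthread.c hits cover both creation (kthread_create_on_node() giving the task its name and default scheduling) and the per-thread data returned by kthread_data(). A minimal sketch of the usual consumer pattern, with invented example_* names: a thread function that loops until kthread_stop(), started with the kthread_run() convenience wrapper:

    #include <linux/kthread.h>
    #include <linux/delay.h>

    /* The @data pointer given at creation time arrives here as the
     * argument, and is also retrievable via kthread_data(). */
    static int example_thread_fn(void *data)
    {
            while (!kthread_should_stop()) {
                    /* ... one unit of work on @data ... */
                    msleep(100);
            }
            return 0;
    }

    static struct task_struct *example_start(void *data)
    {
            return kthread_run(example_thread_fn, data, "example/%d", 0);
    }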
H A Dcred.c36 * The initial credentials for the initial task
149 * Clean up a task's credentials when it exits
173 * get_task_cred - Get another task's objective credentials
174 * @task: The task to query
176 * Get the objective credentials of a task, pinning them so that they can't go
177 * away. Accessing a task's credentials directly is not permitted.
179 * The caller must also make sure task doesn't get deleted, either by holding a
180 * ref on task or by holding tasklist_lock to prevent it from being unlinked.
182 const struct cred *get_task_cred(struct task_struct *task) get_task_cred() argument
189 cred = __task_cred((task)); get_task_cred()
227 * Prepare a new set of task credentials for modification. A task's creds
240 struct task_struct *task = current; prepare_creds() local
252 old = task->cred; prepare_creds()
404 * commit_creds - Install new credentials upon the current task
407 * Install a new set of credentials to the current task, using RCU to replace
419 struct task_struct *task = current; commit_creds() local
420 const struct cred *old = task->real_cred; commit_creds()
426 BUG_ON(task->cred != old); commit_creds()
442 if (task->mm) commit_creds()
443 set_dumpable(task->mm, suid_dumpable); commit_creds()
444 task->pdeath_signal = 0; commit_creds()
450 key_fsuid_changed(task); commit_creds()
452 key_fsgid_changed(task); commit_creds()
461 rcu_assign_pointer(task->real_cred, new); commit_creds()
462 rcu_assign_pointer(task->cred, new); commit_creds()
472 proc_id_connector(task, PROC_EVENT_UID); commit_creds()
478 proc_id_connector(task, PROC_EVENT_GID); commit_creds()
488 * abort_creds - Discard a set of credentials and unlock the current task
492 * current task.
576 * override a task's own credentials so that work can be done on behalf of that
577 * task that requires a different subjective context.
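The cred.c comments describe the prepare/commit cycle for changing a task's own credentials: copy them with prepare_creds(), edit the copy, then either install it with commit_creds() or discard it with abort_creds(). A minimal sketch of that cycle (the specific fsuid change and the helper name are only illustrative):

    #include <linux/cred.h>
    #include <linux/errno.h>

    static int example_set_fsuid(kuid_t fsuid)
    {
            struct cred *new = prepare_creds();

            if (!new)
                    return -ENOMEM;

            new->fsuid = fsuid;
            return commit_creds(new);       /* consumes @new on success */
    }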
H A Dfreezer.c32 * freezing_slow_path - slow path for testing whether a task needs to be frozen
33 * @p: task to be tested
87 * Restore saved task state before returning. The mb'd version __refrigerator()
89 * synchronization which depends on ordered task state change. __refrigerator()
108 * freeze_task - send a freeze request to given task
109 * @p: task to send the request to
124 * will result in an extra wakeup being sent to the task. It does not freeze_task()
128 * freezer_should_skip() sees !PF_FREEZE_SKIP and freezes the task freeze_task()
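The freezer.c comments describe the sending side: freeze_task() flags a task and expects it to park itself in __refrigerator(). On the receiving side, a freezable kernel thread cooperates by declaring itself freezable and polling try_to_freeze(). A minimal sketch of that receiving side, with an invented function name:

    #include <linux/freezer.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    /* set_freezable() clears PF_NOFREEZE; try_to_freeze() enters
     * __refrigerator() whenever a freeze request is pending. */
    static int example_freezable_thread(void *unused)
    {
            set_freezable();

            while (!kthread_should_stop()) {
                    try_to_freeze();
                    /* ... do freezable work ... */
                    schedule_timeout_interruptible(HZ);
            }
            return 0;
    }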
/linux-4.1.27/drivers/scsi/aic94xx/
H A Daic94xx_task.c54 static int asd_map_scatterlist(struct sas_task *task, asd_map_scatterlist() argument
58 struct asd_ascb *ascb = task->lldd_task; asd_map_scatterlist()
63 if (task->data_dir == PCI_DMA_NONE) asd_map_scatterlist()
66 if (task->num_scatter == 0) { asd_map_scatterlist()
67 void *p = task->scatter; asd_map_scatterlist()
69 task->total_xfer_len, asd_map_scatterlist()
70 task->data_dir); asd_map_scatterlist()
72 sg_arr[0].size = cpu_to_le32(task->total_xfer_len); asd_map_scatterlist()
79 if (sas_protocol_ata(task->task_proto)) asd_map_scatterlist()
80 num_sg = task->num_scatter; asd_map_scatterlist()
82 num_sg = pci_map_sg(asd_ha->pcidev, task->scatter, asd_map_scatterlist()
83 task->num_scatter, task->data_dir); asd_map_scatterlist()
97 for_each_sg(task->scatter, sc, num_sg, i) { asd_map_scatterlist()
106 for_each_sg(task->scatter, sc, 2, i) { asd_map_scatterlist()
118 for_each_sg(task->scatter, sc, num_sg, i) { asd_map_scatterlist()
128 if (sas_protocol_ata(task->task_proto)) asd_map_scatterlist()
129 pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter, asd_map_scatterlist()
130 task->data_dir); asd_map_scatterlist()
137 struct sas_task *task = ascb->uldd_task; asd_unmap_scatterlist() local
139 if (task->data_dir == PCI_DMA_NONE) asd_unmap_scatterlist()
142 if (task->num_scatter == 0) { asd_unmap_scatterlist()
145 pci_unmap_single(ascb->ha->pcidev, dma, task->total_xfer_len, asd_unmap_scatterlist()
146 task->data_dir); asd_unmap_scatterlist()
151 if (task->task_proto != SAS_PROTOCOL_STP) asd_unmap_scatterlist()
152 pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter, asd_unmap_scatterlist()
153 task->data_dir); asd_unmap_scatterlist()
162 struct sas_task *task = ascb->uldd_task; asd_get_response_tasklet() local
163 struct task_status_struct *ts = &task->task_status; asd_get_response_tasklet()
190 if (task->task_proto == SAS_PROTOCOL_SSP) { asd_get_response_tasklet()
196 sas_ssp_task_response(&asd_ha->pcidev->dev, task, iu); asd_get_response_tasklet()
215 struct sas_task *task = ascb->uldd_task; asd_task_tasklet_complete() local
216 struct task_status_struct *ts = &task->task_status; asd_task_tasklet_complete()
327 switch (task->task_proto) { asd_task_tasklet_complete()
341 spin_lock_irqsave(&task->task_state_lock, flags); asd_task_tasklet_complete()
342 task->task_state_flags &= ~SAS_TASK_STATE_PENDING; asd_task_tasklet_complete()
343 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; asd_task_tasklet_complete()
344 task->task_state_flags |= SAS_TASK_STATE_DONE; asd_task_tasklet_complete()
345 if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) { asd_task_tasklet_complete()
347 spin_unlock_irqrestore(&task->task_state_lock, flags); asd_task_tasklet_complete()
348 ASD_DPRINTK("task 0x%p done with opcode 0x%x resp 0x%x " asd_task_tasklet_complete()
350 task, opcode, ts->resp, ts->stat); asd_task_tasklet_complete()
354 spin_unlock_irqrestore(&task->task_state_lock, flags); asd_task_tasklet_complete()
355 task->lldd_task = NULL; asd_task_tasklet_complete()
358 task->task_done(task); asd_task_tasklet_complete()
364 static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task, asd_build_ata_ascb() argument
367 struct domain_device *dev = task->dev; asd_build_ata_ascb()
374 if (unlikely(task->ata_task.device_control_reg_update)) asd_build_ata_ascb()
385 scb->ata_task.total_xfer_len = cpu_to_le32(task->total_xfer_len); asd_build_ata_ascb()
386 scb->ata_task.fis = task->ata_task.fis; asd_build_ata_ascb()
387 if (likely(!task->ata_task.device_control_reg_update)) asd_build_ata_ascb()
391 memcpy(scb->ata_task.atapi_packet, task->ata_task.atapi_packet, asd_build_ata_ascb()
397 if (likely(!task->ata_task.device_control_reg_update)) { asd_build_ata_ascb()
399 if (task->ata_task.dma_xfer) asd_build_ata_ascb()
401 if (task->ata_task.use_ncq && asd_build_ata_ascb()
404 flags |= data_dir_flags[task->data_dir]; asd_build_ata_ascb()
407 scb->ata_task.retry_count = task->ata_task.retry_count; asd_build_ata_ascb()
410 if (task->ata_task.set_affil_pol) asd_build_ata_ascb()
412 if (task->ata_task.stp_affil_pol) asd_build_ata_ascb()
418 if (likely(!task->ata_task.device_control_reg_update)) asd_build_ata_ascb()
419 res = asd_map_scatterlist(task, scb->ata_task.sg_element, asd_build_ata_ascb()
432 static int asd_build_smp_ascb(struct asd_ascb *ascb, struct sas_task *task, asd_build_smp_ascb() argument
436 struct domain_device *dev = task->dev; asd_build_smp_ascb()
439 pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_req, 1, asd_build_smp_ascb()
441 pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_resp, 1, asd_build_smp_ascb()
451 cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req)); asd_build_smp_ascb()
453 cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4); asd_build_smp_ascb()
456 cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp)); asd_build_smp_ascb()
458 cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4); asd_build_smp_ascb()
471 struct sas_task *task = a->uldd_task; asd_unbuild_smp_ascb() local
473 BUG_ON(!task); asd_unbuild_smp_ascb()
474 pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_req, 1, asd_unbuild_smp_ascb()
476 pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_resp, 1, asd_unbuild_smp_ascb()
482 static int asd_build_ssp_ascb(struct asd_ascb *ascb, struct sas_task *task, asd_build_ssp_ascb() argument
485 struct domain_device *dev = task->dev; asd_build_ssp_ascb()
495 scb->ssp_task.total_xfer_len = cpu_to_le32(task->total_xfer_len); asd_build_ssp_ascb()
503 memcpy(scb->ssp_task.ssp_cmd.lun, task->ssp_task.LUN, 8); asd_build_ssp_ascb()
504 if (task->ssp_task.enable_first_burst) asd_build_ssp_ascb()
506 scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_prio << 3); asd_build_ssp_ascb()
507 scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_attr & 7); asd_build_ssp_ascb()
508 memcpy(scb->ssp_task.ssp_cmd.cdb, task->ssp_task.cmd->cmnd, asd_build_ssp_ascb()
509 task->ssp_task.cmd->cmd_len); asd_build_ssp_ascb()
514 scb->ssp_task.data_dir = data_dir_flags[task->data_dir]; asd_build_ssp_ascb()
519 res = asd_map_scatterlist(task, scb->ssp_task.sg_element, gfp_flags); asd_build_ssp_ascb()
546 int asd_execute_task(struct sas_task *task, gfp_t gfp_flags) asd_execute_task() argument
550 struct sas_task *t = task; asd_execute_task()
552 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; asd_execute_task()
H A Daic94xx_tmf.c233 static int asd_clear_nexus_tag(struct sas_task *task) asd_clear_nexus_tag() argument
235 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; asd_clear_nexus_tag()
236 struct asd_ascb *tascb = task->lldd_task; asd_clear_nexus_tag()
240 memcpy(scb->clear_nexus.ssp_task.lun, task->ssp_task.LUN, 8); asd_clear_nexus_tag()
242 if (task->dev->tproto) asd_clear_nexus_tag()
244 task->dev->lldd_dev); asd_clear_nexus_tag()
248 static int asd_clear_nexus_index(struct sas_task *task) asd_clear_nexus_index() argument
250 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; asd_clear_nexus_index()
251 struct asd_ascb *tascb = task->lldd_task; asd_clear_nexus_index()
255 if (task->dev->tproto) asd_clear_nexus_index()
257 task->dev->lldd_dev); asd_clear_nexus_index()
343 static int asd_clear_nexus(struct sas_task *task) asd_clear_nexus() argument
347 struct asd_ascb *tascb = task->lldd_task; asd_clear_nexus()
353 ASD_DPRINTK("task not done, clearing nexus\n"); asd_clear_nexus()
355 res = asd_clear_nexus_tag(task); asd_clear_nexus()
357 res = asd_clear_nexus_index(task); asd_clear_nexus()
362 spin_lock_irqsave(&task->task_state_lock, flags); asd_clear_nexus()
365 if (task->task_state_flags & SAS_TASK_STATE_DONE) asd_clear_nexus()
367 spin_unlock_irqrestore(&task->task_state_lock, flags); asd_clear_nexus()
374 * @task: the task to be aborted
376 * Before calling ABORT TASK the task state flags should be ORed with
386 * task->task_state_flags, and then the return value of ABORT TASK.
388 * If the task has task state bit SAS_TASK_STATE_DONE set, then the
389 * task was completed successfully prior to it being aborted. The
390 * caller of ABORT TASK has responsibility to call task->task_done()
391 * xor free the task, depending on their framework. The return code
396 * the task was aborted successfully. The caller of
397 * ABORT TASK has responsibility to call task->task_done()
398 * to finish the task, xor free the task depending on their
401 * the ABORT TASK returned some kind of error. The task
405 int asd_abort_task(struct sas_task *task) asd_abort_task() argument
407 struct asd_ascb *tascb = task->lldd_task; asd_abort_task()
420 spin_lock_irqsave(&task->task_state_lock, flags); asd_abort_task()
421 if (task->task_state_flags & SAS_TASK_STATE_DONE) { asd_abort_task()
422 spin_unlock_irqrestore(&task->task_state_lock, flags); asd_abort_task()
424 ASD_DPRINTK("%s: task 0x%p done\n", __func__, task); asd_abort_task()
427 spin_unlock_irqrestore(&task->task_state_lock, flags); asd_abort_task()
438 switch (task->task_proto) { asd_abort_task()
445 scb->abort_task.proto_conn_rate |= task->dev->linkrate; asd_abort_task()
453 if (task->task_proto == SAS_PROTOCOL_SSP) { asd_abort_task()
456 task->dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); asd_abort_task()
458 task->dev->port->ha->hashed_sas_addr, asd_abort_task()
462 memcpy(scb->abort_task.ssp_task.lun, task->ssp_task.LUN, 8); asd_abort_task()
469 (u16)(unsigned long)task->dev->lldd_dev); asd_abort_task()
484 spin_lock_irqsave(&task->task_state_lock, flags); asd_abort_task()
485 if (task->task_state_flags & SAS_TASK_STATE_DONE) { asd_abort_task()
486 spin_unlock_irqrestore(&task->task_state_lock, flags); asd_abort_task()
488 ASD_DPRINTK("%s: task 0x%p done\n", __func__, task); asd_abort_task()
491 spin_unlock_irqrestore(&task->task_state_lock, flags); asd_abort_task()
494 /* The task to be aborted has been sent to the device. asd_abort_task()
497 res = asd_clear_nexus(task); asd_abort_task()
511 res = asd_clear_nexus(task); asd_abort_task()
515 /* The task hasn't been sent to the device xor asd_abort_task()
527 spin_lock_irqsave(&task->task_state_lock, flags); asd_abort_task()
530 if (task->task_state_flags & SAS_TASK_STATE_DONE) asd_abort_task()
532 spin_unlock_irqrestore(&task->task_state_lock, flags); asd_abort_task()
547 task->lldd_task = NULL; asd_abort_task()
551 ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res); asd_abort_task()
556 ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res); asd_abort_task()
565 * @index: the transaction context of the task to be queried if QT TMF
695 * task: pointer to sas_task struct of interest
697 * Returns: TMF_RESP_FUNC_COMPLETE if the task is not in the task set,
698 * or TMF_RESP_FUNC_SUCC if the task is in the task set.
700 * Normally the management layer sets the task to aborted state,
701 * and then calls query task and then abort task.
703 int asd_query_task(struct sas_task *task) asd_query_task() argument
705 struct asd_ascb *ascb = task->lldd_task; asd_query_task()
710 return asd_initiate_ssp_tmf(task->dev, task->ssp_task.LUN, asd_query_task()
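The aic94xx_tmf.c comments lay out the ABORT TASK contract: the caller first ORs SAS_TASK_STATE_ABORTED into task_state_flags, then issues the abort, and afterwards checks SAS_TASK_STATE_DONE to decide whether the task completed on its own before the abort took effect. A minimal caller-side sketch of that contract; in practice libsas error handling reaches asd_abort_task() through the ->lldd_abort_task hook, so the direct call and the example_* name are only illustrative:

    #include <scsi/libsas.h>
    #include <linux/printk.h>
    #include "aic94xx.h"            /* driver-internal asd_abort_task() */

    static int example_abort(struct sas_task *task)
    {
            unsigned long flags;
            int res;

            spin_lock_irqsave(&task->task_state_lock, flags);
            task->task_state_flags |= SAS_TASK_STATE_ABORTED;
            spin_unlock_irqrestore(&task->task_state_lock, flags);

            res = asd_abort_task(task);

            spin_lock_irqsave(&task->task_state_lock, flags);
            if (task->task_state_flags & SAS_TASK_STATE_DONE)
                    pr_info("task %p completed before the abort\n", task);
            spin_unlock_irqrestore(&task->task_state_lock, flags);

            return res;
    }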
/linux-4.1.27/fs/proc/
H A Dnamespaces.c37 struct task_struct *task; proc_ns_follow_link() local
41 task = get_proc_task(inode); proc_ns_follow_link()
42 if (!task) proc_ns_follow_link()
45 if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) { proc_ns_follow_link()
46 error = ns_get_path(&ns_path, task, ns_ops); proc_ns_follow_link()
50 put_task_struct(task); proc_ns_follow_link()
58 struct task_struct *task; proc_ns_readlink() local
62 task = get_proc_task(inode); proc_ns_readlink()
63 if (!task) proc_ns_readlink()
66 if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) { proc_ns_readlink()
67 res = ns_get_name(name, sizeof(name), task, ns_ops); proc_ns_readlink()
71 put_task_struct(task); proc_ns_readlink()
82 struct dentry *dentry, struct task_struct *task, const void *ptr) proc_ns_instantiate()
88 inode = proc_pid_make_inode(dir->i_sb, task); proc_ns_instantiate()
108 struct task_struct *task = get_proc_task(file_inode(file)); proc_ns_dir_readdir() local
111 if (!task) proc_ns_dir_readdir()
123 proc_ns_instantiate, task, ops)) proc_ns_dir_readdir()
129 put_task_struct(task); proc_ns_dir_readdir()
142 struct task_struct *task = get_proc_task(dir); proc_ns_dir_lookup() local
148 if (!task) proc_ns_dir_lookup()
161 error = proc_ns_instantiate(dir, dentry, task, *entry); proc_ns_dir_lookup()
163 put_task_struct(task); proc_ns_dir_lookup()
81 proc_ns_instantiate(struct inode *dir, struct dentry *dentry, struct task_struct *task, const void *ptr) proc_ns_instantiate() argument
H A Darray.c110 * The task state array is a strange "bitmap" of
335 static void task_cpus_allowed(struct seq_file *m, struct task_struct *task) task_cpus_allowed() argument
338 cpumask_pr_args(&task->cpus_allowed)); task_cpus_allowed()
340 cpumask_pr_args(&task->cpus_allowed)); task_cpus_allowed()
344 struct pid *pid, struct task_struct *task) proc_pid_status()
346 struct mm_struct *mm = get_task_mm(task); proc_pid_status()
348 task_name(m, task); proc_pid_status()
349 task_state(m, ns, pid, task); proc_pid_status()
355 task_sig(m, task); proc_pid_status()
356 task_cap(m, task); proc_pid_status()
357 task_seccomp(m, task); proc_pid_status()
358 task_cpus_allowed(m, task); proc_pid_status()
359 cpuset_task_status_allowed(m, task); proc_pid_status()
360 task_context_switch_counts(m, task); proc_pid_status()
365 struct pid *pid, struct task_struct *task, int whole) do_task_stat()
382 char tcomm[sizeof(task->comm)]; do_task_stat()
385 state = *get_task_state(task); do_task_stat()
387 permitted = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS | PTRACE_MODE_NOAUDIT); do_task_stat()
388 mm = get_task_mm(task); do_task_stat()
392 eip = KSTK_EIP(task); do_task_stat()
393 esp = KSTK_ESP(task); do_task_stat()
397 get_task_comm(tcomm, task); do_task_stat()
404 if (lock_task_sighand(task, &flags)) { do_task_stat()
405 struct signal_struct *sig = task->signal; do_task_stat()
414 num_threads = get_nr_threads(task); do_task_stat()
415 collect_sigign_sigcatch(task, &sigign, &sigcatch); do_task_stat()
426 struct task_struct *t = task; do_task_stat()
431 } while_each_thread(task, t); do_task_stat()
435 thread_group_cputime_adjusted(task, &utime, &stime); do_task_stat()
439 sid = task_session_nr_ns(task, ns); do_task_stat()
440 ppid = task_tgid_nr_ns(task->real_parent, ns); do_task_stat()
441 pgid = task_pgrp_nr_ns(task, ns); do_task_stat()
443 unlock_task_sighand(task, &flags); do_task_stat()
447 wchan = get_wchan(task); do_task_stat()
449 min_flt = task->min_flt; do_task_stat()
450 maj_flt = task->maj_flt; do_task_stat()
451 task_cputime_adjusted(task, &utime, &stime); do_task_stat()
452 gtime = task_gtime(task); do_task_stat()
457 priority = task_prio(task); do_task_stat()
458 nice = task_nice(task); do_task_stat()
461 start_time = nsec_to_clock_t(task->real_start_time); do_task_stat()
469 seq_put_decimal_ull(m, ' ', task->flags); do_task_stat()
495 seq_put_decimal_ull(m, ' ', task->pending.signal.sig[0] & 0x7fffffffUL); do_task_stat()
496 seq_put_decimal_ull(m, ' ', task->blocked.sig[0] & 0x7fffffffUL); do_task_stat()
514 seq_put_decimal_ll(m, ' ', task->exit_signal); do_task_stat()
515 seq_put_decimal_ll(m, ' ', task_cpu(task)); do_task_stat()
516 seq_put_decimal_ull(m, ' ', task->rt_priority); do_task_stat()
517 seq_put_decimal_ull(m, ' ', task->policy); do_task_stat()
518 seq_put_decimal_ull(m, ' ', delayacct_blkio_ticks(task)); do_task_stat()
534 seq_put_decimal_ll(m, ' ', task->exit_code); do_task_stat()
545 struct pid *pid, struct task_struct *task) proc_tid_stat()
547 return do_task_stat(m, ns, pid, task, 0); proc_tid_stat()
551 struct pid *pid, struct task_struct *task) proc_tgid_stat()
553 return do_task_stat(m, ns, pid, task, 1); proc_tgid_stat()
557 struct pid *pid, struct task_struct *task) proc_pid_statm()
560 struct mm_struct *mm = get_task_mm(task); proc_pid_statm()
588 struct task_struct *start, *task; get_children_pid() local
602 task = pid_task(pid_prev, PIDTYPE_PID); get_children_pid()
603 if (task && task->real_parent == start && get_children_pid()
604 !(list_empty(&task->sibling))) { get_children_pid()
605 if (list_is_last(&task->sibling, &start->children)) get_children_pid()
607 task = list_first_entry(&task->sibling, get_children_pid()
609 pid = get_pid(task_pid(task)); get_children_pid()
629 list_for_each_entry(task, &start->children, sibling) { get_children_pid()
631 pid = get_pid(task_pid(task)); get_children_pid()
343 proc_pid_status(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) proc_pid_status() argument
364 do_task_stat(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task, int whole) do_task_stat() argument
544 proc_tid_stat(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) proc_tid_stat() argument
550 proc_tgid_stat(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) proc_tgid_stat() argument
556 proc_pid_statm(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) proc_pid_statm() argument
H A Dbase.c104 * in /proc for a task before it execs a suid executable.
157 static int get_task_root(struct task_struct *task, struct path *root) get_task_root() argument
161 task_lock(task); get_task_root()
162 if (task->fs) { get_task_root()
163 get_fs_root(task->fs, root); get_task_root()
166 task_unlock(task); get_task_root()
172 struct task_struct *task = get_proc_task(d_inode(dentry)); proc_cwd_link() local
175 if (task) { proc_cwd_link()
176 task_lock(task); proc_cwd_link()
177 if (task->fs) { proc_cwd_link()
178 get_fs_pwd(task->fs, path); proc_cwd_link()
181 task_unlock(task); proc_cwd_link()
182 put_task_struct(task); proc_cwd_link()
189 struct task_struct *task = get_proc_task(d_inode(dentry)); proc_root_link() local
192 if (task) { proc_root_link()
193 result = get_task_root(task, path); proc_root_link()
194 put_task_struct(task); proc_root_link()
200 struct pid *pid, struct task_struct *task) proc_pid_cmdline()
207 m->count += get_cmdline(task, m->buf, PAGE_SIZE); proc_pid_cmdline()
212 struct pid *pid, struct task_struct *task) proc_pid_auxv()
214 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ_FSCREDS); proc_pid_auxv()
234 struct pid *pid, struct task_struct *task) proc_pid_wchan()
239 wchan = get_wchan(task); proc_pid_wchan()
241 if (wchan && ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS) proc_pid_wchan()
251 static int lock_trace(struct task_struct *task) lock_trace() argument
253 int err = mutex_lock_killable(&task->signal->cred_guard_mutex); lock_trace()
256 if (!ptrace_may_access(task, PTRACE_MODE_ATTACH_FSCREDS)) { lock_trace()
257 mutex_unlock(&task->signal->cred_guard_mutex); lock_trace()
263 static void unlock_trace(struct task_struct *task) unlock_trace() argument
265 mutex_unlock(&task->signal->cred_guard_mutex); unlock_trace()
273 struct pid *pid, struct task_struct *task) proc_pid_stack()
289 err = lock_trace(task); proc_pid_stack()
291 save_stack_trace_tsk(task, &trace); proc_pid_stack()
297 unlock_trace(task); proc_pid_stack()
310 struct pid *pid, struct task_struct *task) proc_pid_schedstat()
313 (unsigned long long)task->se.sum_exec_runtime, proc_pid_schedstat()
314 (unsigned long long)task->sched_info.run_delay, proc_pid_schedstat()
315 task->sched_info.pcount); proc_pid_schedstat()
326 struct task_struct *task = get_proc_task(inode); lstats_show_proc() local
328 if (!task) lstats_show_proc()
332 struct latency_record *lr = &task->latency_record[i]; lstats_show_proc()
349 put_task_struct(task); lstats_show_proc()
361 struct task_struct *task = get_proc_task(file_inode(file)); lstats_write() local
363 if (!task) lstats_write()
365 clear_all_latency_tracing(task); lstats_write()
366 put_task_struct(task); lstats_write()
382 struct pid *pid, struct task_struct *task) proc_oom_score()
388 if (pid_alive(task)) proc_oom_score()
389 points = oom_badness(task, NULL, NULL, totalpages) * proc_oom_score()
423 struct pid *pid, struct task_struct *task) proc_pid_limits()
430 if (!lock_task_sighand(task, &flags)) proc_pid_limits()
432 memcpy(rlim, task->signal->rlim, sizeof(struct rlimit) * RLIM_NLIMITS); proc_pid_limits()
433 unlock_task_sighand(task, &flags); proc_pid_limits()
465 struct pid *pid, struct task_struct *task) proc_pid_syscall()
471 res = lock_trace(task); proc_pid_syscall()
475 if (task_current_syscall(task, &nr, args, 6, &sp, &pc)) proc_pid_syscall()
485 unlock_trace(task); proc_pid_syscall()
498 struct task_struct *task; proc_fd_access_allowed() local
500 /* Allow access to a task's file descriptors if it is us or we proc_fd_access_allowed()
504 task = get_proc_task(inode); proc_fd_access_allowed()
505 if (task) { proc_fd_access_allowed()
506 allowed = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS); proc_fd_access_allowed()
507 put_task_struct(task); proc_fd_access_allowed()
530 * May current process learn task's sched/cmdline info (for hide_pid_min=1)
534 struct task_struct *task, has_pid_permissions()
541 return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS); has_pid_permissions()
548 struct task_struct *task; proc_pid_permission() local
551 task = get_proc_task(inode); proc_pid_permission()
552 if (!task) proc_pid_permission()
554 has_perms = has_pid_permissions(pid, task, 1); proc_pid_permission()
555 put_task_struct(task); proc_pid_permission()
584 struct task_struct *task; proc_single_show() local
589 task = get_pid_task(pid, PIDTYPE_PID); proc_single_show()
590 if (!task) proc_single_show()
593 ret = PROC_I(inode)->op.proc_show(m, ns, pid, task); proc_single_show()
595 put_task_struct(task); proc_single_show()
614 struct task_struct *task = get_proc_task(inode); proc_mem_open() local
617 if (task) { proc_mem_open()
618 mm = mm_access(task, mode | PTRACE_MODE_FSCREDS); proc_mem_open()
619 put_task_struct(task); proc_mem_open()
820 struct task_struct *task = get_proc_task(file_inode(file)); oom_adj_read() local
826 if (!task) oom_adj_read()
828 if (lock_task_sighand(task, &flags)) { oom_adj_read()
829 if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MAX) oom_adj_read()
832 oom_adj = (task->signal->oom_score_adj * -OOM_DISABLE) / oom_adj_read()
834 unlock_task_sighand(task, &flags); oom_adj_read()
836 put_task_struct(task); oom_adj_read()
844 struct task_struct *task; oom_adj_write() local
867 task = get_proc_task(file_inode(file)); oom_adj_write()
868 if (!task) { oom_adj_write()
873 task_lock(task); oom_adj_write()
874 if (!task->mm) { oom_adj_write()
879 if (!lock_task_sighand(task, &flags)) { oom_adj_write()
893 if (oom_adj < task->signal->oom_score_adj && oom_adj_write()
904 current->comm, task_pid_nr(current), task_pid_nr(task), oom_adj_write()
905 task_pid_nr(task)); oom_adj_write()
907 task->signal->oom_score_adj = oom_adj; oom_adj_write()
908 trace_oom_score_adj_update(task); oom_adj_write()
910 unlock_task_sighand(task, &flags); oom_adj_write()
912 task_unlock(task); oom_adj_write()
913 put_task_struct(task); oom_adj_write()
927 struct task_struct *task = get_proc_task(file_inode(file)); oom_score_adj_read() local
933 if (!task) oom_score_adj_read()
935 if (lock_task_sighand(task, &flags)) { oom_score_adj_read()
936 oom_score_adj = task->signal->oom_score_adj; oom_score_adj_read()
937 unlock_task_sighand(task, &flags); oom_score_adj_read()
939 put_task_struct(task); oom_score_adj_read()
947 struct task_struct *task; oom_score_adj_write() local
970 task = get_proc_task(file_inode(file)); oom_score_adj_write()
971 if (!task) { oom_score_adj_write()
976 task_lock(task); oom_score_adj_write()
977 if (!task->mm) { oom_score_adj_write()
982 if (!lock_task_sighand(task, &flags)) { oom_score_adj_write()
987 if ((short)oom_score_adj < task->signal->oom_score_adj_min && oom_score_adj_write()
993 task->signal->oom_score_adj = (short)oom_score_adj; oom_score_adj_write()
995 task->signal->oom_score_adj_min = (short)oom_score_adj; oom_score_adj_write()
996 trace_oom_score_adj_update(task); oom_score_adj_write()
999 unlock_task_sighand(task, &flags); oom_score_adj_write()
1001 task_unlock(task); oom_score_adj_write()
1002 put_task_struct(task); oom_score_adj_write()
1019 struct task_struct *task = get_proc_task(inode); proc_loginuid_read() local
1023 if (!task) proc_loginuid_read()
1027 audit_get_loginuid(task))); proc_loginuid_read()
1028 put_task_struct(task); proc_loginuid_read()
1100 struct task_struct *task = get_proc_task(inode); proc_sessionid_read() local
1104 if (!task) proc_sessionid_read()
1107 audit_get_sessionid(task)); proc_sessionid_read()
1108 put_task_struct(task); proc_sessionid_read()
1122 struct task_struct *task = get_proc_task(file_inode(file)); proc_fault_inject_read() local
1127 if (!task) proc_fault_inject_read()
1129 make_it_fail = task->make_it_fail; proc_fault_inject_read()
1130 put_task_struct(task); proc_fault_inject_read()
1140 struct task_struct *task; proc_fault_inject_write() local
1157 task = get_proc_task(file_inode(file)); proc_fault_inject_write()
1158 if (!task) proc_fault_inject_write()
1160 task->make_it_fail = make_it_fail; proc_fault_inject_write()
1161 put_task_struct(task); proc_fault_inject_write()
1176 * Print out various scheduling related per-task fields:
1359 struct task_struct *task; proc_exe_link() local
1363 task = get_proc_task(d_inode(dentry)); proc_exe_link()
1364 if (!task) proc_exe_link()
1366 mm = get_task_mm(task); proc_exe_link()
1367 put_task_struct(task); proc_exe_link()
1454 struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *task) proc_pid_make_inode() argument
1473 * grab the reference to task. proc_pid_make_inode()
1475 ei->pid = get_task_pid(task, PIDTYPE_PID); proc_pid_make_inode()
1479 if (task_dumpable(task)) { proc_pid_make_inode()
1481 cred = __task_cred(task); proc_pid_make_inode()
1486 security_task_to_inode(task, inode); proc_pid_make_inode()
1499 struct task_struct *task; pid_getattr() local
1508 task = pid_task(proc_pid(inode), PIDTYPE_PID); pid_getattr()
1509 if (task) { pid_getattr()
1510 if (!has_pid_permissions(pid, task, 2)) { pid_getattr()
1519 task_dumpable(task)) { pid_getattr()
1520 cred = __task_cred(task); pid_getattr()
1536 * Rewrite the inode's ownerships here because the owning task may have
1549 struct task_struct *task; pid_revalidate() local
1556 task = get_proc_task(inode); pid_revalidate()
1558 if (task) { pid_revalidate()
1560 task_dumpable(task)) { pid_revalidate()
1562 cred = __task_cred(task); pid_revalidate()
1571 security_task_to_inode(task, inode); pid_revalidate()
1572 put_task_struct(task); pid_revalidate()
1585 /* Is the task we represent dead? pid_delete_dentry()
1614 instantiate_t instantiate, struct task_struct *task, const void *ptr) proc_fill_cache()
1627 if (instantiate(d_inode(dir), child, task, ptr) < 0) { proc_fill_cache()
1662 struct task_struct *task; map_files_d_revalidate() local
1676 task = get_proc_task(inode); map_files_d_revalidate()
1677 if (!task) map_files_d_revalidate()
1680 mm = mm_access(task, PTRACE_MODE_READ_FSCREDS); map_files_d_revalidate()
1693 if (task_dumpable(task)) { map_files_d_revalidate()
1695 cred = __task_cred(task); map_files_d_revalidate()
1703 security_task_to_inode(task, inode); map_files_d_revalidate()
1708 put_task_struct(task); map_files_d_revalidate()
1723 struct task_struct *task; proc_map_files_get_link() local
1728 task = get_proc_task(d_inode(dentry)); proc_map_files_get_link()
1729 if (!task) proc_map_files_get_link()
1732 mm = get_task_mm(task); proc_map_files_get_link()
1733 put_task_struct(task); proc_map_files_get_link()
1765 struct task_struct *task, const void *ptr) proc_map_files_instantiate()
1771 inode = proc_pid_make_inode(dir->i_sb, task); proc_map_files_instantiate()
1798 struct task_struct *task; proc_map_files_lookup() local
1807 task = get_proc_task(dir); proc_map_files_lookup()
1808 if (!task) proc_map_files_lookup()
1812 if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) proc_map_files_lookup()
1819 mm = get_task_mm(task); proc_map_files_lookup()
1829 result = proc_map_files_instantiate(dir, dentry, task, proc_map_files_lookup()
1836 put_task_struct(task); proc_map_files_lookup()
1851 struct task_struct *task; proc_map_files_readdir() local
1864 task = get_proc_task(file_inode(file)); proc_map_files_readdir()
1865 if (!task) proc_map_files_readdir()
1869 if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) proc_map_files_readdir()
1876 mm = get_task_mm(task); proc_map_files_readdir()
1932 task, proc_map_files_readdir()
1942 put_task_struct(task); proc_map_files_readdir()
1955 struct task_struct *task; member in struct:timers_private
1965 tp->task = get_pid_task(tp->pid, PIDTYPE_PID); timers_start()
1966 if (!tp->task) timers_start()
1969 tp->sighand = lock_task_sighand(tp->task, &tp->flags); timers_start()
1973 return seq_list_start(&tp->task->signal->posix_timers, *pos); timers_start()
1979 return seq_list_next(v, &tp->task->signal->posix_timers, pos); timers_next()
1987 unlock_task_sighand(tp->task, &tp->flags); timers_stop()
1991 if (tp->task) { timers_stop()
1992 put_task_struct(tp->task); timers_stop()
1993 tp->task = NULL; timers_stop()
2054 struct dentry *dentry, struct task_struct *task, const void *ptr) proc_pident_instantiate()
2060 inode = proc_pid_make_inode(dir->i_sb, task); proc_pident_instantiate()
2088 struct task_struct *task = get_proc_task(dir); proc_pident_lookup() local
2093 if (!task) proc_pident_lookup()
2110 error = proc_pident_instantiate(dir, dentry, task, p); proc_pident_lookup()
2112 put_task_struct(task); proc_pident_lookup()
2120 struct task_struct *task = get_proc_task(file_inode(file)); proc_pident_readdir() local
2123 if (!task) proc_pident_readdir()
2134 proc_pident_instantiate, task, p)) proc_pident_readdir()
2139 put_task_struct(task); proc_pident_readdir()
2150 struct task_struct *task = get_proc_task(inode); proc_pid_attr_read() local
2152 if (!task) proc_pid_attr_read()
2155 length = security_getprocattr(task, proc_pid_attr_read()
2158 put_task_struct(task); proc_pid_attr_read()
2171 struct task_struct *task = get_proc_task(inode); proc_pid_attr_write() local
2174 if (!task) proc_pid_attr_write()
2194 length = mutex_lock_interruptible(&task->signal->cred_guard_mutex); proc_pid_attr_write()
2198 length = security_setprocattr(task, proc_pid_attr_write()
2201 mutex_unlock(&task->signal->cred_guard_mutex); proc_pid_attr_write()
2205 put_task_struct(task); proc_pid_attr_write()
2256 struct task_struct *task = get_proc_task(file_inode(file)); proc_coredump_filter_read() local
2262 if (!task) proc_coredump_filter_read()
2266 mm = get_task_mm(task); proc_coredump_filter_read()
2275 put_task_struct(task); proc_coredump_filter_read()
2285 struct task_struct *task; proc_coredump_filter_write() local
2308 task = get_proc_task(file_inode(file)); proc_coredump_filter_write()
2309 if (!task) proc_coredump_filter_write()
2313 mm = get_task_mm(task); proc_coredump_filter_write()
2326 put_task_struct(task); proc_coredump_filter_write()
2339 static int do_io_accounting(struct task_struct *task, struct seq_file *m, int whole) do_io_accounting() argument
2341 struct task_io_accounting acct = task->ioac; do_io_accounting()
2345 result = mutex_lock_killable(&task->signal->cred_guard_mutex); do_io_accounting()
2349 if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) { do_io_accounting()
2354 if (whole && lock_task_sighand(task, &flags)) { do_io_accounting()
2355 struct task_struct *t = task; do_io_accounting()
2357 task_io_accounting_add(&acct, &task->signal->ioac); do_io_accounting()
2358 while_each_thread(task, t) do_io_accounting()
2361 unlock_task_sighand(task, &flags); do_io_accounting()
2381 mutex_unlock(&task->signal->cred_guard_mutex); do_io_accounting()
2386 struct pid *pid, struct task_struct *task) proc_tid_io_accounting()
2388 return do_io_accounting(task, m, 0); proc_tid_io_accounting()
2392 struct pid *pid, struct task_struct *task) proc_tgid_io_accounting()
2394 return do_io_accounting(task, m, 1); proc_tgid_io_accounting()
2403 struct task_struct *task; proc_id_map_open() local
2407 task = get_proc_task(inode); proc_id_map_open()
2408 if (task) { proc_id_map_open()
2410 ns = get_user_ns(task_cred_xxx(task, user_ns)); proc_id_map_open()
2412 put_task_struct(task); proc_id_map_open()
2481 struct task_struct *task; proc_setgroups_open() local
2485 task = get_proc_task(inode); proc_setgroups_open()
2486 if (task) { proc_setgroups_open()
2488 ns = get_user_ns(task_cred_xxx(task, user_ns)); proc_setgroups_open()
2490 put_task_struct(task); proc_setgroups_open()
2531 struct pid *pid, struct task_struct *task) proc_pid_personality()
2533 int err = lock_trace(task); proc_pid_personality()
2535 seq_printf(m, "%08x\n", task->personality); proc_pid_personality()
2536 unlock_trace(task); proc_pid_personality()
2548 DIR("task", S_IRUGO|S_IXUGO, proc_task_inode_operations, proc_task_operations),
2692 name.name = "task"; proc_flush_task_mnt()
2714 * proc_flush_task - Remove dcache entries for @task from the /proc dcache.
2715 * @task: task that should be flushed.
2718 * proc (proc_mnt) and from all the namespaces' procs this task was seen
2723 * /proc/@tgid/task/@pid
2727 * It is safe and reasonable to cache /proc entries for a task until
2728 * that task exits. After that they just clog up the dcache with
2738 void proc_flush_task(struct task_struct *task) proc_flush_task() argument
2744 pid = task_pid(task); proc_flush_task()
2745 tgid = task_tgid(task); proc_flush_task()
2756 struct task_struct *task, const void *ptr) proc_pid_instantiate()
2760 inode = proc_pid_make_inode(dir->i_sb, task); proc_pid_instantiate()
2785 struct task_struct *task; proc_pid_lookup() local
2795 task = find_task_by_pid_ns(tgid, ns); proc_pid_lookup()
2796 if (task) proc_pid_lookup()
2797 get_task_struct(task); proc_pid_lookup()
2799 if (!task) proc_pid_lookup()
2802 result = proc_pid_instantiate(dir, dentry, task, NULL); proc_pid_lookup()
2803 put_task_struct(task); proc_pid_lookup()
2809 * Find the first task with tgid >= tgid
2814 struct task_struct *task; member in struct:tgid_iter
2820 if (iter.task) next_tgid()
2821 put_task_struct(iter.task); next_tgid()
2824 iter.task = NULL; next_tgid()
2828 iter.task = pid_task(pid, PIDTYPE_PID); next_tgid()
2830 * pid of a thread_group_leader. Testing for task next_tgid()
2837 * group leader, and don't worry if the task we have next_tgid()
2841 if (!iter.task || !has_group_leader_pid(iter.task)) { next_tgid()
2845 get_task_struct(iter.task); next_tgid()
2876 iter.task = NULL; proc_pid_readdir()
2878 iter.task; proc_pid_readdir()
2882 if (!has_pid_permissions(ns, iter.task, 2)) proc_pid_readdir()
2888 proc_pid_instantiate, iter.task, NULL)) { proc_pid_readdir()
2889 put_task_struct(iter.task); proc_pid_readdir()
3010 struct dentry *dentry, struct task_struct *task, const void *ptr) proc_task_instantiate()
3013 inode = proc_pid_make_inode(dir->i_sb, task); proc_task_instantiate()
3038 struct task_struct *task; proc_task_lookup() local
3052 task = find_task_by_pid_ns(tid, ns); proc_task_lookup()
3053 if (task) proc_task_lookup()
3054 get_task_struct(task); proc_task_lookup()
3056 if (!task) proc_task_lookup()
3058 if (!same_thread_group(leader, task)) proc_task_lookup()
3061 result = proc_task_instantiate(dir, dentry, task, NULL); proc_task_lookup()
3063 put_task_struct(task); proc_task_lookup()
3085 struct task_struct *pos, *task; first_tid() local
3092 task = pid_task(pid, PIDTYPE_PID); first_tid()
3093 if (!task) first_tid()
3099 if (pos && same_thread_group(pos, task)) first_tid()
3104 if (nr >= get_nr_threads(task)) first_tid()
3110 pos = task = task->group_leader; first_tid()
3114 } while_each_thread(task, pos); first_tid()
3147 /* for the /proc/TGID/task/ directories */ proc_task_readdir()
3151 struct task_struct *task; proc_task_readdir() local
3167 for (task = first_tid(proc_pid(inode), tid, ctx->pos - 2, ns); proc_task_readdir()
3168 task; proc_task_readdir()
3169 task = next_tid(task), ctx->pos++) { proc_task_readdir()
3172 tid = task_pid_nr_ns(task, ns); proc_task_readdir()
3175 proc_task_instantiate, task, NULL)) { proc_task_readdir()
3179 put_task_struct(task); proc_task_readdir()
199 proc_pid_cmdline(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) proc_pid_cmdline() argument
211 proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) proc_pid_auxv() argument
233 proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) proc_pid_wchan() argument
272 proc_pid_stack(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) proc_pid_stack() argument
309 proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) proc_pid_schedstat() argument
381 proc_oom_score(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) proc_oom_score() argument
422 proc_pid_limits(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) proc_pid_limits() argument
464 proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) proc_pid_syscall() argument
533 has_pid_permissions(struct pid_namespace *pid, struct task_struct *task, int hide_pid_min) has_pid_permissions() argument
1612 proc_fill_cache(struct file *file, struct dir_context *ctx, const char *name, int len, instantiate_t instantiate, struct task_struct *task, const void *ptr) proc_fill_cache() argument
1764 proc_map_files_instantiate(struct inode *dir, struct dentry *dentry, struct task_struct *task, const void *ptr) proc_map_files_instantiate() argument
2053 proc_pident_instantiate(struct inode *dir, struct dentry *dentry, struct task_struct *task, const void *ptr) proc_pident_instantiate() argument
2385 proc_tid_io_accounting(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) proc_tid_io_accounting() argument
2391 proc_tgid_io_accounting(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) proc_tgid_io_accounting() argument
2530 proc_pid_personality(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) proc_pid_personality() argument
2754 proc_pid_instantiate(struct inode *dir, struct dentry * dentry, struct task_struct *task, const void *ptr) proc_pid_instantiate() argument
3009 proc_task_instantiate(struct inode *dir, struct dentry *dentry, struct task_struct *task, const void *ptr) proc_task_instantiate() argument
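
The fs/proc/base.c hits above implement the per-process files and directories under /proc/<pid>: the oom_adj/oom_score_adj handlers, schedstat, the task/ subdirectory lookup and readdir, and so on. As a quick illustration of the interfaces those handlers serve, here is a minimal userspace sketch (assuming only a Linux system with procfs mounted at /proc; the program is illustrative and not part of the kernel source quoted above) that enumerates the caller's threads via /proc/self/task and reads its oom_score_adj:

/* Sketch: walk /proc/self/task (served by proc_task_readdir above) and
 * read /proc/self/oom_score_adj (served by oom_score_adj_read). */
#include <dirent.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	DIR *d = opendir("/proc/self/task");
	struct dirent *de;
	FILE *f;
	char buf[64];

	if (!d) {
		perror("opendir /proc/self/task");
		return 1;
	}
	while ((de = readdir(d)) != NULL) {
		if (strcmp(de->d_name, ".") && strcmp(de->d_name, ".."))
			printf("thread id: %s\n", de->d_name);
	}
	closedir(d);

	f = fopen("/proc/self/oom_score_adj", "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("oom_score_adj: %s", buf);
	if (f)
		fclose(f);
	return 0;
}
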
H A Dfd.c24 struct task_struct *task; seq_show() local
26 task = get_proc_task(m->private); seq_show()
27 if (!task) seq_show()
30 files = get_files_struct(task); seq_show()
31 put_task_struct(task); seq_show()
86 struct task_struct *task; tid_fd_revalidate() local
95 task = get_proc_task(inode); tid_fd_revalidate()
98 if (task) { tid_fd_revalidate()
99 files = get_files_struct(task); tid_fd_revalidate()
111 if (task_dumpable(task)) { tid_fd_revalidate()
113 cred = __task_cred(task); tid_fd_revalidate()
131 security_task_to_inode(task, inode); tid_fd_revalidate()
132 put_task_struct(task); tid_fd_revalidate()
138 put_task_struct(task); tid_fd_revalidate()
151 struct task_struct *task; proc_fd_link() local
154 task = get_proc_task(d_inode(dentry)); proc_fd_link()
155 if (task) { proc_fd_link()
156 files = get_files_struct(task); proc_fd_link()
157 put_task_struct(task); proc_fd_link()
180 struct task_struct *task, const void *ptr) proc_fd_instantiate()
186 inode = proc_pid_make_inode(dir->i_sb, task); proc_fd_instantiate()
213 struct task_struct *task = get_proc_task(dir); proc_lookupfd_common() local
217 if (!task) proc_lookupfd_common()
222 result = instantiate(dir, dentry, task, (void *)(unsigned long)fd); proc_lookupfd_common()
224 put_task_struct(task); proc_lookupfd_common()
310 struct task_struct *task, const void *ptr) proc_fdinfo_instantiate()
316 inode = proc_pid_make_inode(dir->i_sb, task); proc_fdinfo_instantiate()
179 proc_fd_instantiate(struct inode *dir, struct dentry *dentry, struct task_struct *task, const void *ptr) proc_fd_instantiate() argument
309 proc_fdinfo_instantiate(struct inode *dir, struct dentry *dentry, struct task_struct *task, const void *ptr) proc_fdinfo_instantiate() argument
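
The fd.c hits above back /proc/<pid>/fd and /proc/<pid>/fdinfo. A small companion sketch (again a userspace illustration under the assumption that procfs is mounted, not kernel code) that resolves the caller's own descriptors by readlink()ing the /proc/self/fd entries, i.e. the links proc_fd_link fills in, subject to the ptrace checks shown above when another task is targeted:

/* Sketch: list the calling process's open file descriptors by reading the
 * symlinks under /proc/self/fd (kernel side: proc_fd_link above). */
#include <dirent.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	DIR *d = opendir("/proc/self/fd");
	struct dirent *de;
	char path[PATH_MAX], target[PATH_MAX];
	ssize_t n;

	if (!d) {
		perror("opendir /proc/self/fd");
		return 1;
	}
	while ((de = readdir(d)) != NULL) {
		if (!strcmp(de->d_name, ".") || !strcmp(de->d_name, ".."))
			continue;
		snprintf(path, sizeof(path), "/proc/self/fd/%s", de->d_name);
		n = readlink(path, target, sizeof(target) - 1);
		if (n >= 0) {
			target[n] = '\0';
			printf("fd %s -> %s\n", de->d_name, target);
		}
	}
	closedir(d);
	return 0;
}

(The directory stream itself shows up as one of the descriptors, which is expected.)
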
H A Dtask_nommu.c130 struct task_struct *task; pid_of_stack() local
134 task = pid_task(proc_pid(inode), PIDTYPE_PID); pid_of_stack()
135 if (task) { pid_of_stack()
136 task = task_of_stack(task, vma, is_pid); pid_of_stack()
137 if (task) pid_of_stack()
138 ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info); pid_of_stack()
190 * Thread stack in /proc/PID/task/TID/maps or nommu_vma_show()
233 /* pin the task and mm whilst we play with them */ m_start()
234 priv->task = get_proc_task(priv->inode); m_start()
235 if (!priv->task) m_start()
261 if (priv->task) { m_stop()
262 put_task_struct(priv->task); m_stop()
263 priv->task = NULL; m_stop()
/linux-4.1.27/kernel/locking/
H A Drtmutex-debug.c58 void rt_mutex_debug_task_free(struct task_struct *task) rt_mutex_debug_task_free() argument
60 DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters)); rt_mutex_debug_task_free()
61 DEBUG_LOCKS_WARN_ON(task->pi_blocked_on); rt_mutex_debug_task_free()
73 struct task_struct *task; debug_rt_mutex_deadlock() local
78 task = rt_mutex_owner(act_waiter->lock); debug_rt_mutex_deadlock()
79 if (task && task != current) { debug_rt_mutex_deadlock()
80 act_waiter->deadlock_task_pid = get_pid(task_pid(task)); debug_rt_mutex_deadlock()
87 struct task_struct *task; debug_rt_mutex_print_deadlock() local
93 task = pid_task(waiter->deadlock_task_pid, PIDTYPE_PID); debug_rt_mutex_print_deadlock()
94 if (!task) { debug_rt_mutex_print_deadlock()
108 printk("%s/%d is deadlocking current task %s/%d\n\n", debug_rt_mutex_print_deadlock()
109 task->comm, task_pid_nr(task), debug_rt_mutex_print_deadlock()
117 task->comm, task_pid_nr(task)); debug_rt_mutex_print_deadlock()
121 debug_show_held_locks(task); debug_rt_mutex_print_deadlock()
124 task->comm, task_pid_nr(task)); debug_rt_mutex_print_deadlock()
125 show_stack(task, NULL); debug_rt_mutex_print_deadlock()
177 rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task) rt_mutex_deadlock_account_lock() argument
181 void rt_mutex_deadlock_account_unlock(struct task_struct *task) rt_mutex_deadlock_account_unlock() argument
H A Drtmutex.c161 return (left->task->dl.deadline < right->task->dl.deadline); rt_mutex_waiter_less()
206 rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) rt_mutex_enqueue_pi() argument
208 struct rb_node **link = &task->pi_waiters.rb_node; rt_mutex_enqueue_pi()
225 task->pi_waiters_leftmost = &waiter->pi_tree_entry; rt_mutex_enqueue_pi()
228 rb_insert_color(&waiter->pi_tree_entry, &task->pi_waiters); rt_mutex_enqueue_pi()
232 rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) rt_mutex_dequeue_pi() argument
237 if (task->pi_waiters_leftmost == &waiter->pi_tree_entry) rt_mutex_dequeue_pi()
238 task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry); rt_mutex_dequeue_pi()
240 rb_erase(&waiter->pi_tree_entry, &task->pi_waiters); rt_mutex_dequeue_pi()
245 * Calculate task priority from the waiter tree priority
247 * Return task->normal_prio when the waiter tree is empty or when
250 int rt_mutex_getprio(struct task_struct *task) rt_mutex_getprio() argument
252 if (likely(!task_has_pi_waiters(task))) rt_mutex_getprio()
253 return task->normal_prio; rt_mutex_getprio()
255 return min(task_top_pi_waiter(task)->prio, rt_mutex_getprio()
256 task->normal_prio); rt_mutex_getprio()
259 struct task_struct *rt_mutex_get_top_task(struct task_struct *task) rt_mutex_get_top_task() argument
261 if (likely(!task_has_pi_waiters(task))) rt_mutex_get_top_task()
264 return task_top_pi_waiter(task)->task; rt_mutex_get_top_task()
271 int rt_mutex_get_effective_prio(struct task_struct *task, int newprio) rt_mutex_get_effective_prio() argument
273 if (!task_has_pi_waiters(task)) rt_mutex_get_effective_prio()
276 if (task_top_pi_waiter(task)->task->prio <= newprio) rt_mutex_get_effective_prio()
277 return task_top_pi_waiter(task)->task->prio; rt_mutex_get_effective_prio()
282 * Adjust the priority of a task, after its pi_waiters got modified.
284 * This can be both boosting and unboosting. task->pi_lock must be held.
286 static void __rt_mutex_adjust_prio(struct task_struct *task) __rt_mutex_adjust_prio() argument
288 int prio = rt_mutex_getprio(task); __rt_mutex_adjust_prio()
290 if (task->prio != prio || dl_prio(prio)) __rt_mutex_adjust_prio()
291 rt_mutex_setprio(task, prio); __rt_mutex_adjust_prio()
295 * Adjust task priority (undo boosting). Called from the exit path of
300 * of task. We do not use the spin_xx_mutex() variants here as we are
303 static void rt_mutex_adjust_prio(struct task_struct *task) rt_mutex_adjust_prio() argument
307 raw_spin_lock_irqsave(&task->pi_lock, flags); rt_mutex_adjust_prio()
308 __rt_mutex_adjust_prio(task); rt_mutex_adjust_prio()
309 raw_spin_unlock_irqrestore(&task->pi_lock, flags); rt_mutex_adjust_prio()
350 * Decreases task's usage by one - may thus free the task.
352 * @task: the task owning the mutex (owner) for which a chain walk is
356 * things for a task that has just got its priority adjusted, and
361 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
371 * [R] refcount on task
372 * [P] task->pi_lock held
377 * @task [R]
389 * [1] lock(task->pi_lock); [R] acquire [P]
390 * [2] waiter = task->pi_blocked_on; [P]
394 * unlock(task->pi_lock); release [P]
399 * [8] unlock(task->pi_lock); release [P]
400 * put_task_struct(task); release [R]
402 * [10] task = owner(lock); [L]
403 * get_task_struct(task); [L] acquire [R]
404 * lock(task->pi_lock); [L] acquire [P]
407 * [13] unlock(task->pi_lock); release [P]
411 static int rt_mutex_adjust_prio_chain(struct task_struct *task, rt_mutex_adjust_prio_chain() argument
448 "task: %s (%d)\n", max_lock_depth, rt_mutex_adjust_prio_chain()
451 put_task_struct(task); rt_mutex_adjust_prio_chain()
458 * @task. So everything can have changed under us since the rt_mutex_adjust_prio_chain()
466 raw_spin_lock_irqsave(&task->pi_lock, flags); rt_mutex_adjust_prio_chain()
469 * [2] Get the waiter on which @task is blocked on. rt_mutex_adjust_prio_chain()
471 waiter = task->pi_blocked_on; rt_mutex_adjust_prio_chain()
474 * [3] check_exit_conditions_1() protected by task->pi_lock. rt_mutex_adjust_prio_chain()
493 * We dropped all locks after taking a refcount on @task, so rt_mutex_adjust_prio_chain()
494 * the task might have moved on in the lock chain or even left rt_mutex_adjust_prio_chain()
498 * We stored the lock on which @task was blocked in @next_lock, rt_mutex_adjust_prio_chain()
505 * Drop out, when the task has no waiters. Note, rt_mutex_adjust_prio_chain()
510 if (!task_has_pi_waiters(task)) rt_mutex_adjust_prio_chain()
514 * are not the top pi waiter of the task. If deadlock rt_mutex_adjust_prio_chain()
518 if (top_waiter != task_top_pi_waiter(task)) { rt_mutex_adjust_prio_chain()
527 * If the waiter priority is the same as the task priority rt_mutex_adjust_prio_chain()
533 if (waiter->prio == task->prio) { rt_mutex_adjust_prio_chain()
545 * [5] We need to trylock here as we are holding task->pi_lock, rt_mutex_adjust_prio_chain()
550 raw_spin_unlock_irqrestore(&task->pi_lock, flags); rt_mutex_adjust_prio_chain()
556 * [6] check_exit_conditions_2() protected by task->pi_lock and rt_mutex_adjust_prio_chain()
561 * current lock is owned by the task which initiated the chain rt_mutex_adjust_prio_chain()
579 * No requeue[7] here. Just release @task [8] rt_mutex_adjust_prio_chain()
581 raw_spin_unlock_irqrestore(&task->pi_lock, flags); rt_mutex_adjust_prio_chain()
582 put_task_struct(task); rt_mutex_adjust_prio_chain()
593 /* [10] Grab the next task, i.e. owner of @lock */ rt_mutex_adjust_prio_chain()
594 task = rt_mutex_owner(lock); rt_mutex_adjust_prio_chain()
595 get_task_struct(task); rt_mutex_adjust_prio_chain()
596 raw_spin_lock_irqsave(&task->pi_lock, flags); rt_mutex_adjust_prio_chain()
604 next_lock = task_blocked_on_lock(task); rt_mutex_adjust_prio_chain()
611 raw_spin_unlock_irqrestore(&task->pi_lock, flags); rt_mutex_adjust_prio_chain()
629 waiter->prio = task->prio; rt_mutex_adjust_prio_chain()
632 /* [8] Release the task */ rt_mutex_adjust_prio_chain()
633 raw_spin_unlock_irqrestore(&task->pi_lock, flags); rt_mutex_adjust_prio_chain()
634 put_task_struct(task); rt_mutex_adjust_prio_chain()
650 wake_up_process(rt_mutex_top_waiter(lock)->task); rt_mutex_adjust_prio_chain()
655 /* [10] Grab the next task, i.e. the owner of @lock */ rt_mutex_adjust_prio_chain()
656 task = rt_mutex_owner(lock); rt_mutex_adjust_prio_chain()
657 get_task_struct(task); rt_mutex_adjust_prio_chain()
658 raw_spin_lock_irqsave(&task->pi_lock, flags); rt_mutex_adjust_prio_chain()
668 rt_mutex_dequeue_pi(task, prerequeue_top_waiter); rt_mutex_adjust_prio_chain()
669 rt_mutex_enqueue_pi(task, waiter); rt_mutex_adjust_prio_chain()
670 __rt_mutex_adjust_prio(task); rt_mutex_adjust_prio_chain()
683 rt_mutex_dequeue_pi(task, waiter); rt_mutex_adjust_prio_chain()
685 rt_mutex_enqueue_pi(task, waiter); rt_mutex_adjust_prio_chain()
686 __rt_mutex_adjust_prio(task); rt_mutex_adjust_prio_chain()
695 * [12] check_exit_conditions_4() protected by task->pi_lock rt_mutex_adjust_prio_chain()
699 * Check whether the task which owns the current lock is pi rt_mutex_adjust_prio_chain()
702 * task->pi_lock next_lock cannot be dereferenced anymore. rt_mutex_adjust_prio_chain()
704 next_lock = task_blocked_on_lock(task); rt_mutex_adjust_prio_chain()
712 raw_spin_unlock_irqrestore(&task->pi_lock, flags); rt_mutex_adjust_prio_chain()
736 raw_spin_unlock_irqrestore(&task->pi_lock, flags); rt_mutex_adjust_prio_chain()
738 put_task_struct(task); rt_mutex_adjust_prio_chain()
749 * @task: The task which wants to acquire the lock
753 static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, try_to_take_rt_mutex() argument
771 * - @task acquires the lock and there are no other try_to_take_rt_mutex()
772 * waiters. This is undone in rt_mutex_set_owner(@task) at try_to_take_rt_mutex()
784 * If @waiter != NULL, @task has already enqueued the waiter try_to_take_rt_mutex()
804 * If the lock has waiters already we check whether @task is try_to_take_rt_mutex()
807 * If there are no other waiters, @task can acquire try_to_take_rt_mutex()
808 * the lock. @task->pi_blocked_on is NULL, so it does try_to_take_rt_mutex()
813 * If @task->prio is greater than or equal to try_to_take_rt_mutex()
815 * @task lost. try_to_take_rt_mutex()
817 if (task->prio >= rt_mutex_top_waiter(lock)->prio) try_to_take_rt_mutex()
828 * pi_lock dance. @task->pi_blocked_on is NULL try_to_take_rt_mutex()
829 * and we have no waiters to enqueue in @task try_to_take_rt_mutex()
837 * Clear @task->pi_blocked_on. Requires protection by try_to_take_rt_mutex()
838 * @task->pi_lock. Redundant operation for the @waiter == NULL try_to_take_rt_mutex()
842 raw_spin_lock_irqsave(&task->pi_lock, flags); try_to_take_rt_mutex()
843 task->pi_blocked_on = NULL; try_to_take_rt_mutex()
845 * Finish the lock acquisition. @task is the new owner. If try_to_take_rt_mutex()
847 * waiter into @task->pi_waiters list. try_to_take_rt_mutex()
850 rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock)); try_to_take_rt_mutex()
851 raw_spin_unlock_irqrestore(&task->pi_lock, flags); try_to_take_rt_mutex()
861 rt_mutex_set_owner(lock, task); try_to_take_rt_mutex()
863 rt_mutex_deadlock_account_lock(lock, task); try_to_take_rt_mutex()
877 struct task_struct *task, task_blocks_on_rt_mutex()
887 * Early deadlock detection. We really don't want the task to task_blocks_on_rt_mutex()
895 if (owner == task) task_blocks_on_rt_mutex()
898 raw_spin_lock_irqsave(&task->pi_lock, flags); task_blocks_on_rt_mutex()
899 __rt_mutex_adjust_prio(task); task_blocks_on_rt_mutex()
900 waiter->task = task; task_blocks_on_rt_mutex()
902 waiter->prio = task->prio; task_blocks_on_rt_mutex()
909 task->pi_blocked_on = waiter; task_blocks_on_rt_mutex()
911 raw_spin_unlock_irqrestore(&task->pi_lock, flags); task_blocks_on_rt_mutex()
950 next_lock, waiter, task); task_blocks_on_rt_mutex()
987 * slow path making sure no task of lower priority than wakeup_next_waiter()
996 * long as we hold lock->wait_lock. The waiter task needs to wakeup_next_waiter()
999 wake_up_process(waiter->task); wakeup_next_waiter()
1043 * Don't walk the chain, if the owner task is not blocked remove_waiter()
1065 void rt_mutex_adjust_pi(struct task_struct *task) rt_mutex_adjust_pi() argument
1071 raw_spin_lock_irqsave(&task->pi_lock, flags); rt_mutex_adjust_pi()
1073 waiter = task->pi_blocked_on; rt_mutex_adjust_pi()
1074 if (!waiter || (waiter->prio == task->prio && rt_mutex_adjust_pi()
1075 !dl_prio(task->prio))) { rt_mutex_adjust_pi()
1076 raw_spin_unlock_irqrestore(&task->pi_lock, flags); rt_mutex_adjust_pi()
1080 raw_spin_unlock_irqrestore(&task->pi_lock, flags); rt_mutex_adjust_pi()
1083 get_task_struct(task); rt_mutex_adjust_pi()
1085 rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL, rt_mutex_adjust_pi()
1086 next_lock, NULL, task); rt_mutex_adjust_pi()
1092 * @state: the state the task should block in (TASK_INTERRUPTIBLE
1119 if (timeout && !timeout->task) __rt_mutex_slowlock()
1150 * Yell loudly and stop the task right here. rt_mutex_handle_deadlock()
1188 timeout->task = NULL; rt_mutex_slowlock()
1508 * @proxy_owner: the task to set as owner
1539 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
1542 * @task: the task to prepare
1545 * 0 - task blocked on lock
1546 * 1 - acquired the lock for task, caller should wake it up
1553 struct task_struct *task) rt_mutex_start_proxy_lock()
1559 if (try_to_take_rt_mutex(lock, task, NULL)) { rt_mutex_start_proxy_lock()
1565 ret = task_blocks_on_rt_mutex(lock, waiter, task, rt_mutex_start_proxy_lock()
1605 return rt_mutex_top_waiter(lock)->task; rt_mutex_next_owner()
875 task_blocks_on_rt_mutex(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct task_struct *task, enum rtmutex_chainwalk chwalk) task_blocks_on_rt_mutex() argument
1551 rt_mutex_start_proxy_lock(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct task_struct *task) rt_mutex_start_proxy_lock() argument
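
The rt_mutex chain walk above is the kernel's priority-inheritance machinery; to the best of my knowledge it is what PI futexes are built on, and glibc requests those for PTHREAD_PRIO_INHERIT mutexes. A minimal userspace sketch (assuming glibc/NPTL and a kernel with PI-futex support) that asks for that protocol:

/* Sketch: create a priority-inheritance mutex. On contention, locking it
 * takes the PI futex path, which relies on the rt_mutex code above to
 * boost the owner's priority. */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t m;
	int err;

	pthread_mutexattr_init(&attr);
	err = pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
	if (err) {
		fprintf(stderr, "setprotocol: %s\n", strerror(err));
		return 1;
	}
	pthread_mutex_init(&m, &attr);

	pthread_mutex_lock(&m);
	/* While a higher-priority thread blocks on m here, the holder
	 * inherits its priority until the unlock below. */
	pthread_mutex_unlock(&m);

	pthread_mutex_destroy(&m);
	pthread_mutexattr_destroy(&attr);
	return 0;
}

Build with -pthread.
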
H A Dmutex-debug.c57 ti->task->blocked_on = waiter; debug_mutex_add_waiter()
64 DEBUG_LOCKS_WARN_ON(waiter->task != ti->task); mutex_remove_waiter()
65 DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter); mutex_remove_waiter()
66 ti->task->blocked_on = NULL; mutex_remove_waiter()
69 waiter->task = NULL; mutex_remove_waiter()
H A Drtmutex_common.h41 * which is allocated on the kernel stack on of the blocked task.
45 * @task: task reference to the blocked task
50 struct task_struct *task; member in struct:rt_mutex_waiter
129 struct task_struct *task);
H A Drtmutex-debug.h13 rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task);
14 extern void rt_mutex_deadlock_account_unlock(struct task_struct *task);
/linux-4.1.27/drivers/scsi/isci/
H A DMakefile4 host.o task.o probe_roms.o \
H A Dtask.c64 #include "task.h"
71 * @task: request to complete
72 * @response: response code for the completed task.
73 * @status: status code for the completed task.
76 static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task, isci_task_refuse() argument
84 dev_dbg(&ihost->pdev->dev, "%s: task = %p, response=%d, status=%d\n", isci_task_refuse()
85 __func__, task, response, status); isci_task_refuse()
87 spin_lock_irqsave(&task->task_state_lock, flags); isci_task_refuse()
89 task->task_status.resp = response; isci_task_refuse()
90 task->task_status.stat = status; isci_task_refuse()
93 task->task_state_flags |= SAS_TASK_STATE_DONE; isci_task_refuse()
94 task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | isci_task_refuse()
96 task->lldd_task = NULL; isci_task_refuse()
97 spin_unlock_irqrestore(&task->task_state_lock, flags); isci_task_refuse()
99 task->task_done(task); isci_task_refuse()
102 #define for_each_sas_task(num, task) \
104 task = list_entry(task->list.next, struct sas_task, list))
108 struct sas_task *task) isci_device_io_ready()
112 isci_task_is_ncq_recovery(task)) isci_device_io_ready()
117 * functions. This function is called by libsas to send a task down to
119 * @task: This parameter specifies the SAS task to send.
124 int isci_task_execute_task(struct sas_task *task, gfp_t gfp_flags) isci_task_execute_task() argument
126 struct isci_host *ihost = dev_to_ihost(task->dev); isci_task_execute_task()
134 idev = isci_lookup_device(task->dev); isci_task_execute_task()
135 io_ready = isci_device_io_ready(idev, task); isci_task_execute_task()
140 "task: %p, dev: %p idev: %p:%#lx cmd = %p\n", isci_task_execute_task()
141 task, task->dev, idev, idev ? idev->flags : 0, isci_task_execute_task()
142 task->uldd_task); isci_task_execute_task()
145 isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED, isci_task_execute_task()
151 isci_task_refuse(ihost, task, SAS_TASK_COMPLETE, isci_task_execute_task()
155 spin_lock_irqsave(&task->task_state_lock, flags); isci_task_execute_task()
157 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { isci_task_execute_task()
159 spin_unlock_irqrestore(&task->task_state_lock, flags); isci_task_execute_task()
161 isci_task_refuse(ihost, task, isci_task_execute_task()
165 task->task_state_flags |= SAS_TASK_AT_INITIATOR; isci_task_execute_task()
166 spin_unlock_irqrestore(&task->task_state_lock, flags); isci_task_execute_task()
169 status = isci_request_execute(ihost, idev, task, tag); isci_task_execute_task()
172 spin_lock_irqsave(&task->task_state_lock, flags); isci_task_execute_task()
174 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; isci_task_execute_task()
175 spin_unlock_irqrestore(&task->task_state_lock, flags); isci_task_execute_task()
181 isci_task_refuse(ihost, task, isci_task_execute_task()
193 isci_task_refuse(ihost, task, isci_task_execute_task()
245 /* XXX convert to get this from task->tproto like other drivers */ isci_task_request_build()
447 /* Send the task management part of the reset. */ isci_task_lu_reset()
473 * functions. This function is called by libsas to abort a specified task.
474 * @task: This parameter specifies the SAS task to abort.
478 int isci_task_abort_task(struct sas_task *task) isci_task_abort_task() argument
480 struct isci_host *ihost = dev_to_ihost(task->dev); isci_task_abort_task()
489 /* Get the isci_request reference from the task. Note that isci_task_abort_task()
495 spin_lock(&task->task_state_lock); isci_task_abort_task()
497 old_request = task->lldd_task; isci_task_abort_task()
499 /* If task is already done, the request isn't valid */ isci_task_abort_task()
500 if (!(task->task_state_flags & SAS_TASK_STATE_DONE) && isci_task_abort_task()
501 (task->task_state_flags & SAS_TASK_AT_INITIATOR) && isci_task_abort_task()
503 idev = isci_get_device(task->dev->lldd_dev); isci_task_abort_task()
507 spin_unlock(&task->task_state_lock); isci_task_abort_task()
511 "%s: dev = %p (%s%s), task = %p, old_request == %p\n", isci_task_abort_task()
513 (dev_is_sata(task->dev) ? "STP/SATA" isci_task_abort_task()
514 : ((dev_is_expander(task->dev)) isci_task_abort_task()
521 task, old_request); isci_task_abort_task()
529 * is nothing to do here other than to set the task isci_task_abort_task()
530 * done bit, and indicate that the task abort function isci_task_abort_task()
533 spin_lock_irqsave(&task->task_state_lock, flags); isci_task_abort_task()
534 task->task_state_flags |= SAS_TASK_STATE_DONE; isci_task_abort_task()
535 task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | isci_task_abort_task()
537 spin_unlock_irqrestore(&task->task_state_lock, flags); isci_task_abort_task()
542 "%s: abort task not needed for %p\n", isci_task_abort_task()
543 __func__, task); isci_task_abort_task()
551 "req=%p, task=%p) failed\n", isci_task_abort_task()
552 __func__, idev, old_request, task); isci_task_abort_task()
558 if (task->task_proto == SAS_PROTOCOL_SMP || isci_task_abort_task()
559 sas_protocol_ata(task->task_proto) || isci_task_abort_task()
565 /* No task to send, so explicitly resume the device here */ isci_task_abort_task()
573 ((task->task_proto == SAS_PROTOCOL_SMP) isci_task_abort_task()
575 : (sas_protocol_ata(task->task_proto) isci_task_abort_task()
583 spin_lock_irqsave(&task->task_state_lock, flags); isci_task_abort_task()
584 task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | isci_task_abort_task()
586 task->task_state_flags |= SAS_TASK_STATE_DONE; isci_task_abort_task()
587 spin_unlock_irqrestore(&task->task_state_lock, flags); isci_task_abort_task()
597 /* Send the task management request. */ isci_task_abort_task()
604 "%s: Done; dev = %p, task = %p , old_request == %p\n", isci_task_abort_task()
605 __func__, idev, task, old_request); isci_task_abort_task()
613 * to abort all tasks for the given lun.
667 * all return codes from the abort task call). When TMF_RESP_FUNC_SUCC is
670 * @task: This parameter specifies the sas task being queried.
676 struct sas_task *task) isci_task_query_task()
679 if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) isci_task_query_task()
687 * a task request completes.
721 /* PRINT_TMF( ((struct isci_tmf *)request->task)); */ isci_task_request_complete()
736 /* The task management part completes last. */ isci_task_request_complete()
772 /* Explicitly resume the RNC here, since there was no task sent. */ isci_reset_device()
107 isci_device_io_ready(struct isci_remote_device *idev, struct sas_task *task) isci_device_io_ready() argument
675 isci_task_query_task( struct sas_task *task) isci_task_query_task() argument
H A Drequest.c58 #include "task.h"
116 struct sas_task *task = isci_request_access_task(ireq); sci_request_build_sgl() local
123 if (task->num_scatter > 0) { sci_request_build_sgl()
124 sg = task->scatter; sci_request_build_sgl()
154 task->scatter, sci_request_build_sgl()
155 task->total_xfer_len, sci_request_build_sgl()
156 task->data_dir); sci_request_build_sgl()
160 scu_sg->A.length = task->total_xfer_len; sci_request_build_sgl()
174 struct sas_task *task = isci_request_access_task(ireq); sci_io_request_build_ssp_command_iu() local
178 memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8); sci_io_request_build_ssp_command_iu()
183 cmd_iu->task_prio = task->ssp_task.task_prio; sci_io_request_build_ssp_command_iu()
184 cmd_iu->task_attr = task->ssp_task.task_attr; sci_io_request_build_ssp_command_iu()
187 sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cmd->cmnd, sci_io_request_build_ssp_command_iu()
188 (task->ssp_task.cmd->cmd_len+3) / sizeof(u32)); sci_io_request_build_ssp_command_iu()
194 struct sas_task *task = isci_request_access_task(ireq); sci_task_request_build_ssp_task_iu() local
201 memcpy(task_iu->LUN, task->ssp_task.LUN, 8); sci_task_request_build_ssp_task_iu()
461 * SCU_TASK_PRIORITY_HIGH. This ensures that the task request is issued
462 * ahead of other tasks destined for the same Remote Node. -# task_type ==
464 * (i.e. non-raw frame) is being utilized to perform task management. -#
466 * that the bytes are transmitted in the right order for a task frame.
467 * @sci_req: This parameter specifies the task request object being
490 * constructing the SCU task context.
491 * @task_context: The buffer pointer for the SCU task context which is being
495 * the command buffer is complete. none Revisit task context construction to
496 * determine what is common for SSP/SMP/STP task context structures.
615 /* Build the STP task context structure */ sci_stp_optimized_request_construct()
626 * The difference between the DMA IN and DMA OUT request task type sci_stp_optimized_request_construct()
628 * and FPDMA WRITE values. Add the supplied task type parameter sci_stp_optimized_request_construct()
629 * to this difference to set the task type properly for this sci_stp_optimized_request_construct()
636 * optimized task type. */ sci_stp_optimized_request_construct()
644 struct sas_task *task; sci_atapi_construct() local
655 task = isci_request_access_task(ireq); sci_atapi_construct()
656 if (task->data_dir == DMA_NONE) sci_atapi_construct()
657 task->total_xfer_len = 0; sci_atapi_construct()
672 struct sas_task *task = isci_request_access_task(ireq); sci_io_request_construct_sata() local
687 if (!sas_protocol_ata(task->task_proto)) { sci_io_request_construct_sata()
691 task->task_proto); sci_io_request_construct_sata()
698 task->ata_task.fis.command == ATA_CMD_PACKET) { sci_io_request_construct_sata()
704 if (task->data_dir == DMA_NONE) { sci_io_request_construct_sata()
710 if (task->ata_task.use_ncq) { sci_io_request_construct_sata()
718 if (task->ata_task.dma_xfer) { sci_io_request_construct_sata()
731 struct sas_task *task = isci_request_access_task(ireq); sci_io_request_construct_basic_ssp() local
736 task->data_dir, sci_io_request_construct_basic_ssp()
737 task->total_xfer_len); sci_io_request_construct_basic_ssp()
764 struct sas_task *task = isci_request_access_task(ireq); sci_io_request_construct_basic_sata() local
768 copy = (task->data_dir == DMA_NONE) ? false : true; sci_io_request_construct_basic_sata()
771 task->total_xfer_len, sci_io_request_construct_basic_sata()
772 task->data_dir, sci_io_request_construct_basic_sata()
797 * = start of task context SRAM + offset of (type.ssp.data_offset) sci_req_tx_bytes()
890 /* The task frame was already confirmed to have been sci_io_request_terminate()
892 * now only waiting for the task response itself, sci_io_request_terminate()
894 * and don't wait for the task response. sci_io_request_terminate()
1147 /* Unless we get some strange error wait for the task abort to complete request_aborting_state_tc_event()
1166 /* Currently, the decision is to simply allow the task request ssp_task_request_await_tc_event()
1167 * to timeout if the task IU wasn't received successfully. ssp_task_request_await_tc_event()
1168 * There is a potential for receiving multiple task responses if ssp_task_request_await_tc_event()
1169 * we decide to send the task IU again. ssp_task_request_await_tc_event()
1414 struct sas_task *task; sci_stp_request_pio_data_in_copy_data_buffer() local
1420 task = isci_request_access_task(ireq); sci_stp_request_pio_data_in_copy_data_buffer()
1423 if (task->num_scatter > 0) { sci_stp_request_pio_data_in_copy_data_buffer()
1424 sg = task->scatter; sci_stp_request_pio_data_in_copy_data_buffer()
1438 BUG_ON(task->total_xfer_len < total_len); sci_stp_request_pio_data_in_copy_data_buffer()
1439 memcpy(task->scatter, src_addr, total_len); sci_stp_request_pio_data_in_copy_data_buffer()
1622 struct sas_task *task = isci_request_access_task(ireq); atapi_d2h_reg_frame_handler() local
1643 if (task->data_dir == DMA_NONE) atapi_d2h_reg_frame_handler()
1669 struct sas_task *task = isci_request_access_task(ireq); scu_atapi_construct_task_context() local
1676 if (task->data_dir == DMA_TO_DEVICE) { scu_atapi_construct_task_context()
1689 memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len); scu_atapi_construct_task_context()
1692 /* task phase is set to TX_CMD */ scu_atapi_construct_task_context()
1699 task_context->transfer_length_bytes = task->total_xfer_len; scu_atapi_construct_task_context()
1772 struct sas_task *task = isci_request_access_task(ireq); sci_io_request_frame_handler() local
1773 struct scatterlist *sg = &task->smp_task.smp_resp; sci_io_request_frame_handler()
1895 struct sas_task *task = isci_request_access_task(ireq); sci_io_request_frame_handler() local
1939 if (task->data_dir == DMA_FROM_DEVICE) { sci_io_request_frame_handler()
1941 } else if (task->data_dir == DMA_TO_DEVICE) { sci_io_request_frame_handler()
2065 struct sas_task *task = isci_request_access_task(ireq); sci_io_request_frame_handler() local
2069 if (task->data_dir == DMA_NONE) { sci_io_request_frame_handler()
2459 * response iu, in the task struct, from the request object for the upper
2461 * @sas_task: This parameter is the task struct from the upper layer driver.
2468 struct sas_task *task, isci_request_process_response_iu()
2484 task->task_status.stat = resp_iu->status; isci_request_process_response_iu()
2486 /* libsas updates the task status fields based on the response iu. */ isci_request_process_response_iu()
2487 sas_ssp_task_response(dev, task, resp_iu); isci_request_process_response_iu()
2503 struct sas_task *task, isci_request_set_open_reject_status()
2512 task->task_status.open_rej_reason = open_rej_reason; isci_request_set_open_reject_status()
2527 struct sas_task *task, isci_request_handle_controller_specific_errors()
2542 * the target may still have a task outstanding that isci_request_handle_controller_specific_errors()
2556 if (task->task_proto == SAS_PROTOCOL_SMP) { isci_request_handle_controller_specific_errors()
2593 * has completed the task, so that no cleanup isci_request_handle_controller_specific_errors()
2617 request, task, response_ptr, status_ptr, isci_request_handle_controller_specific_errors()
2627 request, task, response_ptr, status_ptr, isci_request_handle_controller_specific_errors()
2634 request, task, response_ptr, status_ptr, isci_request_handle_controller_specific_errors()
2641 request, task, response_ptr, status_ptr, isci_request_handle_controller_specific_errors()
2648 request, task, response_ptr, status_ptr, isci_request_handle_controller_specific_errors()
2655 request, task, response_ptr, status_ptr, isci_request_handle_controller_specific_errors()
2662 request, task, response_ptr, status_ptr, isci_request_handle_controller_specific_errors()
2669 request, task, response_ptr, status_ptr, isci_request_handle_controller_specific_errors()
2676 request, task, response_ptr, status_ptr, isci_request_handle_controller_specific_errors()
2709 if (task->task_proto == SAS_PROTOCOL_SMP) isci_request_handle_controller_specific_errors()
2717 static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis) isci_process_stp_response() argument
2719 struct task_status_struct *ts = &task->task_status; isci_process_stp_response()
2739 struct sas_task *task = isci_request_access_task(request); isci_request_io_request_complete() local
2747 "%s: request = %p, task = %p, " isci_request_io_request_complete()
2748 "task->data_dir = %d completion_status = 0x%x\n", isci_request_io_request_complete()
2749 __func__, request, task, task->data_dir, completion_status); isci_request_io_request_complete()
2759 __func__, request, task); isci_request_io_request_complete()
2761 if (sas_protocol_ata(task->task_proto)) { isci_request_io_request_complete()
2762 isci_process_stp_response(task, &request->stp.rsp); isci_request_io_request_complete()
2763 } else if (SAS_PROTOCOL_SSP == task->task_proto) { isci_request_io_request_complete()
2767 isci_request_process_response_iu(task, resp_iu, isci_request_io_request_complete()
2770 } else if (SAS_PROTOCOL_SMP == task->task_proto) { isci_request_io_request_complete()
2781 /* use the task status set in the task struct by the isci_request_io_request_complete()
2785 response = task->task_status.resp; isci_request_io_request_complete()
2786 status = task->task_status.stat; isci_request_io_request_complete()
2804 task->task_status.residual isci_request_io_request_complete()
2805 = task->total_xfer_len - transferred_length; isci_request_io_request_complete()
2810 if (task->task_status.residual != 0) isci_request_io_request_complete()
2826 __func__, request, task); isci_request_io_request_complete()
2845 task, &response, isci_request_io_request_complete()
2857 spin_lock_irqsave(&task->task_state_lock, task_flags); isci_request_io_request_complete()
2858 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; isci_request_io_request_complete()
2859 spin_unlock_irqrestore(&task->task_state_lock, task_flags); isci_request_io_request_complete()
2899 if (SAS_PROTOCOL_SMP == task->task_proto) isci_request_io_request_complete()
2906 switch (task->task_proto) { isci_request_io_request_complete()
2908 if (task->data_dir == DMA_NONE) isci_request_io_request_complete()
2910 if (task->num_scatter == 0) isci_request_io_request_complete()
2914 task->total_xfer_len, task->data_dir); isci_request_io_request_complete()
2916 dma_unmap_sg(&ihost->pdev->dev, task->scatter, isci_request_io_request_complete()
2917 request->num_sg_entries, task->data_dir); isci_request_io_request_complete()
2920 struct scatterlist *sg = &task->smp_task.smp_req; isci_request_io_request_complete()
2937 spin_lock_irqsave(&task->task_state_lock, task_flags); isci_request_io_request_complete()
2939 task->task_status.resp = response; isci_request_io_request_complete()
2940 task->task_status.stat = status; isci_request_io_request_complete()
2944 task->task_state_flags |= SAS_TASK_STATE_DONE; isci_request_io_request_complete()
2945 task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | isci_request_io_request_complete()
2948 spin_unlock_irqrestore(&task->task_state_lock, task_flags); isci_request_io_request_complete()
2955 * task to recognize the already completed case. isci_request_io_request_complete()
2959 ireq_done(ihost, request, task); isci_request_io_request_complete()
2967 struct sas_task *task; sci_request_started_state_enter() local
2972 task = (test_bit(IREQ_TMF, &ireq->flags)) ? NULL : isci_request_access_task(ireq); sci_request_started_state_enter()
2977 if (!task && dev->dev_type == SAS_END_DEVICE) { sci_request_started_state_enter()
2979 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { sci_request_started_state_enter()
2981 } else if (task && sas_protocol_ata(task->task_proto) && sci_request_started_state_enter()
2982 !task->ata_task.use_ncq) { sci_request_started_state_enter()
2984 task->ata_task.fis.command == ATA_CMD_PACKET) { sci_request_started_state_enter()
2986 } else if (task->data_dir == DMA_NONE) { sci_request_started_state_enter()
2988 } else if (task->ata_task.dma_xfer) { sci_request_started_state_enter()
3154 struct sas_task *task = isci_request_access_task(ireq); isci_request_stp_request_construct() local
3156 struct ata_queued_cmd *qc = task->uldd_task; isci_request_stp_request_construct()
3164 memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); isci_request_stp_request_construct()
3165 if (!task->ata_task.device_control_reg_update) isci_request_stp_request_construct()
3183 struct sas_task *task) sci_io_request_construct_smp()
3185 struct scatterlist *sg = &task->smp_task.smp_req; sci_io_request_construct_smp()
3313 struct sas_task *task = isci_request_access_task(ireq); isci_smp_request_build() local
3317 status = sci_io_request_construct_smp(dev, ireq, task); isci_smp_request_build()
3342 struct sas_task *task = isci_request_access_task(request); isci_io_request_build() local
3350 task->num_scatter); isci_io_request_build()
3356 if (task->num_scatter && isci_io_request_build()
3357 !sas_protocol_ata(task->task_proto) && isci_io_request_build()
3358 !(SAS_PROTOCOL_SMP & task->task_proto)) { isci_io_request_build()
3362 task->scatter, isci_io_request_build()
3363 task->num_scatter, isci_io_request_build()
3364 task->data_dir isci_io_request_build()
3380 switch (task->task_proto) { isci_io_request_build()
3415 struct sas_task *task, isci_io_request_from_tag()
3421 ireq->ttype_ptr.io_task_ptr = task; isci_io_request_from_tag()
3423 task->lldd_task = ireq; isci_io_request_from_tag()
3442 struct sas_task *task, u16 tag) isci_request_execute()
3450 ireq = isci_io_request_from_tag(ihost, task, tag); isci_request_execute()
3465 if (isci_task_is_ncq_recovery(task)) { isci_request_execute()
3468 * request on the task side. Note that it will isci_request_execute()
3511 spin_lock_irqsave(&task->task_state_lock, flags); isci_request_execute()
3512 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; isci_request_execute()
3513 spin_unlock_irqrestore(&task->task_state_lock, flags); isci_request_execute()
3515 /* Cause this task to be scheduled in the SCSI error isci_request_execute()
3518 sas_task_abort(task); isci_request_execute()
2467 isci_request_process_response_iu( struct sas_task *task, struct ssp_response_iu *resp_iu, struct device *dev) isci_request_process_response_iu() argument
2501 isci_request_set_open_reject_status( struct isci_request *request, struct sas_task *task, enum service_response *response_ptr, enum exec_status *status_ptr, enum sas_open_rej_reason open_rej_reason) isci_request_set_open_reject_status() argument
2524 isci_request_handle_controller_specific_errors( struct isci_remote_device *idev, struct isci_request *request, struct sas_task *task, enum service_response *response_ptr, enum exec_status *status_ptr) isci_request_handle_controller_specific_errors() argument
3181 sci_io_request_construct_smp(struct device *dev, struct isci_request *ireq, struct sas_task *task) sci_io_request_construct_smp() argument
3414 isci_io_request_from_tag(struct isci_host *ihost, struct sas_task *task, u16 tag) isci_io_request_from_tag() argument
3441 isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, struct sas_task *task, u16 tag) isci_request_execute() argument
/linux-4.1.27/drivers/scsi/pm8001/
H A Dpm8001_sas.c45 * pm8001_find_tag - from a sas task, find the tag that belongs to this task
46 * @task: the task sent to the LLDD
47 * @tag: the found tag associated with the task
49 static int pm8001_find_tag(struct sas_task *task, u32 *tag) pm8001_find_tag() argument
51 if (task->lldd_task) { pm8001_find_tag()
53 ccb = task->lldd_task; pm8001_find_tag()
63 * @tag: the found tag associated with the task
72 * pm8001_tag_alloc - allocate an empty tag for task use.
268 * pm8001_task_prep_smp - the dispatcher function, prepare data for smp task
270 * @ccb: the ccb which attached to smp task
278 u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag) pm8001_get_ncq_tag() argument
280 struct ata_queued_cmd *qc = task->uldd_task; pm8001_get_ncq_tag()
292 * pm8001_task_prep_ata - the dispatcher function, prepare data for sata task
294 * @ccb: the ccb which attached to sata task
303 * pm8001_task_prep_ssp_tm - the dispatcher function, prepare task management data
306 * @tmf: the task management IU
315 * pm8001_task_prep_ssp - the dispatcher function, prepare ssp data for ssp task
317 * @ccb: the ccb which attached to ssp task
343 * pm8001_task_exec - queue the task(ssp, smp && ata) to the hardware.
344 * @task: the task to be executed.
345 * @num: if can_queue is greater than 1, the task can be queued up. For an SMP task,
348 * @is_tmf: whether this is a task management task.
349 * @tmf: the task management IU
353 static int pm8001_task_exec(struct sas_task *task, pm8001_task_exec() argument
356 struct domain_device *dev = task->dev; pm8001_task_exec()
360 struct sas_task *t = task; pm8001_task_exec()
373 pm8001_ha = pm8001_find_ha_by_dev(task->dev); pm8001_task_exec()
421 ccb->task = t; pm8001_task_exec()
476 * @task: the task to be executed.
479 int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags) pm8001_queue_command() argument
481 return pm8001_task_exec(task, gfp_flags, 0, NULL); pm8001_queue_command()
487 * @ccb: the ccb which attached to ssp task
488 * @task: the task to be freed.
492 struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx) pm8001_ccb_task_free()
494 if (!ccb->task) pm8001_ccb_task_free()
496 if (!sas_protocol_ata(task->task_proto)) pm8001_ccb_task_free()
498 dma_unmap_sg(pm8001_ha->dev, task->scatter, pm8001_ccb_task_free()
499 task->num_scatter, task->data_dir); pm8001_ccb_task_free()
501 switch (task->task_proto) { pm8001_ccb_task_free()
503 dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1, pm8001_ccb_task_free()
505 dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1, pm8001_ccb_task_free()
516 task->lldd_task = NULL; pm8001_ccb_task_free()
517 ccb->task = NULL; pm8001_ccb_task_free()
649 void pm8001_task_done(struct sas_task *task) pm8001_task_done() argument
651 if (!del_timer(&task->slow_task->timer)) pm8001_task_done()
653 complete(&task->slow_task->completion); pm8001_task_done()
658 struct sas_task *task = (struct sas_task *)data; pm8001_tmf_timedout() local
660 task->task_state_flags |= SAS_TASK_STATE_ABORTED; pm8001_tmf_timedout()
661 complete(&task->slow_task->completion); pm8001_tmf_timedout()
666 * pm8001_exec_internal_tmf_task - execute some task management commands.
668 * @tmf: which task management command is to be taken.
670 * @parameter: ssp task parameter.
673 * abort the issued task which resulted in this exception; this is done by calling
674 * this function. Note that it also uses the task execution interface.
680 struct sas_task *task = NULL; pm8001_exec_internal_tmf_task() local
686 task = sas_alloc_slow_task(GFP_KERNEL); pm8001_exec_internal_tmf_task()
687 if (!task) pm8001_exec_internal_tmf_task()
690 task->dev = dev; pm8001_exec_internal_tmf_task()
691 task->task_proto = dev->tproto; pm8001_exec_internal_tmf_task()
692 memcpy(&task->ssp_task, parameter, para_len); pm8001_exec_internal_tmf_task()
693 task->task_done = pm8001_task_done; pm8001_exec_internal_tmf_task()
694 task->slow_task->timer.data = (unsigned long)task; pm8001_exec_internal_tmf_task()
695 task->slow_task->timer.function = pm8001_tmf_timedout; pm8001_exec_internal_tmf_task()
696 task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ; pm8001_exec_internal_tmf_task()
697 add_timer(&task->slow_task->timer); pm8001_exec_internal_tmf_task()
699 res = pm8001_task_exec(task, GFP_KERNEL, 1, tmf); pm8001_exec_internal_tmf_task()
702 del_timer(&task->slow_task->timer); pm8001_exec_internal_tmf_task()
704 pm8001_printk("Executing internal task " pm8001_exec_internal_tmf_task()
708 wait_for_completion(&task->slow_task->completion); pm8001_exec_internal_tmf_task()
717 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { pm8001_exec_internal_tmf_task()
718 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { pm8001_exec_internal_tmf_task()
720 pm8001_printk("TMF task[%x]timeout.\n", pm8001_exec_internal_tmf_task()
726 if (task->task_status.resp == SAS_TASK_COMPLETE && pm8001_exec_internal_tmf_task()
727 task->task_status.stat == SAM_STAT_GOOD) { pm8001_exec_internal_tmf_task()
732 if (task->task_status.resp == SAS_TASK_COMPLETE && pm8001_exec_internal_tmf_task()
733 task->task_status.stat == SAS_DATA_UNDERRUN) { pm8001_exec_internal_tmf_task()
736 res = task->task_status.residual; pm8001_exec_internal_tmf_task()
740 if (task->task_status.resp == SAS_TASK_COMPLETE && pm8001_exec_internal_tmf_task()
741 task->task_status.stat == SAS_DATA_OVERRUN) { pm8001_exec_internal_tmf_task()
743 pm8001_printk("Blocked task error.\n")); pm8001_exec_internal_tmf_task()
751 task->task_status.resp, pm8001_exec_internal_tmf_task()
752 task->task_status.stat)); pm8001_exec_internal_tmf_task()
753 sas_free_task(task); pm8001_exec_internal_tmf_task()
754 task = NULL; pm8001_exec_internal_tmf_task()
758 BUG_ON(retry == 3 && task != NULL); pm8001_exec_internal_tmf_task()
759 sas_free_task(task); pm8001_exec_internal_tmf_task()
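
pm8001_exec_internal_tmf_task follows a common pattern: allocate a slow task, arm a timeout timer, submit the request, then sleep on a completion; if the timer fires first, the task is marked aborted and the result is treated as a timeout. A rough userspace analogue of that wait-with-deadline pattern, using POSIX threads in place of kernel timers and completions (all names here are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Illustrative stand-in for a "slow task" with a completion and a status. */
struct slow_task {
    pthread_mutex_t lock;
    pthread_cond_t  done_cv;
    bool            done;
    bool            aborted;   /* set when we give up waiting */
};

static void *worker(void *arg)
{
    struct slow_task *t = arg;

    sleep(1);                      /* pretend the firmware took one second */
    pthread_mutex_lock(&t->lock);
    t->done = true;
    pthread_cond_signal(&t->done_cv);
    pthread_mutex_unlock(&t->lock);
    return NULL;
}

/* Wait for completion with a deadline, mirroring "arm timer + wait_for_completion". */
static int wait_with_timeout(struct slow_task *t, int seconds)
{
    struct timespec deadline;
    int rc = 0;

    clock_gettime(CLOCK_REALTIME, &deadline);
    deadline.tv_sec += seconds;

    pthread_mutex_lock(&t->lock);
    while (!t->done && rc == 0)
        rc = pthread_cond_timedwait(&t->done_cv, &t->lock, &deadline);
    if (!t->done)
        t->aborted = true;         /* analogous to marking the task aborted */
    pthread_mutex_unlock(&t->lock);
    return t->aborted ? -1 : 0;
}

int main(void)
{
    struct slow_task t = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .done_cv = PTHREAD_COND_INITIALIZER,
    };
    pthread_t tid;

    pthread_create(&tid, NULL, worker, &t);
    printf("status: %s\n", wait_with_timeout(&t, 3) ? "timed out" : "completed");
    pthread_join(&tid, NULL);
    return 0;
}
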
771 struct sas_task *task = NULL; pm8001_exec_internal_task_abort() local
774 task = sas_alloc_slow_task(GFP_KERNEL); pm8001_exec_internal_task_abort()
775 if (!task) pm8001_exec_internal_task_abort()
778 task->dev = dev; pm8001_exec_internal_task_abort()
779 task->task_proto = dev->tproto; pm8001_exec_internal_task_abort()
780 task->task_done = pm8001_task_done; pm8001_exec_internal_task_abort()
781 task->slow_task->timer.data = (unsigned long)task; pm8001_exec_internal_task_abort()
782 task->slow_task->timer.function = pm8001_tmf_timedout; pm8001_exec_internal_task_abort()
783 task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ; pm8001_exec_internal_task_abort()
784 add_timer(&task->slow_task->timer); pm8001_exec_internal_task_abort()
792 ccb->task = task; pm8001_exec_internal_task_abort()
798 del_timer(&task->slow_task->timer); pm8001_exec_internal_task_abort()
800 pm8001_printk("Executing internal task " pm8001_exec_internal_task_abort()
804 wait_for_completion(&task->slow_task->completion); pm8001_exec_internal_task_abort()
807 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { pm8001_exec_internal_task_abort()
808 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { pm8001_exec_internal_task_abort()
810 pm8001_printk("TMF task timeout.\n")); pm8001_exec_internal_task_abort()
815 if (task->task_status.resp == SAS_TASK_COMPLETE && pm8001_exec_internal_task_abort()
816 task->task_status.stat == SAM_STAT_GOOD) { pm8001_exec_internal_task_abort()
825 task->task_status.resp, pm8001_exec_internal_task_abort()
826 task->task_status.stat)); pm8001_exec_internal_task_abort()
827 sas_free_task(task); pm8001_exec_internal_task_abort()
828 task = NULL; pm8001_exec_internal_task_abort()
832 BUG_ON(retry == 3 && task != NULL); pm8001_exec_internal_task_abort()
833 sas_free_task(task); pm8001_exec_internal_task_abort()
888 /* retry commands by ha, by task and/or by device */ pm8001_open_reject_retry()
903 struct sas_task *task; pm8001_open_reject_retry() local
924 task = ccb->task; pm8001_open_reject_retry()
925 if (!task || !task->task_done) pm8001_open_reject_retry()
927 if (task_to_close && (task != task_to_close)) pm8001_open_reject_retry()
929 ts = &task->task_status; pm8001_open_reject_retry()
936 spin_lock_irqsave(&task->task_state_lock, flags1); pm8001_open_reject_retry()
937 task->task_state_flags &= ~SAS_TASK_STATE_PENDING; pm8001_open_reject_retry()
938 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; pm8001_open_reject_retry()
939 task->task_state_flags |= SAS_TASK_STATE_DONE; pm8001_open_reject_retry()
940 if (unlikely((task->task_state_flags pm8001_open_reject_retry()
942 spin_unlock_irqrestore(&task->task_state_lock, pm8001_open_reject_retry()
944 pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); pm8001_open_reject_retry()
946 spin_unlock_irqrestore(&task->task_state_lock, pm8001_open_reject_retry()
948 pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); pm8001_open_reject_retry()
951 task->task_done(task); pm8001_open_reject_retry()
1068 /* mandatory SAM-3, the task resets the specified LUN */ pm8001_lu_reset()
1097 int pm8001_query_task(struct sas_task *task) pm8001_query_task() argument
1104 if (unlikely(!task || !task->lldd_task || !task->dev)) pm8001_query_task()
1107 if (task->task_proto & SAS_PROTOCOL_SSP) { pm8001_query_task()
1108 struct scsi_cmnd *cmnd = task->uldd_task; pm8001_query_task()
1109 struct domain_device *dev = task->dev; pm8001_query_task()
1114 rc = pm8001_find_tag(task, &tag); pm8001_query_task()
1128 /* The task is still in Lun, release it then */ pm8001_query_task()
1131 pm8001_printk("The task is still in Lun\n")); pm8001_query_task()
1133 /* The task is not in Lun or failed, reset the phy */ pm8001_query_task()
1137 pm8001_printk("The task is not in Lun or failed," pm8001_query_task()
1146 /* mandatory SAM-3, still need to free task/ccb info, abort the specified task */ pm8001_abort_task()
1147 int pm8001_abort_task(struct sas_task *task) pm8001_abort_task() argument
1159 if (unlikely(!task || !task->lldd_task || !task->dev)) pm8001_abort_task()
1161 spin_lock_irqsave(&task->task_state_lock, flags); pm8001_abort_task()
1162 if (task->task_state_flags & SAS_TASK_STATE_DONE) { pm8001_abort_task()
1163 spin_unlock_irqrestore(&task->task_state_lock, flags); pm8001_abort_task()
1167 spin_unlock_irqrestore(&task->task_state_lock, flags); pm8001_abort_task()
1168 if (task->task_proto & SAS_PROTOCOL_SSP) { pm8001_abort_task()
1169 struct scsi_cmnd *cmnd = task->uldd_task; pm8001_abort_task()
1170 dev = task->dev; pm8001_abort_task()
1171 ccb = task->lldd_task; pm8001_abort_task()
1175 rc = pm8001_find_tag(task, &tag); pm8001_abort_task()
1189 } else if (task->task_proto & SAS_PROTOCOL_SATA || pm8001_abort_task()
1190 task->task_proto & SAS_PROTOCOL_STP) { pm8001_abort_task()
1191 dev = task->dev; pm8001_abort_task()
1194 rc = pm8001_find_tag(task, &tag); pm8001_abort_task()
1202 } else if (task->task_proto & SAS_PROTOCOL_SMP) { pm8001_abort_task()
1204 dev = task->dev; pm8001_abort_task()
1207 rc = pm8001_find_tag(task, &tag); pm8001_abort_task()
1252 pm8001_printk("I_T_L_Q clear task set[%x]\n", pm8001_clear_task_set()
491 pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha, struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx) pm8001_ccb_task_free() argument
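
pm8001_abort_task shows another recurring idiom in this file: take task_state_lock and bail out immediately if SAS_TASK_STATE_DONE is already set, so an abort never races with a completion that has already happened. A hedged, much-simplified sketch of that check-under-lock step (flag and struct names are made up):

#include <pthread.h>
#include <stdio.h>

#define TASK_STATE_DONE 0x1

struct demo_task {
    pthread_mutex_t state_lock;
    unsigned int    state_flags;
};

/* Only start an abort if the task has not already completed. */
static int demo_abort_task(struct demo_task *t)
{
    pthread_mutex_lock(&t->state_lock);
    if (t->state_flags & TASK_STATE_DONE) {
        pthread_mutex_unlock(&t->state_lock);
        return 0;              /* nothing to abort, completion won the race */
    }
    pthread_mutex_unlock(&t->state_lock);

    /* ... issue the abort request to the hardware here ... */
    return 1;
}

int main(void)
{
    struct demo_task t = { .state_lock = PTHREAD_MUTEX_INITIALIZER };

    printf("abort issued: %d\n", demo_abort_task(&t));
    t.state_flags |= TASK_STATE_DONE;
    printf("abort issued: %d\n", demo_abort_task(&t));
    return 0;
}
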
/linux-4.1.27/net/irda/
H A Dirda_device.c55 static void __irda_task_delete(struct irda_task *task);
162 static void __irda_task_delete(struct irda_task *task) __irda_task_delete() argument
164 del_timer(&task->timer); __irda_task_delete()
166 kfree(task); __irda_task_delete()
169 static void irda_task_delete(struct irda_task *task) irda_task_delete() argument
171 /* Unregister task */ irda_task_delete()
172 hashbin_remove(tasks, (long) task, NULL); irda_task_delete()
174 __irda_task_delete(task); irda_task_delete()
178 * Function irda_task_kick (task)
180 * Tries to execute a task possibly multiple times until the task is either
181 * finished, or asks for a timeout. When a task is finished, we do post
182 * processing, and notify the parent task that is waiting for this task
185 static int irda_task_kick(struct irda_task *task) irda_task_kick() argument
191 IRDA_ASSERT(task != NULL, return -1;); irda_task_kick()
192 IRDA_ASSERT(task->magic == IRDA_TASK_MAGIC, return -1;); irda_task_kick()
194 /* Execute task until it's finished, or asks for a timeout */ irda_task_kick()
196 timeout = task->function(task); irda_task_kick()
198 net_err_ratelimited("%s: error in task handler!\n", irda_task_kick()
200 irda_task_delete(task); irda_task_kick()
203 } while ((timeout == 0) && (task->state != IRDA_TASK_DONE)); irda_task_kick()
206 net_err_ratelimited("%s: Error executing task!\n", __func__); irda_task_kick()
207 irda_task_delete(task); irda_task_kick()
212 if (task->state == IRDA_TASK_DONE) { irda_task_kick()
213 del_timer(&task->timer); irda_task_kick()
216 if (task->finished) irda_task_kick()
217 task->finished(task); irda_task_kick()
220 if (task->parent) { irda_task_kick()
222 if (task->parent->state == IRDA_TASK_CHILD_WAIT) { irda_task_kick()
223 task->parent->state = IRDA_TASK_CHILD_DONE; irda_task_kick()
226 del_timer(&task->parent->timer); irda_task_kick()
228 /* Kick parent task */ irda_task_kick()
229 irda_task_kick(task->parent); irda_task_kick()
232 irda_task_delete(task); irda_task_kick()
234 irda_start_timer(&task->timer, timeout, (void *) task, irda_task_kick()
249 * Task time has expired. We now try to execute the task (again), and restart
250 * the timer if the task has not finished yet
254 struct irda_task *task; irda_task_timer_expired() local
256 task = data; irda_task_timer_expired()
258 irda_task_kick(task); irda_task_timer_expired()
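
irda_task_kick runs the task's handler repeatedly until it either finishes or asks for a timeout; on completion it notifies the waiting parent, otherwise it re-arms the task timer, whose expiry calls irda_task_kick again. A compact sketch of that run-until-done-or-timeout loop, without real timers and with invented demo_* names:

#include <stdio.h>

enum task_state { TASK_INIT, TASK_WAIT, TASK_DONE };

struct demo_task {
    enum task_state state;
    int steps_left;
};

/*
 * Handler returns 0 to be called again immediately, or a positive
 * timeout (in ms) when the task wants to sleep before the next step.
 */
static int demo_handler(struct demo_task *t)
{
    if (--t->steps_left <= 0) {
        t->state = TASK_DONE;
        return 0;
    }
    t->state = TASK_WAIT;
    return 10;      /* ask to be kicked again in 10 ms */
}

/* Kick the task until it finishes or requests a timeout. */
static void demo_task_kick(struct demo_task *t)
{
    int timeout;

    do {
        timeout = demo_handler(t);
    } while (timeout == 0 && t->state != TASK_DONE);

    if (t->state == TASK_DONE)
        printf("task finished\n");
    else
        printf("re-arm timer for %d ms\n", timeout);
}

int main(void)
{
    struct demo_task t = { .state = TASK_INIT, .steps_left = 3 };

    demo_task_kick(&t);   /* first kick: handler asks for a timeout */
    demo_task_kick(&t);   /* later kicks would normally come from the timer */
    demo_task_kick(&t);
    return 0;
}
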
/linux-4.1.27/arch/ia64/include/asm/
H A Dsyscall.h20 static inline long syscall_get_nr(struct task_struct *task, syscall_get_nr() argument
29 static inline void syscall_rollback(struct task_struct *task, syscall_rollback() argument
35 static inline long syscall_get_error(struct task_struct *task, syscall_get_error() argument
41 static inline long syscall_get_return_value(struct task_struct *task, syscall_get_return_value() argument
47 static inline void syscall_set_return_value(struct task_struct *task, syscall_set_return_value() argument
61 extern void ia64_syscall_get_set_arguments(struct task_struct *task,
64 static inline void syscall_get_arguments(struct task_struct *task, syscall_get_arguments() argument
71 ia64_syscall_get_set_arguments(task, regs, i, n, args, 0); syscall_get_arguments()
74 static inline void syscall_set_arguments(struct task_struct *task, syscall_set_arguments() argument
81 ia64_syscall_get_set_arguments(task, regs, i, n, args, 1); syscall_set_arguments()
H A Dperfmon.h24 extern void pfm_inherit(struct task_struct *task, struct pt_regs *regs);
43 unsigned int block_task:1; /* block monitored task on kernel exit */
69 int (*fmt_validate)(struct task_struct *task, unsigned int flags, int cpu, void *arg);
70 int (*fmt_getsize)(struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size);
71 int (*fmt_init)(struct task_struct *task, void *buf, unsigned int flags, int cpu, void *arg);
72 int (*fmt_handler)(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs, unsigned long stamp);
73 int (*fmt_restart)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs);
74 int (*fmt_restart_active)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs);
75 int (*fmt_exit)(struct task_struct *task, void *buf, struct pt_regs *regs);
88 extern int pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
89 extern int pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
96 #define PFM_CPUINFO_EXCL_IDLE 0x4 /* the system wide session excludes the idle task */
H A Dcurrent.h12 * In kernel mode, thread pointer (r13) is used to point to the current task
/linux-4.1.27/arch/ia64/kernel/
H A Dinit_task.c3 * task.
23 * Initial task structure.
32 struct task_struct task; member in struct:__anon1703::__anon1704
38 .task = INIT_TASK(init_task_mem.s.task),
39 .thread_info = INIT_THREAD_INFO(init_task_mem.s.task)
H A Dperfmon_default_smpl.c41 default_validate(struct task_struct *task, unsigned int flags, int cpu, void *data) default_validate() argument
47 DPRINT(("[%d] no argument passed\n", task_pid_nr(task))); default_validate()
51 DPRINT(("[%d] validate flags=0x%x CPU%d\n", task_pid_nr(task), flags, cpu)); default_validate()
64 default_get_size(struct task_struct *task, unsigned int flags, int cpu, void *data, unsigned long *size) default_get_size() argument
77 default_init(struct task_struct *task, void *buf, unsigned int flags, int cpu, void *data) default_init() argument
91 task_pid_nr(task), default_init()
102 default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs, unsigned long stamp) default_handler() argument
112 if (unlikely(buf == NULL || arg == NULL|| regs == NULL || task == NULL)) { default_handler()
113 DPRINT(("[%d] invalid arguments buf=%p arg=%p\n", task->pid, buf, arg)); default_handler()
142 task->pid, default_handler()
150 * current = task running at the time of the overflow. default_handler()
152 * per-task mode: default_handler()
153 * - this is usually the task being monitored. default_handler()
154 * Under certain conditions, it might be a different task default_handler()
157 * - this is not necessarily the task controlling the session default_handler()
230 default_restart(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs) default_restart() argument
246 default_exit(struct task_struct *task, void *buf, struct pt_regs *regs) default_exit() argument
248 DPRINT(("[%d] exit(%p)\n", task_pid_nr(task), buf)); default_exit()
H A Dperfmon.c60 #define PFM_CTX_UNLOADED 1 /* context is not loaded onto any task */
61 #define PFM_CTX_LOADED 2 /* context is loaded onto a task */
263 unsigned int block:1; /* when 1, task will blocked on user notifications */
267 unsigned int excl_idle:1; /* exclude idle task in system wide session */
290 struct task_struct *ctx_task; /* task to which context is attached */
376 unsigned int pfs_task_sessions; /* number of per task sessions */
380 struct task_struct *pfs_sys_session[NR_CPUS]; /* point to task owning a system-wide session */
388 typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
579 pfm_put_task(struct task_struct *task) pfm_put_task() argument
581 if (task != current) put_task_struct(task); pfm_put_task()
921 pfm_mask_monitoring(struct task_struct *task) pfm_mask_monitoring() argument
923 pfm_context_t *ctx = PFM_GET_CTX(task); pfm_mask_monitoring()
927 DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task))); pfm_mask_monitoring()
934 * from the current task. However the PMU state belongs pfm_mask_monitoring()
936 * In SMP, a valid overflow only happens when task is pfm_mask_monitoring()
938 * the PMU state belongs to the current task, therefore pfm_mask_monitoring()
973 * if task is current, modify actual registers, otherwise modify pfm_mask_monitoring()
990 * must always be done with task == current
995 pfm_restore_monitoring(struct task_struct *task) pfm_restore_monitoring() argument
997 pfm_context_t *ctx = PFM_GET_CTX(task); pfm_restore_monitoring()
1005 if (task != current) { pfm_restore_monitoring()
1006 printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current)); pfm_restore_monitoring()
1010 printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__, pfm_restore_monitoring()
1011 task_pid_nr(task), task_pid_nr(current), ctx->ctx_state); pfm_restore_monitoring()
1066 task_pid_nr(task), i, ctx->th_pmcs[i])); pfm_restore_monitoring()
1123 pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx) pfm_copy_pmds() argument
1159 pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx) pfm_copy_pmcs() argument
1194 pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs) pfm_buf_fmt_exit() argument
1197 if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs); pfm_buf_fmt_exit()
1202 pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size) pfm_buf_fmt_getsize() argument
1205 if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size); pfm_buf_fmt_getsize()
1211 pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, pfm_buf_fmt_validate() argument
1215 if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg); pfm_buf_fmt_validate()
1220 pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags, pfm_buf_fmt_init() argument
1224 if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg); pfm_buf_fmt_init()
1229 pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs) pfm_buf_fmt_restart() argument
1232 if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs); pfm_buf_fmt_restart()
1237 pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs) pfm_buf_fmt_restart_active() argument
1240 if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs); pfm_buf_fmt_restart_active()
1327 pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu) pfm_reserve_session() argument
1344 * cannot mix system wide and per-task sessions pfm_reserve_session()
1356 pfm_sessions.pfs_sys_session[cpu] = task; pfm_reserve_session()
1448 struct task_struct *task = current; pfm_remove_smpl_mapping() local
1452 if (task->mm == NULL || size == 0UL || vaddr == NULL) { pfm_remove_smpl_mapping()
1453 printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm); pfm_remove_smpl_mapping()
1465 printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size); pfm_remove_smpl_mapping()
1813 struct task_struct *task; pfm_flush() local
1849 task = PFM_CTX_TASK(ctx); pfm_flush()
1850 regs = task_pt_regs(task); pfm_flush()
1854 task == current ? 1 : 0)); pfm_flush()
1857 * if state == UNLOADED, then task is NULL pfm_flush()
1863 if (task == current) { pfm_flush()
1866 * the task IS the owner but it migrated to another CPU: that's bad pfm_flush()
1906 * remove virtual mapping, if any, for the calling task. pfm_flush()
1910 * by every task with access to the context pfm_flush()
1939 * (fput()), i.e., last task to access the file. Nobody else can access the
1945 * When called from exit_files(), the current task is not yet ZOMBIE but we
1952 struct task_struct *task; pfm_close() local
1979 task = PFM_CTX_TASK(ctx); pfm_close()
1980 regs = task_pt_regs(task); pfm_close()
1984 task == current ? 1 : 0)); pfm_close()
1987 * if task == current, then pfm_flush() unloaded the context pfm_close()
1992 * context is loaded/masked and task != current, we need to pfm_close()
1997 * The task is currently blocked or will block after an overflow. pfm_close()
2001 * This situation is only possible for per-task mode pfm_close()
2022 * force task to wake up from MASKED state pfm_close()
2030 * task to report completion pfm_close()
2060 else if (task != current) { pfm_close()
2067 DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task))); pfm_close()
2070 * the task notices the ZOMBIE state pfm_close()
2083 * the context is still attached to a task (possibly current) pfm_close()
2090 * monitored task. It is not possible to free vmalloc'ed pfm_close()
2127 * from the callers side. The monitored task side is also cut, so we pfm_close()
2236 * allocate a sampling buffer and remap it into the user address space of the task pfm_smpl_buffer_alloc()
2239 pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr) pfm_smpl_buffer_alloc() argument
2241 struct mm_struct *mm = task->mm; pfm_smpl_buffer_alloc()
2259 * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur) pfm_smpl_buffer_alloc()
2262 if (size > task_rlimit(task, RLIMIT_MEMLOCK)) pfm_smpl_buffer_alloc()
2308 down_write(&task->mm->mmap_sem); pfm_smpl_buffer_alloc()
2314 up_write(&task->mm->mmap_sem); pfm_smpl_buffer_alloc()
2322 /* can only be applied to current task, need to have the mm semaphore held when called */ pfm_smpl_buffer_alloc()
2325 up_write(&task->mm->mmap_sem); pfm_smpl_buffer_alloc()
2337 up_write(&task->mm->mmap_sem); pfm_smpl_buffer_alloc()
2359 pfm_bad_permissions(struct task_struct *task) pfm_bad_permissions() argument
2367 tcred = __task_cred(task); pfm_bad_permissions()
2370 DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n", pfm_bad_permissions()
2391 pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx) pfarg_is_sane() argument
2416 pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned int ctx_flags, pfm_setup_buffer_fmt() argument
2429 DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task))); pfm_setup_buffer_fmt()
2438 ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg); pfm_setup_buffer_fmt()
2440 DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret)); pfm_setup_buffer_fmt()
2451 ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size); pfm_setup_buffer_fmt()
2464 ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg); pfm_setup_buffer_fmt()
2489 * when they are not actively used by the task. In UP, the incoming process pfm_reset_pmu_state()
2552 * - kernel task
2553 * - task not owned by caller
2554 * - task incompatible with context mode
2557 pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task) pfm_task_incompatible() argument
2560 * no kernel task or task not owned by caller pfm_task_incompatible()
2562 if (task->mm == NULL) { pfm_task_incompatible()
2563 DPRINT(("task [%d] has not memory context (kernel thread)\n", task_pid_nr(task))); pfm_task_incompatible()
2566 if (pfm_bad_permissions(task)) { pfm_task_incompatible()
2567 DPRINT(("no permission to attach to [%d]\n", task_pid_nr(task))); pfm_task_incompatible()
2573 if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) { pfm_task_incompatible()
2574 DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task))); pfm_task_incompatible()
2578 if (task->exit_state == EXIT_ZOMBIE) { pfm_task_incompatible()
2579 DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task))); pfm_task_incompatible()
2586 if (task == current) return 0; pfm_task_incompatible()
2588 if (!task_is_stopped_or_traced(task)) { pfm_task_incompatible()
2589 DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state)); pfm_task_incompatible()
2593 * make sure the task is off any CPU pfm_task_incompatible()
2595 wait_task_inactive(task, 0); pfm_task_incompatible()
2603 pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task) pfm_get_task() argument
2617 /* make sure task cannot go away while we operate on it */ pfm_get_task()
2627 *task = p; pfm_get_task()
2823 struct task_struct *task; pfm_write_pmcs() local
2837 task = ctx->ctx_task; pfm_write_pmcs()
2852 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0; pfm_write_pmcs()
2889 * - per-task : PMCx.pm=0 (user monitor) pfm_write_pmcs()
2935 ret = (*wr_func)(task, ctx, cnum, &value, regs); pfm_write_pmcs()
3022 * per-task SMP only here pfm_write_pmcs()
3024 * we are guaranteed that the task is not running on the other CPU, pfm_write_pmcs()
3025 * we indicate that this PMD will need to be reloaded if the task pfm_write_pmcs()
3063 struct task_struct *task; pfm_write_pmds() local
3077 task = ctx->ctx_task; pfm_write_pmds()
3082 * on both UP and SMP, we can only write to the PMC when the task is pfm_write_pmds()
3095 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0; pfm_write_pmds()
3117 ret = (*wr_func)(task, ctx, cnum, &v, regs); pfm_write_pmds()
3203 * we are guaranteed that the task is not running on the other CPU, pfm_write_pmds()
3204 * we indicate that this PMD will need to be reloaded if the task pfm_write_pmds()
3259 struct task_struct *task; pfm_read_pmds() local
3277 task = ctx->ctx_task; pfm_read_pmds()
3294 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0; pfm_read_pmds()
3307 * the task is the owner of the local PMU. pfm_read_pmds()
3331 * If the task is not the current one, then we check if the pfm_read_pmds()
3340 * if context is zombie, then task does not exist anymore. pfm_read_pmds()
3388 pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) pfm_mod_write_pmcs() argument
3399 * for now limit to current task, which is enough when calling pfm_mod_write_pmcs()
3402 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; pfm_mod_write_pmcs()
3409 pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) pfm_mod_read_pmds() argument
3420 * for now limit to current task, which is enough when calling pfm_mod_read_pmds()
3423 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; pfm_mod_read_pmds()
3434 pfm_use_debug_registers(struct task_struct *task) pfm_use_debug_registers() argument
3436 pfm_context_t *ctx = task->thread.pfm_context; pfm_use_debug_registers()
3442 DPRINT(("called for [%d]\n", task_pid_nr(task))); pfm_use_debug_registers()
3447 if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0; pfm_use_debug_registers()
3473 task_pid_nr(task), ret)); pfm_use_debug_registers()
3481 * This function is called for every task that exits with the
3482 * IA64_THREAD_DBG_VALID set. This indicates a task which was
3489 pfm_release_debug_registers(struct task_struct *task) pfm_release_debug_registers() argument
3498 printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task)); pfm_release_debug_registers()
3512 struct task_struct *task; pfm_restart() local
3521 task = PFM_CTX_TASK(ctx); pfm_restart()
3549 if (unlikely(task == NULL)) { pfm_restart()
3550 printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", task_pid_nr(current)); pfm_restart()
3554 if (task == current || is_system) { pfm_restart()
3559 task_pid_nr(task), pfm_restart()
3570 ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs); pfm_restart()
3572 ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs); pfm_restart()
3583 DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task))); pfm_restart()
3585 if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task); pfm_restart()
3587 DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task))); pfm_restart()
3589 // cannot use pfm_stop_monitoring(task, regs); pfm_restart()
3611 * restart another task pfm_restart()
3616 * one is seen by the task. pfm_restart()
3622 * seen by other task pfm_restart()
3629 * the task is blocked or on its way to block. That's the normal pfm_restart()
3630 * restart path. If the monitoring is not masked, then the task pfm_restart()
3632 * Therefore we use the trap mechanism to catch the task and pfm_restart()
3635 * if non-blocking, then we ensure that the task will go into pfm_restart()
3638 * We cannot explicitly reset another task, it MUST always pfm_restart()
3639 * be done by the task itself. This works for system wide because pfm_restart()
3644 DPRINT(("unblocking [%d]\n", task_pid_nr(task))); pfm_restart()
3647 DPRINT(("[%d] armed exit trap\n", task_pid_nr(task))); pfm_restart()
3651 PFM_SET_WORK_PENDING(task, 1); pfm_restart()
3653 set_notify_resume(task); pfm_restart()
3656 * XXX: send reschedule if task runs on another CPU pfm_restart()
3685 struct task_struct *task; pfm_write_ibr_dbr() local
3700 task = ctx->ctx_task; pfm_write_ibr_dbr()
3705 * on both UP and SMP, we can only write to the PMC when the task is pfm_write_ibr_dbr()
3709 thread = &task->thread; pfm_write_ibr_dbr()
3719 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0; pfm_write_ibr_dbr()
3732 * don't bother if we are loaded and task is being debugged pfm_write_ibr_dbr()
3735 DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task))); pfm_write_ibr_dbr()
3776 DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task))); pfm_write_ibr_dbr()
3889 pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) pfm_mod_write_ibrs() argument
3900 * for now limit to current task, which is enough when calling pfm_mod_write_ibrs()
3903 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; pfm_mod_write_ibrs()
3910 pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) pfm_mod_write_dbrs() argument
3921 * for now limit to current task, which is enough when calling pfm_mod_write_dbrs()
3924 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; pfm_mod_write_dbrs()
3944 struct task_struct *task = PFM_CTX_TASK(ctx); pfm_stop() local
3964 DPRINT(("task [%d] ctx_state=%d is_system=%d\n", pfm_stop()
4000 * per-task mode pfm_stop()
4003 if (task == current) { pfm_stop()
4012 tregs = task_pt_regs(task); pfm_stop()
4023 DPRINT(("task=[%d]\n", task_pid_nr(task))); pfm_stop()
4098 * time the task is scheduled pfm_start()
4162 struct task_struct *task; pfm_context_load() local
4194 ret = pfm_get_task(ctx, req->load_pid, &task); pfm_context_load()
4205 if (is_system && task != current) { pfm_context_load()
4211 thread = &task->thread; pfm_context_load()
4216 * into a task that is being debugged. pfm_context_load()
4221 DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid)); pfm_context_load()
4229 task_pid_nr(task))); pfm_context_load()
4233 DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs)); pfm_context_load()
4246 * The programming model expects the task to pfm_context_load()
4253 * must be done by the calling task prior pfm_context_load()
4268 * task is necessarily stopped at this point. pfm_context_load()
4291 * link context to task pfm_context_load()
4293 ctx->ctx_task = task; pfm_context_load()
4310 pfm_copy_pmds(task, ctx); pfm_context_load()
4311 pfm_copy_pmcs(task, ctx); pfm_context_load()
4319 if (task == current) { pfm_context_load()
4325 DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task))); pfm_context_load()
4332 * push the other task out, if any pfm_context_load()
4358 SET_PMU_OWNER(task, ctx); pfm_context_load()
4360 DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task))); pfm_context_load()
4363 * when not current, task MUST be stopped, so this is safe pfm_context_load()
4365 regs = task_pt_regs(task); pfm_context_load()
4390 * release task, there is now a link with the context pfm_context_load()
4392 if (is_system == 0 && task != current) { pfm_context_load()
4393 pfm_put_task(task); pfm_context_load()
4408 * for the task via get_task_struct(), because we hold the
4409 * context lock. If the task were to disappear while having
4419 struct task_struct *task = PFM_CTX_TASK(ctx); pfm_context_unload() local
4424 DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1)); pfm_context_unload()
4474 * disconnect context from task pfm_context_unload()
4476 task->thread.pfm_context = NULL; pfm_context_unload()
4478 * disconnect task from context pfm_context_unload()
4489 * per-task mode pfm_context_unload()
4491 tregs = task == current ? regs : task_pt_regs(task); pfm_context_unload()
4493 if (task == current) { pfm_context_unload()
4499 DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task))); pfm_context_unload()
4505 pfm_flush_pmds(task, ctx); pfm_context_unload()
4525 task->thread.flags &= ~IA64_THREAD_PM_VALID; pfm_context_unload()
4528 * break links between context and task pfm_context_unload()
4530 task->thread.pfm_context = NULL; pfm_context_unload()
4533 PFM_SET_WORK_PENDING(task, 0); pfm_context_unload()
4539 DPRINT(("disconnected [%d] from context\n", task_pid_nr(task))); pfm_context_unload()
4546 * called only from exit_thread(): task == current
4550 pfm_exit_thread(struct task_struct *task) pfm_exit_thread() argument
4554 struct pt_regs *regs = task_pt_regs(task); pfm_exit_thread()
4558 ctx = PFM_GET_CTX(task); pfm_exit_thread()
4562 DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task))); pfm_exit_thread()
4571 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task)); pfm_exit_thread()
4577 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret); pfm_exit_thread()
4586 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret); pfm_exit_thread()
4591 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task), state); pfm_exit_thread()
4660 struct task_struct *task; pfm_check_task_state() local
4665 task = ctx->ctx_task; pfm_check_task_state()
4667 if (task == NULL) { pfm_check_task_state()
4668 DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state)); pfm_check_task_state()
4675 task_pid_nr(task), pfm_check_task_state()
4676 task->state, PFM_CMD_STOPPED(cmd))); pfm_check_task_state()
4683 * a task running on the same CPU as the session. pfm_check_task_state()
4685 if (task == current || ctx->ctx_fl_system) return 0; pfm_check_task_state()
4712 * the task stopped. pfm_check_task_state()
4715 * the user has no guarantee the task would not run between pfm_check_task_state()
4717 * If this user wants to ensure the task does not run, then pfm_check_task_state()
4718 * the task must be stopped. pfm_check_task_state()
4721 if (!task_is_stopped_or_traced(task)) { pfm_check_task_state()
4722 DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task))); pfm_check_task_state()
4726 * task is now stopped, wait for ctxsw out pfm_check_task_state()
4733 * another task to get access to the context pfm_check_task_state()
4743 wait_task_inactive(task, 0); pfm_check_task_state()
4881 * check task is stopped sys_perfmonctl()
4970 * and wakeup controlling task, indicating we are now disconnected pfm_context_force_terminate()
4976 * task will only get access when we return from pfm_context_force_terminate()
5175 static void pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, pfm_overflow_handler() argument
5202 task ? task_pid_nr(task): -1, pfm_overflow_handler()
5305 ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp); pfm_overflow_handler()
5373 * set the perfmon specific checking pending work for the task pfm_overflow_handler()
5375 PFM_SET_WORK_PENDING(task, 1); pfm_overflow_handler()
5379 * previous task, therefore we must work with task and not current. pfm_overflow_handler()
5381 set_notify_resume(task); pfm_overflow_handler()
5392 PFM_GET_WORK_PENDING(task), pfm_overflow_handler()
5401 pfm_mask_monitoring(task); pfm_overflow_handler()
5416 task ? task_pid_nr(task) : -1, pfm_overflow_handler()
5424 * come here as zombie only if the task is the current task. In which case, we pfm_overflow_handler()
5433 * by stopping monitoring for this task. We can only come here for a per-task pfm_overflow_handler()
5435 * are always task private. By re-enabling secure monitoring, we ensure that pfm_overflow_handler()
5436 * the monitored task will not be able to re-activate monitoring. pfm_overflow_handler()
5437 * The task will eventually be context switched out, at which point the context pfm_overflow_handler()
5440 * So there might be a window of time where the number of per-task session is zero pfm_overflow_handler()
5442 * context. This is safe because if a per-task session comes in, it will push this one pfm_overflow_handler()
5444 * session is force on that CPU, given that we use task pinning, pfm_save_regs() will pfm_overflow_handler()
5449 DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1)); pfm_overflow_handler()
5459 struct task_struct *task; pfm_do_interrupt_handler() local
5473 task = GET_PMU_OWNER(); pfm_do_interrupt_handler()
5480 if (PMC0_HAS_OVFL(pmc0) && task) { pfm_do_interrupt_handler()
5488 if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0) pfm_do_interrupt_handler()
5493 pfm_overflow_handler(task, ctx, pmc0, regs); pfm_do_interrupt_handler()
5510 this_cpu, task_pid_nr(task)); pfm_do_interrupt_handler()
5516 task_pid_nr(task)); pfm_do_interrupt_handler()
5731 pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin) pfm_syst_wide_update_task() argument
5740 * pid 0 is guaranteed to be the idle task. There is one such task with pid 0 pfm_syst_wide_update_task()
5741 * on every CPU, so we can rely on the pid to identify the idle task. pfm_syst_wide_update_task()
5743 if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) { pfm_syst_wide_update_task()
5744 regs = task_pt_regs(task); pfm_syst_wide_update_task()
5757 /* mask monitoring for the idle task */ pfm_syst_wide_update_task()
5765 * restore monitoring for next task pfm_syst_wide_update_task()
5781 struct task_struct *task = ctx->ctx_task; pfm_force_cleanup() local
5786 if (GET_PMU_OWNER() == task) { pfm_force_cleanup()
5793 * disconnect the task from the context and vice-versa pfm_force_cleanup()
5795 PFM_SET_WORK_PENDING(task, 0); pfm_force_cleanup()
5797 task->thread.pfm_context = NULL; pfm_force_cleanup()
5798 task->thread.flags &= ~IA64_THREAD_PM_VALID; pfm_force_cleanup()
5800 DPRINT(("force cleanup for [%d]\n", task_pid_nr(task))); pfm_force_cleanup()
5808 pfm_save_regs(struct task_struct *task) pfm_save_regs() argument
5815 ctx = PFM_GET_CTX(task); pfm_save_regs()
5826 struct pt_regs *regs = task_pt_regs(task); pfm_save_regs()
5897 pfm_save_regs(struct task_struct *task) pfm_save_regs() argument
5902 ctx = PFM_GET_CTX(task); pfm_save_regs()
5928 pfm_lazy_save_regs (struct task_struct *task) pfm_lazy_save_regs() argument
5937 ctx = PFM_GET_CTX(task); pfm_lazy_save_regs()
5990 pfm_load_regs (struct task_struct *task) pfm_load_regs() argument
5998 ctx = PFM_GET_CTX(task); pfm_load_regs()
6006 if (unlikely((task->thread.flags & IA64_THREAD_PM_VALID) == 0)) return; pfm_load_regs()
6022 struct pt_regs *regs = task_pt_regs(task); pfm_load_regs()
6132 SET_PMU_OWNER(task, ctx); pfm_load_regs()
6153 pfm_load_regs (struct task_struct *task) pfm_load_regs() argument
6162 ctx = PFM_GET_CTX(task); pfm_load_regs()
6172 * This must be done even when the task is still the owner pfm_load_regs()
6174 * (not perfmon) by the previous task. pfm_load_regs()
6195 if (likely(owner == task)) { pfm_load_regs()
6253 SET_PMU_OWNER(task, ctx); pfm_load_regs()
6269 pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx) pfm_flush_pmds() argument
6277 * is the caller the task being monitored (or which initiated the pfm_flush_pmds()
6280 is_self = ctx->ctx_task == task ? 1 : 0; pfm_flush_pmds()
6283 * can access PMU if task is the owner of the PMU state on the current CPU pfm_flush_pmds()
6285 * (that is not necessarily the task the context is attached to in this mode). pfm_flush_pmds()
6286 * In system-wide we always have can_access_pmu true because a task running on an pfm_flush_pmds()
6289 can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id()); pfm_flush_pmds()
6344 task_pid_nr(task), pfm_flush_pmds()
6366 DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i)); pfm_flush_pmds()
6370 DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val)); pfm_flush_pmds()
6685 struct task_struct *task; dump_pmu_state() local
6710 task = GET_PMU_OWNER(); dump_pmu_state()
6713 printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx); dump_pmu_state()
6754 * called from process.c:copy_thread(). task is new child.
6757 pfm_inherit(struct task_struct *task, struct pt_regs *regs) pfm_inherit() argument
6761 DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task))); pfm_inherit()
6763 thread = &task->thread; pfm_inherit()
6770 PFM_SET_WORK_PENDING(task, 0); pfm_inherit()
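
pfm_inherit is called from copy_thread() for the new child and simply clears the child's perfmon state (the pfm_context pointer, the PM-valid flag, and pending work), so a fork never inherits the parent's monitoring session. A hedged sketch of that clear-on-inherit step, with a deliberately reduced, invented demo_thread structure:

#include <stdio.h>
#include <string.h>

/* Invented, much-reduced stand-in for the per-thread monitoring state. */
struct demo_thread {
    void         *pfm_context;   /* pointer to the monitoring context, if any */
    unsigned int  pm_valid : 1;  /* "this thread has live PMU state" */
    unsigned int  work_pending : 1;
};

struct demo_task {
    struct demo_thread thread;
};

/* Child must start with a clean slate: no context, no PMU state, no work. */
static void demo_inherit(struct demo_task *child)
{
    child->thread.pfm_context = NULL;
    child->thread.pm_valid = 0;
    child->thread.work_pending = 0;
}

int main(void)
{
    struct demo_task parent = { .thread = { .pfm_context = (void *)0x1,
                                            .pm_valid = 1, .work_pending = 1 } };
    struct demo_task child;

    memcpy(&child, &parent, sizeof(child));   /* roughly what fork() does */
    demo_inherit(&child);
    printf("child ctx=%p pm_valid=%u\n",
           child.thread.pfm_context, child.thread.pm_valid);
    return 0;
}
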
/linux-4.1.27/arch/xtensa/include/asm/
H A Dstacktrace.h20 static __always_inline unsigned long *stack_pointer(struct task_struct *task) stack_pointer() argument
24 if (!task || task == current) stack_pointer()
27 sp = (unsigned long *)task->thread.sp; stack_pointer()
H A Dswitch_to.h12 /* * switch_to(n) should switch tasks to task nr n, first
13 * checking that n isn't the current task, in which case it does nothing.
H A Dcurrent.h22 return current_thread_info()->task; get_current()
/linux-4.1.27/arch/parisc/include/asm/
H A Dcurrent.h10 return current_thread_info()->task; get_current()
H A Dptrace.h10 #define task_regs(task) ((struct pt_regs *) ((char *)(task) + TASK_REGS))
/linux-4.1.27/arch/avr32/include/asm/
H A Dcurrent.h10 return current_thread_info()->task; get_current()
H A Dswitch_to.h14 * switch task.
26 * switch_to(prev, next, last) should switch from task `prev' to task
H A Dthread_info.h22 struct task_struct *task; /* main task structure */ member in struct:thread_info
36 .task = &tsk, \
57 #define get_thread_info(ti) get_task_struct((ti)->task)
58 #define put_thread_info(ti) put_task_struct((ti)->task)
/linux-4.1.27/arch/cris/include/asm/
H A Dcurrent.h10 return current_thread_info()->task; get_current()
H A Dprocessor.h37 * This macro allows us to find those regs for a task.
46 * Ditto but for the currently running task
49 #define task_pt_regs(task) user_regs(task_thread_info(task))
H A Dswitch_to.h5 * task switching.
/linux-4.1.27/arch/m32r/include/asm/
H A Dcurrent.h10 return current_thread_info()->task; get_current()
H A Dthread_info.h18 * low level task data that entry.S needs immediate access to
26 struct task_struct *task; /* main task structure */ member in struct:thread_info
51 .task = &tsk, \
104 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
124 #define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
H A Dptrace.h39 #define task_pt_regs(task) \
40 ((struct pt_regs *)(task_stack_page(task) + THREAD_SIZE) - 1)
/linux-4.1.27/drivers/gpu/drm/
H A Ddrm_flip_work.c28 * drm_flip_work_allocate_task - allocate a flip-work task
29 * @data: data associated with the task
36 struct drm_flip_task *task; drm_flip_work_allocate_task() local
38 task = kzalloc(sizeof(*task), flags); drm_flip_work_allocate_task()
39 if (task) drm_flip_work_allocate_task()
40 task->data = data; drm_flip_work_allocate_task()
42 return task; drm_flip_work_allocate_task()
47 * drm_flip_work_queue_task - queue a specific task
49 * @task: the task to handle
51 * Queues the task, which will later be run (passed back to drm_flip_func_t
55 struct drm_flip_task *task) drm_flip_work_queue_task()
60 list_add_tail(&task->node, &work->queued); drm_flip_work_queue_task()
75 struct drm_flip_task *task; drm_flip_work_queue() local
77 task = drm_flip_work_allocate_task(val, drm_flip_work_queue()
79 if (task) { drm_flip_work_queue()
80 drm_flip_work_queue_task(work, task); drm_flip_work_queue()
82 DRM_ERROR("%s could not allocate task!\n", work->name); drm_flip_work_queue()
118 struct drm_flip_task *task, *tmp; flip_worker() local
129 list_for_each_entry_safe(task, tmp, &tasks, node) { flip_worker()
130 work->func(work, task->data); flip_worker()
131 kfree(task); flip_worker()
54 drm_flip_work_queue_task(struct drm_flip_work *work, struct drm_flip_task *task) drm_flip_work_queue_task() argument
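
drm_flip_work allocates a small task per queued value, links it onto the work's list, and a worker later walks the list, calls the user-supplied function on each task's data and frees the task. A stripped-down, single-threaded sketch of that allocate/queue/drain pattern (no locking, names only loosely modeled on the driver):

#include <stdio.h>
#include <stdlib.h>

struct flip_task {
    struct flip_task *next;
    void *data;
};

struct flip_work {
    struct flip_task *head, *tail;
    void (*func)(void *data);   /* called once per queued task */
};

/* Allocate a task carrying one piece of user data. */
static struct flip_task *flip_work_allocate_task(void *data)
{
    struct flip_task *t = calloc(1, sizeof(*t));

    if (t)
        t->data = data;
    return t;
}

/* Append a task to the work's queue. */
static void flip_work_queue_task(struct flip_work *w, struct flip_task *t)
{
    if (!t)
        return;
    if (w->tail)
        w->tail->next = t;
    else
        w->head = t;
    w->tail = t;
}

/* Run every queued task through the callback and free it. */
static void flip_work_commit(struct flip_work *w)
{
    struct flip_task *t, *next;

    for (t = w->head; t; t = next) {
        next = t->next;
        w->func(t->data);
        free(t);
    }
    w->head = w->tail = NULL;
}

static void print_val(void *data) { printf("flip: %ld\n", (long)data); }

int main(void)
{
    struct flip_work w = { .func = print_val };

    flip_work_queue_task(&w, flip_work_allocate_task((void *)1L));
    flip_work_queue_task(&w, flip_work_allocate_task((void *)2L));
    flip_work_commit(&w);
    return 0;
}
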
/linux-4.1.27/include/linux/fsl/bestcomm/
H A Dbestcomm.h22 * task type
23 * @data: An array of u32 extra data. Size of array is task dependent.
34 /* Generic task management */
38 * struct bcom_task - Structure describing a loaded BestComm task
41 * filled the intermediate layer of the BestComm API, the task dependent
67 * bcom_enable - Enable a BestComm task
68 * @tsk: The BestComm task structure
70 * This function makes sure the given task is enabled and can be run
76 * bcom_disable - Disable a BestComm task
77 * @tsk: The BestComm task structure
79 * This function disables a given task, making sure it's not executed
86 * bcom_get_task_irq - Returns the irq number of a BestComm task
87 * @tsk: The BestComm task structure
101 * @tsk: pointer to task structure
112 * @tsk: pointer to task structure
123 * bcom_queue_empty - Checks if a BestComm task BD queue is empty
124 * @tsk: The BestComm task structure
133 * bcom_queue_full - Checks if a BestComm task BD queue is full
134 * @tsk: The BestComm task structure
144 * @tsk: The BestComm task structure
157 * @tsk: The BestComm task structure
172 * @tsk: The BestComm task structure
H A Dbestcomm_priv.h111 /* task enable */
238 extern int bcom_load_image(int task, u32 *task_image);
239 extern void bcom_set_initiator(int task, int initiator);
261 bcom_enable_task(int task) bcom_enable_task() argument
264 reg = in_be16(&bcom_eng->regs->tcr[task]); bcom_enable_task()
265 out_be16(&bcom_eng->regs->tcr[task], reg | TASK_ENABLE); bcom_enable_task()
269 bcom_disable_task(int task) bcom_disable_task() argument
271 u16 reg = in_be16(&bcom_eng->regs->tcr[task]); bcom_disable_task()
272 out_be16(&bcom_eng->regs->tcr[task], reg & ~TASK_ENABLE); bcom_disable_task()
277 bcom_task_desc(int task) bcom_task_desc() argument
279 return bcom_sram_pa2va(bcom_eng->tdt[task].start); bcom_task_desc()
283 bcom_task_num_descs(int task) bcom_task_num_descs() argument
285 return (bcom_eng->tdt[task].stop - bcom_eng->tdt[task].start)/sizeof(u32) + 1; bcom_task_num_descs()
289 bcom_task_var(int task) bcom_task_var() argument
291 return bcom_sram_pa2va(bcom_eng->tdt[task].var); bcom_task_var()
295 bcom_task_inc(int task) bcom_task_inc() argument
297 return &bcom_task_var(task)[BCOM_MAX_VAR]; bcom_task_inc()
328 bcom_set_task_pragma(int task, int pragma) bcom_set_task_pragma() argument
330 u32 *fdt = &bcom_eng->tdt[task].fdt; bcom_set_task_pragma()
335 bcom_set_task_auto_start(int task, int next_task) bcom_set_task_auto_start() argument
337 u16 __iomem *tcr = &bcom_eng->regs->tcr[task]; bcom_set_task_auto_start()
342 bcom_set_tcr_initiator(int task, int initiator) bcom_set_tcr_initiator() argument
344 u16 __iomem *tcr = &bcom_eng->regs->tcr[task]; bcom_set_tcr_initiator()
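
bcom_enable_task and bcom_disable_task are plain read-modify-write accesses to the per-task control register: read tcr[task], set or clear TASK_ENABLE, write it back. The sketch below mimics that with an ordinary volatile array instead of a memory-mapped register block so it can run anywhere; the TASK_ENABLE value used here is an assumption for illustration, not taken from the hardware manual:

#include <stdint.h>
#include <stdio.h>

#define TASK_ENABLE 0x8000
#define NUM_TASKS   16

/* In real hardware this would be a memory-mapped register block. */
static volatile uint16_t tcr[NUM_TASKS];

static void task_enable(int task)
{
    uint16_t reg = tcr[task];

    tcr[task] = reg | TASK_ENABLE;   /* set the enable bit, keep the rest */
}

static void task_disable(int task)
{
    uint16_t reg = tcr[task];

    tcr[task] = reg & ~TASK_ENABLE;  /* clear only the enable bit */
}

int main(void)
{
    tcr[3] = 0x0042;                 /* pretend some other fields are set */
    task_enable(3);
    printf("tcr[3]=0x%04x\n", tcr[3]);
    task_disable(3);
    printf("tcr[3]=0x%04x\n", tcr[3]);
    return 0;
}
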
/linux-4.1.27/drivers/scsi/
H A Dlibiscsi.c126 * @task: scsi command task
136 void iscsi_prep_data_out_pdu(struct iscsi_task *task, struct iscsi_r2t_info *r2t, iscsi_prep_data_out_pdu() argument
139 struct iscsi_conn *conn = task->conn; iscsi_prep_data_out_pdu()
142 task->hdr_len = sizeof(struct iscsi_data); iscsi_prep_data_out_pdu()
149 hdr->lun = task->lun; iscsi_prep_data_out_pdu()
150 hdr->itt = task->hdr_itt; iscsi_prep_data_out_pdu()
166 static int iscsi_add_hdr(struct iscsi_task *task, unsigned len) iscsi_add_hdr() argument
168 unsigned exp_len = task->hdr_len + len; iscsi_add_hdr()
170 if (exp_len > task->hdr_max) { iscsi_add_hdr()
176 task->hdr_len = exp_len; iscsi_add_hdr()
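
iscsi_add_hdr only bumps task->hdr_len after checking that the extra header bytes still fit under hdr_max, so AHS additions can never overflow the preallocated header buffer. A tiny self-contained sketch of that bounds-checked append (demo_* names are invented, field names borrowed loosely from the listing):

#include <stdio.h>

struct demo_task {
    unsigned int hdr_len;   /* bytes of header already used */
    unsigned int hdr_max;   /* size of the preallocated header buffer */
};

/* Reserve len more header bytes, or fail without touching hdr_len. */
static int demo_add_hdr(struct demo_task *task, unsigned int len)
{
    unsigned int exp_len = task->hdr_len + len;

    if (exp_len > task->hdr_max)
        return -1;          /* would overflow the header buffer */
    task->hdr_len = exp_len;
    return 0;
}

int main(void)
{
    struct demo_task t = { .hdr_len = 48, .hdr_max = 64 };

    printf("add 12: %d (hdr_len=%u)\n", demo_add_hdr(&t, 12), t.hdr_len);
    printf("add 12: %d (hdr_len=%u)\n", demo_add_hdr(&t, 12), t.hdr_len);
    return 0;
}
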
183 static int iscsi_prep_ecdb_ahs(struct iscsi_task *task) iscsi_prep_ecdb_ahs() argument
185 struct scsi_cmnd *cmd = task->sc; iscsi_prep_ecdb_ahs()
191 ecdb_ahdr = iscsi_next_hdr(task); iscsi_prep_ecdb_ahs()
199 rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) + iscsi_prep_ecdb_ahs()
212 ISCSI_DBG_SESSION(task->conn->session, iscsi_prep_ecdb_ahs()
216 task->hdr_len); iscsi_prep_ecdb_ahs()
220 static int iscsi_prep_bidi_ahs(struct iscsi_task *task) iscsi_prep_bidi_ahs() argument
222 struct scsi_cmnd *sc = task->sc; iscsi_prep_bidi_ahs()
226 rlen_ahdr = iscsi_next_hdr(task); iscsi_prep_bidi_ahs()
227 rc = iscsi_add_hdr(task, sizeof(*rlen_ahdr)); iscsi_prep_bidi_ahs()
238 ISCSI_DBG_SESSION(task->conn->session, iscsi_prep_bidi_ahs()
247 * iscsi_check_tmf_restrictions - check if a task is affected by TMF
248 * @task: iscsi task
251 * During TMF a task has to be checked to see whether it is affected.
259 static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode) iscsi_check_tmf_restrictions() argument
261 struct iscsi_conn *conn = task->conn; iscsi_check_tmf_restrictions()
277 if (hdr_lun != task->sc->device->lun) iscsi_check_tmf_restrictions()
286 "task [op %x/%x itt " iscsi_check_tmf_restrictions()
289 task->hdr->opcode, opcode, iscsi_check_tmf_restrictions()
290 task->itt, task->hdr_itt); iscsi_check_tmf_restrictions()
299 "task [op %x/%x itt " iscsi_check_tmf_restrictions()
301 task->hdr->opcode, opcode, iscsi_check_tmf_restrictions()
302 task->itt, task->hdr_itt); iscsi_check_tmf_restrictions()
308 * the caller has already checked if the task iscsi_check_tmf_restrictions()
314 task->hdr_itt == tmf->rtt) { iscsi_check_tmf_restrictions()
316 "Preventing task %x/%x from sending " iscsi_check_tmf_restrictions()
317 "data-out due to abort task in " iscsi_check_tmf_restrictions()
318 "progress\n", task->itt, iscsi_check_tmf_restrictions()
319 task->hdr_itt); iscsi_check_tmf_restrictions()
330 * @task: iscsi task
335 static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) iscsi_prep_scsi_cmd_pdu() argument
337 struct iscsi_conn *conn = task->conn; iscsi_prep_scsi_cmd_pdu()
339 struct scsi_cmnd *sc = task->sc; iscsi_prep_scsi_cmd_pdu()
345 rc = iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_CMD); iscsi_prep_scsi_cmd_pdu()
350 rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD); iscsi_prep_scsi_cmd_pdu()
354 hdr = (struct iscsi_scsi_req *)task->hdr; iscsi_prep_scsi_cmd_pdu()
359 hdr->itt = task->hdr_itt = itt; iscsi_prep_scsi_cmd_pdu()
361 hdr->itt = task->hdr_itt = build_itt(task->itt, iscsi_prep_scsi_cmd_pdu()
362 task->conn->session->age); iscsi_prep_scsi_cmd_pdu()
363 task->hdr_len = 0; iscsi_prep_scsi_cmd_pdu()
364 rc = iscsi_add_hdr(task, sizeof(*hdr)); iscsi_prep_scsi_cmd_pdu()
370 task->lun = hdr->lun; iscsi_prep_scsi_cmd_pdu()
376 rc = iscsi_prep_ecdb_ahs(task); iscsi_prep_scsi_cmd_pdu()
383 task->imm_count = 0; iscsi_prep_scsi_cmd_pdu()
386 rc = iscsi_prep_bidi_ahs(task); iscsi_prep_scsi_cmd_pdu()
392 task->protected = true; iscsi_prep_scsi_cmd_pdu()
397 struct iscsi_r2t_info *r2t = &task->unsol_r2t; iscsi_prep_scsi_cmd_pdu()
418 task->imm_count = min(session->first_burst, iscsi_prep_scsi_cmd_pdu()
421 task->imm_count = min(transfer_length, iscsi_prep_scsi_cmd_pdu()
423 hton24(hdr->dlength, task->imm_count); iscsi_prep_scsi_cmd_pdu()
430 task->imm_count; iscsi_prep_scsi_cmd_pdu()
431 r2t->data_offset = task->imm_count; iscsi_prep_scsi_cmd_pdu()
436 if (!task->unsol_r2t.data_length) iscsi_prep_scsi_cmd_pdu()
448 hdrlength = task->hdr_len - sizeof(*hdr); iscsi_prep_scsi_cmd_pdu()
455 hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn); iscsi_prep_scsi_cmd_pdu()
457 if (session->tt->init_task && session->tt->init_task(task)) iscsi_prep_scsi_cmd_pdu()
460 task->state = ISCSI_TASK_RUNNING; iscsi_prep_scsi_cmd_pdu()
469 task->itt, transfer_length, iscsi_prep_scsi_cmd_pdu()
477 * iscsi_free_task - free a task
478 * @task: iscsi cmd task
482 * up mgmt tasks then returns the task to the pool.
484 static void iscsi_free_task(struct iscsi_task *task) iscsi_free_task() argument
486 struct iscsi_conn *conn = task->conn; iscsi_free_task()
488 struct scsi_cmnd *sc = task->sc; iscsi_free_task()
489 int oldstate = task->state; iscsi_free_task()
491 ISCSI_DBG_SESSION(session, "freeing task itt 0x%x state %d sc %p\n", iscsi_free_task()
492 task->itt, task->state, task->sc); iscsi_free_task()
494 session->tt->cleanup_task(task); iscsi_free_task()
495 task->state = ISCSI_TASK_FREE; iscsi_free_task()
496 task->sc = NULL; iscsi_free_task()
498 * login task is preallocated so do not free iscsi_free_task()
500 if (conn->login_task == task) iscsi_free_task()
503 kfifo_in(&session->cmdpool.queue, (void*)&task, sizeof(void*)); iscsi_free_task()
509 * queue command may call this to free the task, so iscsi_free_task()
517 void __iscsi_get_task(struct iscsi_task *task) __iscsi_get_task() argument
519 atomic_inc(&task->refcount); __iscsi_get_task()
523 void __iscsi_put_task(struct iscsi_task *task) __iscsi_put_task() argument
525 if (atomic_dec_and_test(&task->refcount)) __iscsi_put_task()
526 iscsi_free_task(task); __iscsi_put_task()
530 void iscsi_put_task(struct iscsi_task *task) iscsi_put_task() argument
532 struct iscsi_session *session = task->conn->session; iscsi_put_task()
536 __iscsi_put_task(task); iscsi_put_task()
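
__iscsi_get_task and __iscsi_put_task implement a plain reference count: each holder takes a reference, and iscsi_free_task runs only when the last put drops the count to zero. A minimal sketch of the same get/put pattern using C11 atomics (the demo_* helpers are invented):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_task {
    atomic_int refcount;
};

static void demo_free_task(struct demo_task *t)
{
    printf("freeing task %p\n", (void *)t);
    free(t);
}

static void demo_get_task(struct demo_task *t)
{
    atomic_fetch_add(&t->refcount, 1);
}

/* Free the task only when the last reference is dropped. */
static void demo_put_task(struct demo_task *t)
{
    if (atomic_fetch_sub(&t->refcount, 1) == 1)
        demo_free_task(t);
}

int main(void)
{
    struct demo_task *t = malloc(sizeof(*t));

    if (!t)
        return 1;
    atomic_init(&t->refcount, 1);   /* initial reference from the submitter */
    demo_get_task(t);               /* e.g. the transmit path takes a ref */
    demo_put_task(t);               /* transmit done */
    demo_put_task(t);               /* completion drops the last ref; freed */
    return 0;
}
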
542 * iscsi_complete_task - finish a task
543 * @task: iscsi cmd task
544 * @state: state to complete task with
548 static void iscsi_complete_task(struct iscsi_task *task, int state) iscsi_complete_task() argument
550 struct iscsi_conn *conn = task->conn; iscsi_complete_task()
553 "complete task itt 0x%x state %d sc %p\n", iscsi_complete_task()
554 task->itt, task->state, task->sc); iscsi_complete_task()
555 if (task->state == ISCSI_TASK_COMPLETED || iscsi_complete_task()
556 task->state == ISCSI_TASK_ABRT_TMF || iscsi_complete_task()
557 task->state == ISCSI_TASK_ABRT_SESS_RECOV || iscsi_complete_task()
558 task->state == ISCSI_TASK_REQUEUE_SCSIQ) iscsi_complete_task()
560 WARN_ON_ONCE(task->state == ISCSI_TASK_FREE); iscsi_complete_task()
561 task->state = state; iscsi_complete_task()
563 if (!list_empty(&task->running)) iscsi_complete_task()
564 list_del_init(&task->running); iscsi_complete_task()
566 if (conn->task == task) iscsi_complete_task()
567 conn->task = NULL; iscsi_complete_task()
569 if (conn->ping_task == task) iscsi_complete_task()
573 __iscsi_put_task(task); iscsi_complete_task()
577 * iscsi_complete_scsi_task - finish scsi task normally
578 * @task: iscsi task for scsi cmd
587 void iscsi_complete_scsi_task(struct iscsi_task *task, iscsi_complete_scsi_task() argument
590 struct iscsi_conn *conn = task->conn; iscsi_complete_scsi_task()
592 ISCSI_DBG_SESSION(conn->session, "[itt 0x%x]\n", task->itt); iscsi_complete_scsi_task()
596 iscsi_complete_task(task, ISCSI_TASK_COMPLETED); iscsi_complete_scsi_task()
602 * session back_lock must be held and if not called for a task that is
606 static void fail_scsi_task(struct iscsi_task *task, int err) fail_scsi_task() argument
608 struct iscsi_conn *conn = task->conn; fail_scsi_task()
615 * a ref to the task. fail_scsi_task()
617 sc = task->sc; fail_scsi_task()
621 if (task->state == ISCSI_TASK_PENDING) { fail_scsi_task()
644 iscsi_complete_task(task, state); fail_scsi_task()
649 struct iscsi_task *task) iscsi_prep_mgmt_task()
652 struct iscsi_hdr *hdr = task->hdr; iscsi_prep_mgmt_task()
681 if (session->tt->init_task && session->tt->init_task(task)) iscsi_prep_mgmt_task()
687 task->state = ISCSI_TASK_RUNNING; iscsi_prep_mgmt_task()
690 hdr->itt, task->data_count); iscsi_prep_mgmt_task()
701 struct iscsi_task *task; __iscsi_conn_send_pdu() local
711 * Same task can be used. Same ITT must be used. __iscsi_conn_send_pdu()
716 "progress. Cannot start new task.\n"); __iscsi_conn_send_pdu()
721 iscsi_conn_printk(KERN_ERR, conn, "Invalid buffer len of %u for login task. Max len is %u\n", data_size, ISCSI_DEF_MAX_RECV_SEG_LEN); __iscsi_conn_send_pdu()
725 task = conn->login_task; __iscsi_conn_send_pdu()
739 (void*)&task, sizeof(void*))) __iscsi_conn_send_pdu()
743 * released in complete pdu for task we expect a response for, and __iscsi_conn_send_pdu()
744 * released by the lld when it has transmitted the task for __iscsi_conn_send_pdu()
747 atomic_set(&task->refcount, 1); __iscsi_conn_send_pdu()
748 task->conn = conn; __iscsi_conn_send_pdu()
749 task->sc = NULL; __iscsi_conn_send_pdu()
750 INIT_LIST_HEAD(&task->running); __iscsi_conn_send_pdu()
751 task->state = ISCSI_TASK_PENDING; __iscsi_conn_send_pdu()
754 memcpy(task->data, data, data_size); __iscsi_conn_send_pdu()
755 task->data_count = data_size; __iscsi_conn_send_pdu()
757 task->data_count = 0; __iscsi_conn_send_pdu()
760 if (conn->session->tt->alloc_pdu(task, hdr->opcode)) { __iscsi_conn_send_pdu()
762 "pdu for mgmt task.\n"); __iscsi_conn_send_pdu()
767 itt = task->hdr->itt; __iscsi_conn_send_pdu()
768 task->hdr_len = sizeof(struct iscsi_hdr); __iscsi_conn_send_pdu()
769 memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr)); __iscsi_conn_send_pdu()
773 task->hdr->itt = itt; __iscsi_conn_send_pdu()
775 task->hdr->itt = build_itt(task->itt, __iscsi_conn_send_pdu()
776 task->conn->session->age); __iscsi_conn_send_pdu()
780 if (iscsi_prep_mgmt_task(conn, task)) __iscsi_conn_send_pdu()
783 if (session->tt->xmit_task(task)) __iscsi_conn_send_pdu()
786 list_add_tail(&task->running, &conn->mgmtqueue); __iscsi_conn_send_pdu()
790 return task; __iscsi_conn_send_pdu()
795 __iscsi_put_task(task); __iscsi_conn_send_pdu()
819 * @task: scsi command task
824 * then completes the command and task.
827 struct iscsi_task *task, char *data, iscsi_scsi_cmd_rsp()
832 struct scsi_cmnd *sc = task->sc; iscsi_scsi_cmd_rsp()
839 if (task->protected) { iscsi_scsi_cmd_rsp()
850 ascq = session->tt->check_protection(task, &sector); iscsi_scsi_cmd_rsp()
920 sc, sc->result, task->itt); iscsi_scsi_cmd_rsp()
922 iscsi_complete_task(task, ISCSI_TASK_COMPLETED); iscsi_scsi_cmd_rsp()
929 * @task: scsi command task
933 struct iscsi_task *task) iscsi_data_in_rsp()
936 struct scsi_cmnd *sc = task->sc; iscsi_data_in_rsp()
958 sc, sc->result, task->itt); iscsi_data_in_rsp()
960 iscsi_complete_task(task, ISCSI_TASK_COMPLETED); iscsi_data_in_rsp()
985 struct iscsi_task *task; iscsi_send_nopout() local
1001 task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0); iscsi_send_nopout()
1002 if (!task) iscsi_send_nopout()
1006 conn->ping_task = task; iscsi_send_nopout()
1011 static int iscsi_nop_out_rsp(struct iscsi_task *task, iscsi_nop_out_rsp() argument
1014 struct iscsi_conn *conn = task->conn; iscsi_nop_out_rsp()
1017 if (conn->ping_task != task) { iscsi_nop_out_rsp()
1027 iscsi_complete_task(task, ISCSI_TASK_COMPLETED); iscsi_nop_out_rsp()
1087 struct iscsi_task *task; iscsi_handle_reject() local
1092 task = iscsi_itt_to_task(conn, rejected_pdu.itt); iscsi_handle_reject()
1093 if (!task) { iscsi_handle_reject()
1096 "not lookup rejected task.\n"); iscsi_handle_reject()
1099 rc = iscsi_nop_out_rsp(task, iscsi_handle_reject()
1115 * iscsi_itt_to_task - look up task by itt
1159 struct iscsi_task *task; __iscsi_complete_pdu() local
1213 task = iscsi_itt_to_ctask(conn, hdr->itt); __iscsi_complete_pdu()
1214 if (!task) __iscsi_complete_pdu()
1216 task->last_xfer = jiffies; __iscsi_complete_pdu()
1228 task = iscsi_itt_to_task(conn, hdr->itt); __iscsi_complete_pdu()
1229 if (!task) __iscsi_complete_pdu()
1238 iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen); __iscsi_complete_pdu()
1241 iscsi_data_in_rsp(conn, hdr, task); __iscsi_complete_pdu()
1267 iscsi_complete_task(task, ISCSI_TASK_COMPLETED); __iscsi_complete_pdu()
1277 rc = iscsi_nop_out_rsp(task, (struct iscsi_nopin*)hdr, __iscsi_complete_pdu()
1290 iscsi_complete_task(task, ISCSI_TASK_COMPLETED); __iscsi_complete_pdu()
1350 struct iscsi_task *task; iscsi_itt_to_ctask() local
1355 task = iscsi_itt_to_task(conn, itt); iscsi_itt_to_ctask()
1356 if (!task || !task->sc) iscsi_itt_to_ctask()
1359 if (task->sc->SCp.phase != conn->session->age) { iscsi_itt_to_ctask()
1361 "task's session age %d, expected %d\n", iscsi_itt_to_ctask()
1362 task->sc->SCp.phase, conn->session->age); iscsi_itt_to_ctask()
1366 return task; iscsi_itt_to_ctask()
1439 struct iscsi_task *task = conn->task; iscsi_xmit_task() local
1445 __iscsi_get_task(task); iscsi_xmit_task()
1447 rc = conn->session->tt->xmit_task(task); iscsi_xmit_task()
1450 /* done with this task */ iscsi_xmit_task()
1451 task->last_xfer = jiffies; iscsi_xmit_task()
1452 conn->task = NULL; iscsi_xmit_task()
1456 __iscsi_put_task(task); iscsi_xmit_task()
1462 * iscsi_requeue_task - requeue task to run from session workqueue
1463 * @task: task to requeue
1465 * LLDs that need to run a task from the session workqueue should call
1469 void iscsi_requeue_task(struct iscsi_task *task) iscsi_requeue_task() argument
1471 struct iscsi_conn *conn = task->conn; iscsi_requeue_task()
1477 if (list_empty(&task->running)) iscsi_requeue_task()
1478 list_add_tail(&task->running, &conn->requeue); iscsi_requeue_task()
1494 struct iscsi_task *task; iscsi_data_xmit() local
1504 if (conn->task) { iscsi_data_xmit()
1517 conn->task = list_entry(conn->mgmtqueue.next, iscsi_data_xmit()
1519 list_del_init(&conn->task->running); iscsi_data_xmit()
1520 if (iscsi_prep_mgmt_task(conn, conn->task)) { iscsi_data_xmit()
1523 __iscsi_put_task(conn->task); iscsi_data_xmit()
1525 conn->task = NULL; iscsi_data_xmit()
1535 conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task, iscsi_data_xmit()
1537 list_del_init(&conn->task->running); iscsi_data_xmit()
1539 fail_scsi_task(conn->task, DID_IMM_RETRY); iscsi_data_xmit()
1542 rc = iscsi_prep_scsi_cmd_pdu(conn->task); iscsi_data_xmit()
1545 list_add_tail(&conn->task->running, iscsi_data_xmit()
1547 conn->task = NULL; iscsi_data_xmit()
1550 fail_scsi_task(conn->task, DID_ABORT); iscsi_data_xmit()
1557 * we could continuously get new task requests so iscsi_data_xmit()
1572 task = list_entry(conn->requeue.next, struct iscsi_task, iscsi_data_xmit()
1574 if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT)) iscsi_data_xmit()
1577 conn->task = task; iscsi_data_xmit()
1578 list_del_init(&conn->task->running); iscsi_data_xmit()
1579 conn->task->state = ISCSI_TASK_RUNNING; iscsi_data_xmit()
1610 struct iscsi_task *task; iscsi_alloc_task() local
1613 (void *) &task, sizeof(void *))) iscsi_alloc_task()
1617 sc->SCp.ptr = (char *) task; iscsi_alloc_task()
1619 atomic_set(&task->refcount, 1); iscsi_alloc_task()
1620 task->state = ISCSI_TASK_PENDING; iscsi_alloc_task()
1621 task->conn = conn; iscsi_alloc_task()
1622 task->sc = sc; iscsi_alloc_task()
1623 task->have_checked_conn = false; iscsi_alloc_task()
1624 task->last_timeout = jiffies; iscsi_alloc_task()
1625 task->last_xfer = jiffies; iscsi_alloc_task()
1626 task->protected = false; iscsi_alloc_task()
1627 INIT_LIST_HEAD(&task->running); iscsi_alloc_task()
1628 return task; iscsi_alloc_task()
1651 struct iscsi_task *task = NULL; iscsi_queuecommand() local
1718 task = iscsi_alloc_task(conn, sc); iscsi_queuecommand()
1719 if (!task) { iscsi_queuecommand()
1725 reason = iscsi_prep_scsi_cmd_pdu(task); iscsi_queuecommand()
1735 if (session->tt->xmit_task(task)) { iscsi_queuecommand()
1741 list_add_tail(&task->running, &conn->cmdqueue); iscsi_queuecommand()
1750 iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ); iscsi_queuecommand()
1758 iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ); iscsi_queuecommand()
1804 struct iscsi_task *task; iscsi_exec_task_mgmt_fn() local
1806 task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr, iscsi_exec_task_mgmt_fn()
1808 if (!task) { iscsi_exec_task_mgmt_fn()
1842 /* if the session drops it will clean up the task */ iscsi_exec_task_mgmt_fn()
1855 struct iscsi_task *task; fail_scsi_tasks() local
1859 task = conn->session->cmds[i]; fail_scsi_tasks()
1860 if (!task->sc || task->state == ISCSI_TASK_FREE) fail_scsi_tasks()
1863 if (lun != -1 && lun != task->sc->device->lun) fail_scsi_tasks()
1868 task->sc, task->itt, task->state); fail_scsi_tasks()
1869 fail_scsi_task(task, error); fail_scsi_tasks()
1936 struct iscsi_task *task = NULL, *running_task; iscsi_eh_cmd_timed_out() local
1948 task = (struct iscsi_task *)sc->SCp.ptr; iscsi_eh_cmd_timed_out()
1949 if (!task) { iscsi_eh_cmd_timed_out()
1976 * recvd one for the task since the last timeout ask for iscsi_eh_cmd_timed_out()
1978 * we can check if it is the task or connection when we send the iscsi_eh_cmd_timed_out()
1981 if (time_after(task->last_xfer, task->last_timeout)) { iscsi_eh_cmd_timed_out()
1985 "%lu\n.", task->last_xfer, task->last_timeout); iscsi_eh_cmd_timed_out()
1986 task->have_checked_conn = false; iscsi_eh_cmd_timed_out()
2004 if (!running_task->sc || running_task == task || iscsi_eh_cmd_timed_out()
2013 task->sc->jiffies_at_alloc)) iscsi_eh_cmd_timed_out()
2016 if (time_after(running_task->last_xfer, task->last_timeout)) { iscsi_eh_cmd_timed_out()
2018 * This task has not made progress, but a task iscsi_eh_cmd_timed_out()
2030 "complete. Our last xfer vs running task " iscsi_eh_cmd_timed_out()
2032 task->last_xfer, running_task->last_xfer, iscsi_eh_cmd_timed_out()
2033 task->last_timeout); iscsi_eh_cmd_timed_out()
2040 if (task->have_checked_conn) iscsi_eh_cmd_timed_out()
2048 task->have_checked_conn = true; iscsi_eh_cmd_timed_out()
2055 task->have_checked_conn = true; iscsi_eh_cmd_timed_out()
2059 if (task) iscsi_eh_cmd_timed_out()
2060 task->last_timeout = jiffies; iscsi_eh_cmd_timed_out()
2109 static void iscsi_prep_abort_task_pdu(struct iscsi_task *task, iscsi_prep_abort_task_pdu() argument
2116 hdr->lun = task->lun; iscsi_prep_abort_task_pdu()
2117 hdr->rtt = task->hdr_itt; iscsi_prep_abort_task_pdu()
2118 hdr->refcmdsn = task->cmdsn; iscsi_prep_abort_task_pdu()
2126 struct iscsi_task *task; iscsi_eh_abort() local
2166 task = (struct iscsi_task *)sc->SCp.ptr; iscsi_eh_abort()
2168 sc, task->itt); iscsi_eh_abort()
2170 /* task completed before time out */ iscsi_eh_abort()
2171 if (!task->sc) { iscsi_eh_abort()
2176 if (task->state == ISCSI_TASK_PENDING) { iscsi_eh_abort()
2177 fail_scsi_task(task, DID_ABORT); iscsi_eh_abort()
2187 iscsi_prep_abort_task_pdu(task, hdr); iscsi_eh_abort()
2208 fail_scsi_task(task, DID_ABORT); iscsi_eh_abort()
2222 /* task completed before tmf abort response */ iscsi_eh_abort()
2237 sc, task->itt); iscsi_eh_abort()
2245 task ? task->itt : 0); iscsi_eh_abort()
2705 * @cmd_task_size: LLD task private data size
2800 struct iscsi_task *task = session->cmds[cmd_i]; iscsi_session_setup() local
2803 task->dd_data = &task[1]; iscsi_session_setup()
2804 task->itt = cmd_i; iscsi_session_setup()
2805 task->state = ISCSI_TASK_FREE; iscsi_session_setup()
2806 INIT_LIST_HEAD(&task->running); iscsi_session_setup()
3052 struct iscsi_task *task; fail_mgmt_tasks() local
3056 task = conn->session->cmds[i]; fail_mgmt_tasks()
3057 if (task->sc) fail_mgmt_tasks()
3060 if (task->state == ISCSI_TASK_FREE) fail_mgmt_tasks()
3065 task->itt, task->state); fail_mgmt_tasks()
3067 if (task->state == ISCSI_TASK_PENDING) fail_mgmt_tasks()
3069 iscsi_complete_task(task, state); fail_mgmt_tasks()
3089 * up the login task and connection. We do not need to block and set iscsi_start_session_recovery()
648 iscsi_prep_mgmt_task(struct iscsi_conn *conn, struct iscsi_task *task) iscsi_prep_mgmt_task() argument
826 iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, struct iscsi_task *task, char *data, int datalen) iscsi_scsi_cmd_rsp() argument
932 iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, struct iscsi_task *task) iscsi_data_in_rsp() argument
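The libiscsi.c hits above repeatedly show the task reference-counting pattern: __iscsi_get_task() bumps an atomic refcount, __iscsi_put_task() drops it and calls iscsi_free_task() on the last reference, and the preallocated login task is never returned to the pool. Below is a minimal userspace sketch of that pattern, assuming simplified types; my_task, my_task_get and my_task_put are illustrative names, not libiscsi API, and C11 atomics stand in for the kernel's atomic_t.

/* Minimal sketch of the get/put refcount pattern seen in the libiscsi hits.
 * All names here (my_task, my_task_get, ...) are illustrative only. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct my_task {
    atomic_int refcount;
    bool preallocated;      /* like conn->login_task: never freed to the pool */
};

static void my_task_free(struct my_task *task)
{
    if (task->preallocated)  /* preallocated tasks are only logically released */
        return;
    free(task);
}

static void my_task_get(struct my_task *task)
{
    atomic_fetch_add(&task->refcount, 1);
}

static void my_task_put(struct my_task *task)
{
    /* fetch_sub returns the previous value; 1 means we dropped the last ref */
    if (atomic_fetch_sub(&task->refcount, 1) == 1)
        my_task_free(task);
}

int main(void)
{
    struct my_task *task = calloc(1, sizeof(*task));

    atomic_init(&task->refcount, 1);   /* owner reference taken at allocation */
    my_task_get(task);                 /* extra ref held while "transmitting" */
    my_task_put(task);                 /* transmit path done with the task */
    my_task_put(task);                 /* owner drops its ref -> task freed */
    printf("task released\n");
    return 0;
}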
H A Dlibiscsi_tcp.c447 * @task: iscsi task
451 void iscsi_tcp_cleanup_task(struct iscsi_task *task) iscsi_tcp_cleanup_task() argument
453 struct iscsi_tcp_task *tcp_task = task->dd_data; iscsi_tcp_cleanup_task()
457 if (!task->sc) iscsi_tcp_cleanup_task()
461 /* flush task's r2t queues */ iscsi_tcp_cleanup_task()
465 ISCSI_DBG_TCP(task->conn, "pending r2t dropped\n"); iscsi_tcp_cleanup_task()
481 * @task: scsi command task
483 static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task) iscsi_tcp_data_in() argument
486 struct iscsi_tcp_task *tcp_task = task->dd_data; iscsi_tcp_data_in()
489 unsigned total_in_length = scsi_in(task->sc)->length; iscsi_tcp_data_in()
502 ISCSI_DBG_TCP(conn, "task->exp_datasn(%d) != rhdr->datasn(%d)" iscsi_tcp_data_in()
524 * @task: scsi command task
526 static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task) iscsi_tcp_r2t_rsp() argument
529 struct iscsi_tcp_task *tcp_task = task->dd_data; iscsi_tcp_r2t_rsp()
546 ISCSI_DBG_TCP(conn, "task->exp_datasn(%d) != rhdr->r2tsn(%d)\n", iscsi_tcp_r2t_rsp()
551 /* fill-in new R2T associated with the task */ iscsi_tcp_r2t_rsp()
554 if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) { iscsi_tcp_r2t_rsp()
557 task->itt); iscsi_tcp_r2t_rsp()
574 if (data_offset + data_length > scsi_out(task->sc)->length) { iscsi_tcp_r2t_rsp()
578 data_offset, scsi_out(task->sc)->length); iscsi_tcp_r2t_rsp()
605 iscsi_requeue_task(task); iscsi_tcp_r2t_rsp()
649 struct iscsi_task *task; iscsi_tcp_hdr_dissect() local
677 task = iscsi_itt_to_ctask(conn, hdr->itt); iscsi_tcp_hdr_dissect()
678 if (!task) iscsi_tcp_hdr_dissect()
681 rc = iscsi_tcp_data_in(conn, task); iscsi_tcp_hdr_dissect()
688 struct iscsi_tcp_task *tcp_task = task->dd_data; iscsi_tcp_hdr_dissect()
690 struct scsi_data_buffer *sdb = scsi_in(task->sc); iscsi_tcp_hdr_dissect()
708 task->last_xfer = jiffies; iscsi_tcp_hdr_dissect()
731 task = iscsi_itt_to_ctask(conn, hdr->itt); iscsi_tcp_hdr_dissect()
733 if (!task) iscsi_tcp_hdr_dissect()
737 else if (task->sc->sc_data_direction == DMA_TO_DEVICE) { iscsi_tcp_hdr_dissect()
738 task->last_xfer = jiffies; iscsi_tcp_hdr_dissect()
740 rc = iscsi_tcp_r2t_rsp(conn, task); iscsi_tcp_hdr_dissect()
954 * @task: scsi command task
957 int iscsi_tcp_task_init(struct iscsi_task *task) iscsi_tcp_task_init() argument
959 struct iscsi_tcp_task *tcp_task = task->dd_data; iscsi_tcp_task_init()
960 struct iscsi_conn *conn = task->conn; iscsi_tcp_task_init()
961 struct scsi_cmnd *sc = task->sc; iscsi_tcp_task_init()
969 ISCSI_DBG_TCP(conn, "mtask deq [itt 0x%x]\n", task->itt); iscsi_tcp_task_init()
971 return conn->session->tt->init_pdu(task, 0, task->data_count); iscsi_tcp_task_init()
978 ISCSI_DBG_TCP(conn, "task deq [itt 0x%x imm %d unsol %d]\n", iscsi_tcp_task_init()
979 task->itt, task->imm_count, task->unsol_r2t.data_length); iscsi_tcp_task_init()
981 err = conn->session->tt->init_pdu(task, 0, task->imm_count); iscsi_tcp_task_init()
984 task->imm_count = 0; iscsi_tcp_task_init()
989 static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task) iscsi_tcp_get_curr_r2t() argument
991 struct iscsi_tcp_task *tcp_task = task->dd_data; iscsi_tcp_get_curr_r2t()
994 if (iscsi_task_has_unsol_data(task)) iscsi_tcp_get_curr_r2t()
995 r2t = &task->unsol_r2t; iscsi_tcp_get_curr_r2t()
1002 ISCSI_DBG_TCP(task->conn, iscsi_tcp_get_curr_r2t()
1026 * iscsi_tcp_task_xmit - xmit normal PDU task
1027 * @task: iscsi command task
1033 int iscsi_tcp_task_xmit(struct iscsi_task *task) iscsi_tcp_task_xmit() argument
1035 struct iscsi_conn *conn = task->conn; iscsi_tcp_task_xmit()
1042 rc = session->tt->xmit_pdu(task); iscsi_tcp_task_xmit()
1047 if (!task->sc) { iscsi_tcp_task_xmit()
1048 if (task->hdr->itt == RESERVED_ITT) iscsi_tcp_task_xmit()
1049 iscsi_put_task(task); iscsi_tcp_task_xmit()
1054 if (task->sc->sc_data_direction != DMA_TO_DEVICE) iscsi_tcp_task_xmit()
1057 r2t = iscsi_tcp_get_curr_r2t(task); iscsi_tcp_task_xmit()
1064 rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_DATA_OUT); iscsi_tcp_task_xmit()
1067 iscsi_prep_data_out_pdu(task, r2t, (struct iscsi_data *) task->hdr); iscsi_tcp_task_xmit()
1070 r2t, r2t->datasn - 1, task->hdr->itt, iscsi_tcp_task_xmit()
1073 rc = conn->session->tt->init_pdu(task, r2t->data_offset + r2t->sent, iscsi_tcp_task_xmit()
1124 * initialize per-task: R2T pool and xmit queue iscsi_tcp_r2tpool_alloc()
1127 struct iscsi_task *task = session->cmds[cmd_i]; iscsi_tcp_r2tpool_alloc() local
1128 struct iscsi_tcp_task *tcp_task = task->dd_data; iscsi_tcp_r2tpool_alloc()
1157 struct iscsi_task *task = session->cmds[i]; iscsi_tcp_r2tpool_alloc() local
1158 struct iscsi_tcp_task *tcp_task = task->dd_data; iscsi_tcp_r2tpool_alloc()
1172 struct iscsi_task *task = session->cmds[i]; iscsi_tcp_r2tpool_free() local
1173 struct iscsi_tcp_task *tcp_task = task->dd_data; iscsi_tcp_r2tpool_free()
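The libiscsi_tcp.c hits for iscsi_tcp_data_in() and iscsi_tcp_r2t_rsp() show the same guard: the PDU's DataSN/R2TSN is compared against the task's expected sequence number and the window only advances on a match. The sketch below illustrates that check in isolation; seq_state and check_sn() are made-up names, not part of the driver.

/* Tiny sketch of the sequence-number check suggested by the
 * iscsi_tcp_data_in()/iscsi_tcp_r2t_rsp() hits above. */
#include <stdint.h>
#include <stdio.h>

struct seq_state {
    uint32_t exp_datasn;   /* next sequence number we expect from the target */
};

/* Returns 0 and advances the window when the PDU is in order, -1 otherwise. */
static int check_sn(struct seq_state *st, uint32_t pdu_datasn)
{
    if (pdu_datasn != st->exp_datasn) {
        fprintf(stderr, "exp_datasn(%u) != datasn(%u)\n",
                st->exp_datasn, pdu_datasn);
        return -1;
    }
    st->exp_datasn++;
    return 0;
}

int main(void)
{
    struct seq_state st = { .exp_datasn = 0 };

    check_sn(&st, 0);   /* in order */
    check_sn(&st, 1);   /* in order */
    check_sn(&st, 3);   /* gap -> rejected, window not advanced */
    printf("still expecting %u\n", st.exp_datasn);
    return 0;
}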
/linux-4.1.27/arch/parisc/kernel/
H A Dptrace.c38 void ptrace_disable(struct task_struct *task) ptrace_disable() argument
40 clear_tsk_thread_flag(task, TIF_SINGLESTEP); ptrace_disable()
41 clear_tsk_thread_flag(task, TIF_BLOCKSTEP); ptrace_disable()
44 pa_psw(task)->r = 0; ptrace_disable()
45 pa_psw(task)->t = 0; ptrace_disable()
46 pa_psw(task)->h = 0; ptrace_disable()
47 pa_psw(task)->l = 0; ptrace_disable()
54 void user_disable_single_step(struct task_struct *task) user_disable_single_step() argument
56 ptrace_disable(task); user_disable_single_step()
59 void user_enable_single_step(struct task_struct *task) user_enable_single_step() argument
61 clear_tsk_thread_flag(task, TIF_BLOCKSTEP); user_enable_single_step()
62 set_tsk_thread_flag(task, TIF_SINGLESTEP); user_enable_single_step()
64 if (pa_psw(task)->n) { user_enable_single_step()
68 task_regs(task)->iaoq[0] = task_regs(task)->iaoq[1]; user_enable_single_step()
69 task_regs(task)->iasq[0] = task_regs(task)->iasq[1]; user_enable_single_step()
70 task_regs(task)->iaoq[1] = task_regs(task)->iaoq[0] + 4; user_enable_single_step()
71 pa_psw(task)->n = 0; user_enable_single_step()
72 pa_psw(task)->x = 0; user_enable_single_step()
73 pa_psw(task)->y = 0; user_enable_single_step()
74 pa_psw(task)->z = 0; user_enable_single_step()
75 pa_psw(task)->b = 0; user_enable_single_step()
76 ptrace_disable(task); user_enable_single_step()
77 /* Don't wake up the task, but let the user_enable_single_step()
80 si.si_addr = (void __user *) (task_regs(task)->iaoq[0] & ~3); user_enable_single_step()
83 force_sig_info(SIGTRAP, &si, task); user_enable_single_step()
84 /* notify_parent(task, SIGCHLD); */ user_enable_single_step()
89 * itself will be set to zero on a task switch. If the user_enable_single_step()
90 * task is suspended on a syscall then the syscall return user_enable_single_step()
96 pa_psw(task)->r = 1; user_enable_single_step()
97 pa_psw(task)->t = 0; user_enable_single_step()
98 pa_psw(task)->h = 0; user_enable_single_step()
99 pa_psw(task)->l = 0; user_enable_single_step()
102 void user_enable_block_step(struct task_struct *task) user_enable_block_step() argument
104 clear_tsk_thread_flag(task, TIF_SINGLESTEP); user_enable_block_step()
105 set_tsk_thread_flag(task, TIF_BLOCKSTEP); user_enable_block_step()
108 pa_psw(task)->r = 0; user_enable_block_step()
109 pa_psw(task)->t = 1; user_enable_block_step()
110 pa_psw(task)->h = 0; user_enable_block_step()
111 pa_psw(task)->l = 0; user_enable_block_step()
H A Dstacktrace.c15 static void dump_trace(struct task_struct *task, struct stack_trace *trace) dump_trace() argument
20 if (task == current) { dump_trace()
29 unwind_frame_init(&info, task, &r); dump_trace()
31 unwind_frame_init_from_blocked_task(&info, task); dump_trace()
/linux-4.1.27/arch/powerpc/kernel/
H A Dsignal.h25 struct task_struct *task);
27 struct task_struct *task);
28 extern unsigned long copy_fpr_from_user(struct task_struct *task,
30 extern unsigned long copy_transact_fpr_from_user(struct task_struct *task,
34 struct task_struct *task);
36 struct task_struct *task);
37 extern unsigned long copy_vsx_from_user(struct task_struct *task,
39 extern unsigned long copy_transact_vsx_from_user(struct task_struct *task,
/linux-4.1.27/arch/blackfin/include/asm/
H A Dsyscall.h26 syscall_get_nr(struct task_struct *task, struct pt_regs *regs) syscall_get_nr() argument
32 syscall_rollback(struct task_struct *task, struct pt_regs *regs) syscall_rollback() argument
38 syscall_get_error(struct task_struct *task, struct pt_regs *regs) syscall_get_error() argument
44 syscall_get_return_value(struct task_struct *task, struct pt_regs *regs) syscall_get_return_value() argument
50 syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, syscall_set_return_value() argument
58 * @task: unused
67 syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, syscall_get_arguments() argument
85 syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, syscall_set_arguments() argument
H A Dptrace.h26 * Get the address of the live pt_regs for the specified task.
34 #define task_pt_regs(task) \
36 ((unsigned long)task_stack_page(task) + \
H A Dswitch_to.h14 * switch_to(n) should switch tasks to task ptr, first checking that
15 * ptr isn't the current task, in which case it does nothing.
H A Dthread_info.h34 * low level task data.
39 struct task_struct *task; /* main task structure */ member in struct:thread_info
54 .task = &tsk, \
62 /* Given a task stack pointer, you can find its corresponding
/linux-4.1.27/arch/arm/include/asm/
H A Dsyscall.h21 static inline int syscall_get_nr(struct task_struct *task, syscall_get_nr() argument
24 return task_thread_info(task)->syscall; syscall_get_nr()
27 static inline void syscall_rollback(struct task_struct *task, syscall_rollback() argument
33 static inline long syscall_get_error(struct task_struct *task, syscall_get_error() argument
40 static inline long syscall_get_return_value(struct task_struct *task, syscall_get_return_value() argument
46 static inline void syscall_set_return_value(struct task_struct *task, syscall_set_return_value() argument
55 static inline void syscall_get_arguments(struct task_struct *task, syscall_get_arguments() argument
82 static inline void syscall_set_arguments(struct task_struct *task, syscall_set_arguments() argument
/linux-4.1.27/security/apparmor/include/
H A Dcontext.h63 * @previous: profile the task may return to (MAYBE NULL)
64 * @token: magic value the task must know for returning to @previous_profile
66 * Contains the task's current profile (which could change due to
69 * TODO: make so a task can be confined by a stack of contexts
86 struct aa_profile *aa_get_task_profile(struct task_struct *task);
105 * __aa_task_profile - retrieve another task's profile
106 * @task: task to query (NOT NULL)
108 * Returns: @task's profile without incrementing its ref count
110 * If @task != current needs to be called in RCU safe critical section
112 static inline struct aa_profile *__aa_task_profile(struct task_struct *task) __aa_task_profile() argument
114 return aa_cred_profile(__task_cred(task)); __aa_task_profile()
118 * __aa_task_is_confined - determine if @task has any confinement
119 * @task: task to check confinement of (NOT NULL)
121 * If @task != current needs to be called in RCU safe critical section
123 static inline bool __aa_task_is_confined(struct task_struct *task) __aa_task_is_confined() argument
125 return !unconfined(__aa_task_profile(task)); __aa_task_is_confined()
167 * @cxt: task context to clear (NOT NULL)
H A Dresource.h27 * @limits: rlimit values that override task limits
29 * AppArmor rlimits are used to set confined task rlimits. Only the
/linux-4.1.27/drivers/video/fbdev/
H A Duvesafb.c65 * find the kernel part of the task struct, copy the registers and
66 * the buffer contents and then complete the task.
71 struct uvesafb_ktask *task; uvesafb_cn_callback() local
80 task = uvfb_tasks[msg->seq]; uvesafb_cn_callback()
82 if (!task || msg->ack != task->ack) { uvesafb_cn_callback()
90 if (task->t.buf_len < utask->buf_len || uvesafb_cn_callback()
99 memcpy(&task->t, utask, sizeof(*utask)); uvesafb_cn_callback()
101 if (task->t.buf_len && task->buf) uvesafb_cn_callback()
102 memcpy(task->buf, utask + 1, task->t.buf_len); uvesafb_cn_callback()
104 complete(task->done); uvesafb_cn_callback()
125 * Execute a uvesafb task.
127 * Returns 0 if the task is executed successfully.
140 static int uvesafb_exec(struct uvesafb_ktask *task) uvesafb_exec() argument
145 int len = sizeof(task->t) + task->t.buf_len; uvesafb_exec()
153 "can't execute task\n", (int)(sizeof(*m) + len)); uvesafb_exec()
161 init_completion(task->done); uvesafb_exec()
169 memcpy(m + 1, &task->t, sizeof(task->t)); uvesafb_exec()
172 memcpy((u8 *)(m + 1) + sizeof(task->t), task->buf, task->t.buf_len); uvesafb_exec()
176 * part of this task when a reply is received from userspace. uvesafb_exec()
178 task->ack = m->ack; uvesafb_exec()
189 /* Save a pointer to the kernel part of the task struct. */ uvesafb_exec()
190 uvfb_tasks[seq] = task; uvesafb_exec()
214 if (!err && !(task->t.flags & TF_EXIT)) uvesafb_exec()
215 err = !wait_for_completion_timeout(task->done, uvesafb_exec()
233 static void uvesafb_free(struct uvesafb_ktask *task) uvesafb_free() argument
235 if (task) { uvesafb_free()
236 kfree(task->done); uvesafb_free()
237 kfree(task); uvesafb_free()
244 static void uvesafb_reset(struct uvesafb_ktask *task) uvesafb_reset() argument
246 struct completion *cpl = task->done; uvesafb_reset()
248 memset(task, 0, sizeof(*task)); uvesafb_reset()
249 task->done = cpl; uvesafb_reset()
257 struct uvesafb_ktask *task; uvesafb_prep() local
259 task = kzalloc(sizeof(*task), GFP_KERNEL); uvesafb_prep()
260 if (task) { uvesafb_prep()
261 task->done = kzalloc(sizeof(*task->done), GFP_KERNEL); uvesafb_prep()
262 if (!task->done) { uvesafb_prep()
263 kfree(task); uvesafb_prep()
264 task = NULL; uvesafb_prep()
267 return task; uvesafb_prep()
352 struct uvesafb_ktask *task; uvesafb_vbe_state_save() local
363 task = uvesafb_prep(); uvesafb_vbe_state_save()
364 if (!task) { uvesafb_vbe_state_save()
369 task->t.regs.eax = 0x4f04; uvesafb_vbe_state_save()
370 task->t.regs.ecx = 0x000f; uvesafb_vbe_state_save()
371 task->t.regs.edx = 0x0001; uvesafb_vbe_state_save()
372 task->t.flags = TF_BUF_RET | TF_BUF_ESBX; uvesafb_vbe_state_save()
373 task->t.buf_len = par->vbe_state_size; uvesafb_vbe_state_save()
374 task->buf = state; uvesafb_vbe_state_save()
375 err = uvesafb_exec(task); uvesafb_vbe_state_save()
377 if (err || (task->t.regs.eax & 0xffff) != 0x004f) { uvesafb_vbe_state_save()
380 task->t.regs.eax, err); uvesafb_vbe_state_save()
385 uvesafb_free(task); uvesafb_vbe_state_save()
391 struct uvesafb_ktask *task; uvesafb_vbe_state_restore() local
397 task = uvesafb_prep(); uvesafb_vbe_state_restore()
398 if (!task) uvesafb_vbe_state_restore()
401 task->t.regs.eax = 0x4f04; uvesafb_vbe_state_restore()
402 task->t.regs.ecx = 0x000f; uvesafb_vbe_state_restore()
403 task->t.regs.edx = 0x0002; uvesafb_vbe_state_restore()
404 task->t.buf_len = par->vbe_state_size; uvesafb_vbe_state_restore()
405 task->t.flags = TF_BUF_ESBX; uvesafb_vbe_state_restore()
406 task->buf = state_buf; uvesafb_vbe_state_restore()
408 err = uvesafb_exec(task); uvesafb_vbe_state_restore()
409 if (err || (task->t.regs.eax & 0xffff) != 0x004f) uvesafb_vbe_state_restore()
412 task->t.regs.eax, err); uvesafb_vbe_state_restore()
414 uvesafb_free(task); uvesafb_vbe_state_restore()
417 static int uvesafb_vbe_getinfo(struct uvesafb_ktask *task, uvesafb_vbe_getinfo() argument
422 task->t.regs.eax = 0x4f00; uvesafb_vbe_getinfo()
423 task->t.flags = TF_VBEIB; uvesafb_vbe_getinfo()
424 task->t.buf_len = sizeof(struct vbe_ib); uvesafb_vbe_getinfo()
425 task->buf = &par->vbe_ib; uvesafb_vbe_getinfo()
428 err = uvesafb_exec(task); uvesafb_vbe_getinfo()
429 if (err || (task->t.regs.eax & 0xffff) != 0x004f) { uvesafb_vbe_getinfo()
431 "(eax=0x%x, err=%d)\n", (u32)task->t.regs.eax, uvesafb_vbe_getinfo()
456 ((char *)task->buf) + par->vbe_ib.oem_vendor_name_ptr); uvesafb_vbe_getinfo()
460 ((char *)task->buf) + par->vbe_ib.oem_product_name_ptr); uvesafb_vbe_getinfo()
464 ((char *)task->buf) + par->vbe_ib.oem_product_rev_ptr); uvesafb_vbe_getinfo()
468 ((char *)task->buf) + par->vbe_ib.oem_string_ptr); uvesafb_vbe_getinfo()
476 static int uvesafb_vbe_getmodes(struct uvesafb_ktask *task, uvesafb_vbe_getmodes() argument
501 uvesafb_reset(task); uvesafb_vbe_getmodes()
502 task->t.regs.eax = 0x4f01; uvesafb_vbe_getmodes()
503 task->t.regs.ecx = (u32) *mode; uvesafb_vbe_getmodes()
504 task->t.flags = TF_BUF_RET | TF_BUF_ESDI; uvesafb_vbe_getmodes()
505 task->t.buf_len = sizeof(struct vbe_mode_ib); uvesafb_vbe_getmodes()
506 task->buf = par->vbe_modes + off; uvesafb_vbe_getmodes()
508 err = uvesafb_exec(task); uvesafb_vbe_getmodes()
509 if (err || (task->t.regs.eax & 0xffff) != 0x004f) { uvesafb_vbe_getmodes()
512 *mode, (u32)task->t.regs.eax, err); uvesafb_vbe_getmodes()
518 mib = task->buf; uvesafb_vbe_getmodes()
555 static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task, uvesafb_vbe_getpmi() argument
560 uvesafb_reset(task); uvesafb_vbe_getpmi()
561 task->t.regs.eax = 0x4f0a; uvesafb_vbe_getpmi()
562 task->t.regs.ebx = 0x0; uvesafb_vbe_getpmi()
563 err = uvesafb_exec(task); uvesafb_vbe_getpmi()
565 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) { uvesafb_vbe_getpmi()
568 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4) uvesafb_vbe_getpmi()
569 + task->t.regs.edi); uvesafb_vbe_getpmi()
574 (u16)task->t.regs.es, (u16)task->t.regs.edi); uvesafb_vbe_getpmi()
617 static int uvesafb_vbe_getedid(struct uvesafb_ktask *task, struct fb_info *info) uvesafb_vbe_getedid() argument
625 task->t.regs.eax = 0x4f15; uvesafb_vbe_getedid()
626 task->t.regs.ebx = 0; uvesafb_vbe_getedid()
627 task->t.regs.ecx = 0; uvesafb_vbe_getedid()
628 task->t.buf_len = 0; uvesafb_vbe_getedid()
629 task->t.flags = 0; uvesafb_vbe_getedid()
631 err = uvesafb_exec(task); uvesafb_vbe_getedid()
633 if ((task->t.regs.eax & 0xffff) != 0x004f || err) uvesafb_vbe_getedid()
636 if ((task->t.regs.ebx & 0x3) == 3) { uvesafb_vbe_getedid()
639 } else if ((task->t.regs.ebx & 0x3) == 2) { uvesafb_vbe_getedid()
642 } else if ((task->t.regs.ebx & 0x3) == 1) { uvesafb_vbe_getedid()
651 task->t.regs.eax = 0x4f15; uvesafb_vbe_getedid()
652 task->t.regs.ebx = 1; uvesafb_vbe_getedid()
653 task->t.regs.ecx = task->t.regs.edx = 0; uvesafb_vbe_getedid()
654 task->t.flags = TF_BUF_RET | TF_BUF_ESDI; uvesafb_vbe_getedid()
655 task->t.buf_len = EDID_LENGTH; uvesafb_vbe_getedid()
656 task->buf = kzalloc(EDID_LENGTH, GFP_KERNEL); uvesafb_vbe_getedid()
657 if (!task->buf) uvesafb_vbe_getedid()
660 err = uvesafb_exec(task); uvesafb_vbe_getedid()
662 if ((task->t.regs.eax & 0xffff) == 0x004f && !err) { uvesafb_vbe_getedid()
663 fb_edid_to_monspecs(task->buf, &info->monspecs); uvesafb_vbe_getedid()
678 kfree(task->buf); uvesafb_vbe_getedid()
682 static void uvesafb_vbe_getmonspecs(struct uvesafb_ktask *task, uvesafb_vbe_getmonspecs() argument
695 if (uvesafb_vbe_getedid(task, info)) { uvesafb_vbe_getmonspecs()
763 static void uvesafb_vbe_getstatesize(struct uvesafb_ktask *task, uvesafb_vbe_getstatesize() argument
768 uvesafb_reset(task); uvesafb_vbe_getstatesize()
774 task->t.regs.eax = 0x4f04; uvesafb_vbe_getstatesize()
775 task->t.regs.ecx = 0x000f; uvesafb_vbe_getstatesize()
776 task->t.regs.edx = 0x0000; uvesafb_vbe_getstatesize()
777 task->t.flags = 0; uvesafb_vbe_getstatesize()
779 err = uvesafb_exec(task); uvesafb_vbe_getstatesize()
781 if (err || (task->t.regs.eax & 0xffff) != 0x004f) { uvesafb_vbe_getstatesize()
784 task->t.regs.eax, err); uvesafb_vbe_getstatesize()
789 par->vbe_state_size = 64 * (task->t.regs.ebx & 0xffff); uvesafb_vbe_getstatesize()
794 struct uvesafb_ktask *task = NULL; uvesafb_vbe_init() local
798 task = uvesafb_prep(); uvesafb_vbe_init()
799 if (!task) uvesafb_vbe_init()
802 err = uvesafb_vbe_getinfo(task, par); uvesafb_vbe_init()
806 err = uvesafb_vbe_getmodes(task, par); uvesafb_vbe_init()
821 uvesafb_vbe_getpmi(task, par); uvesafb_vbe_init()
830 uvesafb_vbe_getmonspecs(task, info); uvesafb_vbe_init()
831 uvesafb_vbe_getstatesize(task, par); uvesafb_vbe_init()
833 out: uvesafb_free(task); uvesafb_vbe_init()
938 struct uvesafb_ktask *task; uvesafb_setpalette() local
979 task = uvesafb_prep(); uvesafb_setpalette()
980 if (!task) uvesafb_setpalette()
983 task->t.regs.eax = 0x4f09; uvesafb_setpalette()
984 task->t.regs.ebx = 0x0; uvesafb_setpalette()
985 task->t.regs.ecx = count; uvesafb_setpalette()
986 task->t.regs.edx = start; uvesafb_setpalette()
987 task->t.flags = TF_BUF_ESDI; uvesafb_setpalette()
988 task->t.buf_len = sizeof(struct uvesafb_pal_entry) * count; uvesafb_setpalette()
989 task->buf = entries; uvesafb_setpalette()
991 err = uvesafb_exec(task); uvesafb_setpalette()
992 if ((task->t.regs.eax & 0xffff) != 0x004f) uvesafb_setpalette()
995 uvesafb_free(task); uvesafb_setpalette()
1118 struct uvesafb_ktask *task; uvesafb_blank() local
1148 task = uvesafb_prep(); uvesafb_blank()
1149 if (!task) uvesafb_blank()
1152 task->t.regs.eax = 0x4f10; uvesafb_blank()
1155 task->t.regs.ebx = 0x0001; uvesafb_blank()
1158 task->t.regs.ebx = 0x0101; /* standby */ uvesafb_blank()
1161 task->t.regs.ebx = 0x0401; /* powerdown */ uvesafb_blank()
1167 err = uvesafb_exec(task); uvesafb_blank()
1168 if (err || (task->t.regs.eax & 0xffff) != 0x004f) uvesafb_blank()
1170 out: uvesafb_free(task); uvesafb_blank()
1197 struct uvesafb_ktask *task = NULL; uvesafb_release() local
1207 task = uvesafb_prep(); uvesafb_release()
1208 if (!task) uvesafb_release()
1212 task->t.regs.eax = 0x0003; uvesafb_release()
1213 uvesafb_exec(task); uvesafb_release()
1222 uvesafb_free(task); uvesafb_release()
1229 struct uvesafb_ktask *task = NULL; uvesafb_set_par() local
1245 task = uvesafb_prep(); uvesafb_set_par()
1246 if (!task) uvesafb_set_par()
1249 task->t.regs.eax = 0x4f02; uvesafb_set_par()
1250 task->t.regs.ebx = mode->mode_id | 0x4000; /* use LFB */ uvesafb_set_par()
1254 task->t.regs.ebx |= 0x0800; /* use CRTC data */ uvesafb_set_par()
1255 task->t.flags = TF_BUF_ESDI; uvesafb_set_par()
1286 task->t.buf_len = sizeof(struct vbe_crtc_ib); uvesafb_set_par()
1287 task->buf = &par->crtc; uvesafb_set_par()
1289 err = uvesafb_exec(task); uvesafb_set_par()
1290 if (err || (task->t.regs.eax & 0xffff) != 0x004f) { uvesafb_set_par()
1298 "default timings.\n", task->t.regs.eax, err); uvesafb_set_par()
1299 uvesafb_reset(task); uvesafb_set_par()
1306 "0x%x, err=%d)\n", task->t.regs.eax, err); uvesafb_set_par()
1316 uvesafb_reset(task); uvesafb_set_par()
1317 task->t.regs.eax = 0x4f08; uvesafb_set_par()
1318 task->t.regs.ebx = 0x0800; uvesafb_set_par()
1320 err = uvesafb_exec(task); uvesafb_set_par()
1321 if (err || (task->t.regs.eax & 0xffff) != 0x004f || uvesafb_set_par()
1322 ((task->t.regs.ebx & 0xff00) >> 8) != 8) { uvesafb_set_par()
1335 uvesafb_free(task); uvesafb_set_par()
1946 struct uvesafb_ktask *task; uvesafb_exit() local
1949 task = uvesafb_prep(); uvesafb_exit()
1950 if (task) { uvesafb_exit()
1951 task->t.flags = TF_EXIT; uvesafb_exit()
1952 uvesafb_exec(task); uvesafb_exit()
1953 uvesafb_free(task); uvesafb_exit()
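The uvesafb.c hits all follow one lifecycle: uvesafb_prep() allocates a ktask, the caller fills in the VBE registers, uvesafb_exec() runs it via the userspace helper, the result is accepted only when (eax & 0xffff) == 0x004f, and uvesafb_free() releases the task. A compressed userspace sketch of that cycle follows; vbe_call() is a stub standing in for the real connector round-trip, and struct ktask here is not the driver's uvesafb_ktask.

/* Sketch of the prepare/execute/check/free cycle visible in the uvesafb hits. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct vbe_regs { uint32_t eax, ebx, ecx, edx; };

struct ktask {
    struct vbe_regs regs;
    void *buf;          /* optional transfer buffer, unused in this sketch */
    size_t buf_len;
};

/* Stub: pretend the userspace helper ran the BIOS call and reported success. */
static int vbe_call(struct ktask *task)
{
    task->regs.eax = 0x004f;   /* VBE "function supported and successful" */
    return 0;
}

int main(void)
{
    struct ktask *task = calloc(1, sizeof(*task));
    int err;

    task->regs.eax = 0x4f00;                 /* VBE: get controller info */
    err = vbe_call(task);
    if (err || (task->regs.eax & 0xffff) != 0x004f)
        fprintf(stderr, "VBE call failed (eax=0x%x, err=%d)\n",
                (unsigned)task->regs.eax, err);
    else
        printf("VBE call ok\n");

    free(task);
    return 0;
}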
/linux-4.1.27/arch/microblaze/include/asm/
H A Dsyscall.h10 static inline long syscall_get_nr(struct task_struct *task, syscall_get_nr() argument
16 static inline void syscall_rollback(struct task_struct *task, syscall_rollback() argument
22 static inline long syscall_get_error(struct task_struct *task, syscall_get_error() argument
28 static inline long syscall_get_return_value(struct task_struct *task, syscall_get_return_value() argument
34 static inline void syscall_set_return_value(struct task_struct *task, syscall_set_return_value() argument
82 static inline void syscall_get_arguments(struct task_struct *task, syscall_get_arguments() argument
91 static inline void syscall_set_arguments(struct task_struct *task, syscall_set_arguments() argument
H A Dprocessor.h145 # define task_tos(task) ((unsigned long)(task) + KERNEL_STACK_SIZE)
146 # define task_regs(task) ((struct pt_regs *)task_tos(task) - 1)
151 # define task_sp(task) (task_regs(task)->r1)
152 # define task_pc(task) (task_regs(task)->pc)
154 # define KSTK_EIP(task) (task_pc(task))
155 # define KSTK_ESP(task) (task_sp(task))
H A Dcurrent.h15 * Register used to hold the current task pointer while in the kernel.
22 * Dedicate r31 to keeping the current task pointer
H A Dunwind.h26 void microblaze_unwind(struct task_struct *task, struct stack_trace *trace);
/linux-4.1.27/arch/microblaze/kernel/
H A Dtraps.c33 void show_stack(struct task_struct *task, unsigned long *sp) show_stack() argument
39 if (task) { show_stack()
41 (task->stack))->cpu_context.r1; show_stack()
70 microblaze_unwind(task, NULL); show_stack()
73 if (!task) show_stack()
74 task = current; show_stack()
76 debug_show_held_locks(task); show_stack()
H A Dunwind.c153 static void microblaze_unwind_inner(struct task_struct *task,
163 static inline void unwind_trap(struct task_struct *task, unsigned long pc, unwind_trap() argument
169 static inline void unwind_trap(struct task_struct *task, unsigned long pc, unwind_trap() argument
173 microblaze_unwind_inner(task, regs->pc, regs->r1, regs->r15, trace); unwind_trap()
179 * @task : Task whose stack we are to unwind (may be NULL)
187 static void microblaze_unwind_inner(struct task_struct *task, microblaze_unwind_inner() argument
218 microblaze_unwind_inner(task, regs->r17 - 4, microblaze_unwind_inner()
231 unwind_trap(task, pc, fp, trace); microblaze_unwind_inner()
249 if (unlikely(pc == task_pt_regs(task)->pc)) { microblaze_unwind_inner()
252 (unsigned long) task->pid, microblaze_unwind_inner()
253 task->comm); microblaze_unwind_inner()
281 * @task : Task whose stack we are to unwind (NULL == current)
285 void microblaze_unwind(struct task_struct *task, struct stack_trace *trace) microblaze_unwind() argument
287 if (task) { microblaze_unwind()
288 if (task == current) { microblaze_unwind()
289 const struct pt_regs *regs = task_pt_regs(task); microblaze_unwind()
290 microblaze_unwind_inner(task, regs->pc, regs->r1, microblaze_unwind()
294 (struct thread_info *)(task->stack); microblaze_unwind()
298 microblaze_unwind_inner(task, microblaze_unwind()
/linux-4.1.27/arch/sh/include/asm/
H A Dsyscall_64.h10 static inline long syscall_get_nr(struct task_struct *task, syscall_get_nr() argument
16 static inline void syscall_rollback(struct task_struct *task, syscall_rollback() argument
25 static inline long syscall_get_error(struct task_struct *task, syscall_get_error() argument
31 static inline long syscall_get_return_value(struct task_struct *task, syscall_get_return_value() argument
37 static inline void syscall_set_return_value(struct task_struct *task, syscall_set_return_value() argument
47 static inline void syscall_get_arguments(struct task_struct *task, syscall_get_arguments() argument
56 static inline void syscall_set_arguments(struct task_struct *task, syscall_set_arguments() argument
H A Dsyscall_32.h11 static inline long syscall_get_nr(struct task_struct *task, syscall_get_nr() argument
17 static inline void syscall_rollback(struct task_struct *task, syscall_rollback() argument
26 static inline long syscall_get_error(struct task_struct *task, syscall_get_error() argument
32 static inline long syscall_get_return_value(struct task_struct *task, syscall_get_return_value() argument
38 static inline void syscall_set_return_value(struct task_struct *task, syscall_set_return_value() argument
48 static inline void syscall_get_arguments(struct task_struct *task, syscall_get_arguments() argument
76 static inline void syscall_set_arguments(struct task_struct *task, syscall_set_arguments() argument
/linux-4.1.27/arch/openrisc/include/asm/
H A Dsyscall.h27 syscall_get_nr(struct task_struct *task, struct pt_regs *regs) syscall_get_nr() argument
33 syscall_rollback(struct task_struct *task, struct pt_regs *regs) syscall_rollback() argument
39 syscall_get_error(struct task_struct *task, struct pt_regs *regs) syscall_get_error() argument
45 syscall_get_return_value(struct task_struct *task, struct pt_regs *regs) syscall_get_return_value() argument
51 syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, syscall_set_return_value() argument
58 syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, syscall_get_arguments() argument
67 syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, syscall_set_arguments() argument
H A Dthread_info.h39 * low level task data that entry.S needs immediate access to
50 struct task_struct *task; /* main task structure */ member in struct:thread_info
74 .task = &tsk, \
88 #define get_thread_info(ti) get_task_struct((ti)->task)
89 #define put_thread_info(ti) put_task_struct((ti)->task)
/linux-4.1.27/arch/c6x/include/asm/
H A Dsyscall.h17 static inline int syscall_get_nr(struct task_struct *task, syscall_get_nr() argument
23 static inline void syscall_rollback(struct task_struct *task, syscall_rollback() argument
29 static inline long syscall_get_error(struct task_struct *task, syscall_get_error() argument
35 static inline long syscall_get_return_value(struct task_struct *task, syscall_get_return_value() argument
41 static inline void syscall_set_return_value(struct task_struct *task, syscall_set_return_value() argument
48 static inline void syscall_get_arguments(struct task_struct *task, syscall_get_arguments() argument
85 static inline void syscall_set_arguments(struct task_struct *task, syscall_set_arguments() argument
H A Dthread_info.h39 * low level task data.
42 struct task_struct *task; /* main task structure */ member in struct:thread_info
56 .task = &tsk, \
66 /* get the thread information struct of current task */
77 #define get_thread_info(ti) get_task_struct((ti)->task)
78 #define put_thread_info(ti) put_task_struct((ti)->task)
H A Dprocessor.h77 #define task_pt_regs(task) \
78 ((struct pt_regs *)(THREAD_START_SP + task_stack_page(task)) - 1)
120 #define KSTK_EIP(task) (task_pt_regs(task)->pc)
121 #define KSTK_ESP(task) (task_pt_regs(task)->sp)
/linux-4.1.27/drivers/dma/bestcomm/
H A Dbestcomm.c59 /* Get and reserve a task num */ bcom_task_alloc()
83 /* Get IRQ of that task */ bcom_task_alloc()
123 /* Stop the task */ bcom_task_free()
139 bcom_load_image(int task, u32 *task_image) bcom_load_image() argument
153 if ((task < 0) || (task >= BCOM_MAX_TASKS)) { bcom_load_image()
155 ": Trying to load invalid task %d\n", task); bcom_load_image()
160 tdt = &bcom_eng->tdt[task]; bcom_load_image()
163 desc = bcom_task_desc(task); bcom_load_image()
164 if (hdr->desc_size != bcom_task_num_descs(task)) { bcom_load_image()
166 ": Trying to reload wrong task image " bcom_load_image()
168 task, bcom_load_image()
170 bcom_task_num_descs(task)); bcom_load_image()
184 var = bcom_task_var(task); bcom_load_image()
185 inc = bcom_task_inc(task); bcom_load_image()
204 bcom_set_initiator(int task, int initiator) bcom_set_initiator() argument
211 bcom_set_tcr_initiator(task, initiator); bcom_set_initiator()
217 desc = bcom_task_desc(task); bcom_set_initiator()
219 num_descs = bcom_task_num_descs(task); bcom_set_initiator()
255 /* this will need to be updated if Freescale changes their task code FDT */
278 int task; bcom_engine_init() local
313 for (task=0; task<BCOM_MAX_TASKS; task++) bcom_engine_init()
315 out_be16(&bcom_eng->regs->tcr[task], 0); bcom_engine_init()
316 out_8(&bcom_eng->regs->ipr[task], 0); bcom_engine_init()
318 bcom_eng->tdt[task].context = ctx_pa; bcom_engine_init()
319 bcom_eng->tdt[task].var = var_pa; bcom_engine_init()
320 bcom_eng->tdt[task].fdt = fdt_pa; bcom_engine_init()
344 int task; bcom_engine_cleanup() local
347 for (task=0; task<BCOM_MAX_TASKS; task++) bcom_engine_cleanup()
349 out_be16(&bcom_eng->regs->tcr[task], 0); bcom_engine_cleanup()
350 out_8(&bcom_eng->regs->ipr[task], 0); bcom_engine_cleanup()
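The bestcomm.c hits for bcom_load_image() show two validations before a task image is copied: the task number must be in range, and the image's descriptor count must match the task it is being loaded into. The sketch below mirrors that shape with invented stand-ins (MAX_TASKS, struct image_hdr, load_image()); it is not the BestComm API.

/* Sketch of the validation pattern in the bcom_load_image() hits above. */
#include <stdio.h>
#include <string.h>

#define MAX_TASKS 16

struct image_hdr { unsigned desc_size; };

static unsigned task_num_descs[MAX_TASKS];   /* per-task descriptor counts */
static unsigned task_descs[MAX_TASKS][64];   /* per-task descriptor storage */

static int load_image(int task, const struct image_hdr *hdr, const unsigned *descs)
{
    if (task < 0 || task >= MAX_TASKS) {
        fprintf(stderr, "trying to load invalid task %d\n", task);
        return -1;
    }
    if (hdr->desc_size != task_num_descs[task]) {
        fprintf(stderr, "image has %u descriptors, task %d expects %u\n",
                hdr->desc_size, task, task_num_descs[task]);
        return -1;
    }
    memcpy(task_descs[task], descs, hdr->desc_size * sizeof(*descs));
    return 0;
}

int main(void)
{
    struct image_hdr hdr = { .desc_size = 4 };
    unsigned img[4] = { 1, 2, 3, 4 };

    task_num_descs[3] = 4;
    printf("load: %d\n", load_image(3, &hdr, img));   /* ok */
    printf("load: %d\n", load_image(20, &hdr, img));  /* rejected: bad task */
    return 0;
}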
H A Data.c2 * Bestcomm ATA task driver
30 /* ata task image */
33 /* ata task vars that need to be set before enabling the task */
35 u32 enable; /* (u16*) address of task's control register */
42 /* ata task incs that need to be set before enabling the task */
154 MODULE_DESCRIPTION("BestComm ATA task driver");
H A Dgen_bd.c37 /* rx task vars that need to be set before enabling the task */
39 u32 enable; /* (u16*) address of task's control register */
47 /* rx task incs that need to be set before enabling the task */
55 /* tx task vars that need to be set before enabling the task */
58 u32 enable; /* (u16*) address of task's control register */
65 /* tx task incs that need to be set before enabling the task */
124 /* Shutdown the task */ bcom_gen_bd_rx_reset()
208 /* Shutdown the task */ bcom_gen_bd_tx_reset()
312 * @psc_num: Number of the PSC to allocate a task for
313 * @queue_len: number of buffer descriptors to allocate for the task
317 * Allocate a bestcomm task structure for receiving data from a PSC.
334 * @psc_num: Number of the PSC to allocate a task for
335 * @queue_len: number of buffer descriptors to allocate for the task
338 * Allocate a bestcomm task structure for transmitting data to a PSC.
H A Dfec.c32 /* rx task vars that need to be set before enabling the task */
34 u32 enable; /* (u16*) address of task's control register */
42 /* rx task incs that need to be set before enabling the task */
52 /* tx task vars that need to be set before enabling the task */
56 u32 enable; /* (u16*) address of task's control register */
63 /* tx task incs that need to be set before enabling the task */
73 /* private structure in the task */
117 /* Shutdown the task */ bcom_fec_rx_reset()
136 inc->incr_dst = sizeof(u32); /* task image, but we stick */ bcom_fec_rx_reset()
218 /* Shutdown the task */ bcom_fec_tx_reset()
237 inc->incr_src = sizeof(u32); /* task image, but we stick */ bcom_fec_tx_reset()
/linux-4.1.27/drivers/scsi/mvsas/
H A Dmv_sas.c28 static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag) mvs_find_tag() argument
30 if (task->lldd_task) { mvs_find_tag()
32 slot = task->lldd_task; mvs_find_tag()
320 struct sas_task *task = tei->task; mvs_task_prep_smp() local
322 struct domain_device *dev = task->dev; mvs_task_prep_smp()
338 sg_req = &task->smp_task.smp_req; mvs_task_prep_smp()
344 sg_resp = &task->smp_task.smp_resp; mvs_task_prep_smp()
413 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); mvs_task_prep_smp()
418 dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1, mvs_task_prep_smp()
421 dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1, mvs_task_prep_smp()
426 static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag) mvs_get_ncq_tag() argument
428 struct ata_queued_cmd *qc = task->uldd_task; mvs_get_ncq_tag()
444 struct sas_task *task = tei->task; mvs_task_prep_ata() local
445 struct domain_device *dev = task->dev; mvs_task_prep_ata()
472 if (task->data_dir == DMA_FROM_DEVICE) mvs_task_prep_ata()
477 if (task->ata_task.use_ncq) mvs_task_prep_ata()
480 if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI) mvs_task_prep_ata()
486 if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag)) mvs_task_prep_ata()
487 task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); mvs_task_prep_ata()
493 hdr->data_len = cpu_to_le32(task->total_xfer_len); mvs_task_prep_ata()
542 if (likely(!task->ata_task.device_control_reg_update)) mvs_task_prep_ata()
543 task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ mvs_task_prep_ata()
545 memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); mvs_task_prep_ata()
548 task->ata_task.atapi_packet, 16); mvs_task_prep_ata()
558 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); mvs_task_prep_ata()
560 if (task->data_dir == DMA_FROM_DEVICE) mvs_task_prep_ata()
571 struct sas_task *task = tei->task; mvs_task_prep_ssp() local
574 struct domain_device *dev = task->dev; mvs_task_prep_ssp()
599 if (task->ssp_task.enable_first_burst) { mvs_task_prep_ssp()
610 hdr->data_len = cpu_to_le32(task->total_xfer_len); mvs_task_prep_ssp()
681 memcpy(buf_cmd, &task->ssp_task.LUN, 8); mvs_task_prep_ssp()
684 buf_cmd[9] = fburst | task->ssp_task.task_attr | mvs_task_prep_ssp()
685 (task->ssp_task.task_prio << 3); mvs_task_prep_ssp()
686 memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd, mvs_task_prep_ssp()
687 task->ssp_task.cmd->cmd_len); mvs_task_prep_ssp()
703 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); mvs_task_prep_ssp()
708 static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf, mvs_task_prep() argument
711 struct domain_device *dev = task->dev; mvs_task_prep()
719 struct task_status_struct *tsm = &task->task_status; mvs_task_prep()
728 task->task_done(task); mvs_task_prep()
745 if (sas_protocol_ata(task->task_proto)) { mvs_task_prep()
746 struct task_status_struct *ts = &task->task_status; mvs_task_prep()
752 task->task_done(task); mvs_task_prep()
755 struct task_status_struct *ts = &task->task_status; mvs_task_prep()
760 task->task_done(task); mvs_task_prep()
765 if (!sas_protocol_ata(task->task_proto)) { mvs_task_prep()
766 if (task->num_scatter) { mvs_task_prep()
768 task->scatter, mvs_task_prep()
769 task->num_scatter, mvs_task_prep()
770 task->data_dir); mvs_task_prep()
777 n_elem = task->num_scatter; mvs_task_prep()
786 task->lldd_task = NULL; mvs_task_prep()
795 tei.task = task; mvs_task_prep()
799 switch (task->task_proto) { mvs_task_prep()
814 task->task_proto); mvs_task_prep()
823 slot->task = task; mvs_task_prep()
825 task->lldd_task = slot; mvs_task_prep()
827 spin_lock(&task->task_state_lock); mvs_task_prep()
828 task->task_state_flags |= SAS_TASK_AT_INITIATOR; mvs_task_prep()
829 spin_unlock(&task->task_state_lock); mvs_task_prep()
844 if (!sas_protocol_ata(task->task_proto)) mvs_task_prep()
846 dma_unmap_sg(mvi->dev, task->scatter, n_elem, mvs_task_prep()
847 task->data_dir); mvs_task_prep()
852 static int mvs_task_exec(struct sas_task *task, gfp_t gfp_flags, mvs_task_exec() argument
861 mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info; mvs_task_exec()
864 rc = mvs_task_prep(task, mvi, is_tmf, tmf, &pass); mvs_task_exec()
876 int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags) mvs_queue_command() argument
878 return mvs_task_exec(task, gfp_flags, NULL, 0, NULL); mvs_queue_command()
887 static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, mvs_slot_task_free() argument
892 if (!slot->task) mvs_slot_task_free()
894 if (!sas_protocol_ata(task->task_proto)) mvs_slot_task_free()
896 dma_unmap_sg(mvi->dev, task->scatter, mvs_slot_task_free()
897 slot->n_elem, task->data_dir); mvs_slot_task_free()
899 switch (task->task_proto) { mvs_slot_task_free()
901 dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1, mvs_slot_task_free()
903 dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1, mvs_slot_task_free()
920 task->lldd_task = NULL; mvs_slot_task_free()
921 slot->task = NULL; mvs_slot_task_free()
1274 static void mvs_task_done(struct sas_task *task) mvs_task_done() argument
1276 if (!del_timer(&task->slow_task->timer)) mvs_task_done()
1278 complete(&task->slow_task->completion); mvs_task_done()
1283 struct sas_task *task = (struct sas_task *)data; mvs_tmf_timedout() local
1285 task->task_state_flags |= SAS_TASK_STATE_ABORTED; mvs_tmf_timedout()
1286 complete(&task->slow_task->completion); mvs_tmf_timedout()
1294 struct sas_task *task = NULL; mvs_exec_internal_tmf_task() local
1297 task = sas_alloc_slow_task(GFP_KERNEL); mvs_exec_internal_tmf_task()
1298 if (!task) mvs_exec_internal_tmf_task()
1301 task->dev = dev; mvs_exec_internal_tmf_task()
1302 task->task_proto = dev->tproto; mvs_exec_internal_tmf_task()
1304 memcpy(&task->ssp_task, parameter, para_len); mvs_exec_internal_tmf_task()
1305 task->task_done = mvs_task_done; mvs_exec_internal_tmf_task()
1307 task->slow_task->timer.data = (unsigned long) task; mvs_exec_internal_tmf_task()
1308 task->slow_task->timer.function = mvs_tmf_timedout; mvs_exec_internal_tmf_task()
1309 task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ; mvs_exec_internal_tmf_task()
1310 add_timer(&task->slow_task->timer); mvs_exec_internal_tmf_task()
1312 res = mvs_task_exec(task, GFP_KERNEL, NULL, 1, tmf); mvs_exec_internal_tmf_task()
1315 del_timer(&task->slow_task->timer); mvs_exec_internal_tmf_task()
1316 mv_printk("executing internal task failed:%d\n", res); mvs_exec_internal_tmf_task()
1320 wait_for_completion(&task->slow_task->completion); mvs_exec_internal_tmf_task()
1323 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { mvs_exec_internal_tmf_task()
1324 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { mvs_exec_internal_tmf_task()
1325 mv_printk("TMF task[%x] timeout.\n", tmf->tmf); mvs_exec_internal_tmf_task()
1330 if (task->task_status.resp == SAS_TASK_COMPLETE && mvs_exec_internal_tmf_task()
1331 task->task_status.stat == SAM_STAT_GOOD) { mvs_exec_internal_tmf_task()
1336 if (task->task_status.resp == SAS_TASK_COMPLETE && mvs_exec_internal_tmf_task()
1337 task->task_status.stat == SAS_DATA_UNDERRUN) { mvs_exec_internal_tmf_task()
1340 res = task->task_status.residual; mvs_exec_internal_tmf_task()
1344 if (task->task_status.resp == SAS_TASK_COMPLETE && mvs_exec_internal_tmf_task()
1345 task->task_status.stat == SAS_DATA_OVERRUN) { mvs_exec_internal_tmf_task()
1346 mv_dprintk("blocked task error.\n"); mvs_exec_internal_tmf_task()
1350 mv_dprintk(" task to dev %016llx response: 0x%x " mvs_exec_internal_tmf_task()
1353 task->task_status.resp, mvs_exec_internal_tmf_task()
1354 task->task_status.stat); mvs_exec_internal_tmf_task()
1355 sas_free_task(task); mvs_exec_internal_tmf_task()
1356 task = NULL; mvs_exec_internal_tmf_task()
1361 BUG_ON(retry == 3 && task != NULL); mvs_exec_internal_tmf_task()
1362 sas_free_task(task); mvs_exec_internal_tmf_task()
1439 int mvs_query_task(struct sas_task *task) mvs_query_task() argument
1446 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { mvs_query_task()
1447 struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task; mvs_query_task()
1448 struct domain_device *dev = task->dev; mvs_query_task()
1453 rc = mvs_find_tag(mvi, task, &tag); mvs_query_task()
1464 /* The task is still in Lun, release it then */ mvs_query_task()
1466 /* The task is not in Lun or failed, reset the phy */ mvs_query_task()
1476 /* mandatory SAM-3, still need free task/slot info */ mvs_abort_task()
1477 int mvs_abort_task(struct sas_task *task) mvs_abort_task() argument
1481 struct domain_device *dev = task->dev; mvs_abort_task()
1495 spin_lock_irqsave(&task->task_state_lock, flags); mvs_abort_task()
1496 if (task->task_state_flags & SAS_TASK_STATE_DONE) { mvs_abort_task()
1497 spin_unlock_irqrestore(&task->task_state_lock, flags); mvs_abort_task()
1501 spin_unlock_irqrestore(&task->task_state_lock, flags); mvs_abort_task()
1503 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { mvs_abort_task()
1504 struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task; mvs_abort_task()
1507 rc = mvs_find_tag(mvi, task, &tag); mvs_abort_task()
1519 /* if successful, clear the task and callback forwards.*/ mvs_abort_task()
1524 if (task->lldd_task) { mvs_abort_task()
1525 slot = task->lldd_task; mvs_abort_task()
1533 } else if (task->task_proto & SAS_PROTOCOL_SATA || mvs_abort_task()
1534 task->task_proto & SAS_PROTOCOL_STP) { mvs_abort_task()
1536 struct mvs_slot_info *slot = task->lldd_task; mvs_abort_task()
1538 mv_dprintk("mvs_abort_task() mvi=%p task=%p " mvs_abort_task()
1540 mvi, task, slot, slot_idx); mvs_abort_task()
1541 task->task_state_flags |= SAS_TASK_STATE_ABORTED; mvs_abort_task()
1542 mvs_slot_task_free(mvi, task, slot, slot_idx); mvs_abort_task()
1587 static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task, mvs_sata_done() argument
1590 struct mvs_device *mvi_dev = task->dev->lldd_dev; mvs_sata_done()
1591 struct task_status_struct *tstat = &task->task_status; mvs_sata_done()
1661 static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task, mvs_slot_err() argument
1677 switch (task->task_proto) { mvs_slot_err()
1685 sas_ssp_task_response(mvi->dev, task, iu); mvs_slot_err()
1700 task->ata_task.use_ncq = 0; mvs_slot_err()
1702 mvs_sata_done(mvi, task, slot_idx, err_dw0); mvs_slot_err()
1716 struct sas_task *task = slot->task; mvs_slot_complete() local
1725 if (unlikely(!task || !task->lldd_task || !task->dev)) mvs_slot_complete()
1728 tstat = &task->task_status; mvs_slot_complete()
1729 dev = task->dev; mvs_slot_complete()
1732 spin_lock(&task->task_state_lock); mvs_slot_complete()
1733 task->task_state_flags &= mvs_slot_complete()
1735 task->task_state_flags |= SAS_TASK_STATE_DONE; mvs_slot_complete()
1737 aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; mvs_slot_complete()
1738 spin_unlock(&task->task_state_lock); mvs_slot_complete()
1747 if (sas_protocol_ata(task->task_proto)) mvs_slot_complete()
1750 mvs_slot_task_free(mvi, task, slot, slot_idx); mvs_slot_complete()
1772 tstat->stat = mvs_slot_err(mvi, task, slot_idx); mvs_slot_complete()
1777 switch (task->task_proto) { mvs_slot_complete()
1788 sas_ssp_task_response(mvi->dev, task, iu); mvs_slot_complete()
1794 struct scatterlist *sg_resp = &task->smp_task.smp_resp; mvs_slot_complete()
1807 tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0); mvs_slot_complete()
1824 if (sas_protocol_ata(task->task_proto) && !mvi_dev->running_req) mvs_slot_complete()
1827 mvs_slot_task_free(mvi, task, slot, slot_idx); mvs_slot_complete()
1831 if (task->task_done) mvs_slot_complete()
1832 task->task_done(task); mvs_slot_complete()
1857 struct sas_task *task; mvs_do_release_task() local
1859 task = slot->task; mvs_do_release_task()
1861 if (dev && task->dev != dev) mvs_do_release_task()
1864 mv_printk("Release slot [%x] tag[%x], task [%p]:\n", mvs_do_release_task()
1865 slot_idx, slot->slot_tag, task); mvs_do_release_task()
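The mvs_slot_complete() excerpt above follows the usual libsas completion contract: the LLDD flips the task state under task_state_lock, fills task_status, then invokes the ->task_done() hook that libsas installed (sas_scsi_task_done() or sas_ata_task_done() in the excerpts below). A minimal sketch of that contract, assuming <scsi/libsas.h>; example_lldd_complete_task() itself is illustrative and not taken from the driver, but every identifier it touches appears in the excerpts:

static void example_lldd_complete_task(struct sas_task *task)
{
	unsigned long flags;

	/* mark the task done under its state lock, as mvs_slot_complete() does */
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
	task->task_state_flags |= SAS_TASK_STATE_DONE;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	/* report a successful completion back to libsas */
	task->task_status.resp = SAS_TASK_COMPLETE;
	task->task_status.stat = SAM_STAT_GOOD;

	if (task->task_done)
		task->task_done(task);
}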
/linux-4.1.27/drivers/scsi/libsas/
H A Dsas_task.c7 void sas_ssp_task_response(struct device *dev, struct sas_task *task, sas_ssp_task_response() argument
10 struct task_status_struct *tstat = &task->task_status; sas_ssp_task_response()
29 SAS_ADDR(task->dev->sas_addr), sas_ssp_task_response()
H A Dsas_scsi_host.c52 /* record final status and free the task */ sas_end_task()
53 static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task) sas_end_task() argument
55 struct task_status_struct *ts = &task->task_status; sas_end_task()
62 /* task delivered, what happened afterwards? */ sas_end_task()
96 "task; please report this\n", sas_end_task()
97 task->dev->port->ha->sas_ha_name); sas_end_task()
115 sas_free_task(task); sas_end_task()
118 static void sas_scsi_task_done(struct sas_task *task) sas_scsi_task_done() argument
120 struct scsi_cmnd *sc = task->uldd_task; sas_scsi_task_done()
121 struct domain_device *dev = task->dev; sas_scsi_task_done()
127 task = NULL; sas_scsi_task_done()
132 if (unlikely(!task)) { sas_scsi_task_done()
133 /* task will be completed by the error handler */ sas_scsi_task_done()
134 SAS_DPRINTK("task done but aborted\n"); sas_scsi_task_done()
140 sas_free_task(task); sas_scsi_task_done()
144 sas_end_task(sc, task); sas_scsi_task_done()
152 struct sas_task *task = sas_alloc_task(gfp_flags); sas_create_task() local
155 if (!task) sas_create_task()
158 task->uldd_task = cmd; sas_create_task()
159 ASSIGN_SAS_TASK(cmd, task); sas_create_task()
161 task->dev = dev; sas_create_task()
162 task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */ sas_create_task()
164 task->ssp_task.retry_count = 1; sas_create_task()
166 memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8); sas_create_task()
167 task->ssp_task.task_attr = TASK_ATTR_SIMPLE; sas_create_task()
168 task->ssp_task.cmd = cmd; sas_create_task()
170 task->scatter = scsi_sglist(cmd); sas_create_task()
171 task->num_scatter = scsi_sg_count(cmd); sas_create_task()
172 task->total_xfer_len = scsi_bufflen(cmd); sas_create_task()
173 task->data_dir = cmd->sc_data_direction; sas_create_task()
175 task->task_done = sas_scsi_task_done; sas_create_task()
177 return task; sas_create_task()
184 struct sas_task *task; sas_queuecommand() local
200 task = sas_create_task(cmd, dev, GFP_ATOMIC); sas_queuecommand()
201 if (!task) sas_queuecommand()
204 res = i->dft->lldd_execute_task(task, GFP_ATOMIC); sas_queuecommand()
212 sas_free_task(task); sas_queuecommand()
225 struct sas_task *task = TO_SAS_TASK(cmd); sas_eh_finish_cmd() local
228 * of the task, so we should be guaranteed not to be racing with sas_eh_finish_cmd()
231 sas_end_task(cmd, task); sas_eh_finish_cmd()
244 struct sas_task *task = TO_SAS_TASK(cmd); sas_eh_defer_cmd() local
252 sas_end_task(cmd, task); sas_eh_defer_cmd()
302 static enum task_disposition sas_scsi_find_task(struct sas_task *task) sas_scsi_find_task() argument
307 to_sas_internal(task->dev->port->ha->core.shost->transportt); sas_scsi_find_task()
310 SAS_DPRINTK("%s: aborting task 0x%p\n", __func__, task); sas_scsi_find_task()
311 res = si->dft->lldd_abort_task(task); sas_scsi_find_task()
313 spin_lock_irqsave(&task->task_state_lock, flags); sas_scsi_find_task()
314 if (task->task_state_flags & SAS_TASK_STATE_DONE) { sas_scsi_find_task()
315 spin_unlock_irqrestore(&task->task_state_lock, flags); sas_scsi_find_task()
316 SAS_DPRINTK("%s: task 0x%p is done\n", __func__, sas_scsi_find_task()
317 task); sas_scsi_find_task()
320 spin_unlock_irqrestore(&task->task_state_lock, flags); sas_scsi_find_task()
323 SAS_DPRINTK("%s: task 0x%p is aborted\n", sas_scsi_find_task()
324 __func__, task); sas_scsi_find_task()
327 SAS_DPRINTK("%s: querying task 0x%p\n", sas_scsi_find_task()
328 __func__, task); sas_scsi_find_task()
329 res = si->dft->lldd_query_task(task); sas_scsi_find_task()
332 SAS_DPRINTK("%s: task 0x%p at LU\n", sas_scsi_find_task()
333 __func__, task); sas_scsi_find_task()
336 SAS_DPRINTK("%s: task 0x%p not at LU\n", sas_scsi_find_task()
337 __func__, task); sas_scsi_find_task()
340 SAS_DPRINTK("%s: task 0x%p failed to abort\n", sas_scsi_find_task()
341 __func__, task); sas_scsi_find_task()
359 SAS_DPRINTK("eh: device %llx LUN %llx has the task\n", sas_recover_lu()
490 struct sas_task *task = TO_SAS_TASK(cmd); sas_eh_abort_handler() local
500 res = i->dft->lldd_abort_task(task); sas_eh_abort_handler()
586 struct sas_task *task; list_for_each_entry_safe() local
590 * SAS_HA_FROZEN and is leaving the task alone, or has list_for_each_entry_safe()
593 task = TO_SAS_TASK(cmd); list_for_each_entry_safe()
596 if (!task) list_for_each_entry_safe()
602 struct sas_task *task = TO_SAS_TASK(cmd); list_for_each_entry_safe() local
606 spin_lock_irqsave(&task->task_state_lock, flags); list_for_each_entry_safe()
607 need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET; list_for_each_entry_safe()
608 spin_unlock_irqrestore(&task->task_state_lock, flags); list_for_each_entry_safe()
611 SAS_DPRINTK("%s: task 0x%p requests reset\n", list_for_each_entry_safe()
612 __func__, task); list_for_each_entry_safe()
616 SAS_DPRINTK("trying to find task 0x%p\n", task); list_for_each_entry_safe()
617 res = sas_scsi_find_task(task); list_for_each_entry_safe()
623 SAS_DPRINTK("%s: task 0x%p is done\n", __func__, list_for_each_entry_safe()
624 task); list_for_each_entry_safe()
628 SAS_DPRINTK("%s: task 0x%p is aborted\n", list_for_each_entry_safe()
629 __func__, task); list_for_each_entry_safe()
633 SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task); list_for_each_entry_safe()
635 tmf_resp = sas_recover_lu(task->dev, cmd); list_for_each_entry_safe()
639 SAS_ADDR(task->dev), list_for_each_entry_safe()
648 SAS_DPRINTK("task 0x%p is not at LU: I_T recover\n", list_for_each_entry_safe()
649 task); list_for_each_entry_safe()
650 tmf_resp = sas_recover_I_T(task->dev); list_for_each_entry_safe()
653 struct domain_device *dev = task->dev; list_for_each_entry_safe()
655 SAS_ADDR(task->dev->sas_addr)); list_for_each_entry_safe()
663 struct asd_sas_port *port = task->dev->port; list_for_each_entry_safe()
692 SAS_ADDR(task->dev->sas_addr), list_for_each_entry_safe()
922 * Tell an upper layer that it needs to initiate an abort for a given task.
925 void sas_task_abort(struct sas_task *task) sas_task_abort() argument
927 struct scsi_cmnd *sc = task->uldd_task; sas_task_abort()
931 struct sas_task_slow *slow = task->slow_task; sas_task_abort()
941 if (dev_is_sata(task->dev)) { sas_task_abort()
942 sas_ata_task_abort(task); sas_task_abort()
H A Dsas_ata.c50 /* task delivered, what happened afterwards? */ sas_to_ata_err()
95 static void sas_ata_task_done(struct sas_task *task) sas_ata_task_done() argument
97 struct ata_queued_cmd *qc = task->uldd_task; sas_ata_task_done()
98 struct domain_device *dev = task->dev; sas_ata_task_done()
99 struct task_status_struct *stat = &task->task_status; sas_ata_task_done()
109 task = NULL; sas_ata_task_done()
114 /* check if libsas-eh got to the task before us */ sas_ata_task_done()
115 if (unlikely(!task)) sas_ata_task_done()
174 sas_free_task(task); sas_ata_task_done()
180 struct sas_task *task; sas_ata_qc_issue() local
200 task = sas_alloc_task(GFP_ATOMIC); sas_ata_qc_issue()
201 if (!task) sas_ata_qc_issue()
203 task->dev = dev; sas_ata_qc_issue()
204 task->task_proto = SAS_PROTOCOL_STP; sas_ata_qc_issue()
205 task->task_done = sas_ata_task_done; sas_ata_qc_issue()
213 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *)&task->ata_task.fis); sas_ata_qc_issue()
214 task->uldd_task = qc; sas_ata_qc_issue()
216 memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len); sas_ata_qc_issue()
217 task->total_xfer_len = qc->nbytes; sas_ata_qc_issue()
218 task->num_scatter = qc->n_elem; sas_ata_qc_issue()
223 task->total_xfer_len = xfer; sas_ata_qc_issue()
224 task->num_scatter = si; sas_ata_qc_issue()
227 task->data_dir = qc->dma_dir; sas_ata_qc_issue()
228 task->scatter = qc->sg; sas_ata_qc_issue()
229 task->ata_task.retry_count = 1; sas_ata_qc_issue()
230 task->task_state_flags = SAS_TASK_STATE_PENDING; sas_ata_qc_issue()
231 qc->lldd_task = task; sas_ata_qc_issue()
235 task->ata_task.use_ncq = 1; sas_ata_qc_issue()
239 task->ata_task.dma_xfer = 1; sas_ata_qc_issue()
244 ASSIGN_SAS_TASK(qc->scsicmd, task); sas_ata_qc_issue()
246 ret = i->dft->lldd_execute_task(task, GFP_ATOMIC); sas_ata_qc_issue()
252 sas_free_task(task); sas_ata_qc_issue()
431 static void sas_ata_internal_abort(struct sas_task *task) sas_ata_internal_abort() argument
433 struct sas_internal *si = dev_to_sas_internal(task->dev); sas_ata_internal_abort()
437 spin_lock_irqsave(&task->task_state_lock, flags); sas_ata_internal_abort()
438 if (task->task_state_flags & SAS_TASK_STATE_ABORTED || sas_ata_internal_abort()
439 task->task_state_flags & SAS_TASK_STATE_DONE) { sas_ata_internal_abort()
440 spin_unlock_irqrestore(&task->task_state_lock, flags); sas_ata_internal_abort()
442 task); sas_ata_internal_abort()
445 task->task_state_flags |= SAS_TASK_STATE_ABORTED; sas_ata_internal_abort()
446 spin_unlock_irqrestore(&task->task_state_lock, flags); sas_ata_internal_abort()
448 res = si->dft->lldd_abort_task(task); sas_ata_internal_abort()
450 spin_lock_irqsave(&task->task_state_lock, flags); sas_ata_internal_abort()
451 if (task->task_state_flags & SAS_TASK_STATE_DONE || sas_ata_internal_abort()
453 spin_unlock_irqrestore(&task->task_state_lock, flags); sas_ata_internal_abort()
459 * aborted ata tasks, otherwise we (likely) leak the sas task sas_ata_internal_abort()
462 SAS_DPRINTK("%s: Task %p leaked.\n", __func__, task); sas_ata_internal_abort()
464 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) sas_ata_internal_abort()
465 task->task_state_flags &= ~SAS_TASK_STATE_ABORTED; sas_ata_internal_abort()
466 spin_unlock_irqrestore(&task->task_state_lock, flags); sas_ata_internal_abort()
470 sas_free_task(task); sas_ata_internal_abort()
484 * ->lldd_abort_task) that the task is dead and free it sas_ata_post_internal()
487 struct sas_task *task = qc->lldd_task; sas_ata_post_internal() local
490 if (!task) sas_ata_post_internal()
492 task->uldd_task = NULL; sas_ata_post_internal()
493 sas_ata_internal_abort(task); sas_ata_post_internal()
587 void sas_ata_task_abort(struct sas_task *task) sas_ata_task_abort() argument
589 struct ata_queued_cmd *qc = task->uldd_task; sas_ata_task_abort()
/linux-4.1.27/arch/mn10300/include/asm/
H A Dcurrent.h1 /* MN10300 Current task structure accessor
17 * dedicate E2 to keeping the current task pointer
31 return current_thread_info()->task; get_current()
H A Dsyscall.h22 static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) syscall_get_nr() argument
27 static inline void syscall_rollback(struct task_struct *task, syscall_rollback() argument
33 static inline long syscall_get_error(struct task_struct *task, syscall_get_error() argument
40 static inline long syscall_get_return_value(struct task_struct *task, syscall_get_return_value() argument
46 static inline void syscall_set_return_value(struct task_struct *task, syscall_set_return_value() argument
53 static inline void syscall_get_arguments(struct task_struct *task, syscall_get_arguments() argument
85 static inline void syscall_set_arguments(struct task_struct *task, syscall_set_arguments() argument
H A Dprocessor.h104 #define THREAD_USING_FPU 0x00000001 /* T if this task is using the FPU */
105 #define THREAD_HAS_FPU 0x00000002 /* T if this task owns the FPU right now */
144 #define task_pt_regs(task) ((task)->thread.uregs)
145 #define KSTK_EIP(task) (task_pt_regs(task)->pc)
146 #define KSTK_ESP(task) (task_pt_regs(task)->sp)
/linux-4.1.27/arch/nios2/include/asm/
H A Dswitch_to.h12 * switch_to(n) should switch tasks to task ptr, first checking that
13 * ptr isn't the current task, in which case it does nothing. This
14 * also clears the TS-flag if the task we switched to has used the
H A Dsyscall.h23 static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) syscall_get_nr() argument
28 static inline void syscall_rollback(struct task_struct *task, syscall_rollback() argument
35 static inline long syscall_get_error(struct task_struct *task, syscall_get_error() argument
41 static inline long syscall_get_return_value(struct task_struct *task, syscall_get_return_value() argument
47 static inline void syscall_set_return_value(struct task_struct *task, syscall_set_return_value() argument
60 static inline void syscall_get_arguments(struct task_struct *task, syscall_get_arguments() argument
99 static inline void syscall_set_arguments(struct task_struct *task, syscall_set_arguments() argument
/linux-4.1.27/arch/powerpc/include/asm/
H A Dcurrent.h20 struct task_struct *task; get_current() local
23 : "=r" (task) get_current()
26 return task; get_current()
H A Dsyscall.h25 static inline long syscall_get_nr(struct task_struct *task, syscall_get_nr() argument
31 static inline void syscall_rollback(struct task_struct *task, syscall_rollback() argument
37 static inline long syscall_get_error(struct task_struct *task, syscall_get_error() argument
43 static inline long syscall_get_return_value(struct task_struct *task, syscall_get_return_value() argument
49 static inline void syscall_set_return_value(struct task_struct *task, syscall_set_return_value() argument
62 static inline void syscall_get_arguments(struct task_struct *task, syscall_get_arguments() argument
69 if (test_tsk_thread_flag(task, TIF_32BIT)) { syscall_get_arguments()
82 static inline void syscall_set_arguments(struct task_struct *task, syscall_set_arguments() argument
/linux-4.1.27/fs/nfs/flexfilelayout/
H A Dflexfilelayout.c611 struct rpc_task *task = &hdr->task; ff_layout_reset_write() local
616 dprintk("%s Reset task %5u for i/o through pNFS " ff_layout_reset_write()
618 hdr->task.tk_pid, ff_layout_reset_write()
640 dprintk("%s Reset task %5u for i/o through MDS " ff_layout_reset_write()
642 hdr->task.tk_pid, ff_layout_reset_write()
648 task->tk_status = pnfs_write_done_resend_to_mds(hdr); ff_layout_reset_write()
654 struct rpc_task *task = &hdr->task; ff_layout_reset_read() local
659 dprintk("%s Reset task %5u for i/o through MDS " ff_layout_reset_read()
661 hdr->task.tk_pid, ff_layout_reset_read()
667 task->tk_status = pnfs_read_done_resend_to_mds(hdr); ff_layout_reset_read()
671 static int ff_layout_async_handle_error_v4(struct rpc_task *task, ff_layout_async_handle_error_v4() argument
685 if (task->tk_status >= 0) ff_layout_async_handle_error_v4()
688 switch (task->tk_status) { ff_layout_async_handle_error_v4()
718 "flags 0x%x\n", __func__, task->tk_status, ff_layout_async_handle_error_v4()
720 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status); ff_layout_async_handle_error_v4()
724 rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX); ff_layout_async_handle_error_v4()
736 task->tk_status); ff_layout_async_handle_error_v4()
756 task->tk_status); ff_layout_async_handle_error_v4()
765 task->tk_status); ff_layout_async_handle_error_v4()
769 task->tk_status = 0; ff_layout_async_handle_error_v4()
772 task->tk_status = -EIO; ff_layout_async_handle_error_v4()
775 rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL); ff_layout_async_handle_error_v4()
777 rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task); ff_layout_async_handle_error_v4()
782 static int ff_layout_async_handle_error_v3(struct rpc_task *task, ff_layout_async_handle_error_v3() argument
788 if (task->tk_status >= 0) ff_layout_async_handle_error_v3()
791 if (task->tk_status != -EJUKEBOX) { ff_layout_async_handle_error_v3()
793 task->tk_status); ff_layout_async_handle_error_v3()
801 if (task->tk_status == -EJUKEBOX) ff_layout_async_handle_error_v3()
803 task->tk_status = 0; ff_layout_async_handle_error_v3()
804 rpc_restart_call(task); ff_layout_async_handle_error_v3()
805 rpc_delay(task, NFS_JUKEBOX_RETRY_TIME); ff_layout_async_handle_error_v3()
809 static int ff_layout_async_handle_error(struct rpc_task *task, ff_layout_async_handle_error() argument
819 return ff_layout_async_handle_error_v3(task, lseg, idx); ff_layout_async_handle_error()
821 return ff_layout_async_handle_error_v4(task, state, clp, ff_layout_async_handle_error()
846 static int ff_layout_read_done_cb(struct rpc_task *task, ff_layout_read_done_cb() argument
852 trace_nfs4_pnfs_read(hdr, task->tk_status); ff_layout_read_done_cb()
853 if (task->tk_status == -ETIMEDOUT && !hdr->res.op_status) ff_layout_read_done_cb()
855 if (task->tk_status < 0 && hdr->res.op_status) ff_layout_read_done_cb()
859 err = ff_layout_async_handle_error(task, hdr->args.context->state, ff_layout_read_done_cb()
868 return task->tk_status; ff_layout_read_done_cb()
873 return task->tk_status; ff_layout_read_done_cb()
875 rpc_restart_call_prepare(task); ff_layout_read_done_cb()
909 static int ff_layout_read_prepare_common(struct rpc_task *task, ff_layout_read_prepare_common() argument
913 rpc_exit(task, -EIO); ff_layout_read_prepare_common()
917 dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid); ff_layout_read_prepare_common()
922 rpc_exit(task, 0); ff_layout_read_prepare_common()
935 static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data) ff_layout_read_prepare_v3() argument
939 if (ff_layout_read_prepare_common(task, hdr)) ff_layout_read_prepare_v3()
942 rpc_call_start(task); ff_layout_read_prepare_v3()
948 struct rpc_task *task) ff_layout_setup_sequence()
954 task); ff_layout_setup_sequence()
958 task); ff_layout_setup_sequence()
961 static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data) ff_layout_read_prepare_v4() argument
965 if (ff_layout_read_prepare_common(task, hdr)) ff_layout_read_prepare_v4()
971 task)) ff_layout_read_prepare_v4()
976 rpc_exit(task, -EIO); /* lost lock, terminate I/O */ ff_layout_read_prepare_v4()
979 static void ff_layout_read_call_done(struct rpc_task *task, void *data) ff_layout_read_call_done() argument
983 dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status); ff_layout_read_call_done()
986 task->tk_status == 0) { ff_layout_read_call_done()
987 nfs4_sequence_done(task, &hdr->res.seq_res); ff_layout_read_call_done()
992 hdr->mds_ops->rpc_call_done(task, hdr); ff_layout_read_call_done()
995 static void ff_layout_read_count_stats(struct rpc_task *task, void *data) ff_layout_read_count_stats() argument
999 rpc_count_iostats_metrics(task, ff_layout_read_count_stats()
1003 static int ff_layout_write_done_cb(struct rpc_task *task, ff_layout_write_done_cb() argument
1009 trace_nfs4_pnfs_write(hdr, task->tk_status); ff_layout_write_done_cb()
1010 if (task->tk_status == -ETIMEDOUT && !hdr->res.op_status) ff_layout_write_done_cb()
1012 if (task->tk_status < 0 && hdr->res.op_status) ff_layout_write_done_cb()
1016 err = ff_layout_async_handle_error(task, hdr->args.context->state, ff_layout_write_done_cb()
1032 return task->tk_status; ff_layout_write_done_cb()
1034 rpc_restart_call_prepare(task); ff_layout_write_done_cb()
1044 if (task->tk_status >= 0) ff_layout_write_done_cb()
1050 static int ff_layout_commit_done_cb(struct rpc_task *task, ff_layout_commit_done_cb() argument
1056 trace_nfs4_pnfs_commit_ds(data, task->tk_status); ff_layout_commit_done_cb()
1057 if (task->tk_status == -ETIMEDOUT && !data->res.op_status) ff_layout_commit_done_cb()
1059 if (task->tk_status < 0 && data->res.op_status) ff_layout_commit_done_cb()
1063 err = ff_layout_async_handle_error(task, NULL, data->ds_clp, ff_layout_commit_done_cb()
1078 rpc_restart_call_prepare(task); ff_layout_commit_done_cb()
1088 static int ff_layout_write_prepare_common(struct rpc_task *task, ff_layout_write_prepare_common() argument
1092 rpc_exit(task, -EIO); ff_layout_write_prepare_common()
1100 dprintk("%s task %u reset io to %s\n", __func__, ff_layout_write_prepare_common()
1101 task->tk_pid, retry_pnfs ? "pNFS" : "MDS"); ff_layout_write_prepare_common()
1103 rpc_exit(task, 0); ff_layout_write_prepare_common()
1110 static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data) ff_layout_write_prepare_v3() argument
1114 if (ff_layout_write_prepare_common(task, hdr)) ff_layout_write_prepare_v3()
1117 rpc_call_start(task); ff_layout_write_prepare_v3()
1120 static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data) ff_layout_write_prepare_v4() argument
1124 if (ff_layout_write_prepare_common(task, hdr)) ff_layout_write_prepare_v4()
1130 task)) ff_layout_write_prepare_v4()
1135 rpc_exit(task, -EIO); /* lost lock, terminate I/O */ ff_layout_write_prepare_v4()
1138 static void ff_layout_write_call_done(struct rpc_task *task, void *data) ff_layout_write_call_done() argument
1143 task->tk_status == 0) { ff_layout_write_call_done()
1144 nfs4_sequence_done(task, &hdr->res.seq_res); ff_layout_write_call_done()
1149 hdr->mds_ops->rpc_call_done(task, hdr); ff_layout_write_call_done()
1152 static void ff_layout_write_count_stats(struct rpc_task *task, void *data) ff_layout_write_count_stats() argument
1156 rpc_count_iostats_metrics(task, ff_layout_write_count_stats()
1160 static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data) ff_layout_commit_prepare_v3() argument
1162 rpc_call_start(task); ff_layout_commit_prepare_v3()
1165 static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data) ff_layout_commit_prepare_v4() argument
1172 task); ff_layout_commit_prepare_v4()
1175 static void ff_layout_commit_count_stats(struct rpc_task *task, void *data) ff_layout_commit_count_stats() argument
1179 rpc_count_iostats_metrics(task, ff_layout_commit_count_stats()
945 ff_layout_setup_sequence(struct nfs_client *ds_clp, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, struct rpc_task *task) ff_layout_setup_sequence() argument
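The ff_layout_read_prepare_v3 / ff_layout_read_call_done / ff_layout_read_count_stats trio above is the standard sunrpc callback split: prepare, done and stats hooks plugged into a struct rpc_call_ops that rpc_run_task() drives for each asynchronous RPC. A minimal sketch of that wiring using the standard rpc_call_ops field names; the .rpc_release hook (ff_layout_read_release) is not shown in the excerpt and is assumed here:

static const struct rpc_call_ops example_ff_layout_read_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_read_prepare_v3,
	.rpc_call_done    = ff_layout_read_call_done,
	.rpc_count_stats  = ff_layout_read_count_stats,
	.rpc_release      = ff_layout_read_release,	/* assumed; release hook not shown above */
};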
/linux-4.1.27/include/linux/sched/
H A Drt.h21 extern int rt_mutex_get_effective_prio(struct task_struct *task, int newprio);
22 extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task);
34 static inline int rt_mutex_get_effective_prio(struct task_struct *task, rt_mutex_get_effective_prio() argument
40 static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task) rt_mutex_get_top_task() argument
/linux-4.1.27/arch/m32r/kernel/
H A Dasm-offsets.c6 OFFSET(TI_TASK, thread_info, task); foo()
/linux-4.1.27/arch/x86/kernel/
H A Ddumpstack_32.c41 void dump_trace(struct task_struct *task, struct pt_regs *regs, dump_trace() argument
49 if (!task) dump_trace()
50 task = current; dump_trace()
56 if (task != current) dump_trace()
57 stack = (unsigned long *)task->thread.sp; dump_trace()
61 bp = stack_frame(task, regs); dump_trace()
71 context = task_thread_info(task); dump_trace()
94 show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, show_stack_log_lvl() argument
101 if (task) show_stack_log_lvl()
102 sp = (unsigned long *)task->thread.sp; show_stack_log_lvl()
120 show_trace_log_lvl(task, regs, sp, bp, log_lvl); show_stack_log_lvl()
H A Ddumpstack.c47 struct task_struct *task; print_ftrace_graph_addr() local
54 task = tinfo->task; print_ftrace_graph_addr()
55 index = task->curr_ret_stack; print_ftrace_graph_addr()
57 if (!task->ret_stack || index < *graph) print_ftrace_graph_addr()
61 ret_addr = task->ret_stack[index].ret; print_ftrace_graph_addr()
170 show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, show_trace_log_lvl() argument
174 dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl); show_trace_log_lvl()
177 void show_trace(struct task_struct *task, struct pt_regs *regs, show_trace() argument
180 show_trace_log_lvl(task, regs, stack, bp, ""); show_trace()
183 void show_stack(struct task_struct *task, unsigned long *sp) show_stack() argument
192 if (!sp && (!task || task == current)) { show_stack()
197 show_stack_log_lvl(task, NULL, sp, bp, ""); show_stack()
H A Ddumpstack_64.c117 analyze_stack(int cpu, struct task_struct *task, unsigned long *stack, analyze_stack() argument
124 if ((unsigned long)task_stack_page(task) == addr) analyze_stack()
151 void dump_trace(struct task_struct *task, struct pt_regs *regs, dump_trace() argument
163 if (!task) dump_trace()
164 task = current; dump_trace()
169 else if (task != current) dump_trace()
170 stack = (unsigned long *)task->thread.sp; dump_trace()
176 bp = stack_frame(task, regs); dump_trace()
182 tinfo = task_thread_info(task); dump_trace()
188 stype = analyze_stack(cpu, task, stack, &stack_end, dump_trace()
249 show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, show_stack_log_lvl() argument
269 if (task) show_stack_log_lvl()
270 sp = (unsigned long *)task->thread.sp; show_stack_log_lvl()
297 show_trace_log_lvl(task, regs, sp, bp, log_lvl); show_stack_log_lvl()
H A Dptrace.c206 static u16 get_segment_reg(struct task_struct *task, unsigned long offset) get_segment_reg() argument
213 retval = *pt_regs_access(task_pt_regs(task), offset); get_segment_reg()
215 if (task == current) get_segment_reg()
216 retval = get_user_gs(task_pt_regs(task)); get_segment_reg()
218 retval = task_user_gs(task); get_segment_reg()
223 static int set_segment_reg(struct task_struct *task, set_segment_reg() argument
248 *pt_regs_access(task_pt_regs(task), offset) = value; set_segment_reg()
252 if (task == current) set_segment_reg()
253 set_user_gs(task_pt_regs(task), value); set_segment_reg()
255 task_user_gs(task) = value; set_segment_reg()
271 static u16 get_segment_reg(struct task_struct *task, unsigned long offset) get_segment_reg() argument
280 if (task == current) { offsetof()
285 return task->thread.fsindex; offsetof()
287 if (task == current) { offsetof()
291 return task->thread.gsindex; offsetof()
293 if (task == current) { offsetof()
297 return task->thread.ds; offsetof()
299 if (task == current) { offsetof()
303 return task->thread.es;
309 return *pt_regs_access(task_pt_regs(task), offset);
312 static int set_segment_reg(struct task_struct *task, set_segment_reg() argument
327 if ((value == FS_TLS_SEL && task->thread.fsindex == 0 && set_segment_reg()
328 task->thread.fs != 0) || set_segment_reg()
329 (value == 0 && task->thread.fsindex == FS_TLS_SEL && set_segment_reg()
330 task->thread.fs == 0)) set_segment_reg()
332 task->thread.fsindex = value; set_segment_reg()
333 if (task == current) set_segment_reg()
334 loadsegment(fs, task->thread.fsindex); set_segment_reg()
341 if ((value == GS_TLS_SEL && task->thread.gsindex == 0 && set_segment_reg()
342 task->thread.gs != 0) || set_segment_reg()
343 (value == 0 && task->thread.gsindex == GS_TLS_SEL && set_segment_reg()
344 task->thread.gs == 0)) set_segment_reg()
346 task->thread.gsindex = value; set_segment_reg()
347 if (task == current) set_segment_reg()
348 load_gs_index(task->thread.gsindex); set_segment_reg()
351 task->thread.ds = value; set_segment_reg()
352 if (task == current) set_segment_reg()
353 loadsegment(ds, task->thread.ds); set_segment_reg()
356 task->thread.es = value; set_segment_reg()
357 if (task == current) set_segment_reg()
358 loadsegment(es, task->thread.es); set_segment_reg()
367 task_pt_regs(task)->cs = value; set_segment_reg()
372 task_pt_regs(task)->ss = value; set_segment_reg()
381 static unsigned long get_flags(struct task_struct *task) get_flags() argument
383 unsigned long retval = task_pt_regs(task)->flags; get_flags()
388 if (test_tsk_thread_flag(task, TIF_FORCED_TF)) get_flags()
394 static int set_flags(struct task_struct *task, unsigned long value) set_flags() argument
396 struct pt_regs *regs = task_pt_regs(task); set_flags()
404 clear_tsk_thread_flag(task, TIF_FORCED_TF); set_flags()
405 else if (test_tsk_thread_flag(task, TIF_FORCED_TF)) set_flags()
456 static unsigned long getreg(struct task_struct *task, unsigned long offset) getreg() argument
465 return get_segment_reg(task, offset); getreg()
468 return get_flags(task); getreg()
477 unsigned int seg = task->thread.fsindex; offsetof()
478 if (task->thread.fs != 0) offsetof()
479 return task->thread.fs; offsetof()
480 if (task == current) offsetof()
484 return get_desc_base(&task->thread.tls_array[FS_TLS]); offsetof()
490 unsigned int seg = task->thread.gsindex; offsetof()
491 if (task->thread.gs != 0) offsetof()
492 return task->thread.gs; offsetof()
493 if (task == current) offsetof()
497 return get_desc_base(&task->thread.tls_array[GS_TLS]); offsetof()
502 return *pt_regs_access(task_pt_regs(task), offset);
728 * a valid task virtual addr. The new one will return -EINVAL in ptrace_set_breakpoint_addr()
776 * These access the current or another (stopped) task's io permission
976 * the state of the task restarting a 32-bit syscall. putreg32()
978 * in case the task is not actually still sitting at the putreg32()
1394 const struct user_regset_view *task_user_regset_view(struct task_struct *task) task_user_regset_view() argument
1397 if (test_tsk_thread_flag(task, TIF_IA32)) task_user_regset_view()
/linux-4.1.27/arch/s390/include/asm/
H A Dsyscall.h28 static inline long syscall_get_nr(struct task_struct *task, syscall_get_nr() argument
35 static inline void syscall_rollback(struct task_struct *task, syscall_rollback() argument
41 static inline long syscall_get_error(struct task_struct *task, syscall_get_error() argument
47 static inline long syscall_get_return_value(struct task_struct *task, syscall_get_return_value() argument
53 static inline void syscall_set_return_value(struct task_struct *task, syscall_set_return_value() argument
60 static inline void syscall_get_arguments(struct task_struct *task, syscall_get_arguments() argument
69 if (test_tsk_thread_flag(task, TIF_31BIT)) syscall_get_arguments()
79 static inline void syscall_set_arguments(struct task_struct *task, syscall_set_arguments() argument
H A Dthread_info.h25 * low level task data that entry.S needs immediate access to
31 struct task_struct *task; /* main task structure */ member in struct:thread_info
47 .task = &tsk, \
82 #define TIF_SINGLE_STEP 19 /* This task is single stepped */
83 #define TIF_BLOCK_STEP 20 /* This task is block stepped */
84 #define TIF_UPROBE_SINGLESTEP 21 /* This task is uprobe single stepped */
H A Dswitch_to.h14 extern void update_cr_regs(struct task_struct *task);
128 static inline void save_fp_vx_regs(struct task_struct *task) save_fp_vx_regs() argument
130 if (task->thread.vxrs) save_fp_vx_regs()
131 save_vx_regs(task->thread.vxrs); save_fp_vx_regs()
133 save_fp_regs(task->thread.fp_regs.fprs); save_fp_vx_regs()
136 static inline void restore_fp_vx_regs(struct task_struct *task) restore_fp_vx_regs() argument
138 if (task->thread.vxrs) restore_fp_vx_regs()
139 restore_vx_regs(task->thread.vxrs); restore_fp_vx_regs()
141 restore_fp_regs(task->thread.fp_regs.fprs); restore_fp_vx_regs()
/linux-4.1.27/arch/arc/include/asm/
H A Dsyscall.h18 syscall_get_nr(struct task_struct *task, struct pt_regs *regs) syscall_get_nr() argument
27 syscall_rollback(struct task_struct *task, struct pt_regs *regs) syscall_rollback() argument
33 syscall_get_error(struct task_struct *task, struct pt_regs *regs) syscall_get_error() argument
40 syscall_get_return_value(struct task_struct *task, struct pt_regs *regs) syscall_get_return_value() argument
46 syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, syscall_set_return_value() argument
57 syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, syscall_get_arguments() argument
/linux-4.1.27/arch/arc/kernel/
H A Dctx_sw_asm.S27 /* Save regs on kernel mode stack of task */
32 /* Save the now KSP in task->thread.ksp */
41 * Return last task in r0 (return reg)
43 * Since we already have last task in r0,
51 * switch to new task, contained in r1
56 /* reload SP with kernel mode stack pointer in task->thread.ksp */
/linux-4.1.27/drivers/staging/lustre/lustre/include/linux/
H A Dobd.h64 struct task_struct *task; member in struct:__anon10001
75 LASSERT(lock->task == NULL); __client_obd_list_lock()
76 lock->task = current; __client_obd_list_lock()
85 struct task_struct *task = lock->task; __client_obd_list_lock() local
87 if (task == NULL) __client_obd_list_lock()
92 lock, task->comm, task->pid, __client_obd_list_lock()
110 LASSERT(lock->task != NULL); client_obd_list_unlock()
111 lock->task = NULL; client_obd_list_unlock()
/linux-4.1.27/drivers/connector/
H A Dcn_proc.c67 void proc_fork_connector(struct task_struct *task) proc_fork_connector() argument
84 parent = rcu_dereference(task->real_parent); proc_fork_connector()
88 ev->event_data.fork.child_pid = task->pid; proc_fork_connector()
89 ev->event_data.fork.child_tgid = task->tgid; proc_fork_connector()
99 void proc_exec_connector(struct task_struct *task) proc_exec_connector() argument
114 ev->event_data.exec.process_pid = task->pid; proc_exec_connector()
115 ev->event_data.exec.process_tgid = task->tgid; proc_exec_connector()
124 void proc_id_connector(struct task_struct *task, int which_id) proc_id_connector() argument
138 ev->event_data.id.process_pid = task->pid; proc_id_connector()
139 ev->event_data.id.process_tgid = task->tgid; proc_id_connector()
141 cred = __task_cred(task); proc_id_connector()
163 void proc_sid_connector(struct task_struct *task) proc_sid_connector() argument
178 ev->event_data.sid.process_pid = task->pid; proc_sid_connector()
179 ev->event_data.sid.process_tgid = task->tgid; proc_sid_connector()
188 void proc_ptrace_connector(struct task_struct *task, int ptrace_id) proc_ptrace_connector() argument
203 ev->event_data.ptrace.process_pid = task->pid; proc_ptrace_connector()
204 ev->event_data.ptrace.process_tgid = task->tgid; proc_ptrace_connector()
221 void proc_comm_connector(struct task_struct *task) proc_comm_connector() argument
236 ev->event_data.comm.process_pid = task->pid; proc_comm_connector()
237 ev->event_data.comm.process_tgid = task->tgid; proc_comm_connector()
238 get_task_comm(ev->event_data.comm.comm, task); proc_comm_connector()
247 void proc_coredump_connector(struct task_struct *task) proc_coredump_connector() argument
262 ev->event_data.coredump.process_pid = task->pid; proc_coredump_connector()
263 ev->event_data.coredump.process_tgid = task->tgid; proc_coredump_connector()
272 void proc_exit_connector(struct task_struct *task) proc_exit_connector() argument
287 ev->event_data.exit.process_pid = task->pid; proc_exit_connector()
288 ev->event_data.exit.process_tgid = task->tgid; proc_exit_connector()
289 ev->event_data.exit.exit_code = task->exit_code; proc_exit_connector()
290 ev->event_data.exit.exit_signal = task->exit_signal; proc_exit_connector()
/linux-4.1.27/arch/metag/include/asm/
H A Dsyscall.h22 static inline long syscall_get_nr(struct task_struct *task, syscall_get_nr() argument
41 static inline void syscall_rollback(struct task_struct *task, syscall_rollback() argument
47 static inline long syscall_get_error(struct task_struct *task, syscall_get_error() argument
54 static inline long syscall_get_return_value(struct task_struct *task, syscall_get_return_value() argument
60 static inline void syscall_set_return_value(struct task_struct *task, syscall_set_return_value() argument
67 static inline void syscall_get_arguments(struct task_struct *task, syscall_get_arguments() argument
83 static inline void syscall_set_arguments(struct task_struct *task, syscall_set_arguments() argument
/linux-4.1.27/arch/arm64/include/asm/
H A Dsyscall.h25 static inline int syscall_get_nr(struct task_struct *task, syscall_get_nr() argument
31 static inline void syscall_rollback(struct task_struct *task, syscall_rollback() argument
38 static inline long syscall_get_error(struct task_struct *task, syscall_get_error() argument
45 static inline long syscall_get_return_value(struct task_struct *task, syscall_get_return_value() argument
51 static inline void syscall_set_return_value(struct task_struct *task, syscall_set_return_value() argument
60 static inline void syscall_get_arguments(struct task_struct *task, syscall_get_arguments() argument
86 static inline void syscall_set_arguments(struct task_struct *task, syscall_set_arguments() argument
/linux-4.1.27/arch/s390/kernel/
H A Ddumpstack.c67 static void show_trace(struct task_struct *task, unsigned long *stack) show_trace() argument
76 sp = task ? task->thread.ksp : __r15; show_trace()
86 if (task) show_trace()
87 __show_trace(sp, (unsigned long) task_stack_page(task), show_trace()
88 (unsigned long) task_stack_page(task) + THREAD_SIZE); show_trace()
92 if (!task) show_trace()
93 task = current; show_trace()
94 debug_show_held_locks(task); show_trace()
97 void show_stack(struct task_struct *task, unsigned long *sp) show_stack() argument
104 stack = task ? (unsigned long *) task->thread.ksp : __r15; show_stack()
116 show_trace(task, sp); show_stack()
H A Druntime_instr.c54 struct task_struct *task = current; exit_thread_runtime_instr() local
56 if (!task->thread.ri_cb) exit_thread_runtime_instr()
59 kfree(task->thread.ri_cb); exit_thread_runtime_instr()
60 task->thread.ri_signum = 0; exit_thread_runtime_instr()
61 task->thread.ri_cb = NULL; exit_thread_runtime_instr()
/linux-4.1.27/drivers/isdn/hardware/eicon/
H A Dsdp_hdr.h53 This function is called in order to set GP register of this task
55 task is called
59 This function is called to clear .bss at task initialization step
64 task, that will be used by calls from the task to the master
69 to the task
74 This function is called to set task PID
78 This function is called for run-time task init
86 This callback is used by the task to get the current time in ms

/linux-4.1.27/include/uapi/linux/
H A Dbsg.h24 __u32 subprotocol; /* [i] 0 -> SCSI command, 1 -> SCSI task
29 __u64 request_tag; /* [i] {SCSI: task tag (only if flagged)} */
30 __u32 request_attr; /* [i] {SCSI: task attribute} */
31 __u32 request_priority; /* [i] {SCSI: task priority} */
59 __u64 generated_tag; /* [o] {SCSI: transport generated task tag} */
/linux-4.1.27/arch/alpha/kernel/
H A Dptrace.c49 * +================================+ <---- task + 2*PAGE_SIZE
104 * Get address of register REGNO in task TASK.
107 get_reg_addr(struct task_struct * task, unsigned long regno) get_reg_addr() argument
112 addr = &task_thread_info(task)->pcb.usp; get_reg_addr()
114 addr = &task_thread_info(task)->pcb.unique; get_reg_addr()
119 addr = task_stack_page(task) + regoff[regno]; get_reg_addr()
125 * Get contents of register REGNO in task TASK.
128 get_reg(struct task_struct * task, unsigned long regno) get_reg() argument
132 unsigned long fpcr = *get_reg_addr(task, regno); get_reg()
134 = task_thread_info(task)->ieee_state & IEEE_SW_MASK; get_reg()
138 return *get_reg_addr(task, regno); get_reg()
142 * Write contents of register REGNO in task TASK.
145 put_reg(struct task_struct *task, unsigned long regno, unsigned long data) put_reg() argument
148 task_thread_info(task)->ieee_state put_reg()
149 = ((task_thread_info(task)->ieee_state & ~IEEE_SW_MASK) put_reg()
153 *get_reg_addr(task, regno) = data; put_reg()
158 read_int(struct task_struct *task, unsigned long addr, int * data) read_int() argument
160 int copied = access_process_vm(task, addr, data, sizeof(int), 0); read_int()
165 write_int(struct task_struct *task, unsigned long addr, int data) write_int() argument
167 int copied = access_process_vm(task, addr, &data, sizeof(int), 1); write_int()
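read_int()/write_int() above are thin wrappers around access_process_vm(), which copies to or from another task's address space and returns the number of bytes actually transferred. A minimal sketch of the same pattern generalized to one long; example_peek_long() is hypothetical, only access_process_vm() is taken from the excerpt:

static int example_peek_long(struct task_struct *task, unsigned long addr,
			     unsigned long *val)
{
	/* last argument 0 = read from the target task, 1 would write */
	int copied = access_process_vm(task, addr, val, sizeof(*val), 0);

	return copied == sizeof(*val) ? 0 : -EIO;
}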
/linux-4.1.27/lib/
H A Dis_single_threaded.c16 * Returns true if the task does not share ->mm with another thread/process.
20 struct task_struct *task = current; current_is_single_threaded() local
21 struct mm_struct *mm = task->mm; current_is_single_threaded()
25 if (atomic_read(&task->signal->live) != 1) current_is_single_threaded()
36 if (unlikely(p == task->group_leader)) for_each_process()
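current_is_single_threaded() above answers one question: can any other task observe current->mm while we change something in it. A minimal usage sketch under that assumption; example_set_mm_flag() and the def_flags update are hypothetical, only current_is_single_threaded() comes from the excerpt:

static int example_set_mm_flag(unsigned long flag)
{
	if (!current_is_single_threaded())
		return -EBUSY;		/* another thread/CLONE_VM process shares ->mm */

	current->mm->def_flags |= flag;	/* illustrative per-mm update */
	return 0;
}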
/linux-4.1.27/tools/testing/selftests/powerpc/pmu/ebb/
H A Dtask_event_vs_ebb_test.c18 * Tests a per-task event vs an EBB - in that order. The EBB should push the
19 * per-task event off the PMU.
52 /* We setup the task event first */ task_event_vs_ebb()
68 /* The EBB event should push the task event off so the child should succeed */ task_event_vs_ebb()
75 /* The task event may have run, or not, so we can't assert anything about it */ task_event_vs_ebb()
/linux-4.1.27/drivers/media/pci/cx18/
H A Dcx23418.h32 /* Description: This command creates a new instance of a certain task
34 the processor on which the task YYY will be created
36 dispatch to the right instance of the task
40 /* Description: This command destroys an instance of a task
41 IN[0] - Task handle. Handle of the task to destroy
79 IN[0] - Task handle. Handle of the task
97 IN[0] - Task handle. Handle of the task to start
102 IN[0] - Task handle. Handle of the task to stop
108 IN[0] - Task handle. Handle of the task to pause
113 IN[0] - Task handle. Handle of the task to resume
129 IN[0] - Task handle. Handle of the task to start
135 IN[0] - task handle. Handle of the task to start
141 IN[0] - task handle
151 IN[0] - task handle. Handle of the task to start
160 IN[0] - task handle
167 IN[0] - Task handle. Handle of the task
203 IN[0] - task handle. Handle of the task to start
209 IN[0] - task handle. Handle of the task to start
218 IN[0] - task handle. Handle of the task to start
224 IN[0] - task handle. Handle of the task to start
252 IN[0] - task handle. Handle of the task to start
259 IN[0] - task handle. Handle of the task to start
265 IN[0] - task handle. Handle of the task to start
271 IN[0] - task handle. Handle of the task to start
277 IN[0] - task handle. Handle of the task to start
283 IN[0] - task handle. Handle of the task to start
290 IN[0] - task handle. Handle of the task to start
296 IN[0] - task handle. Handle of the task to start
302 IN[0] - task handle. Handle of the task to start
346 IN[0] - task handle
362 IN[0] - Task handle. Handle of the task to start
371 IN[0] - Task handle. Handle of the task to start
381 IN[0] - Task handle. Handle of the task to start
387 IN[0] - Task handle. Handle of the task
/linux-4.1.27/block/
H A Dblk-ioc.c199 /* Called by the exiting task */ exit_io_context()
200 void exit_io_context(struct task_struct *task) exit_io_context() argument
204 task_lock(task); exit_io_context()
205 ioc = task->io_context; exit_io_context()
206 task->io_context = NULL; exit_io_context()
207 task_unlock(task); exit_io_context()
234 int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node) create_task_io_context() argument
255 * already did or @task, which isn't %current, is exiting. Note create_task_io_context()
260 task_lock(task); create_task_io_context()
261 if (!task->io_context && create_task_io_context()
262 (task == current || !(task->flags & PF_EXITING))) create_task_io_context()
263 task->io_context = ioc; create_task_io_context()
267 ret = task->io_context ? 0 : -EBUSY; create_task_io_context()
269 task_unlock(task); create_task_io_context()
275 * get_task_io_context - get io_context of a task
276 * @task: task of interest
280 * Return io_context of @task. If it doesn't exist, it is created with
287 struct io_context *get_task_io_context(struct task_struct *task, get_task_io_context() argument
295 task_lock(task); get_task_io_context()
296 ioc = task->io_context; get_task_io_context()
299 task_unlock(task); get_task_io_context()
302 task_unlock(task); get_task_io_context()
303 } while (!create_task_io_context(task, gfp_flags, node)); get_task_io_context()
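get_task_io_context() above returns the task's io_context with its reference count raised, creating it on first use, so every successful call must be paired with put_io_context(). A minimal usage sketch; example_peek_ioprio() is hypothetical, the two API calls are the ones documented in the excerpt:

static void example_peek_ioprio(void)
{
	struct io_context *ioc;

	ioc = get_task_io_context(current, GFP_KERNEL, NUMA_NO_NODE);
	if (!ioc)
		return;			/* allocation failed or the task is exiting */

	pr_debug("current ioprio: 0x%x\n", ioc->ioprio);

	put_io_context(ioc);		/* drop the reference taken above */
}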
/linux-4.1.27/arch/x86/include/asm/
H A Dsyscall.h31 static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) syscall_get_nr() argument
36 static inline void syscall_rollback(struct task_struct *task, syscall_rollback() argument
42 static inline long syscall_get_error(struct task_struct *task, syscall_get_error() argument
51 if (task_thread_info(task)->status & TS_COMPAT) syscall_get_error()
61 static inline long syscall_get_return_value(struct task_struct *task, syscall_get_return_value() argument
67 static inline void syscall_set_return_value(struct task_struct *task, syscall_set_return_value() argument
76 static inline void syscall_get_arguments(struct task_struct *task, syscall_get_arguments() argument
85 static inline void syscall_set_arguments(struct task_struct *task, syscall_set_arguments() argument
101 static inline void syscall_get_arguments(struct task_struct *task, syscall_get_arguments() argument
107 if (task_thread_info(task)->status & TS_COMPAT) syscall_get_arguments()
162 static inline void syscall_set_arguments(struct task_struct *task, syscall_set_arguments() argument
168 if (task_thread_info(task)->status & TS_COMPAT) syscall_set_arguments()
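The asm/syscall.h excerpts above (arc, arm64, metag, mn10300, nios2, powerpc, s390, x86) all implement one arch-independent contract: given a stopped task and its saved pt_regs, report the syscall number, arguments and return value. A minimal sketch of a caller of that contract; example_log_syscall() is hypothetical and assumes it runs while the task is stopped with @regs being that task's saved registers:

static void example_log_syscall(struct task_struct *task, struct pt_regs *regs)
{
	unsigned long args[3];
	int nr = syscall_get_nr(task, regs);

	if (nr < 0)
		return;			/* the task is not inside a system call */

	/* fetch the first three syscall arguments from the saved registers */
	syscall_get_arguments(task, regs, 0, 3, args);
	pr_info("pid %d: syscall %d (0x%lx, 0x%lx, 0x%lx)\n",
		task_pid_nr(task), nr, args[0], args[1], args[2]);
}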
H A Dstacktrace.h60 stack_frame(struct task_struct *task, struct pt_regs *regs) stack_frame() argument
67 if (task == current) { stack_frame()
74 return *(unsigned long *)task->thread.sp; stack_frame()
78 stack_frame(struct task_struct *task, struct pt_regs *regs) stack_frame() argument
85 show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
89 show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
H A Dproto.h20 long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
H A Dmmu.h15 /* True if mm supports a task running in 32 bit compatibility mode. */
/linux-4.1.27/fs/nfs/
H A Dunlink.c74 * @task: rpc_task of the sillydelete
78 static void nfs_async_unlink_done(struct rpc_task *task, void *calldata) nfs_async_unlink_done() argument
83 trace_nfs_sillyrename_unlink(data, task->tk_status); nfs_async_unlink_done()
84 if (!NFS_PROTO(dir)->unlink_done(task, dir)) nfs_async_unlink_done()
85 rpc_restart_call_prepare(task); nfs_async_unlink_done()
90 * @task: rpc_task of the sillydelete
92 * We need to call nfs_put_unlinkdata as a 'tk_release' task since the
105 static void nfs_unlink_prepare(struct rpc_task *task, void *calldata) nfs_unlink_prepare() argument
108 NFS_PROTO(data->dir)->unlink_rpc_prepare(task, data); nfs_unlink_prepare()
131 struct rpc_task *task; nfs_do_call_unlink() local
177 task = rpc_run_task(&task_setup_data); nfs_do_call_unlink()
178 if (!IS_ERR(task)) nfs_do_call_unlink()
179 rpc_put_task_async(task); nfs_do_call_unlink()
338 * @task: rpc_task of the sillyrename
343 static void nfs_async_rename_done(struct rpc_task *task, void *calldata) nfs_async_rename_done() argument
351 new_dir, data->new_dentry, task->tk_status); nfs_async_rename_done()
352 if (!NFS_PROTO(old_dir)->rename_done(task, old_dir, new_dir)) { nfs_async_rename_done()
353 rpc_restart_call_prepare(task); nfs_async_rename_done()
358 data->complete(task, data); nfs_async_rename_done()
382 static void nfs_rename_prepare(struct rpc_task *task, void *calldata) nfs_rename_prepare() argument
385 NFS_PROTO(data->old_dir)->rename_rpc_prepare(task, data); nfs_rename_prepare()
425 struct rpc_task *task = ERR_CAST(data->cred); nfs_async_rename() local
427 return task; nfs_async_rename()
467 nfs_complete_sillyrename(struct rpc_task *task, struct nfs_renamedata *data) nfs_complete_sillyrename() argument
471 if (task->tk_status != 0) { nfs_complete_sillyrename()
519 struct rpc_task *task; nfs_sillyrename() local
574 /* run the rename task, undo unlink if it fails */ nfs_sillyrename()
575 task = nfs_async_rename(dir, dir, dentry, sdentry, nfs_sillyrename()
577 if (IS_ERR(task)) { nfs_sillyrename()
583 /* wait for the RPC task to complete, unless a SIGKILL intervenes */ nfs_sillyrename()
584 error = rpc_wait_for_completion_task(task); nfs_sillyrename()
586 error = task->tk_status; nfs_sillyrename()
599 rpc_put_task(task); nfs_sillyrename()
H A Dnfs4proc.c503 struct rpc_task *task) nfs40_setup_sequence()
518 task->tk_timeout = HZ >> 2; nfs40_setup_sequence()
527 rpc_call_start(task); nfs40_setup_sequence()
532 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task, nfs40_setup_sequence()
535 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); nfs40_setup_sequence()
541 static int nfs40_sequence_done(struct rpc_task *task, nfs40_sequence_done() argument
595 int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) nfs41_sequence_done() argument
605 /* don't increment the sequence number if the task wasn't sent */ nfs41_sequence_done()
606 if (!RPC_WAS_SENT(task)) nfs41_sequence_done()
686 if (rpc_restart_call_prepare(task)) { nfs41_sequence_done()
687 task->tk_status = 0; nfs41_sequence_done()
692 if (!rpc_restart_call(task)) nfs41_sequence_done()
694 rpc_delay(task, NFS4_POLL_RETRY_MAX); nfs41_sequence_done()
699 int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) nfs4_sequence_done() argument
704 return nfs40_sequence_done(task, res); nfs4_sequence_done()
705 return nfs41_sequence_done(task, res); nfs4_sequence_done()
712 struct rpc_task *task) nfs41_setup_sequence()
724 task->tk_timeout = 0; nfs41_setup_sequence()
738 task->tk_timeout = HZ >> 2; nfs41_setup_sequence()
759 rpc_call_start(task); nfs41_setup_sequence()
764 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task, nfs41_setup_sequence()
767 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); nfs41_setup_sequence()
776 struct rpc_task *task) nfs4_setup_sequence()
783 args, res, task); nfs4_setup_sequence()
789 ret = nfs41_setup_sequence(session, args, res, task); nfs4_setup_sequence()
795 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata) nfs41_call_sync_prepare() argument
802 nfs41_setup_sequence(session, data->seq_args, data->seq_res, task); nfs41_call_sync_prepare()
805 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata) nfs41_call_sync_done() argument
809 nfs41_sequence_done(task, data->seq_res); nfs41_call_sync_done()
822 struct rpc_task *task) nfs4_setup_sequence()
825 args, res, task); nfs4_setup_sequence()
828 int nfs4_sequence_done(struct rpc_task *task, nfs4_sequence_done() argument
831 return nfs40_sequence_done(task, res); nfs4_sequence_done()
837 static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata) nfs40_call_sync_prepare() argument
841 data->seq_args, data->seq_res, task); nfs40_call_sync_prepare()
844 static void nfs40_call_sync_done(struct rpc_task *task, void *calldata) nfs40_call_sync_done() argument
847 nfs4_sequence_done(task, data->seq_res); nfs40_call_sync_done()
862 struct rpc_task *task; nfs4_call_sync_sequence() local
876 task = rpc_run_task(&task_setup); nfs4_call_sync_sequence()
877 if (IS_ERR(task)) nfs4_call_sync_sequence()
878 ret = PTR_ERR(task); nfs4_call_sync_sequence()
880 ret = task->tk_status; nfs4_call_sync_sequence()
881 rpc_put_task(task); nfs4_call_sync_sequence()
1118 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task) nfs4_wait_for_completion_rpc_task() argument
1122 ret = rpc_wait_for_completion_task(task); nfs4_wait_for_completion_rpc_task()
1756 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata) nfs4_open_confirm_prepare() argument
1761 &data->c_arg.seq_args, &data->c_res.seq_res, task); nfs4_open_confirm_prepare()
1764 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata) nfs4_open_confirm_done() argument
1768 nfs40_sequence_done(task, &data->c_res.seq_res); nfs4_open_confirm_done()
1770 data->rpc_status = task->tk_status; nfs4_open_confirm_done()
1809 struct rpc_task *task; _nfs4_proc_open_confirm() local
1831 task = rpc_run_task(&task_setup_data); _nfs4_proc_open_confirm()
1832 if (IS_ERR(task)) _nfs4_proc_open_confirm()
1833 return PTR_ERR(task); _nfs4_proc_open_confirm()
1834 status = nfs4_wait_for_completion_rpc_task(task); _nfs4_proc_open_confirm()
1840 rpc_put_task(task); _nfs4_proc_open_confirm()
1844 static void nfs4_open_prepare(struct rpc_task *task, void *calldata) nfs4_open_prepare() argument
1850 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) nfs4_open_prepare()
1877 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; nfs4_open_prepare()
1884 task) != 0) nfs4_open_prepare()
1900 task->tk_action = NULL; nfs4_open_prepare()
1902 nfs4_sequence_done(task, &data->o_res.seq_res); nfs4_open_prepare()
1905 static void nfs4_open_done(struct rpc_task *task, void *calldata) nfs4_open_done() argument
1909 data->rpc_status = task->tk_status; nfs4_open_done()
1911 if (!nfs4_sequence_done(task, &data->o_res.seq_res)) nfs4_open_done()
1914 if (task->tk_status == 0) { nfs4_open_done()
1969 struct rpc_task *task; nfs4_run_open_task() local
1996 task = rpc_run_task(&task_setup_data); nfs4_run_open_task()
1997 if (IS_ERR(task)) nfs4_run_open_task()
1998 return PTR_ERR(task); nfs4_run_open_task()
1999 status = nfs4_wait_for_completion_rpc_task(task); nfs4_run_open_task()
2005 rpc_put_task(task); nfs4_run_open_task()
2641 static void nfs4_close_done(struct rpc_task *task, void *data) nfs4_close_done() argument
2649 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) nfs4_close_done()
2651 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status); nfs4_close_done()
2655 switch (task->tk_status) { nfs4_close_done()
2670 rpc_restart_call_prepare(task); nfs4_close_done()
2676 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) { nfs4_close_done()
2677 rpc_restart_call_prepare(task); nfs4_close_done()
2686 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status); nfs4_close_done()
2689 static void nfs4_close_prepare(struct rpc_task *task, void *data) nfs4_close_prepare() argument
2698 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) nfs4_close_prepare()
2701 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; nfs4_close_prepare()
2734 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; nfs4_close_prepare()
2736 pnfs_roc_drain(inode, &calldata->roc_barrier, task)) { nfs4_close_prepare()
2750 task) != 0) nfs4_close_prepare()
2755 task->tk_action = NULL; nfs4_close_prepare()
2757 nfs4_sequence_done(task, &calldata->res.seq_res); nfs4_close_prepare()
2790 struct rpc_task *task; nfs4_do_close() local
2830 task = rpc_run_task(&task_setup_data); nfs4_do_close()
2831 if (IS_ERR(task)) nfs4_do_close()
2832 return PTR_ERR(task); nfs4_do_close()
2835 status = rpc_wait_for_completion_task(task); nfs4_do_close()
2836 rpc_put_task(task); nfs4_do_close()
3618 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) nfs4_proc_unlink_rpc_prepare() argument
3623 task); nfs4_proc_unlink_rpc_prepare() local
3626 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) nfs4_proc_unlink_done() argument
3628 struct nfs_unlinkdata *data = task->tk_calldata; nfs4_proc_unlink_done()
3631 if (!nfs4_sequence_done(task, &res->seq_res)) nfs4_proc_unlink_done()
3633 if (nfs4_async_handle_error(task, res->server, NULL, nfs4_proc_unlink_done()
3651 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) nfs4_proc_rename_rpc_prepare() argument
3656 task); nfs4_proc_rename_rpc_prepare() local
3659 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, nfs4_proc_rename_done() argument
3662 struct nfs_renamedata *data = task->tk_calldata; nfs4_proc_rename_done()
3665 if (!nfs4_sequence_done(task, &res->seq_res)) nfs4_proc_rename_done()
3667 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN) nfs4_proc_rename_done()
4165 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr) nfs4_read_done_cb() argument
4169 trace_nfs4_read(hdr, task->tk_status); nfs4_read_done_cb()
4170 if (nfs4_async_handle_error(task, server, nfs4_read_done_cb()
4173 rpc_restart_call_prepare(task); nfs4_read_done_cb()
4178 if (task->tk_status > 0) nfs4_read_done_cb()
4183 static bool nfs4_read_stateid_changed(struct rpc_task *task, nfs4_read_stateid_changed() argument
4187 if (!nfs4_error_stateid_expired(task->tk_status) || nfs4_read_stateid_changed()
4193 rpc_restart_call_prepare(task); nfs4_read_stateid_changed()
4197 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) nfs4_read_done() argument
4202 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) nfs4_read_done()
4204 if (nfs4_read_stateid_changed(task, &hdr->args)) nfs4_read_done()
4206 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) : nfs4_read_done()
4207 nfs4_read_done_cb(task, hdr); nfs4_read_done()
4219 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task, nfs4_proc_pgio_rpc_prepare() argument
4225 task)) nfs4_proc_pgio_rpc_prepare()
4236 static int nfs4_write_done_cb(struct rpc_task *task, nfs4_write_done_cb() argument
4241 trace_nfs4_write(hdr, task->tk_status); nfs4_write_done_cb()
4242 if (nfs4_async_handle_error(task, NFS_SERVER(inode), nfs4_write_done_cb()
4245 rpc_restart_call_prepare(task); nfs4_write_done_cb()
4248 if (task->tk_status >= 0) { nfs4_write_done_cb()
4255 static bool nfs4_write_stateid_changed(struct rpc_task *task, nfs4_write_stateid_changed() argument
4259 if (!nfs4_error_stateid_expired(task->tk_status) || nfs4_write_stateid_changed()
4265 rpc_restart_call_prepare(task); nfs4_write_stateid_changed()
4269 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) nfs4_write_done() argument
4271 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) nfs4_write_done()
4273 if (nfs4_write_stateid_changed(task, &hdr->args)) nfs4_write_done()
4275 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) : nfs4_write_done()
4276 nfs4_write_done_cb(task, hdr); nfs4_write_done()
4311 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) nfs4_proc_commit_rpc_prepare() argument
4316 task); nfs4_proc_commit_rpc_prepare() local
4319 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data) nfs4_commit_done_cb() argument
4323 trace_nfs4_commit(data, task->tk_status); nfs4_commit_done_cb()
4324 if (nfs4_async_handle_error(task, NFS_SERVER(inode), nfs4_commit_done_cb()
4326 rpc_restart_call_prepare(task); nfs4_commit_done_cb()
4332 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data) nfs4_commit_done() argument
4334 if (!nfs4_sequence_done(task, &data->res.seq_res)) nfs4_commit_done()
4336 return data->commit_done_cb(task, data); nfs4_commit_done()
4370 static void nfs4_renew_done(struct rpc_task *task, void *calldata) nfs4_renew_done() argument
4376 trace_nfs4_renew_async(clp, task->tk_status); nfs4_renew_done()
4377 switch (task->tk_status) { nfs4_renew_done()
4387 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { nfs4_renew_done()
4878 nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, nfs4_async_handle_error() argument
4883 if (task->tk_status >= 0) nfs4_async_handle_error()
4885 switch(task->tk_status) { nfs4_async_handle_error()
4920 task->tk_status); nfs4_async_handle_error()
4921 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status); nfs4_async_handle_error()
4926 rpc_delay(task, nfs4_update_delay(timeout)); nfs4_async_handle_error()
4929 rpc_delay(task, NFS4_POLL_RETRY_MAX); nfs4_async_handle_error()
4934 task->tk_status = nfs4_map_errors(task->tk_status); nfs4_async_handle_error()
4937 task->tk_status = -EIO; nfs4_async_handle_error()
4940 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); nfs4_async_handle_error()
4942 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); nfs4_async_handle_error()
4946 task->tk_status = 0; nfs4_async_handle_error()
5027 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata) nfs4_setclientid_done() argument
5031 if (task->tk_status == 0) nfs4_setclientid_done()
5032 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred); nfs4_setclientid_done()
5065 struct rpc_task *task; nfs4_proc_setclientid() local
5099 task = rpc_run_task(&task_setup_data); nfs4_proc_setclientid()
5100 if (IS_ERR(task)) { nfs4_proc_setclientid()
5101 status = PTR_ERR(task); nfs4_proc_setclientid()
5104 status = task->tk_status; nfs4_proc_setclientid()
5109 rpc_put_task(task); nfs4_proc_setclientid()
5157 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) nfs4_delegreturn_done() argument
5161 if (!nfs4_sequence_done(task, &data->res.seq_res)) nfs4_delegreturn_done()
5164 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status); nfs4_delegreturn_done()
5165 switch (task->tk_status) { nfs4_delegreturn_done()
5174 task->tk_status = 0; nfs4_delegreturn_done()
5179 if (nfs4_async_handle_error(task, data->res.server, nfs4_delegreturn_done()
5181 rpc_restart_call_prepare(task); nfs4_delegreturn_done()
5185 data->rpc_status = task->tk_status; nfs4_delegreturn_done()
5201 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) nfs4_delegreturn_prepare() argument
5208 pnfs_roc_drain(d_data->inode, &d_data->roc_barrier, task)) nfs4_delegreturn_prepare()
5214 task); nfs4_delegreturn_prepare()
5227 struct rpc_task *task; _nfs4_proc_delegreturn() local
5261 task = rpc_run_task(&task_setup_data); _nfs4_proc_delegreturn()
5262 if (IS_ERR(task)) _nfs4_proc_delegreturn()
5263 return PTR_ERR(task); _nfs4_proc_delegreturn()
5266 status = nfs4_wait_for_completion_rpc_task(task); _nfs4_proc_delegreturn()
5275 rpc_put_task(task); _nfs4_proc_delegreturn()
5429 static void nfs4_locku_done(struct rpc_task *task, void *data) nfs4_locku_done() argument
5433 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) nfs4_locku_done()
5435 switch (task->tk_status) { nfs4_locku_done()
5448 rpc_restart_call_prepare(task); nfs4_locku_done()
5451 if (nfs4_async_handle_error(task, calldata->server, nfs4_locku_done()
5453 rpc_restart_call_prepare(task); nfs4_locku_done()
5458 static void nfs4_locku_prepare(struct rpc_task *task, void *data) nfs4_locku_prepare() argument
5462 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) nfs4_locku_prepare()
5473 task) != 0) nfs4_locku_prepare()
5477 task->tk_action = NULL; nfs4_locku_prepare()
5479 nfs4_sequence_done(task, &calldata->res.seq_res); nfs4_locku_prepare()
5534 struct rpc_task *task; nfs4_proc_unlck() local
5564 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid); nfs4_proc_unlck()
5565 status = PTR_ERR(task); nfs4_proc_unlck()
5566 if (IS_ERR(task)) nfs4_proc_unlck()
5568 status = nfs4_wait_for_completion_rpc_task(task); nfs4_proc_unlck()
5569 rpc_put_task(task); nfs4_proc_unlck()
5628 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) nfs4_lock_prepare() argument
5634 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0) nfs4_lock_prepare()
5638 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) { nfs4_lock_prepare()
5652 task->tk_action = NULL; nfs4_lock_prepare()
5659 task) == 0) nfs4_lock_prepare()
5666 nfs4_sequence_done(task, &data->res.seq_res); nfs4_lock_prepare()
5670 static void nfs4_lock_done(struct rpc_task *task, void *calldata) nfs4_lock_done() argument
5677 if (!nfs4_sequence_done(task, &data->res.seq_res)) nfs4_lock_done()
5680 data->rpc_status = task->tk_status; nfs4_lock_done()
5681 switch (task->tk_status) { nfs4_lock_done()
5688 rpc_restart_call_prepare(task); nfs4_lock_done()
5697 rpc_restart_call_prepare(task); nfs4_lock_done()
5706 rpc_restart_call_prepare(task); nfs4_lock_done()
5709 rpc_restart_call_prepare(task); nfs4_lock_done()
5721 struct rpc_task *task; nfs4_lock_release() local
5722 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, nfs4_lock_release()
5724 if (!IS_ERR(task)) nfs4_lock_release()
5725 rpc_put_task_async(task); nfs4_lock_release()
5762 struct rpc_task *task; _nfs4_do_setlk() local
5794 task = rpc_run_task(&task_setup_data); _nfs4_do_setlk()
5795 if (IS_ERR(task)) _nfs4_do_setlk()
5796 return PTR_ERR(task); _nfs4_do_setlk()
5797 ret = nfs4_wait_for_completion_rpc_task(task); _nfs4_do_setlk()
5805 rpc_put_task(task); _nfs4_do_setlk()
6045 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata) nfs4_release_lockowner_prepare() argument
6050 &data->args.seq_args, &data->res.seq_res, task); nfs4_release_lockowner_prepare()
6055 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata) nfs4_release_lockowner_done() argument
6060 nfs40_sequence_done(task, &data->res.seq_res); nfs4_release_lockowner_done()
6062 switch (task->tk_status) { nfs4_release_lockowner_done()
6072 if (nfs4_async_handle_error(task, server, nfs4_release_lockowner_done()
6074 rpc_restart_call_prepare(task); nfs4_release_lockowner_done()
7056 static void nfs4_get_lease_time_prepare(struct rpc_task *task, nfs4_get_lease_time_prepare() argument
7068 task); nfs4_get_lease_time_prepare()
7076 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) nfs4_get_lease_time_done() argument
7082 if (!nfs41_sequence_done(task, &data->res->lr_seq_res)) nfs4_get_lease_time_done()
7084 switch (task->tk_status) { nfs4_get_lease_time_done()
7087 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); nfs4_get_lease_time_done()
7088 rpc_delay(task, NFS4_POLL_RETRY_MIN); nfs4_get_lease_time_done()
7089 task->tk_status = 0; nfs4_get_lease_time_done()
7092 rpc_restart_call_prepare(task); nfs4_get_lease_time_done()
7105 struct rpc_task *task; nfs4_proc_get_lease_time() local
7132 task = rpc_run_task(&task_setup); nfs4_proc_get_lease_time()
7134 if (IS_ERR(task)) nfs4_proc_get_lease_time()
7135 status = PTR_ERR(task); nfs4_proc_get_lease_time()
7137 status = task->tk_status; nfs4_proc_get_lease_time()
7138 rpc_put_task(task); nfs4_proc_get_lease_time()
7381 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp) nfs41_sequence_handle_errors() argument
7383 switch(task->tk_status) { nfs41_sequence_handle_errors()
7385 rpc_delay(task, NFS4_POLL_RETRY_MAX); nfs41_sequence_handle_errors()
7393 static void nfs41_sequence_call_done(struct rpc_task *task, void *data) nfs41_sequence_call_done() argument
7398 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp)) nfs41_sequence_call_done()
7401 trace_nfs4_sequence(clp, task->tk_status); nfs41_sequence_call_done()
7402 if (task->tk_status < 0) { nfs41_sequence_call_done()
7403 dprintk("%s ERROR %d\n", __func__, task->tk_status); nfs41_sequence_call_done()
7407 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) { nfs41_sequence_call_done()
7408 rpc_restart_call_prepare(task); nfs41_sequence_call_done()
7412 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); nfs41_sequence_call_done()
7417 static void nfs41_sequence_prepare(struct rpc_task *task, void *data) nfs41_sequence_prepare() argument
7424 args = task->tk_msg.rpc_argp; nfs41_sequence_prepare()
7425 res = task->tk_msg.rpc_resp; nfs41_sequence_prepare()
7427 nfs41_setup_sequence(clp->cl_session, args, res, task); nfs41_sequence_prepare()
7472 struct rpc_task *task; nfs41_proc_async_sequence() local
7477 task = _nfs41_proc_sequence(clp, cred, false); nfs41_proc_async_sequence()
7478 if (IS_ERR(task)) nfs41_proc_async_sequence()
7479 ret = PTR_ERR(task); nfs41_proc_async_sequence()
7481 rpc_put_task_async(task); nfs41_proc_async_sequence()
7488 struct rpc_task *task; nfs4_proc_sequence() local
7491 task = _nfs41_proc_sequence(clp, cred, true); nfs4_proc_sequence()
7492 if (IS_ERR(task)) { nfs4_proc_sequence()
7493 ret = PTR_ERR(task); nfs4_proc_sequence()
7496 ret = rpc_wait_for_completion_task(task); nfs4_proc_sequence()
7498 struct nfs4_sequence_res *res = task->tk_msg.rpc_resp; nfs4_proc_sequence()
7500 if (task->tk_status == 0) nfs4_proc_sequence()
7502 ret = task->tk_status; nfs4_proc_sequence()
7504 rpc_put_task(task); nfs4_proc_sequence()
7516 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data) nfs4_reclaim_complete_prepare() argument
7523 task); nfs4_reclaim_complete_prepare()
7526 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp) nfs41_reclaim_complete_handle_errors() argument
7528 switch(task->tk_status) { nfs41_reclaim_complete_handle_errors()
7534 rpc_delay(task, NFS4_POLL_RETRY_MAX); nfs41_reclaim_complete_handle_errors()
7544 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data) nfs4_reclaim_complete_done() argument
7551 if (!nfs41_sequence_done(task, res)) nfs4_reclaim_complete_done()
7554 trace_nfs4_reclaim_complete(clp, task->tk_status); nfs4_reclaim_complete_done()
7555 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) { nfs4_reclaim_complete_done()
7556 rpc_restart_call_prepare(task); nfs4_reclaim_complete_done()
7582 struct rpc_task *task; nfs41_proc_reclaim_complete() local
7607 task = rpc_run_task(&task_setup_data); nfs41_proc_reclaim_complete()
7608 if (IS_ERR(task)) { nfs41_proc_reclaim_complete()
7609 status = PTR_ERR(task); nfs41_proc_reclaim_complete()
7612 status = nfs4_wait_for_completion_rpc_task(task); nfs41_proc_reclaim_complete()
7614 status = task->tk_status; nfs41_proc_reclaim_complete()
7615 rpc_put_task(task); nfs41_proc_reclaim_complete()
7623 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata) nfs4_layoutget_prepare() argument
7636 &lgp->res.seq_res, task)) nfs4_layoutget_prepare()
7642 rpc_exit(task, NFS4_OK); nfs4_layoutget_prepare()
7646 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) nfs4_layoutget_done() argument
7655 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status); nfs4_layoutget_done()
7657 if (!nfs41_sequence_done(task, &lgp->res.seq_res)) nfs4_layoutget_done()
7660 switch (task->tk_status) { nfs4_layoutget_done()
7673 timeo = rpc_get_timeout(task->tk_client); nfs4_layoutget_done()
7690 rpc_delay(task, delay); nfs4_layoutget_done()
7691 task->tk_status = 0; nfs4_layoutget_done()
7692 rpc_restart_call_prepare(task); nfs4_layoutget_done()
7715 task->tk_status = 0; nfs4_layoutget_done()
7716 rpc_restart_call_prepare(task); nfs4_layoutget_done()
7719 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) nfs4_layoutget_done()
7720 rpc_restart_call_prepare(task); nfs4_layoutget_done()
7796 struct rpc_task *task; nfs4_proc_layoutget() local
7830 task = rpc_run_task(&task_setup_data); nfs4_proc_layoutget()
7831 if (IS_ERR(task)) nfs4_proc_layoutget()
7832 return ERR_CAST(task); nfs4_proc_layoutget()
7833 status = nfs4_wait_for_completion_rpc_task(task); nfs4_proc_layoutget()
7835 status = task->tk_status; nfs4_proc_layoutget()
7843 rpc_put_task(task); nfs4_proc_layoutget()
7851 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) nfs4_layoutreturn_prepare() argument
7859 task); nfs4_layoutreturn_prepare()
7862 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) nfs4_layoutreturn_done() argument
7869 if (!nfs41_sequence_done(task, &lrp->res.seq_res)) nfs4_layoutreturn_done()
7873 switch (task->tk_status) { nfs4_layoutreturn_done()
7875 task->tk_status = 0; nfs4_layoutreturn_done()
7879 if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN) nfs4_layoutreturn_done()
7881 rpc_restart_call_prepare(task); nfs4_layoutreturn_done()
7915 struct rpc_task *task; nfs4_proc_layoutreturn() local
7940 task = rpc_run_task(&task_setup_data); nfs4_proc_layoutreturn()
7941 if (IS_ERR(task)) nfs4_proc_layoutreturn()
7942 return PTR_ERR(task); nfs4_proc_layoutreturn()
7944 status = task->tk_status; nfs4_proc_layoutreturn()
7947 rpc_put_task(task); nfs4_proc_layoutreturn()
8000 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata) nfs4_layoutcommit_prepare() argument
8009 task); nfs4_layoutcommit_prepare()
8013 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata) nfs4_layoutcommit_done() argument
8018 if (!nfs41_sequence_done(task, &data->res.seq_res)) nfs4_layoutcommit_done()
8021 switch (task->tk_status) { /* Just ignore these failures */ nfs4_layoutcommit_done()
8026 task->tk_status = 0; nfs4_layoutcommit_done()
8030 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) { nfs4_layoutcommit_done()
8031 rpc_restart_call_prepare(task); nfs4_layoutcommit_done()
8065 .task = &data->task, nfs4_proc_layoutcommit()
8071 struct rpc_task *task; nfs4_proc_layoutcommit() local
8076 data->task.tk_pid, sync, nfs4_proc_layoutcommit()
8089 task = rpc_run_task(&task_setup_data); nfs4_proc_layoutcommit()
8090 if (IS_ERR(task)) nfs4_proc_layoutcommit()
8091 return PTR_ERR(task); nfs4_proc_layoutcommit()
8093 status = task->tk_status; nfs4_proc_layoutcommit()
8096 rpc_put_task(task); nfs4_proc_layoutcommit()
8312 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata) nfs41_free_stateid_prepare() argument
8318 task); nfs41_free_stateid_prepare() local
8321 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata) nfs41_free_stateid_done() argument
8325 nfs41_sequence_done(task, &data->res.seq_res); nfs41_free_stateid_done()
8327 switch (task->tk_status) { nfs41_free_stateid_done()
8329 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN) nfs41_free_stateid_done()
8330 rpc_restart_call_prepare(task); nfs41_free_stateid_done()
8397 struct rpc_task *task; nfs41_free_stateid() local
8400 task = _nfs41_free_stateid(server, stateid, cred, true); nfs41_free_stateid()
8401 if (IS_ERR(task)) nfs41_free_stateid()
8402 return PTR_ERR(task); nfs41_free_stateid()
8403 ret = rpc_wait_for_completion_task(task); nfs41_free_stateid()
8405 ret = task->tk_status; nfs41_free_stateid()
8406 rpc_put_task(task); nfs41_free_stateid()
8413 struct rpc_task *task; nfs41_free_lock_state() local
8416 task = _nfs41_free_stateid(server, &lsp->ls_stateid, cred, false); nfs41_free_lock_state()
8418 if (IS_ERR(task)) nfs41_free_lock_state()
8420 rpc_put_task(task); nfs41_free_lock_state()
500 nfs40_setup_sequence(struct nfs4_slot_table *tbl, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, struct rpc_task *task) nfs40_setup_sequence() argument
709 nfs41_setup_sequence(struct nfs4_session *session, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, struct rpc_task *task) nfs41_setup_sequence() argument
773 nfs4_setup_sequence(const struct nfs_server *server, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, struct rpc_task *task) nfs4_setup_sequence() argument
819 nfs4_setup_sequence(const struct nfs_server *server, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, struct rpc_task *task) nfs4_setup_sequence() argument
/linux-4.1.27/arch/tile/include/asm/
H A Dsyscall.h41 static inline void syscall_rollback(struct task_struct *task, syscall_rollback() argument
47 static inline long syscall_get_error(struct task_struct *task, syscall_get_error() argument
54 static inline long syscall_get_return_value(struct task_struct *task, syscall_get_return_value() argument
60 static inline void syscall_set_return_value(struct task_struct *task, syscall_set_return_value() argument
67 static inline void syscall_get_arguments(struct task_struct *task, syscall_get_arguments() argument
76 static inline void syscall_set_arguments(struct task_struct *task, syscall_set_arguments() argument
H A Dprocessor.h81 /* Which hardwall is this task tied to? (or NULL if none) */
83 /* Chains this task into the list at info->task_head. */
114 /* Is this task currently doing a backtrace? */
196 /* Kernel stack top for the task that first boots on this cpu. */
228 /* Return initial ksp value for given task. */
229 #define task_ksp0(task) \
230 ((unsigned long)(task)->stack + THREAD_SIZE - STACK_TOP_DELTA)
233 #define task_pt_regs(task) \
234 ((struct pt_regs *)(task_ksp0(task) - KSTK_PTREGS_GAP) - 1)
238 #define task_sp(task) (task_pt_regs(task)->sp)
239 #define task_pc(task) (task_pt_regs(task)->pc)
241 #define KSTK_EIP(task) task_pc(task)
242 #define KSTK_ESP(task) task_sp(task)
356 #define next_current_ksp0(task) ({ \
357 unsigned long __ksp0 = task_ksp0(task) & ((1UL << CPU_SHIFT) - 1); \
368 #define next_current_ksp0(task) ({ \
369 unsigned long __ksp0 = task_ksp0(task); \
H A Dswitch_to.h21 * switch_to(n) should switch tasks to task nr n, first
22 * checking that n isn't the current task, in which case it does nothing.
34 * Pause the DMA engine and static network before task switching.
68 /* Support function for forking a new task. */
/linux-4.1.27/drivers/media/pci/saa7134/
H A Dsaa7134-vbi.c51 int task) task_init()
56 saa_writeb(SAA7134_VBI_H_START1(task), norm->h_start & 0xff); task_init()
57 saa_writeb(SAA7134_VBI_H_START2(task), norm->h_start >> 8); task_init()
58 saa_writeb(SAA7134_VBI_H_STOP1(task), norm->h_stop & 0xff); task_init()
59 saa_writeb(SAA7134_VBI_H_STOP2(task), norm->h_stop >> 8); task_init()
60 saa_writeb(SAA7134_VBI_V_START1(task), norm->vbi_v_start_0 & 0xff); task_init()
61 saa_writeb(SAA7134_VBI_V_START2(task), norm->vbi_v_start_0 >> 8); task_init()
62 saa_writeb(SAA7134_VBI_V_STOP1(task), norm->vbi_v_stop_0 & 0xff); task_init()
63 saa_writeb(SAA7134_VBI_V_STOP2(task), norm->vbi_v_stop_0 >> 8); task_init()
65 saa_writeb(SAA7134_VBI_H_SCALE_INC1(task), VBI_SCALE & 0xff); task_init()
66 saa_writeb(SAA7134_VBI_H_SCALE_INC2(task), VBI_SCALE >> 8); task_init()
67 saa_writeb(SAA7134_VBI_PHASE_OFFSET_LUMA(task), 0x00); task_init()
68 saa_writeb(SAA7134_VBI_PHASE_OFFSET_CHROMA(task), 0x00); task_init()
70 saa_writeb(SAA7134_VBI_H_LEN1(task), dev->vbi_hlen & 0xff); task_init()
71 saa_writeb(SAA7134_VBI_H_LEN2(task), dev->vbi_hlen >> 8); task_init()
72 saa_writeb(SAA7134_VBI_V_LEN1(task), dev->vbi_vlen & 0xff); task_init()
73 saa_writeb(SAA7134_VBI_V_LEN2(task), dev->vbi_vlen >> 8); task_init()
75 saa_andorb(SAA7134_DATA_PATH(task), 0xc0, 0x00); task_init()
50 task_init(struct saa7134_dev *dev, struct saa7134_buf *buf, int task) task_init() argument
/linux-4.1.27/arch/sparc/include/asm/
H A Dsyscall.h18 static inline long syscall_get_nr(struct task_struct *task, syscall_get_nr() argument
26 static inline void syscall_rollback(struct task_struct *task, syscall_rollback() argument
66 static inline long syscall_get_error(struct task_struct *task, syscall_get_error() argument
74 static inline long syscall_get_return_value(struct task_struct *task, syscall_get_return_value() argument
82 static inline void syscall_set_return_value(struct task_struct *task, syscall_set_return_value() argument
95 static inline void syscall_get_arguments(struct task_struct *task, syscall_get_arguments() argument
104 if (test_tsk_thread_flag(task, TIF_32BIT)) syscall_get_arguments()
118 static inline void syscall_set_arguments(struct task_struct *task, syscall_set_arguments() argument
/linux-4.1.27/arch/mips/include/asm/
H A Dsyscall.h29 static inline long syscall_get_nr(struct task_struct *task, syscall_get_nr() argument
36 struct task_struct *task, struct pt_regs *regs, unsigned int n) mips_get_syscall_arg()
70 static inline long syscall_get_return_value(struct task_struct *task, syscall_get_return_value() argument
76 static inline void syscall_rollback(struct task_struct *task, syscall_rollback() argument
82 static inline void syscall_set_return_value(struct task_struct *task, syscall_set_return_value() argument
95 static inline void syscall_get_arguments(struct task_struct *task, syscall_get_arguments() argument
103 test_tsk_thread_flag(task, TIF_32BIT_REGS)) && syscall_get_arguments()
108 ret |= mips_get_syscall_arg(args++, task, regs, i++); syscall_get_arguments()
35 mips_get_syscall_arg(unsigned long *arg, struct task_struct *task, struct pt_regs *regs, unsigned int n) mips_get_syscall_arg() argument
H A Dstacktrace.h8 extern unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
16 static inline unsigned long unwind_stack(struct task_struct *task, unwind_stack() argument
/linux-4.1.27/arch/frv/include/asm/
H A Dcurrent.h1 /* current.h: FRV current task pointer
18 * dedicate GR29 to keeping the current task pointer
H A Dsyscall.h21 static inline long syscall_get_nr(struct task_struct *task, syscall_get_nr() argument
31 static inline void syscall_rollback(struct task_struct *task, syscall_rollback() argument
41 static inline long syscall_get_error(struct task_struct *task, syscall_get_error() argument
50 static inline long syscall_get_return_value(struct task_struct *task, syscall_get_return_value() argument
59 static inline void syscall_set_return_value(struct task_struct *task, syscall_set_return_value() argument
72 static inline void syscall_get_arguments(struct task_struct *task, syscall_get_arguments() argument
102 static inline void syscall_set_arguments(struct task_struct *task, syscall_set_arguments() argument
H A Dptrace.h36 #define task_pt_regs(task) ((task)->thread.frame0)
H A Dswitch_to.h1 /* FR-V CPU basic task switching
18 * switch_to(prev, next) should switch from task `prev' to `next'
/linux-4.1.27/drivers/infiniband/ulp/iser/
H A Discsi_iser.c150 * @task: iscsi task
153  * Notes: This routine can't fail, just assign iscsi task
157 iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode) iscsi_iser_pdu_alloc() argument
159 struct iscsi_iser_task *iser_task = task->dd_data; iscsi_iser_pdu_alloc()
161 task->hdr = (struct iscsi_hdr *)&iser_task->desc.iscsi_header; iscsi_iser_pdu_alloc()
162 task->hdr_max = sizeof(iser_task->desc.iscsi_header); iscsi_iser_pdu_alloc()
168 * iser_initialize_task_headers() - Initialize task headers
169 * @task: iscsi task
179 iser_initialize_task_headers(struct iscsi_task *task, iser_initialize_task_headers() argument
182 struct iser_conn *iser_conn = task->conn->dd_data; iser_initialize_task_headers()
184 struct iscsi_iser_task *iser_task = task->dd_data; iser_initialize_task_headers()
186 const bool mgmt_task = !task->sc && !in_interrupt(); iser_initialize_task_headers()
219 * iscsi_iser_task_init() - Initialize iscsi-iser task
220 * @task: iscsi task
222 * Initialize the task for the scsi command or mgmt command.
225 * to init task headers (dma mapping error).
228 iscsi_iser_task_init(struct iscsi_task *task) iscsi_iser_task_init() argument
230 struct iscsi_iser_task *iser_task = task->dd_data; iscsi_iser_task_init()
233 ret = iser_initialize_task_headers(task, &iser_task->desc); iscsi_iser_task_init()
235 iser_err("Failed to init task %p, err = %d\n", iscsi_iser_task_init()
240 /* mgmt task */ iscsi_iser_task_init()
241 if (!task->sc) iscsi_iser_task_init()
246 iser_task->sc = task->sc; iscsi_iser_task_init()
252 * iscsi_iser_mtask_xmit() - xmit management (immediate) task
254 * @task: task management task
263 iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task) iscsi_iser_mtask_xmit() argument
267 iser_dbg("mtask xmit [cid %d itt 0x%x]\n", conn->id, task->itt); iscsi_iser_mtask_xmit()
269 error = iser_send_control(conn, task); iscsi_iser_mtask_xmit()
274 * - if yes, the task is recycled at iscsi_complete_pdu iscsi_iser_mtask_xmit()
275 * - if no, the task is recycled at iser_snd_completion iscsi_iser_mtask_xmit()
282 struct iscsi_task *task) iscsi_iser_task_xmit_unsol_data()
284 struct iscsi_r2t_info *r2t = &task->unsol_r2t; iscsi_iser_task_xmit_unsol_data()
289 while (iscsi_task_has_unsol_data(task)) { iscsi_iser_task_xmit_unsol_data()
290 iscsi_prep_data_out_pdu(task, r2t, &hdr); iscsi_iser_task_xmit_unsol_data()
296 error = iser_send_data_out(conn, task, &hdr); iscsi_iser_task_xmit_unsol_data()
311 * iscsi_iser_task_xmit() - xmit iscsi-iser task
312 * @task: iscsi task
317 iscsi_iser_task_xmit(struct iscsi_task *task) iscsi_iser_task_xmit() argument
319 struct iscsi_conn *conn = task->conn; iscsi_iser_task_xmit()
320 struct iscsi_iser_task *iser_task = task->dd_data; iscsi_iser_task_xmit()
323 if (!task->sc) iscsi_iser_task_xmit()
324 return iscsi_iser_mtask_xmit(conn, task); iscsi_iser_task_xmit()
326 if (task->sc->sc_data_direction == DMA_TO_DEVICE) { iscsi_iser_task_xmit()
327 BUG_ON(scsi_bufflen(task->sc) == 0); iscsi_iser_task_xmit()
330 task->itt, scsi_bufflen(task->sc), iscsi_iser_task_xmit()
331 task->imm_count, task->unsol_r2t.data_length); iscsi_iser_task_xmit()
335 conn->id, task->itt); iscsi_iser_task_xmit()
339 error = iser_send_command(conn, task); iscsi_iser_task_xmit()
346 if (iscsi_task_has_unsol_data(task)) iscsi_iser_task_xmit()
347 error = iscsi_iser_task_xmit_unsol_data(conn, task); iscsi_iser_task_xmit()
354 * iscsi_iser_cleanup_task() - cleanup an iscsi-iser task
355 * @task: iscsi task
361 static void iscsi_iser_cleanup_task(struct iscsi_task *task) iscsi_iser_cleanup_task() argument
363 struct iscsi_iser_task *iser_task = task->dd_data; iscsi_iser_cleanup_task()
365 struct iser_conn *iser_conn = task->conn->dd_data; iscsi_iser_cleanup_task()
379 if (!task->sc) iscsi_iser_cleanup_task()
389 * iscsi_iser_check_protection() - check protection information status of task.
390 * @task: iscsi task
401 iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector) iscsi_iser_check_protection() argument
403 struct iscsi_iser_task *iser_task = task->dd_data; iscsi_iser_check_protection()
281 iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn, struct iscsi_task *task) iscsi_iser_task_xmit_unsol_data() argument
H A Diser_initiator.c45 * task->data[ISER_DIR_IN].data_len, Protection size
46  * is stored in task->prot[ISER_DIR_IN].data_len
48 static int iser_prepare_read_cmd(struct iscsi_task *task) iser_prepare_read_cmd() argument
51 struct iscsi_iser_task *iser_task = task->dd_data; iser_prepare_read_cmd()
88 task->itt, mem_reg->rkey, iser_prepare_read_cmd()
96 * task->data[ISER_DIR_OUT].data_len, Protection size
97 * is stored at task->prot[ISER_DIR_OUT].data_len
100 iser_prepare_write_cmd(struct iscsi_task *task, iser_prepare_write_cmd() argument
105 struct iscsi_iser_task *iser_task = task->dd_data; iser_prepare_write_cmd()
146 task->itt, mem_reg->rkey, iser_prepare_write_cmd()
152 task->itt, imm_sz); iser_prepare_write_cmd()
377 struct iscsi_task *task) iser_send_command()
380 struct iscsi_iser_task *iser_task = task->dd_data; iser_send_command()
384 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr; iser_send_command()
385 struct scsi_cmnd *sc = task->sc; iser_send_command()
417 err = iser_prepare_read_cmd(task); iser_send_command()
422 err = iser_prepare_write_cmd(task, iser_send_command()
423 task->imm_count, iser_send_command()
424 task->imm_count + iser_send_command()
425 task->unsol_r2t.data_length, iser_send_command()
439 iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err); iser_send_command()
447 struct iscsi_task *task, iser_send_data_out()
451 struct iscsi_iser_task *iser_task = task->dd_data; iser_send_data_out()
478 err = iser_initialize_task_headers(task, tx_desc); iser_send_data_out()
512 struct iscsi_task *task) iser_send_control()
515 struct iscsi_iser_task *iser_task = task->dd_data; iser_send_control()
527 data_seg_len = ntoh24(task->hdr->dlength); iser_send_control()
531 if (task != conn->login_task) { iser_send_control()
532 iser_err("data present on non login task!!!\n"); iser_send_control()
537 iser_conn->login_req_dma, task->data_count, iser_send_control()
540 memcpy(iser_conn->login_req_buf, task->data, task->data_count); iser_send_control()
543 iser_conn->login_req_dma, task->data_count, iser_send_control()
547 tx_dsg->length = task->data_count; iser_send_control()
552 if (task == conn->login_task) { iser_send_control()
554 task->hdr->opcode, data_seg_len); iser_send_control()
558 err = iser_post_rx_bufs(conn, task->hdr); iser_send_control()
609  * task eliminates the need to worry about tasks which are completed in * iser_rcv_completion()
630 struct iscsi_task *task; iser_snd_completion() local
642 task = (void *) ((long)(void *)tx_desc - iser_snd_completion()
644 if (task->hdr->itt == RESERVED_ITT) iser_snd_completion()
645 iscsi_put_task(task); iser_snd_completion()
376 iser_send_command(struct iscsi_conn *conn, struct iscsi_task *task) iser_send_command() argument
446 iser_send_data_out(struct iscsi_conn *conn, struct iscsi_task *task, struct iscsi_data *hdr) iser_send_data_out() argument
511 iser_send_control(struct iscsi_conn *conn, struct iscsi_task *task) iser_send_control() argument
/linux-4.1.27/fs/nfs/filelayout/
H A Dfilelayout.c89 struct rpc_task *task = &hdr->task; filelayout_reset_write() local
92 dprintk("%s Reset task %5u for i/o through MDS " filelayout_reset_write()
94 hdr->task.tk_pid, filelayout_reset_write()
100 task->tk_status = pnfs_write_done_resend_to_mds(hdr); filelayout_reset_write()
106 struct rpc_task *task = &hdr->task; filelayout_reset_read() local
109 dprintk("%s Reset task %5u for i/o through MDS " filelayout_reset_read()
111 hdr->task.tk_pid, filelayout_reset_read()
117 task->tk_status = pnfs_read_done_resend_to_mds(hdr); filelayout_reset_read()
121 static int filelayout_async_handle_error(struct rpc_task *task, filelayout_async_handle_error() argument
133 if (task->tk_status >= 0) filelayout_async_handle_error()
136 switch (task->tk_status) { filelayout_async_handle_error()
163 "flags 0x%x\n", __func__, task->tk_status, filelayout_async_handle_error()
165 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status); filelayout_async_handle_error()
169 rpc_delay(task, FILELAYOUT_POLL_RETRY_MAX); filelayout_async_handle_error()
181 task->tk_status); filelayout_async_handle_error()
201 task->tk_status); filelayout_async_handle_error()
209 task->tk_status); filelayout_async_handle_error()
213 task->tk_status = 0; filelayout_async_handle_error()
216 task->tk_status = -EIO; filelayout_async_handle_error()
219 rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL); filelayout_async_handle_error()
221 rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task); filelayout_async_handle_error()
227 static int filelayout_read_done_cb(struct rpc_task *task, filelayout_read_done_cb() argument
232 trace_nfs4_pnfs_read(hdr, task->tk_status); filelayout_read_done_cb()
233 err = filelayout_async_handle_error(task, hdr->args.context->state, filelayout_read_done_cb()
239 return task->tk_status; filelayout_read_done_cb()
241 rpc_restart_call_prepare(task); filelayout_read_done_cb()
287 static void filelayout_read_prepare(struct rpc_task *task, void *data) filelayout_read_prepare() argument
292 rpc_exit(task, -EIO); filelayout_read_prepare()
296 dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid); filelayout_read_prepare()
298 rpc_exit(task, 0); filelayout_read_prepare()
306 task)) filelayout_read_prepare()
310 rpc_exit(task, -EIO); /* lost lock, terminate I/O */ filelayout_read_prepare()
313 static void filelayout_read_call_done(struct rpc_task *task, void *data) filelayout_read_call_done() argument
317 dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status); filelayout_read_call_done()
320 task->tk_status == 0) { filelayout_read_call_done()
321 nfs41_sequence_done(task, &hdr->res.seq_res); filelayout_read_call_done()
326 hdr->mds_ops->rpc_call_done(task, data); filelayout_read_call_done()
329 static void filelayout_read_count_stats(struct rpc_task *task, void *data) filelayout_read_count_stats() argument
333 rpc_count_iostats(task, NFS_SERVER(hdr->inode)->client->cl_metrics); filelayout_read_count_stats()
336 static int filelayout_write_done_cb(struct rpc_task *task, filelayout_write_done_cb() argument
341 trace_nfs4_pnfs_write(hdr, task->tk_status); filelayout_write_done_cb()
342 err = filelayout_async_handle_error(task, hdr->args.context->state, filelayout_write_done_cb()
348 return task->tk_status; filelayout_write_done_cb()
350 rpc_restart_call_prepare(task); filelayout_write_done_cb()
358 static int filelayout_commit_done_cb(struct rpc_task *task, filelayout_commit_done_cb() argument
363 trace_nfs4_pnfs_commit_ds(data, task->tk_status); filelayout_commit_done_cb()
364 err = filelayout_async_handle_error(task, NULL, data->ds_clp, filelayout_commit_done_cb()
372 rpc_restart_call_prepare(task); filelayout_commit_done_cb()
382 static void filelayout_write_prepare(struct rpc_task *task, void *data) filelayout_write_prepare() argument
387 rpc_exit(task, -EIO); filelayout_write_prepare()
391 dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid); filelayout_write_prepare()
393 rpc_exit(task, 0); filelayout_write_prepare()
399 task)) filelayout_write_prepare()
403 rpc_exit(task, -EIO); /* lost lock, terminate I/O */ filelayout_write_prepare()
406 static void filelayout_write_call_done(struct rpc_task *task, void *data) filelayout_write_call_done() argument
411 task->tk_status == 0) { filelayout_write_call_done()
412 nfs41_sequence_done(task, &hdr->res.seq_res); filelayout_write_call_done()
417 hdr->mds_ops->rpc_call_done(task, data); filelayout_write_call_done()
420 static void filelayout_write_count_stats(struct rpc_task *task, void *data) filelayout_write_count_stats() argument
424 rpc_count_iostats(task, NFS_SERVER(hdr->inode)->client->cl_metrics); filelayout_write_count_stats()
427 static void filelayout_commit_prepare(struct rpc_task *task, void *data) filelayout_commit_prepare() argument
434 task); filelayout_commit_prepare()
437 static void filelayout_commit_count_stats(struct rpc_task *task, void *data) filelayout_commit_count_stats() argument
441 rpc_count_iostats(task, NFS_SERVER(cdata->inode)->client->cl_metrics); filelayout_commit_count_stats()
/linux-4.1.27/drivers/staging/iio/trigger/
H A Diio-trig-periodic-rtc.c28 struct rtc_task task; member in struct:iio_prtc_trigger_info
41 ret = rtc_irq_set_state(trig_info->rtc, &trig_info->task, state); iio_trig_periodic_rtc_set_state()
73 ret = rtc_irq_set_freq(trig_info->rtc, &trig_info->task, val); iio_trig_periodic_write_freq()
76 &trig_info->task, 1); iio_trig_periodic_write_freq()
78 ret = rtc_irq_set_state(trig_info->rtc, &trig_info->task, 0); iio_trig_periodic_write_freq()
149 trig_info->task.func = iio_prtc_trigger_poll; iio_trig_periodic_rtc_probe()
150 trig_info->task.private_data = trig; iio_trig_periodic_rtc_probe()
151 ret = rtc_irq_register(trig_info->rtc, &trig_info->task); iio_trig_periodic_rtc_probe()
161 rtc_irq_unregister(trig_info->rtc, &trig_info->task); iio_trig_periodic_rtc_probe()
175 rtc_irq_unregister(trig_info->rtc, &trig_info->task); iio_trig_periodic_rtc_probe()
194 rtc_irq_unregister(trig_info->rtc, &trig_info->task); iio_trig_periodic_rtc_remove()
/linux-4.1.27/include/linux/sunrpc/
H A Dxprt.h72 struct rpc_task * rq_task; /* RPC task data */
121 int (*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
122 void (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
123 void (*alloc_slot)(struct rpc_xprt *xprt, struct rpc_task *task);
124 void (*rpcbind)(struct rpc_task *task);
126 void (*connect)(struct rpc_xprt *xprt, struct rpc_task *task);
127 void * (*buf_alloc)(struct rpc_task *task, size_t size);
129 int (*send_request)(struct rpc_task *task);
130 void (*set_retrans_timeout)(struct rpc_task *task);
131 void (*timer)(struct rpc_xprt *xprt, struct rpc_task *task);
132 void (*release_request)(struct rpc_task *task);
293 void xprt_connect(struct rpc_task *task);
294 void xprt_reserve(struct rpc_task *task);
295 void xprt_retry_reserve(struct rpc_task *task);
296 int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
297 int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
298 void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
299 void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
300 bool xprt_prepare_transmit(struct rpc_task *task);
301 void xprt_transmit(struct rpc_task *task);
302 void xprt_end_transmit(struct rpc_task *task);
304 void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
305 void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
306 void xprt_release(struct rpc_task *task);
336 void xprt_set_retrans_timeout_def(struct rpc_task *task);
337 void xprt_set_retrans_timeout_rtt(struct rpc_task *task);
339 void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action);
341 void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result);
343 void xprt_complete_rqst(struct rpc_task *task, int copied);
344 void xprt_release_rqst_cong(struct rpc_task *task);
H A Dsched.h41 * This is the RPC task struct
71 struct work_struct tk_work; /* Async task work queue */
75 ktime_t tk_start; /* RPC task init timestamp */
101 struct rpc_task *task; member in struct:rpc_task_setup
112 * RPC task flags
114 #define RPC_TASK_ASYNC 0x0001 /* is an async task */
118 #define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */
119 #define RPC_TASK_KILLED 0x0100 /* task was killed */
164 * the task initialization definitions below.
183 struct list_head tasks[RPC_NR_PRIORITY]; /* task queue for each priority level */
184 pid_t owner; /* process id of last task serviced */
239 int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *);
247 void rpc_prepare_task(struct rpc_task *task);
249 static inline int rpc_wait_for_completion_task(struct rpc_task *task) rpc_wait_for_completion_task() argument
251 return __rpc_wait_for_completion_task(task, NULL); rpc_wait_for_completion_task()
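
The nfs4proc.c hits above all repeat one idiom around these sched.h declarations: build an rpc_task_setup, run it with rpc_run_task(), wait with rpc_wait_for_completion_task(), read tk_status, then drop the reference with rpc_put_task(). A condensed sketch of that pattern follows; run_one_rpc() and its parameters are invented for illustration, only the sunrpc calls and struct fields themselves are real.

#include <linux/err.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>

/* Hypothetical helper condensing the run/wait/put pattern seen in the
 * nfs4proc.c listings above; not a function from this tree. */
static int run_one_rpc(struct rpc_clnt *clnt, const struct rpc_message *msg,
		       const struct rpc_call_ops *ops, void *data)
{
	struct rpc_task_setup task_setup_data = {
		.rpc_client    = clnt,
		.rpc_message   = msg,
		.callback_ops  = ops,
		.callback_data = data,
		.flags         = RPC_TASK_ASYNC,
	};
	struct rpc_task *task;
	int status;

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);

	status = rpc_wait_for_completion_task(task);
	if (status == 0)
		status = task->tk_status;	/* result of the RPC itself */
	rpc_put_task(task);
	return status;
}
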
/linux-4.1.27/arch/arm/kernel/
H A Diwmmxt.S89 add r0, r10, #TI_IWMMXT_STATE @ get task Concan save area
90 ldr r2, [sp, #60] @ current task pc value
92 str r0, [r3] @ this task now owns Concan regs
192 * r0 = struct thread_info pointer of target task or NULL for any
204 add r2, r0, #TI_IWMMXT_STATE @ get task Concan save area
243 * r0 = struct thread_info pointer of target task
256 add r2, r0, #TI_IWMMXT_STATE @ get task Concan save area
258 teq r2, r3 @ does this task own it...
261 @ current Concan values are in the task save area
268 1: @ this task owns Concan regs -- grab a copy from there
281 * r0 = struct thread_info pointer of target task
294 add r2, r0, #TI_IWMMXT_STATE @ get task Concan save area
297 teq r2, r3 @ does this task own it...
300 @ this task doesn't own Concan regs -- use its save area
306 1: @ this task owns Concan regs -- load them directly
317 * Concan handling on task switch
321 * Called only from the iwmmxt notifier with task preemption disabled.
330 bne 1f @ yes: block them for next task
333 add r3, r0, #TI_IWMMXT_STATE @ get next task Concan save area
335 teq r2, r3 @ next task owns it?
350 * Remove Concan ownership of given task
360 add r0, r0, #TI_IWMMXT_STATE @ get task Concan save area
/linux-4.1.27/arch/arm64/kernel/
H A Dfpsimd.c40 * (a) for each task, we need to remember which CPU was the last one to have
41 * the task's FPSIMD state loaded into its FPSIMD registers;
42 * (b) for each CPU, we need to remember which task's userland FPSIMD state has
49 * address of the userland FPSIMD state of the task that was loaded onto the CPU
55 * task's fpsimd_state.cpu are still mutually in sync. If this is the case, we
59 * indicate whether or not the userland FPSIMD state of the current task is
62 * task.
64 * For a certain task, the sequence may look something like this:
65 * - the task gets scheduled in; if both the task's fpsimd_state.cpu field
67 * variable points to the task's fpsimd_state, the TIF_FOREIGN_FPSTATE flag is
70 * - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's
71 * userland FPSIMD state is copied from memory to the registers, the task's
73 * CPU's fpsimd_last_state pointer is set to this task's fpsimd_state and the
76 * - the task executes an ordinary syscall; upon return to userland, the
80 * - the task executes a syscall which executes some NEON instructions; this is
81 * preceded by a call to kernel_neon_begin(), which copies the task's FPSIMD
85 * - the task gets preempted after kernel_neon_end() is called; as we have not
139 * If we are switching to a task whose most recent userland fpsimd_thread_switch()
213 * Invalidate live CPU copies of task t's FPSIMD state
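
The fpsimd.c comments above describe lazy FPSIMD switching through two links that must agree: each task remembers which CPU last loaded its state, and each CPU remembers whose state its registers currently hold. The userspace toy below only models that ownership check; the struct names and needs_reload() are invented for the sketch and none of it is kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-ins for the per-task and per-CPU bookkeeping described
 * in the fpsimd.c comment; not the kernel's actual structures. */
struct toy_fpstate {
	int last_cpu;			/* CPU that last loaded this task's state */
};

struct toy_cpu {
	struct toy_fpstate *owner;	/* whose state the FP registers hold, or NULL */
};

/* A reload is needed unless this CPU still holds this task's state and
 * the task was last loaded on this CPU (both links must agree). */
static bool needs_reload(const struct toy_cpu *cpu, const struct toy_fpstate *st, int cpu_id)
{
	return cpu->owner != st || st->last_cpu != cpu_id;
}

static void schedule_in(struct toy_cpu *cpu, struct toy_fpstate *st, int cpu_id)
{
	if (needs_reload(cpu, st, cpu_id)) {
		/* would copy the saved state into the FP registers here */
		cpu->owner = st;
		st->last_cpu = cpu_id;
		printf("reload on cpu %d\n", cpu_id);
	} else {
		printf("registers still valid on cpu %d, skip reload\n", cpu_id);
	}
}

int main(void)
{
	struct toy_cpu cpu0 = { NULL }, cpu1 = { NULL };
	struct toy_fpstate a = { .last_cpu = -1 };

	schedule_in(&cpu0, &a, 0);	/* first run: reload */
	schedule_in(&cpu0, &a, 0);	/* same CPU, untouched: skip */
	schedule_in(&cpu1, &a, 1);	/* migrated: reload */
	return 0;
}
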
H A Dperf_regs.c46 u64 perf_reg_abi(struct task_struct *task) perf_reg_abi() argument
48 if (is_compat_thread(task_thread_info(task))) perf_reg_abi()
/linux-4.1.27/drivers/misc/cxl/
H A Dfault.c175 struct task_struct *task; cxl_handle_fault() local
197 if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) { cxl_handle_fault()
198 pr_devel("cxl_handle_fault unable to get task %i\n", cxl_handle_fault()
203 if (!(mm = get_task_mm(task))) { cxl_handle_fault()
219 put_task_struct(task); cxl_handle_fault()
225 struct task_struct *task; cxl_prefault_one() local
228 if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) { cxl_prefault_one()
229 pr_devel("cxl_prefault_one unable to get task %i\n", cxl_prefault_one()
233 if (!(mm = get_task_mm(task))) { cxl_prefault_one()
236 put_task_struct(task); cxl_prefault_one()
243 put_task_struct(task); cxl_prefault_one()
262 struct task_struct *task; cxl_prefault_vma() local
265 if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) { cxl_prefault_vma()
266 pr_devel("cxl_prefault_vma unable to get task %i\n", cxl_prefault_vma()
270 if (!(mm = get_task_mm(task))) { cxl_prefault_vma()
295 put_task_struct(task); cxl_prefault_vma()
/linux-4.1.27/drivers/staging/unisys/uislib/
H A Duisthread.c43 thrinfo->task = kthread_run(threadfn, thrcontext, name); uisthread_start()
44 if (IS_ERR(thrinfo->task)) { uisthread_start()
48 thrinfo->id = thrinfo->task->pid; uisthread_start()
61 kthread_stop(thrinfo->task); uisthread_stop()
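
uisthread_start()/uisthread_stop() above are thin wrappers around kthread_run() and kthread_stop(). A minimal generic sketch of that pairing follows; worker_fn, start_worker and stop_worker are made-up names, only the kthread calls are real kernel API.

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/kthread.h>

static struct task_struct *worker;

static int worker_fn(void *data)
{
	/* kthread_stop() only returns once this loop exits, so the thread
	 * function must poll kthread_should_stop(). */
	while (!kthread_should_stop()) {
		/* ... do one unit of work ... */
		msleep(100);
	}
	return 0;
}

static int start_worker(void *ctx)
{
	worker = kthread_run(worker_fn, ctx, "toy-worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);
	return 0;
}

static void stop_worker(void)
{
	if (worker && !IS_ERR(worker))
		kthread_stop(worker);	/* blocks until worker_fn() returns */
}
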
/linux-4.1.27/arch/x86/um/
H A Dtls_64.c3 void clear_flushed_tls(struct task_struct *task) clear_flushed_tls() argument
/linux-4.1.27/arch/m68k/include/asm/
H A Dcurrent.h21 return(current_thread_info()->task); get_current()
H A Dswitch_to.h5 * switch_to(n) should switch tasks to task ptr, first checking that
6 * ptr isn't the current task, in which case it does nothing. This
7 * also clears the TS-flag if the task we switched to has used the
14  * and so we might see unexpected behaviors when a task returns
/linux-4.1.27/kernel/sched/
H A Dstop_task.c4 * stop-task scheduling class.
6 * The stop task is the highest priority task in the system, it preempts
55  BUG(); /* the stop task should never yield, it's pointless. */ yield_task_stop()
100 get_rr_interval_stop(struct rq *rq, struct task_struct *task) get_rr_interval_stop() argument
H A Dcompletion.c27 * changing the task state if and only if any tasks are woken up.
47 * changing the task state if and only if any tasks are woken up.
111 * wait_for_completion: - waits for completion of a task
114 * This waits to be signaled for completion of a specific task. It is NOT
127 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
131 * This waits for either a completion of a specific task to be signaled or for a
146 * wait_for_completion_io: - waits for completion of a task
149 * This waits to be signaled for completion of a specific task. It is NOT
160 * wait_for_completion_io_timeout: - waits for completion of a task (w/timeout)
164 * This waits for either a completion of a specific task to be signaled or for a
180 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
183 * This waits for completion of a specific task to be signaled. It is
202 * This waits for either a completion of a specific task to be signaled or for a
217 * wait_for_completion_killable: - waits for completion of a task (killable)
220 * This waits to be signaled for completion of a specific task. It can be
235 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
239 * This waits for either a completion of a specific task to be
/linux-4.1.27/drivers/scsi/bnx2fc/
H A Dbnx2fc_constants.h102 * start processing the task. In case no timer facilities are required then the
106 * After driver has initialize the task in case timer services required *
106  * After the driver has initialized the task, in case timer services are required *
109  /* After the driver has initialized the task, in case timer services are required */
117 * pending WQEs on this task
124 /* For completed unsolicited task */
126 /* For exchange cleanup request task */
128 /* For sequence cleanup request task */
130 /* For completion the ABTS task. */
145 /* Special completion indication in case of task was aborted. */
149 /* FW only: Special completion indication in case of task was cleaned. */
151 /* Not in used: Special completion indication (in task requested the exchange
152 * cleanup) in case cleaned task is in non-valid.
155 /* Special completion indication (in task requested the sequence cleanup) in
156 * case cleaned task was already returned to normal.
H A Dbnx2fc_hwi.c635 struct fcoe_task_ctx_entry *task, *task_page; bnx2fc_process_unsol_compl() local
715 task = &(task_page[index]); bnx2fc_process_unsol_compl()
843 task = &(task_page[index]); bnx2fc_process_unsol_compl()
874 struct fcoe_task_ctx_entry *task; bnx2fc_process_cq_compl() local
896 task = &(task_page[index]); bnx2fc_process_cq_compl()
898 num_rq = ((task->rxwr_txrd.var_ctx.rx_flags & bnx2fc_process_cq_compl()
913 rx_state = ((task->rxwr_txrd.var_ctx.rx_flags & bnx2fc_process_cq_compl()
921 bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq); bnx2fc_process_cq_compl()
927 bnx2fc_process_abts_compl(io_req, task, num_rq); bnx2fc_process_cq_compl()
930 bnx2fc_process_cleanup_compl(io_req, task, num_rq); bnx2fc_process_cq_compl()
938 bnx2fc_process_tm_compl(io_req, task, num_rq); bnx2fc_process_cq_compl()
944 * will be delivered to the task belonging to the IO bnx2fc_process_cq_compl()
953 bnx2fc_process_els_compl(io_req, task, num_rq); bnx2fc_process_cq_compl()
955 bnx2fc_process_abts_compl(io_req, task, num_rq); bnx2fc_process_cq_compl()
958 bnx2fc_process_cleanup_compl(io_req, task, num_rq); bnx2fc_process_cq_compl()
972 bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state); bnx2fc_process_cq_compl()
1460 struct fcoe_task_ctx_entry *task, bnx2fc_init_seq_cleanup_task()
1481 memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); bnx2fc_init_seq_cleanup_task()
1489 task->txwr_rxrd.const_ctx.tx_flags = bnx2fc_init_seq_cleanup_task()
1493 task->txwr_rxrd.const_ctx.init_flags = task_type << bnx2fc_init_seq_cleanup_task()
1495 task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << bnx2fc_init_seq_cleanup_task()
1497 task->rxwr_txrd.const_ctx.init_flags = context_id << bnx2fc_init_seq_cleanup_task()
1499 task->rxwr_txrd.const_ctx.init_flags = context_id << bnx2fc_init_seq_cleanup_task()
1502 task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid; bnx2fc_init_seq_cleanup_task()
1504 task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0; bnx2fc_init_seq_cleanup_task()
1505 task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset; bnx2fc_init_seq_cleanup_task()
1518 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = bnx2fc_init_seq_cleanup_task()
1520 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = bnx2fc_init_seq_cleanup_task()
1522 task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = bnx2fc_init_seq_cleanup_task()
1524 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off = bnx2fc_init_seq_cleanup_task()
1526 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i; bnx2fc_init_seq_cleanup_task()
1536 sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; bnx2fc_init_seq_cleanup_task()
1543 memset(&task->rxwr_only.rx_seq_ctx, 0, bnx2fc_init_seq_cleanup_task()
1545 task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset; bnx2fc_init_seq_cleanup_task()
1546 task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset; bnx2fc_init_seq_cleanup_task()
1550 struct fcoe_task_ctx_entry *task, bnx2fc_init_cleanup_task()
1557 memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); bnx2fc_init_cleanup_task()
1561 task->txwr_rxrd.const_ctx.init_flags = task_type << bnx2fc_init_cleanup_task()
1563 task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << bnx2fc_init_cleanup_task()
1566 task->txwr_rxrd.const_ctx.init_flags |= bnx2fc_init_cleanup_task()
1570 task->txwr_rxrd.const_ctx.init_flags |= bnx2fc_init_cleanup_task()
1573 task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid; bnx2fc_init_cleanup_task()
1576 task->txwr_rxrd.const_ctx.tx_flags = bnx2fc_init_cleanup_task()
1581 task->rxwr_txrd.const_ctx.init_flags = context_id << bnx2fc_init_cleanup_task()
1583 task->rxwr_txrd.var_ctx.rx_flags |= 1 << bnx2fc_init_cleanup_task()
1588 struct fcoe_task_ctx_entry *task) bnx2fc_init_mp_task()
1608 memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); bnx2fc_init_mp_task()
1610 /* Setup the task from io_req for easy reference */ bnx2fc_init_mp_task()
1611 io_req->task = task; bnx2fc_init_mp_task()
1613 BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n", bnx2fc_init_mp_task()
1619 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = bnx2fc_init_mp_task()
1621 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = bnx2fc_init_mp_task()
1623 task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1; bnx2fc_init_mp_task()
1628 task->txwr_rxrd.const_ctx.init_flags = task_type << bnx2fc_init_mp_task()
1631 task->txwr_rxrd.const_ctx.init_flags |= bnx2fc_init_mp_task()
1635 task->txwr_rxrd.const_ctx.init_flags |= bnx2fc_init_mp_task()
1638 task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << bnx2fc_init_mp_task()
1642 task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT << bnx2fc_init_mp_task()
1646 task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len; bnx2fc_init_mp_task()
1649 task->rxwr_txrd.var_ctx.rx_flags |= 1 << bnx2fc_init_mp_task()
1653 task->rxwr_txrd.const_ctx.init_flags = context_id << bnx2fc_init_mp_task()
1660 task->rxwr_txrd.var_ctx.rx_id = 0xffff; bnx2fc_init_mp_task()
1666 hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr; bnx2fc_init_mp_task()
1674 sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; bnx2fc_init_mp_task()
1684 struct fcoe_task_ctx_entry *task) bnx2fc_init_task()
1699 memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); bnx2fc_init_task()
1701 /* Setup the task from io_req for easy reference */ bnx2fc_init_task()
1702 io_req->task = task; bnx2fc_init_task()
1711 cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge; bnx2fc_init_task()
1716 task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo = bnx2fc_init_task()
1719 task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi = bnx2fc_init_task()
1722 task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem = bnx2fc_init_task()
1726 task->txwr_rxrd.const_ctx.init_flags |= 1 << bnx2fc_init_task()
1729 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = bnx2fc_init_task()
1731 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = bnx2fc_init_task()
1733 task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = bnx2fc_init_task()
1740 task->txwr_rxrd.const_ctx.init_flags |= task_type << bnx2fc_init_task()
1743 task->txwr_rxrd.const_ctx.init_flags |= bnx2fc_init_task()
1749 task->txwr_rxrd.const_ctx.init_flags |= bnx2fc_init_task()
1752 task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << bnx2fc_init_task()
1755 task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL << bnx2fc_init_task()
1759 task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1; bnx2fc_init_task()
1763 task->txwr_rxrd.union_ctx.fcp_cmd.opaque; bnx2fc_init_task()
1775 task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len; bnx2fc_init_task()
1778 task->rxwr_txrd.const_ctx.init_flags = context_id << bnx2fc_init_task()
1783 task->rxwr_txrd.var_ctx.rx_flags |= 1 << bnx2fc_init_task()
1786 task->rxwr_txrd.var_ctx.rx_id = 0xffff; bnx2fc_init_task()
1792 sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; bnx2fc_init_task()
1803 task->txwr_rxrd.const_ctx.init_flags |= 1 << bnx2fc_init_task()
1818 task->txwr_rxrd.const_ctx.init_flags |= 1 << bnx2fc_init_task()
1836 * bnx2fc_setup_task_ctx - allocate and map task context
1840 * allocate memory for task context, and associated BD table to be used
1853 * Allocate task context bd table. A page size of bd table bnx2fc_setup_task_ctx()
1854 * can map 256 buffers. Each buffer contains 32 task context bnx2fc_setup_task_ctx()
1855 * entries. Hence the limit with one page is 8192 task context bnx2fc_setup_task_ctx()
1863 printk(KERN_ERR PFX "unable to allocate task context BDT\n"); bnx2fc_setup_task_ctx()
1871 * a page containing 32 task contexts bnx2fc_setup_task_ctx()
1877 printk(KERN_ERR PFX "unable to allocate task context array\n"); bnx2fc_setup_task_ctx()
1901 printk(KERN_ERR PFX "unable to alloc task context\n"); bnx2fc_setup_task_ctx()
1459 bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req, struct fcoe_task_ctx_entry *task, struct bnx2fc_cmd *orig_io_req, u32 offset) bnx2fc_init_seq_cleanup_task() argument
1549 bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req, struct fcoe_task_ctx_entry *task, u16 orig_xid) bnx2fc_init_cleanup_task() argument
1587 bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req, struct fcoe_task_ctx_entry *task) bnx2fc_init_mp_task() argument
1683 bnx2fc_init_task(struct bnx2fc_cmd *io_req, struct fcoe_task_ctx_entry *task) bnx2fc_init_task() argument
/linux-4.1.27/drivers/md/persistent-data/
H A Ddm-block-manager.c51 struct task_struct *task; member in struct:waiter
56 struct task_struct *task) __find_holder()
61 if (lock->holders[i] == task) __find_holder()
69 static void __add_holder(struct block_lock *lock, struct task_struct *task) __add_holder() argument
76 get_task_struct(task); __add_holder()
77 lock->holders[h] = task; __add_holder()
90 static void __del_holder(struct block_lock *lock, struct task_struct *task) __del_holder() argument
92 unsigned h = __find_holder(lock, task); __del_holder()
94 put_task_struct(task); __del_holder()
132 if (!w->task) __wait()
143 struct task_struct *task; __wake_waiter() local
146 task = w->task; __wake_waiter()
148 w->task = NULL; __wake_waiter()
149 wake_up_process(task); __wake_waiter()
169 __add_holder(lock, w->task); __wake_many()
175 __add_holder(lock, w->task); __wake_many()
219 w.task = current; bl_down_read()
281 w.task = current; bl_down_write()
55 __find_holder(struct block_lock *lock, struct task_struct *task) __find_holder() argument
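The dm-block-manager.c hits above show a classic waiter hand-off: the sleeper publishes `current` in a waiter struct, and the waker clears that pointer and calls wake_up_process(). A kernel-style sketch of the same pattern is below; the struct and function names are simplified stand-ins, the list locking that the real code does with its spinlock is assumed to be handled by the caller, and the real __wake_waiter() also issues a memory barrier before clearing the pointer.

#include <linux/sched.h>
#include <linux/list.h>

struct waiter {
        struct list_head list;
        struct task_struct *task;       /* NULL once the lock has been granted */
};

/* Sleeper side: park until the waker clears w->task. */
static void waiter_sleep(struct waiter *w)
{
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!w->task)
                        break;
                schedule();
        }
        __set_current_state(TASK_RUNNING);
}

/* Waker side: grant the lock, then wake the parked task. */
static void waiter_wake(struct waiter *w)
{
        struct task_struct *task = w->task;

        list_del(&w->list);
        w->task = NULL;                 /* the real code orders this with smp_mb() */
        wake_up_process(task);
}
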
/linux-4.1.27/include/scsi/fc/
H A Dfc_fcp.h37 #define FCP_SPPF_TASK_RETRY_ID 0x0200 /* task retry ID requested */
52 __u8 fc_pri_ta; /* priority and task attribute */
53 __u8 fc_tm_flags; /* task management flags */
64 __u8 fc_pri_ta; /* priority and task attribute */
65 __u8 fc_tm_flags; /* task management flags */
77 #define FCP_PTA_SIMPLE 0 /* simple task attribute */
78 #define FCP_PTA_HEADQ 1 /* head of queue task attribute */
79 #define FCP_PTA_ORDERED 2 /* ordered task attribute */
81 #define FCP_PTA_MASK 7 /* mask for task attribute field */
86 * fc_tm_flags - task management flags field.
89 #define FCP_TMF_TGT_RESET 0x20 /* target reset task management,
91 #define FCP_TMF_LUN_RESET 0x10 /* logical unit reset task management */
92 #define FCP_TMF_CLR_TASK_SET 0x04 /* clear task set */
93 #define FCP_TMF_ABT_TASK_SET 0x02 /* abort task set */
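The fc_fcp.h excerpt above only lists the flag values, so as orientation, here is a hedged sketch of how a caller typically fills the two fields shown (fc_pri_ta and fc_tm_flags). The stand-in structure is illustrative; only the FCP_* macros come from the header.

#include <linux/types.h>
#include <scsi/fc/fc_fcp.h>

/* Illustrative stand-in carrying just the two fields quoted above. */
struct fcp_cmd_fields {
        __u8 fc_pri_ta;                 /* priority and task attribute */
        __u8 fc_tm_flags;               /* task management flags       */
};

/* Ordinary I/O: simple task attribute, no task-management action. */
static void fill_simple_cmd(struct fcp_cmd_fields *cmd)
{
        cmd->fc_pri_ta   = FCP_PTA_SIMPLE & FCP_PTA_MASK;
        cmd->fc_tm_flags = 0;
}

/* Task-management request: ask the target for a logical unit reset. */
static void fill_lun_reset(struct fcp_cmd_fields *cmd)
{
        cmd->fc_pri_ta   = 0;
        cmd->fc_tm_flags = FCP_TMF_LUN_RESET;
}
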
/linux-4.1.27/security/apparmor/
H A Dcontext.c16 * AppArmor sets confinement on every task, via the aa_task_cxt and
23 * If a task uses change_hat it currently does not return to the old
24 * cred or task context but instead creates a new one. Ideally the task
59 * aa_dup_task_context - duplicate a task context, incrementing reference counts
60 * @new: a blank task context (NOT NULL)
61 * @old: the task context to copy (NOT NULL)
72 * aa_get_task_profile - Get another task's profile
73 * @task: task to query (NOT NULL)
75 * Returns: counted reference to @task's profile
77 struct aa_profile *aa_get_task_profile(struct task_struct *task) aa_get_task_profile() argument
82 p = aa_get_profile(__aa_task_profile(task)); aa_get_task_profile()
153 * Do switch of tasks hat. If the task is currently in a hat
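aa_get_task_profile() above returns a counted reference to another task's profile; the underlying pattern is "dereference per-task data inside an RCU read-side section and take a refcount before leaving it". A generic kernel-style sketch of that pattern follows; the types and helpers are illustrative, not AppArmor's.

#include <linux/rcupdate.h>
#include <linux/kref.h>

struct profile {
        struct kref count;
};

struct task_ctx {
        struct profile __rcu *profile;
};

static struct profile *task_ctx_get_profile(struct task_ctx *ctx)
{
        struct profile *p;

        rcu_read_lock();
        p = rcu_dereference(ctx->profile);
        if (p)
                kref_get(&p->count);    /* pin the object before leaving RCU */
        rcu_read_unlock();

        return p;                       /* caller must drop the reference later */
}
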
H A Dresource.c82 * @profile - profile confining the task (NOT NULL)
83 * @task - task the resource is being set on
91 int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *task, aa_task_setrlimit() argument
98 task_profile = aa_get_profile(aa_cred_profile(__task_cred(task))); aa_task_setrlimit()
103 * that the task is setting the resource of a task confined with aa_task_setrlimit()
118 * @old: old profile on task (NOT NULL)
/linux-4.1.27/arch/sh/kernel/
H A Ddumpstack.c59 struct task_struct *task = tinfo->task; print_ftrace_graph_addr() local
61 int index = task->curr_ret_stack; print_ftrace_graph_addr()
66 if (!task->ret_stack || index < *graph) print_ftrace_graph_addr()
70 ret_addr = task->ret_stack[index].ret; print_ftrace_graph_addr()
85 stack_reader_dump(struct task_struct *task, struct pt_regs *regs, stack_reader_dump() argument
/linux-4.1.27/arch/score/kernel/
H A Dprocess.c109 unsigned long get_wchan(struct task_struct *task) get_wchan() argument
111 if (!task || task == current || task->state == TASK_RUNNING) get_wchan()
114 if (!task_stack_page(task)) get_wchan()
117 return task_pt_regs(task)->cp0_epc; get_wchan()
/linux-4.1.27/tools/perf/
H A Dbuiltin-sched.c135 * Track the current task - that way we can know whether there's any
136 * weird events, such as a task being switched away that is not current.
240 get_new_event(struct task_desc *task, u64 timestamp) get_new_event() argument
243 unsigned long idx = task->nr_events; get_new_event()
249 task->nr_events++; get_new_event()
250 size = sizeof(struct sched_atom *) * task->nr_events; get_new_event()
251 task->atoms = realloc(task->atoms, size); get_new_event()
252 BUG_ON(!task->atoms); get_new_event()
254 task->atoms[idx] = event; get_new_event()
259 static struct sched_atom *last_event(struct task_desc *task) last_event() argument
261 if (!task->nr_events) last_event()
264 return task->atoms[task->nr_events - 1]; last_event()
267 static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task, add_sched_event_run() argument
270 struct sched_atom *event, *curr_event = last_event(task); add_sched_event_run()
282 event = get_new_event(task, timestamp); add_sched_event_run()
290 static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task, add_sched_event_wakeup() argument
295 event = get_new_event(task, timestamp); add_sched_event_wakeup()
317 static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task, add_sched_event_sleep() argument
320 struct sched_atom *event = get_new_event(task, timestamp); add_sched_event_sleep()
330 struct task_desc *task; register_pid() local
345 task = sched->pid_to_task[pid]; register_pid()
347 if (task) register_pid()
348 return task; register_pid()
350 task = zalloc(sizeof(*task)); register_pid()
351 task->pid = pid; register_pid()
352 task->nr = sched->nr_tasks; register_pid()
353 strcpy(task->comm, comm); register_pid()
355 * every task starts in sleeping state - this gets ignored register_pid()
358 add_sched_event_sleep(sched, task, 0, 0); register_pid()
360 sched->pid_to_task[pid] = task; register_pid()
364 sched->tasks[task->nr] = task; register_pid()
367 printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm); register_pid()
369 return task; register_pid()
375 struct task_desc *task; print_task_traces() local
379 task = sched->tasks[i]; print_task_traces()
380 printf("task %6ld (%20s:%10ld), nr_events: %ld\n", print_task_traces()
381 task->nr, task->comm, task->pid, task->nr_events); print_task_traces()
495 struct task_desc *task; member in struct:sched_thread_parms
503 struct task_desc *this_task = parms->task; thread_func()
546 struct task_desc *task; create_tasks() local
563 parms->task = task = sched->tasks[i]; create_tasks()
566 sem_init(&task->sleep_sem, 0, 0); create_tasks()
567 sem_init(&task->ready_for_work, 0, 0); create_tasks()
568 sem_init(&task->work_done_sem, 0, 0); create_tasks()
569 task->curr_event = 0; create_tasks()
570 err = pthread_create(&task->thread, &attr, thread_func, parms); create_tasks()
578 struct task_desc *task; wait_for_tasks() local
586 task = sched->tasks[i]; wait_for_tasks()
587 ret = sem_wait(&task->ready_for_work); wait_for_tasks()
589 sem_init(&task->ready_for_work, 0, 0); wait_for_tasks()
599 task = sched->tasks[i]; wait_for_tasks()
600 ret = sem_wait(&task->work_done_sem); wait_for_tasks()
602 sem_init(&task->work_done_sem, 0, 0); wait_for_tasks()
603 sched->cpu_usage += task->cpu_usage; wait_for_tasks()
604 task->cpu_usage = 0; wait_for_tasks()
622 task = sched->tasks[i]; wait_for_tasks()
623 sem_init(&task->sleep_sem, 0, 0); wait_for_tasks()
624 task->curr_event = 0; wait_for_tasks()
1074 * task is out of run queue, also may happen when task is latency_wakeup_event()
1077 * task which is on run queue. latency_wakeup_event()
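get_new_event() above grows the per-task atoms array by exactly one pointer slot per recorded event with realloc(). A small userspace sketch of the same pattern, runnable on its own; the names here are illustrative, not perf's.

#include <stdio.h>
#include <stdlib.h>

struct atom {
        unsigned long timestamp;
};

struct task_desc {
        unsigned long nr_events;
        struct atom **atoms;            /* grows one slot per recorded event */
};

static struct atom *record_event(struct task_desc *task, unsigned long timestamp)
{
        unsigned long idx = task->nr_events++;
        struct atom **atoms;
        struct atom *event;

        /* grow the pointer array by exactly one slot, as builtin-sched does */
        atoms = realloc(task->atoms, sizeof(*atoms) * task->nr_events);
        if (!atoms)
                abort();
        task->atoms = atoms;

        event = calloc(1, sizeof(*event));
        if (!event)
                abort();
        event->timestamp = timestamp;
        task->atoms[idx] = event;
        return event;
}

int main(void)
{
        struct task_desc task = { 0 };

        record_event(&task, 100);
        record_event(&task, 200);
        printf("events: %lu, last ts: %lu\n",
               task.nr_events, task.atoms[task.nr_events - 1]->timestamp);
        return 0;
}
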
/linux-4.1.27/arch/mips/kernel/
H A Dprocess.c507 unsigned long unwind_stack(struct task_struct *task, unsigned long *sp, unwind_stack() argument
510 unsigned long stack_page = (unsigned long)task_stack_page(task); unwind_stack()
518 unsigned long get_wchan(struct task_struct *task) get_wchan() argument
526 if (!task || task == current || task->state == TASK_RUNNING) get_wchan()
528 if (!task_stack_page(task)) get_wchan()
531 pc = thread_saved_pc(task); get_wchan()
534 sp = task->thread.reg29 + schedule_mfi.frame_size; get_wchan()
537 pc = unwind_stack(task, &sp, pc, &ra); get_wchan()
573 int mips_get_process_fp_mode(struct task_struct *task) mips_get_process_fp_mode() argument
577 if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS)) mips_get_process_fp_mode()
579 if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS)) mips_get_process_fp_mode()
585 int mips_set_process_fp_mode(struct task_struct *task, unsigned int value) mips_set_process_fp_mode() argument
610 if (task->signal == current->signal) mips_set_process_fp_mode()
614 atomic_set(&task->mm->context.fp_mode_switching, 1); mips_set_process_fp_mode()
627 spin_lock_irq(&task->sighand->siglock); mips_set_process_fp_mode()
629 for_each_thread(task, t) { for_each_thread()
636 spin_unlock_irq(&task->sighand->siglock); for_each_thread()
638 spin_lock_irq(&task->sighand->siglock); for_each_thread()
642 spin_unlock_irq(&task->sighand->siglock);
649 for_each_thread(task, t) { for_each_thread()
666 atomic_set(&task->mm->context.fp_mode_switching, 0);
/linux-4.1.27/arch/um/kernel/
H A Dsysrq.c30 void show_stack(struct task_struct *task, unsigned long *stack) show_stack() argument
43 sp = get_stack_pointer(task, segv_regs); show_stack()
/linux-4.1.27/ipc/
H A Dnamespace.c109 * If this is the last task in the namespace exiting, and
111 * a task in another ipc namespace but in a mounts namespace
117 * (Clearly, a task raising the refcount on its own ipc_ns
118 * needn't take mq_lock since it can't race with the last task
136 static struct ns_common *ipcns_get(struct task_struct *task) ipcns_get() argument
141 task_lock(task); ipcns_get()
142 nsproxy = task->nsproxy; ipcns_get()
145 task_unlock(task); ipcns_get()
/linux-4.1.27/arch/cris/arch-v10/kernel/
H A Dptrace.c27 * Get contents of register REGNO in task TASK.
29 inline long get_reg(struct task_struct *task, unsigned int regno) get_reg() argument
36 return task->thread.usp; get_reg()
38 return ((unsigned long *)task_pt_regs(task))[regno]; get_reg()
44 * Write contents of register REGNO in task TASK.
46 inline int put_reg(struct task_struct *task, unsigned int regno, put_reg() argument
50 task->thread.usp = data; put_reg()
52 ((unsigned long *)task_pt_regs(task))[regno] = data; put_reg()
/linux-4.1.27/tools/testing/fault-injection/
H A Dfailcmd.sh33 --oom-kill-allocating-task=value
41 --interval=value, --space=value, --verbose=value, --task-filter=value,
86 LONGOPTS=probability:,interval:,times:,space:,verbose:,task-filter:
88 LONGOPTS=$LONGOPTS,reject-start:,reject-end:,oom-kill-allocating-task:,help
107 echo N > $FAULTATTR/task-filter
153 --task-filter)
177 --oom-kill-allocating-task)
212 echo $task_filter > $FAULTATTR/task-filter
/linux-4.1.27/drivers/net/wireless/rsi/
H A Drsi_common.h66 thread->task = kthread_run(func_ptr, common, "%s", name); rsi_create_kthread()
67 if (IS_ERR(thread->task)) rsi_create_kthread()
68 return (int)PTR_ERR(thread->task); rsi_create_kthread()
79 return kthread_stop(handle->task); rsi_kill_thread()
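rsi_create_kthread()/rsi_kill_thread() above wrap the standard kthread_run()/kthread_stop() pair. A kernel-style sketch of that lifecycle is below; the worker struct and thread function are illustrative, while kthread_run(), kthread_should_stop() and kthread_stop() are the real APIs.

#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/delay.h>

struct worker {
        struct task_struct *task;
};

static int worker_fn(void *data)
{
        /* run until someone calls kthread_stop() on this task */
        while (!kthread_should_stop())
                msleep(100);            /* placeholder for real work */
        return 0;
}

static int worker_start(struct worker *w)
{
        w->task = kthread_run(worker_fn, w, "worker-thread");
        if (IS_ERR(w->task))
                return (int)PTR_ERR(w->task);
        return 0;
}

static int worker_stop(struct worker *w)
{
        return kthread_stop(w->task);   /* waits for worker_fn to return */
}
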
/linux-4.1.27/init/
H A Dinit_task.c17 /* Initial task structure */

