task 119 arch/alpha/include/asm/elf.h extern int dump_elf_task(elf_greg_t *dest, struct task_struct *task); task 125 arch/alpha/include/asm/elf.h extern int dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task); task 14 arch/alpha/include/asm/ptrace.h #define task_pt_regs(task) \ task 15 arch/alpha/include/asm/ptrace.h ((struct pt_regs *) (task_stack_page(task) + 2*PAGE_SIZE) - 1) task 7 arch/alpha/include/asm/syscall.h static inline int syscall_get_arch(struct task_struct *task) task 18 arch/alpha/include/asm/thread_info.h struct task_struct *task; /* main task structure */ task 37 arch/alpha/include/asm/thread_info.h .task = &tsk, \ task 88 arch/alpha/include/asm/thread_info.h #define SET_UNALIGN_CTL(task,value) ({ \ task 89 arch/alpha/include/asm/thread_info.h __u32 status = task_thread_info(task)->status & ~UAC_BITMASK; \ task 96 arch/alpha/include/asm/thread_info.h task_thread_info(task)->status = status; \ task 99 arch/alpha/include/asm/thread_info.h #define GET_UNALIGN_CTL(task,value) ({ \ task 100 arch/alpha/include/asm/thread_info.h __u32 status = task_thread_info(task)->status & ~UAC_BITMASK; \ task 17 arch/alpha/kernel/asm-offsets.c DEFINE(TI_TASK, offsetof(struct thread_info, task)); task 335 arch/alpha/kernel/process.c dump_elf_task(elf_greg_t *dest, struct task_struct *task) task 337 arch/alpha/kernel/process.c dump_elf_thread(dest, task_pt_regs(task), task_thread_info(task)); task 343 arch/alpha/kernel/process.c dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task) task 345 arch/alpha/kernel/process.c struct switch_stack *sw = (struct switch_stack *)task_pt_regs(task) - 1; task 109 arch/alpha/kernel/ptrace.c get_reg_addr(struct task_struct * task, unsigned long regno) task 114 arch/alpha/kernel/ptrace.c addr = &task_thread_info(task)->pcb.usp; task 116 arch/alpha/kernel/ptrace.c addr = &task_thread_info(task)->pcb.unique; task 121 arch/alpha/kernel/ptrace.c addr = task_stack_page(task) + regoff[regno]; task 130 arch/alpha/kernel/ptrace.c get_reg(struct task_struct * task, unsigned long regno) task 134 arch/alpha/kernel/ptrace.c unsigned long fpcr = *get_reg_addr(task, regno); task 136 arch/alpha/kernel/ptrace.c = task_thread_info(task)->ieee_state & IEEE_SW_MASK; task 140 arch/alpha/kernel/ptrace.c return *get_reg_addr(task, regno); task 147 arch/alpha/kernel/ptrace.c put_reg(struct task_struct *task, unsigned long regno, unsigned long data) task 150 arch/alpha/kernel/ptrace.c task_thread_info(task)->ieee_state task 151 arch/alpha/kernel/ptrace.c = ((task_thread_info(task)->ieee_state & ~IEEE_SW_MASK) task 155 arch/alpha/kernel/ptrace.c *get_reg_addr(task, regno) = data; task 160 arch/alpha/kernel/ptrace.c read_int(struct task_struct *task, unsigned long addr, int * data) task 162 arch/alpha/kernel/ptrace.c int copied = access_process_vm(task, addr, data, sizeof(int), task 168 arch/alpha/kernel/ptrace.c write_int(struct task_struct *task, unsigned long addr, int data) task 170 arch/alpha/kernel/ptrace.c int copied = access_process_vm(task, addr, &data, sizeof(int), task 147 arch/alpha/kernel/traps.c void show_stack(struct task_struct *task, unsigned long *sp) task 16 arch/arc/include/asm/syscall.h syscall_get_nr(struct task_struct *task, struct pt_regs *regs) task 25 arch/arc/include/asm/syscall.h syscall_rollback(struct task_struct *task, struct pt_regs *regs) task 31 arch/arc/include/asm/syscall.h syscall_get_error(struct task_struct *task, struct pt_regs *regs) task 38 arch/arc/include/asm/syscall.h syscall_get_return_value(struct task_struct *task, struct 
pt_regs *regs) task 44 arch/arc/include/asm/syscall.h syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, task 55 arch/arc/include/asm/syscall.h syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, task 69 arch/arc/include/asm/syscall.h syscall_get_arch(struct task_struct *task) task 42 arch/arc/include/asm/thread_info.h struct task_struct *task; /* main task structure */ task 55 arch/arc/include/asm/thread_info.h .task = &tsk, \ task 51 arch/arc/include/asm/unwind.h struct task_struct *task; task 70 arch/arc/kernel/kgdb.c struct task_struct *task) task 72 arch/arc/kernel/kgdb.c if (task) task 73 arch/arc/kernel/kgdb.c to_gdb_regs(gdb_regs, task_pt_regs(task), task 74 arch/arc/kernel/kgdb.c (struct callee_regs *) task->thread.callee_reg); task 422 arch/arc/kernel/kprobes.c if (ri->task != current) task 262 arch/arc/kernel/ptrace.c const struct user_regset_view *task_user_regset_view(struct task_struct *task) task 51 arch/arc/kernel/stacktrace.c frame_info->task = current; task 73 arch/arc/kernel/stacktrace.c frame_info->task = tsk; task 98 arch/arc/kernel/stacktrace.c frame_info->task = tsk; task 1095 arch/arc/kernel/unwind.c top = STACK_TOP_UNW(frame->task); task 1096 arch/arc/kernel/unwind.c bottom = STACK_BOTTOM_UNW(frame->task); task 260 arch/arm/common/bL_switcher.c struct task_struct *task; task 309 arch/arm/common/bL_switcher.c struct task_struct *task; task 311 arch/arm/common/bL_switcher.c task = kthread_create_on_node(bL_switcher_thread, arg, task 313 arch/arm/common/bL_switcher.c if (!IS_ERR(task)) { task 314 arch/arm/common/bL_switcher.c kthread_bind(task, cpu); task 315 arch/arm/common/bL_switcher.c wake_up_process(task); task 318 arch/arm/common/bL_switcher.c return task; task 356 arch/arm/common/bL_switcher.c if (IS_ERR(t->task)) task 357 arch/arm/common/bL_switcher.c return PTR_ERR(t->task); task 358 arch/arm/common/bL_switcher.c if (!t->task) task 583 arch/arm/common/bL_switcher.c t->task = bL_switcher_thread_create(cpu, t); task 607 arch/arm/common/bL_switcher.c struct task_struct *task; task 632 arch/arm/common/bL_switcher.c task = t->task; task 633 arch/arm/common/bL_switcher.c t->task = NULL; task 634 arch/arm/common/bL_switcher.c if (!task || IS_ERR(task)) task 636 arch/arm/common/bL_switcher.c kthread_stop(task); task 643 arch/arm/common/bL_switcher.c task = bL_switcher_thread_create(cpu, t); task 644 arch/arm/common/bL_switcher.c if (!IS_ERR(task)) { task 646 arch/arm/common/bL_switcher.c kthread_stop(task); task 22 arch/arm/include/asm/syscall.h static inline int syscall_get_nr(struct task_struct *task, task 25 arch/arm/include/asm/syscall.h return task_thread_info(task)->syscall; task 28 arch/arm/include/asm/syscall.h static inline void syscall_rollback(struct task_struct *task, task 34 arch/arm/include/asm/syscall.h static inline long syscall_get_error(struct task_struct *task, task 41 arch/arm/include/asm/syscall.h static inline long syscall_get_return_value(struct task_struct *task, task 47 arch/arm/include/asm/syscall.h static inline void syscall_set_return_value(struct task_struct *task, task 56 arch/arm/include/asm/syscall.h static inline void syscall_get_arguments(struct task_struct *task, task 66 arch/arm/include/asm/syscall.h static inline void syscall_set_arguments(struct task_struct *task, task 76 arch/arm/include/asm/syscall.h static inline int syscall_get_arch(struct task_struct *task) task 50 arch/arm/include/asm/thread_info.h struct task_struct *task; /* main task structure */ task 72 
arch/arm/include/asm/thread_info.h .task = &tsk, \ task 57 arch/arm/kernel/asm-offsets.c DEFINE(TI_TASK, offsetof(struct thread_info, task)); task 76 arch/arm/kernel/kgdb.c sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) task 82 arch/arm/kernel/kgdb.c if (task == NULL) task 90 arch/arm/kernel/kgdb.c ti = task_thread_info(task); task 29 arch/arm/kernel/perf_regs.c u64 perf_reg_abi(struct task_struct *task) task 162 arch/arm/kernel/ptrace.c static inline long get_user_reg(struct task_struct *task, int offset) task 164 arch/arm/kernel/ptrace.c return task_pt_regs(task)->uregs[offset]; task 174 arch/arm/kernel/ptrace.c put_user_reg(struct task_struct *task, int offset, long data) task 176 arch/arm/kernel/ptrace.c struct pt_regs newregs, *regs = task_pt_regs(task); task 779 arch/arm/kernel/ptrace.c const struct user_regset_view *task_user_regset_view(struct task_struct *task) task 990 arch/arm/mach-rpc/ecard.c struct task_struct *task; task 997 arch/arm/mach-rpc/ecard.c task = kthread_run(ecard_task, NULL, "kecardd"); task 998 arch/arm/mach-rpc/ecard.c if (IS_ERR(task)) { task 1000 arch/arm/mach-rpc/ecard.c PTR_ERR(task)); task 1002 arch/arm/mach-rpc/ecard.c return PTR_ERR(task); task 112 arch/arm/mm/context.c pid = task_pid_nr(thread->task) << ASID_BITS; task 440 arch/arm/probes/kprobes/core.c if (ri->task != current) task 459 arch/arm/probes/kprobes/core.c if (ri->task != current) task 110 arch/arm64/include/asm/debug-monitors.h void user_rewind_single_step(struct task_struct *task); task 111 arch/arm64/include/asm/debug-monitors.h void user_fastforward_single_step(struct task_struct *task); task 106 arch/arm64/include/asm/fpsimd.h extern size_t sve_state_size(struct task_struct const *task); task 108 arch/arm64/include/asm/fpsimd.h extern void sve_alloc(struct task_struct *task); task 109 arch/arm64/include/asm/fpsimd.h extern void fpsimd_release_task(struct task_struct *task); task 110 arch/arm64/include/asm/fpsimd.h extern void fpsimd_sync_to_sve(struct task_struct *task); task 111 arch/arm64/include/asm/fpsimd.h extern void sve_sync_to_fpsimd(struct task_struct *task); task 112 arch/arm64/include/asm/fpsimd.h extern void sve_sync_from_fpsimd_zeropad(struct task_struct *task); task 114 arch/arm64/include/asm/fpsimd.h extern int sve_set_vector_length(struct task_struct *task, task 141 arch/arm64/include/asm/fpsimd.h static inline void sve_alloc(struct task_struct *task) { } task 142 arch/arm64/include/asm/fpsimd.h static inline void fpsimd_release_task(struct task_struct *task) { } task 143 arch/arm64/include/asm/fpsimd.h static inline void sve_sync_to_fpsimd(struct task_struct *task) { } task 144 arch/arm64/include/asm/fpsimd.h static inline void sve_sync_from_fpsimd_zeropad(struct task_struct *task) { } task 129 arch/arm64/include/asm/hw_breakpoint.h extern void ptrace_hw_copy_thread(struct task_struct *task); task 134 arch/arm64/include/asm/hw_breakpoint.h static inline void ptrace_hw_copy_thread(struct task_struct *task) task 334 arch/arm64/include/asm/ptrace.h int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task); task 89 arch/arm64/include/asm/smp.h struct task_struct *task; task 20 arch/arm64/include/asm/syscall.h static inline int syscall_get_nr(struct task_struct *task, task 26 arch/arm64/include/asm/syscall.h static inline void syscall_rollback(struct task_struct *task, task 33 arch/arm64/include/asm/syscall.h static inline long syscall_get_error(struct task_struct *task, task 40 arch/arm64/include/asm/syscall.h static inline long 
syscall_get_return_value(struct task_struct *task, task 46 arch/arm64/include/asm/syscall.h static inline void syscall_set_return_value(struct task_struct *task, task 55 arch/arm64/include/asm/syscall.h static inline void syscall_get_arguments(struct task_struct *task, task 65 arch/arm64/include/asm/syscall.h static inline void syscall_set_arguments(struct task_struct *task, task 79 arch/arm64/include/asm/syscall.h static inline int syscall_get_arch(struct task_struct *task) task 81 arch/arm64/include/asm/syscall.h if (is_compat_thread(task_thread_info(task))) task 89 arch/arm64/kernel/asm-offsets.c DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task)); task 390 arch/arm64/kernel/debug-monitors.c void user_rewind_single_step(struct task_struct *task) task 396 arch/arm64/kernel/debug-monitors.c if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP)) task 397 arch/arm64/kernel/debug-monitors.c set_regs_spsr_ss(task_pt_regs(task)); task 401 arch/arm64/kernel/debug-monitors.c void user_fastforward_single_step(struct task_struct *task) task 403 arch/arm64/kernel/debug-monitors.c if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP)) task 404 arch/arm64/kernel/debug-monitors.c clear_regs_spsr_ss(task_pt_regs(task)); task 433 arch/arm64/kernel/debug-monitors.c void user_enable_single_step(struct task_struct *task) task 435 arch/arm64/kernel/debug-monitors.c struct thread_info *ti = task_thread_info(task); task 438 arch/arm64/kernel/debug-monitors.c set_regs_spsr_ss(task_pt_regs(task)); task 442 arch/arm64/kernel/debug-monitors.c void user_disable_single_step(struct task_struct *task) task 444 arch/arm64/kernel/debug-monitors.c clear_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP); task 203 arch/arm64/kernel/fpsimd.c static void __sve_free(struct task_struct *task) task 205 arch/arm64/kernel/fpsimd.c kfree(task->thread.sve_state); task 206 arch/arm64/kernel/fpsimd.c task->thread.sve_state = NULL; task 209 arch/arm64/kernel/fpsimd.c static void sve_free(struct task_struct *task) task 211 arch/arm64/kernel/fpsimd.c WARN_ON(test_tsk_thread_flag(task, TIF_SVE)); task 213 arch/arm64/kernel/fpsimd.c __sve_free(task); task 435 arch/arm64/kernel/fpsimd.c static void fpsimd_to_sve(struct task_struct *task) task 438 arch/arm64/kernel/fpsimd.c void *sst = task->thread.sve_state; task 439 arch/arm64/kernel/fpsimd.c struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state; task 444 arch/arm64/kernel/fpsimd.c vq = sve_vq_from_vl(task->thread.sve_vl); task 459 arch/arm64/kernel/fpsimd.c static void sve_to_fpsimd(struct task_struct *task) task 462 arch/arm64/kernel/fpsimd.c void const *sst = task->thread.sve_state; task 463 arch/arm64/kernel/fpsimd.c struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state; task 470 arch/arm64/kernel/fpsimd.c vq = sve_vq_from_vl(task->thread.sve_vl); task 483 arch/arm64/kernel/fpsimd.c size_t sve_state_size(struct task_struct const *task) task 485 arch/arm64/kernel/fpsimd.c return SVE_SIG_REGS_SIZE(sve_vq_from_vl(task->thread.sve_vl)); task 498 arch/arm64/kernel/fpsimd.c void sve_alloc(struct task_struct *task) task 500 arch/arm64/kernel/fpsimd.c if (task->thread.sve_state) { task 501 arch/arm64/kernel/fpsimd.c memset(task->thread.sve_state, 0, sve_state_size(current)); task 506 arch/arm64/kernel/fpsimd.c task->thread.sve_state = task 507 arch/arm64/kernel/fpsimd.c kzalloc(sve_state_size(task), GFP_KERNEL); task 513 arch/arm64/kernel/fpsimd.c BUG_ON(!task->thread.sve_state); task 525 arch/arm64/kernel/fpsimd.c void 
fpsimd_sync_to_sve(struct task_struct *task) task 527 arch/arm64/kernel/fpsimd.c if (!test_tsk_thread_flag(task, TIF_SVE)) task 528 arch/arm64/kernel/fpsimd.c fpsimd_to_sve(task); task 539 arch/arm64/kernel/fpsimd.c void sve_sync_to_fpsimd(struct task_struct *task) task 541 arch/arm64/kernel/fpsimd.c if (test_tsk_thread_flag(task, TIF_SVE)) task 542 arch/arm64/kernel/fpsimd.c sve_to_fpsimd(task); task 557 arch/arm64/kernel/fpsimd.c void sve_sync_from_fpsimd_zeropad(struct task_struct *task) task 560 arch/arm64/kernel/fpsimd.c void *sst = task->thread.sve_state; task 561 arch/arm64/kernel/fpsimd.c struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state; task 563 arch/arm64/kernel/fpsimd.c if (!test_tsk_thread_flag(task, TIF_SVE)) task 566 arch/arm64/kernel/fpsimd.c vq = sve_vq_from_vl(task->thread.sve_vl); task 572 arch/arm64/kernel/fpsimd.c int sve_set_vector_length(struct task_struct *task, task 594 arch/arm64/kernel/fpsimd.c task->thread.sve_vl_onexec = vl; task 597 arch/arm64/kernel/fpsimd.c task->thread.sve_vl_onexec = 0; task 603 arch/arm64/kernel/fpsimd.c if (vl == task->thread.sve_vl) task 611 arch/arm64/kernel/fpsimd.c if (task == current) { task 617 arch/arm64/kernel/fpsimd.c fpsimd_flush_task_state(task); task 618 arch/arm64/kernel/fpsimd.c if (test_and_clear_tsk_thread_flag(task, TIF_SVE)) task 619 arch/arm64/kernel/fpsimd.c sve_to_fpsimd(task); task 621 arch/arm64/kernel/fpsimd.c if (task == current) task 628 arch/arm64/kernel/fpsimd.c sve_free(task); task 630 arch/arm64/kernel/fpsimd.c task->thread.sve_vl = vl; task 633 arch/arm64/kernel/fpsimd.c update_tsk_thread_flag(task, TIF_SVE_VL_INHERIT, task 128 arch/arm64/kernel/kgdb.c sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) task 130 arch/arm64/kernel/kgdb.c struct cpu_context *cpu_context = &task->thread.cpu_context; task 48 arch/arm64/kernel/perf_regs.c u64 perf_reg_abi(struct task_struct *task) task 50 arch/arm64/kernel/perf_regs.c if (is_compat_thread(task_thread_info(task))) task 502 arch/arm64/kernel/probes/kprobes.c if (ri->task != current) task 521 arch/arm64/kernel/probes/kprobes.c if (ri->task != current) task 1780 arch/arm64/kernel/ptrace.c const struct user_regset_view *task_user_regset_view(struct task_struct *task) task 1791 arch/arm64/kernel/ptrace.c else if (is_compat_thread(task_thread_info(task))) task 1936 arch/arm64/kernel/ptrace.c int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task) task 1938 arch/arm64/kernel/ptrace.c if (!test_tsk_thread_flag(task, TIF_SINGLESTEP)) task 1941 arch/arm64/kernel/ptrace.c if (is_compat_thread(task_thread_info(task))) task 111 arch/arm64/kernel/smp.c secondary_data.task = idle; task 137 arch/arm64/kernel/smp.c secondary_data.task = NULL; task 15 arch/arm64/kernel/ssbd.c static void ssbd_ssbs_enable(struct task_struct *task) task 17 arch/arm64/kernel/ssbd.c u64 val = is_compat_thread(task_thread_info(task)) ? task 20 arch/arm64/kernel/ssbd.c task_pt_regs(task)->pstate |= val; task 23 arch/arm64/kernel/ssbd.c static void ssbd_ssbs_disable(struct task_struct *task) task 25 arch/arm64/kernel/ssbd.c u64 val = is_compat_thread(task_thread_info(task)) ? 
task 28 arch/arm64/kernel/ssbd.c task_pt_regs(task)->pstate &= ~val; task 34 arch/arm64/kernel/ssbd.c static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl) task 62 arch/arm64/kernel/ssbd.c task_spec_ssb_force_disable(task)) task 64 arch/arm64/kernel/ssbd.c task_clear_spec_ssb_disable(task); task 65 arch/arm64/kernel/ssbd.c clear_tsk_thread_flag(task, TIF_SSBD); task 66 arch/arm64/kernel/ssbd.c ssbd_ssbs_enable(task); task 71 arch/arm64/kernel/ssbd.c task_set_spec_ssb_disable(task); task 72 arch/arm64/kernel/ssbd.c set_tsk_thread_flag(task, TIF_SSBD); task 73 arch/arm64/kernel/ssbd.c ssbd_ssbs_disable(task); task 78 arch/arm64/kernel/ssbd.c task_set_spec_ssb_disable(task); task 79 arch/arm64/kernel/ssbd.c task_set_spec_ssb_force_disable(task); task 80 arch/arm64/kernel/ssbd.c set_tsk_thread_flag(task, TIF_SSBD); task 81 arch/arm64/kernel/ssbd.c ssbd_ssbs_disable(task); task 90 arch/arm64/kernel/ssbd.c int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, task 95 arch/arm64/kernel/ssbd.c return ssbd_prctl_set(task, ctrl); task 101 arch/arm64/kernel/ssbd.c static int ssbd_prctl_get(struct task_struct *task) task 109 arch/arm64/kernel/ssbd.c if (task_spec_ssb_force_disable(task)) task 111 arch/arm64/kernel/ssbd.c if (task_spec_ssb_disable(task)) task 121 arch/arm64/kernel/ssbd.c int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) task 125 arch/arm64/kernel/ssbd.c return ssbd_prctl_get(task); task 63 arch/c6x/include/asm/processor.h #define task_pt_regs(task) \ task 64 arch/c6x/include/asm/processor.h ((struct pt_regs *)(THREAD_START_SP + task_stack_page(task)) - 1) task 98 arch/c6x/include/asm/processor.h #define KSTK_EIP(task) (task_pt_regs(task)->pc) task 99 arch/c6x/include/asm/processor.h #define KSTK_ESP(task) (task_pt_regs(task)->sp) task 14 arch/c6x/include/asm/syscall.h static inline int syscall_get_nr(struct task_struct *task, task 20 arch/c6x/include/asm/syscall.h static inline void syscall_rollback(struct task_struct *task, task 26 arch/c6x/include/asm/syscall.h static inline long syscall_get_error(struct task_struct *task, task 32 arch/c6x/include/asm/syscall.h static inline long syscall_get_return_value(struct task_struct *task, task 38 arch/c6x/include/asm/syscall.h static inline void syscall_set_return_value(struct task_struct *task, task 45 arch/c6x/include/asm/syscall.h static inline void syscall_get_arguments(struct task_struct *task, task 57 arch/c6x/include/asm/syscall.h static inline void syscall_set_arguments(struct task_struct *task, task 69 arch/c6x/include/asm/syscall.h static inline int syscall_get_arch(struct task_struct *task) task 39 arch/c6x/include/asm/thread_info.h struct task_struct *task; /* main task structure */ task 53 arch/c6x/include/asm/thread_info.h .task = &tsk, \ task 71 arch/c6x/include/asm/thread_info.h #define get_thread_info(ti) get_task_struct((ti)->task) task 72 arch/c6x/include/asm/thread_info.h #define put_thread_info(ti) put_task_struct((ti)->task) task 31 arch/c6x/kernel/ptrace.c static inline long get_reg(struct task_struct *task, int regno) task 33 arch/c6x/kernel/ptrace.c long *addr = (long *)task_pt_regs(task); task 44 arch/c6x/kernel/ptrace.c static inline int put_reg(struct task_struct *task, task 48 arch/c6x/kernel/ptrace.c unsigned long *addr = (unsigned long *)task_pt_regs(task); task 91 arch/c6x/kernel/ptrace.c const struct user_regset_view *task_user_regset_view(struct task_struct *task) task 376 arch/c6x/kernel/traps.c void show_stack(struct task_struct *task, 
unsigned long *stack) task 382 arch/c6x/kernel/traps.c if (task && task != current) task 385 arch/c6x/kernel/traps.c stack = (unsigned long *)thread_saved_ksp(task); task 14 arch/csky/include/asm/syscall.h syscall_get_nr(struct task_struct *task, struct pt_regs *regs) task 20 arch/csky/include/asm/syscall.h syscall_set_nr(struct task_struct *task, struct pt_regs *regs, task 27 arch/csky/include/asm/syscall.h syscall_rollback(struct task_struct *task, struct pt_regs *regs) task 33 arch/csky/include/asm/syscall.h syscall_get_error(struct task_struct *task, struct pt_regs *regs) task 41 arch/csky/include/asm/syscall.h syscall_get_return_value(struct task_struct *task, struct pt_regs *regs) task 47 arch/csky/include/asm/syscall.h syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, task 54 arch/csky/include/asm/syscall.h syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, task 63 arch/csky/include/asm/syscall.h syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, task 72 arch/csky/include/asm/syscall.h syscall_get_arch(struct task_struct *task) task 16 arch/csky/include/asm/thread_info.h struct task_struct *task; task 29 arch/csky/include/asm/thread_info.h .task = &tsk, \ task 32 arch/csky/kernel/asm-offsets.c DEFINE(TINFO_TASK, offsetof(struct thread_info, task)); task 35 arch/csky/kernel/dumpstack.c void show_stack(struct task_struct *task, unsigned long *stack) task 38 arch/csky/kernel/dumpstack.c if (task) task 39 arch/csky/kernel/dumpstack.c stack = (unsigned long *)thread_saved_fp(task); task 29 arch/csky/kernel/perf_regs.c u64 perf_reg_abi(struct task_struct *task) task 191 arch/csky/kernel/ptrace.c const struct user_regset_view *task_user_regset_view(struct task_struct *task) task 238 arch/csky/kernel/ptrace.c extern void show_stack(struct task_struct *task, unsigned long *stack); task 31 arch/h8300/include/asm/ptrace.h #define task_pt_regs(task) \ task 32 arch/h8300/include/asm/ptrace.h ((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE) - 1) task 34 arch/h8300/include/asm/ptrace.h extern long h8300_get_reg(struct task_struct *task, int regno); task 35 arch/h8300/include/asm/ptrace.h extern int h8300_put_reg(struct task_struct *task, int regno, task 14 arch/h8300/include/asm/syscall.h syscall_get_nr(struct task_struct *task, struct pt_regs *regs) task 20 arch/h8300/include/asm/syscall.h syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, task 32 arch/h8300/include/asm/syscall.h syscall_get_arch(struct task_struct *task) task 30 arch/h8300/include/asm/thread_info.h struct task_struct *task; /* main task structure */ task 42 arch/h8300/include/asm/thread_info.h .task = &tsk, \ task 62 arch/h8300/kernel/asm-offsets.c OFFSET(TI_TASK, thread_info, task); task 39 arch/h8300/kernel/ptrace.c long h8300_get_reg(struct task_struct *task, int regno) task 43 arch/h8300/kernel/ptrace.c return task->thread.usp + sizeof(long)*2; task 46 arch/h8300/kernel/ptrace.c return *(unsigned short *)(task->thread.esp0 + task 49 arch/h8300/kernel/ptrace.c return *(unsigned long *)(task->thread.esp0 + task 54 arch/h8300/kernel/ptrace.c int h8300_put_reg(struct task_struct *task, int regno, unsigned long data) task 61 arch/h8300/kernel/ptrace.c task->thread.usp = data - sizeof(long)*2; task 63 arch/h8300/kernel/ptrace.c oldccr = *(unsigned short *)(task->thread.esp0 + task 68 arch/h8300/kernel/ptrace.c *(unsigned short *)(task->thread.esp0 + task 72 arch/h8300/kernel/ptrace.c oldexr = *(unsigned short *)(task->thread.esp0 + task 77 
arch/h8300/kernel/ptrace.c *(unsigned short *)(task->thread.esp0 + task 81 arch/h8300/kernel/ptrace.c *(unsigned long *)(task->thread.esp0 + task 154 arch/h8300/kernel/ptrace.c const struct user_regset_view *task_user_regset_view(struct task_struct *task) task 138 arch/h8300/kernel/ptrace_h.c static int isbranch(struct task_struct *task, int reson) task 140 arch/h8300/kernel/ptrace_h.c unsigned char cond = h8300_get_reg(task, PT_CCR); task 118 arch/h8300/kernel/traps.c void show_stack(struct task_struct *task, unsigned long *esp) task 82 arch/hexagon/include/asm/mmu_context.h switch_mm(prev, next, current_thread_info()->task); task 57 arch/hexagon/include/asm/processor.h #define task_pt_regs(task) \ task 58 arch/hexagon/include/asm/processor.h ((struct pt_regs *)(task_stack_page(task) + THREAD_SIZE) - 1) task 23 arch/hexagon/include/asm/syscall.h static inline long syscall_get_nr(struct task_struct *task, task 29 arch/hexagon/include/asm/syscall.h static inline void syscall_get_arguments(struct task_struct *task, task 36 arch/hexagon/include/asm/syscall.h static inline long syscall_get_error(struct task_struct *task, task 42 arch/hexagon/include/asm/syscall.h static inline long syscall_get_return_value(struct task_struct *task, task 48 arch/hexagon/include/asm/syscall.h static inline int syscall_get_arch(struct task_struct *task) task 36 arch/hexagon/include/asm/thread_info.h struct task_struct *task; /* main task structure */ task 65 arch/hexagon/include/asm/thread_info.h .task = &tsk, \ task 108 arch/hexagon/kernel/kgdb.c struct task_struct *task) task 112 arch/hexagon/kernel/kgdb.c if (task == NULL) task 119 arch/hexagon/kernel/kgdb.c thread_regs = task_pt_regs(task); task 176 arch/hexagon/kernel/ptrace.c const struct user_regset_view *task_user_regset_view(struct task_struct *task) task 81 arch/hexagon/kernel/traps.c static void do_show_stack(struct task_struct *task, unsigned long *fp, task 93 arch/hexagon/kernel/traps.c if (task == NULL) task 94 arch/hexagon/kernel/traps.c task = current; task 97 arch/hexagon/kernel/traps.c raw_smp_processor_id(), task->comm, task 98 arch/hexagon/kernel/traps.c task_pid_nr(task)); task 101 arch/hexagon/kernel/traps.c if (task == current) { task 106 arch/hexagon/kernel/traps.c task->thread.switch_sp)->fp; task 120 arch/hexagon/kernel/traps.c low = (unsigned long)task_stack_page(task); task 181 arch/hexagon/kernel/traps.c void show_stack(struct task_struct *task, unsigned long *fp) task 184 arch/hexagon/kernel/traps.c do_show_stack(task, fp, 0); task 25 arch/ia64/include/asm/perfmon.h extern void pfm_inherit(struct task_struct *task, struct pt_regs *regs); task 70 arch/ia64/include/asm/perfmon.h int (*fmt_validate)(struct task_struct *task, unsigned int flags, int cpu, void *arg); task 71 arch/ia64/include/asm/perfmon.h int (*fmt_getsize)(struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size); task 72 arch/ia64/include/asm/perfmon.h int (*fmt_init)(struct task_struct *task, void *buf, unsigned int flags, int cpu, void *arg); task 73 arch/ia64/include/asm/perfmon.h int (*fmt_handler)(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs, unsigned long stamp); task 74 arch/ia64/include/asm/perfmon.h int (*fmt_restart)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs); task 75 arch/ia64/include/asm/perfmon.h int (*fmt_restart_active)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs); task 76 arch/ia64/include/asm/perfmon.h int 
(*fmt_exit)(struct task_struct *task, void *buf, struct pt_regs *regs); task 89 arch/ia64/include/asm/perfmon.h extern int pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs); task 90 arch/ia64/include/asm/perfmon.h extern int pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs); task 250 arch/ia64/include/asm/processor.h #define SET_UNALIGN_CTL(task,value) \ task 252 arch/ia64/include/asm/processor.h (task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK) \ task 256 arch/ia64/include/asm/processor.h #define GET_UNALIGN_CTL(task,addr) \ task 258 arch/ia64/include/asm/processor.h put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT, \ task 262 arch/ia64/include/asm/processor.h #define SET_FPEMU_CTL(task,value) \ task 264 arch/ia64/include/asm/processor.h (task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK) \ task 268 arch/ia64/include/asm/processor.h #define GET_FPEMU_CTL(task,addr) \ task 270 arch/ia64/include/asm/processor.h put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT, \ task 92 arch/ia64/include/asm/ptrace.h # define user_stack(task,regs) ((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs)) task 93 arch/ia64/include/asm/ptrace.h # define fsys_mode(task,regs) \ task 95 arch/ia64/include/asm/ptrace.h struct task_struct *_task = (task); \ task 31 arch/ia64/include/asm/switch_to.h extern void ia64_save_extra (struct task_struct *task); task 32 arch/ia64/include/asm/switch_to.h extern void ia64_load_extra (struct task_struct *task); task 17 arch/ia64/include/asm/syscall.h static inline long syscall_get_nr(struct task_struct *task, task 26 arch/ia64/include/asm/syscall.h static inline void syscall_rollback(struct task_struct *task, task 32 arch/ia64/include/asm/syscall.h static inline long syscall_get_error(struct task_struct *task, task 38 arch/ia64/include/asm/syscall.h static inline long syscall_get_return_value(struct task_struct *task, task 44 arch/ia64/include/asm/syscall.h static inline void syscall_set_return_value(struct task_struct *task, task 58 arch/ia64/include/asm/syscall.h extern void ia64_syscall_get_set_arguments(struct task_struct *task, task 60 arch/ia64/include/asm/syscall.h static inline void syscall_get_arguments(struct task_struct *task, task 64 arch/ia64/include/asm/syscall.h ia64_syscall_get_set_arguments(task, regs, args, 0); task 67 arch/ia64/include/asm/syscall.h static inline void syscall_set_arguments(struct task_struct *task, task 71 arch/ia64/include/asm/syscall.h ia64_syscall_get_set_arguments(task, regs, args, 1); task 74 arch/ia64/include/asm/syscall.h static inline int syscall_get_arch(struct task_struct *task) task 25 arch/ia64/include/asm/thread_info.h struct task_struct *task; /* XXX not really needed, except for dup_task_struct() */ task 48 arch/ia64/include/asm/thread_info.h .task = &tsk, \ task 75 arch/ia64/include/asm/thread_info.h task_thread_info(p)->task = (p); task 79 arch/ia64/include/asm/thread_info.h task_thread_info(p)->task = (p); task 68 arch/ia64/include/asm/unwind.h struct task_struct *task; task 433 arch/ia64/kernel/kprobes.c if (ri->task != current) task 450 arch/ia64/kernel/kprobes.c if (ri->task != current) task 1815 arch/ia64/kernel/mca.c ti->task = p; task 392 arch/ia64/kernel/perfmon.c typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs); task 583 
arch/ia64/kernel/perfmon.c pfm_put_task(struct task_struct *task) task 585 arch/ia64/kernel/perfmon.c if (task != current) put_task_struct(task); task 878 arch/ia64/kernel/perfmon.c pfm_mask_monitoring(struct task_struct *task) task 880 arch/ia64/kernel/perfmon.c pfm_context_t *ctx = PFM_GET_CTX(task); task 884 arch/ia64/kernel/perfmon.c DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task))); task 952 arch/ia64/kernel/perfmon.c pfm_restore_monitoring(struct task_struct *task) task 954 arch/ia64/kernel/perfmon.c pfm_context_t *ctx = PFM_GET_CTX(task); task 962 arch/ia64/kernel/perfmon.c if (task != current) { task 963 arch/ia64/kernel/perfmon.c printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current)); task 968 arch/ia64/kernel/perfmon.c task_pid_nr(task), task_pid_nr(current), ctx->ctx_state); task 1023 arch/ia64/kernel/perfmon.c task_pid_nr(task), i, ctx->th_pmcs[i])); task 1080 arch/ia64/kernel/perfmon.c pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx) task 1116 arch/ia64/kernel/perfmon.c pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx) task 1151 arch/ia64/kernel/perfmon.c pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs) task 1154 arch/ia64/kernel/perfmon.c if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs); task 1159 arch/ia64/kernel/perfmon.c pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size) task 1162 arch/ia64/kernel/perfmon.c if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size); task 1168 arch/ia64/kernel/perfmon.c pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, task 1172 arch/ia64/kernel/perfmon.c if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg); task 1177 arch/ia64/kernel/perfmon.c pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags, task 1181 arch/ia64/kernel/perfmon.c if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg); task 1186 arch/ia64/kernel/perfmon.c pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs) task 1189 arch/ia64/kernel/perfmon.c if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs); task 1194 arch/ia64/kernel/perfmon.c pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs) task 1197 arch/ia64/kernel/perfmon.c if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs); task 1284 arch/ia64/kernel/perfmon.c pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu) task 1313 arch/ia64/kernel/perfmon.c pfm_sessions.pfs_sys_session[cpu] = task; task 1405 arch/ia64/kernel/perfmon.c struct task_struct *task = current; task 1409 arch/ia64/kernel/perfmon.c if (task->mm == NULL || size == 0UL || vaddr == NULL) { task 1410 arch/ia64/kernel/perfmon.c printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm); task 1422 arch/ia64/kernel/perfmon.c printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size); task 1770 arch/ia64/kernel/perfmon.c struct task_struct *task; task 1806 arch/ia64/kernel/perfmon.c task = PFM_CTX_TASK(ctx); task 1807 arch/ia64/kernel/perfmon.c regs = task_pt_regs(task); 
task 1811 arch/ia64/kernel/perfmon.c task == current ? 1 : 0)); task 1820 arch/ia64/kernel/perfmon.c if (task == current) { task 1909 arch/ia64/kernel/perfmon.c struct task_struct *task; task 1936 arch/ia64/kernel/perfmon.c task = PFM_CTX_TASK(ctx); task 1937 arch/ia64/kernel/perfmon.c regs = task_pt_regs(task); task 1941 arch/ia64/kernel/perfmon.c task == current ? 1 : 0)); task 2017 arch/ia64/kernel/perfmon.c else if (task != current) { task 2024 arch/ia64/kernel/perfmon.c DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task))); task 2196 arch/ia64/kernel/perfmon.c pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr) task 2198 arch/ia64/kernel/perfmon.c struct mm_struct *mm = task->mm; task 2219 arch/ia64/kernel/perfmon.c if (size > task_rlimit(task, RLIMIT_MEMLOCK)) task 2261 arch/ia64/kernel/perfmon.c down_write(&task->mm->mmap_sem); task 2267 arch/ia64/kernel/perfmon.c up_write(&task->mm->mmap_sem); task 2278 arch/ia64/kernel/perfmon.c up_write(&task->mm->mmap_sem); task 2289 arch/ia64/kernel/perfmon.c up_write(&task->mm->mmap_sem); task 2311 arch/ia64/kernel/perfmon.c pfm_bad_permissions(struct task_struct *task) task 2319 arch/ia64/kernel/perfmon.c tcred = __task_cred(task); task 2343 arch/ia64/kernel/perfmon.c pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx) task 2368 arch/ia64/kernel/perfmon.c pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned int ctx_flags, task 2381 arch/ia64/kernel/perfmon.c DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task))); task 2390 arch/ia64/kernel/perfmon.c ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg); task 2392 arch/ia64/kernel/perfmon.c DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret)); task 2403 arch/ia64/kernel/perfmon.c ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size); task 2416 arch/ia64/kernel/perfmon.c ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg); task 2509 arch/ia64/kernel/perfmon.c pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task) task 2514 arch/ia64/kernel/perfmon.c if (task->mm == NULL) { task 2515 arch/ia64/kernel/perfmon.c DPRINT(("task [%d] has not memory context (kernel thread)\n", task_pid_nr(task))); task 2518 arch/ia64/kernel/perfmon.c if (pfm_bad_permissions(task)) { task 2519 arch/ia64/kernel/perfmon.c DPRINT(("no permission to attach to [%d]\n", task_pid_nr(task))); task 2525 arch/ia64/kernel/perfmon.c if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) { task 2526 arch/ia64/kernel/perfmon.c DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task))); task 2530 arch/ia64/kernel/perfmon.c if (task->exit_state == EXIT_ZOMBIE) { task 2531 arch/ia64/kernel/perfmon.c DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task))); task 2538 arch/ia64/kernel/perfmon.c if (task == current) return 0; task 2540 arch/ia64/kernel/perfmon.c if (!task_is_stopped_or_traced(task)) { task 2541 arch/ia64/kernel/perfmon.c DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state)); task 2547 arch/ia64/kernel/perfmon.c wait_task_inactive(task, 0); task 2555 arch/ia64/kernel/perfmon.c pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task) task 2572 arch/ia64/kernel/perfmon.c *task = p; task 2768 arch/ia64/kernel/perfmon.c struct task_struct *task; task 2782 arch/ia64/kernel/perfmon.c task = ctx->ctx_task; 
task 2797 arch/ia64/kernel/perfmon.c can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0; task 2880 arch/ia64/kernel/perfmon.c ret = (*wr_func)(task, ctx, cnum, &value, regs); task 3008 arch/ia64/kernel/perfmon.c struct task_struct *task; task 3022 arch/ia64/kernel/perfmon.c task = ctx->ctx_task; task 3040 arch/ia64/kernel/perfmon.c can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0; task 3062 arch/ia64/kernel/perfmon.c ret = (*wr_func)(task, ctx, cnum, &v, regs); task 3204 arch/ia64/kernel/perfmon.c struct task_struct *task; task 3222 arch/ia64/kernel/perfmon.c task = ctx->ctx_task; task 3239 arch/ia64/kernel/perfmon.c can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0; task 3333 arch/ia64/kernel/perfmon.c pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) task 3347 arch/ia64/kernel/perfmon.c if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; task 3354 arch/ia64/kernel/perfmon.c pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) task 3368 arch/ia64/kernel/perfmon.c if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; task 3379 arch/ia64/kernel/perfmon.c pfm_use_debug_registers(struct task_struct *task) task 3381 arch/ia64/kernel/perfmon.c pfm_context_t *ctx = task->thread.pfm_context; task 3387 arch/ia64/kernel/perfmon.c DPRINT(("called for [%d]\n", task_pid_nr(task))); task 3392 arch/ia64/kernel/perfmon.c if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0; task 3418 arch/ia64/kernel/perfmon.c task_pid_nr(task), ret)); task 3434 arch/ia64/kernel/perfmon.c pfm_release_debug_registers(struct task_struct *task) task 3443 arch/ia64/kernel/perfmon.c printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task)); task 3457 arch/ia64/kernel/perfmon.c struct task_struct *task; task 3466 arch/ia64/kernel/perfmon.c task = PFM_CTX_TASK(ctx); task 3494 arch/ia64/kernel/perfmon.c if (unlikely(task == NULL)) { task 3499 arch/ia64/kernel/perfmon.c if (task == current || is_system) { task 3504 arch/ia64/kernel/perfmon.c task_pid_nr(task), task 3515 arch/ia64/kernel/perfmon.c ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs); task 3517 arch/ia64/kernel/perfmon.c ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs); task 3528 arch/ia64/kernel/perfmon.c DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task))); task 3530 arch/ia64/kernel/perfmon.c if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task); task 3532 arch/ia64/kernel/perfmon.c DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task))); task 3589 arch/ia64/kernel/perfmon.c DPRINT(("unblocking [%d]\n", task_pid_nr(task))); task 3592 arch/ia64/kernel/perfmon.c DPRINT(("[%d] armed exit trap\n", task_pid_nr(task))); task 3596 arch/ia64/kernel/perfmon.c PFM_SET_WORK_PENDING(task, 1); task 3598 arch/ia64/kernel/perfmon.c set_notify_resume(task); task 3630 arch/ia64/kernel/perfmon.c struct task_struct *task; task 3645 arch/ia64/kernel/perfmon.c task = ctx->ctx_task; task 3654 arch/ia64/kernel/perfmon.c thread = &task->thread; task 3664 arch/ia64/kernel/perfmon.c can_access_pmu = GET_PMU_OWNER() == task || is_system ? 
1 : 0; task 3680 arch/ia64/kernel/perfmon.c DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task))); task 3721 arch/ia64/kernel/perfmon.c DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task))); task 3834 arch/ia64/kernel/perfmon.c pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) task 3848 arch/ia64/kernel/perfmon.c if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; task 3855 arch/ia64/kernel/perfmon.c pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) task 3869 arch/ia64/kernel/perfmon.c if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; task 3889 arch/ia64/kernel/perfmon.c struct task_struct *task = PFM_CTX_TASK(ctx); task 3948 arch/ia64/kernel/perfmon.c if (task == current) { task 3957 arch/ia64/kernel/perfmon.c tregs = task_pt_regs(task); task 3968 arch/ia64/kernel/perfmon.c DPRINT(("task=[%d]\n", task_pid_nr(task))); task 4107 arch/ia64/kernel/perfmon.c struct task_struct *task; task 4139 arch/ia64/kernel/perfmon.c ret = pfm_get_task(ctx, req->load_pid, &task); task 4150 arch/ia64/kernel/perfmon.c if (is_system && task != current) { task 4156 arch/ia64/kernel/perfmon.c thread = &task->thread; task 4174 arch/ia64/kernel/perfmon.c task_pid_nr(task))); task 4178 arch/ia64/kernel/perfmon.c DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs)); task 4238 arch/ia64/kernel/perfmon.c ctx->ctx_task = task; task 4255 arch/ia64/kernel/perfmon.c pfm_copy_pmds(task, ctx); task 4256 arch/ia64/kernel/perfmon.c pfm_copy_pmcs(task, ctx); task 4264 arch/ia64/kernel/perfmon.c if (task == current) { task 4270 arch/ia64/kernel/perfmon.c DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task))); task 4303 arch/ia64/kernel/perfmon.c SET_PMU_OWNER(task, ctx); task 4305 arch/ia64/kernel/perfmon.c DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task))); task 4310 arch/ia64/kernel/perfmon.c regs = task_pt_regs(task); task 4337 arch/ia64/kernel/perfmon.c if (is_system == 0 && task != current) { task 4338 arch/ia64/kernel/perfmon.c pfm_put_task(task); task 4364 arch/ia64/kernel/perfmon.c struct task_struct *task = PFM_CTX_TASK(ctx); task 4369 arch/ia64/kernel/perfmon.c DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1)); task 4421 arch/ia64/kernel/perfmon.c task->thread.pfm_context = NULL; task 4436 arch/ia64/kernel/perfmon.c tregs = task == current ? 
regs : task_pt_regs(task); task 4438 arch/ia64/kernel/perfmon.c if (task == current) { task 4444 arch/ia64/kernel/perfmon.c DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task))); task 4450 arch/ia64/kernel/perfmon.c pfm_flush_pmds(task, ctx); task 4470 arch/ia64/kernel/perfmon.c task->thread.flags &= ~IA64_THREAD_PM_VALID; task 4475 arch/ia64/kernel/perfmon.c task->thread.pfm_context = NULL; task 4478 arch/ia64/kernel/perfmon.c PFM_SET_WORK_PENDING(task, 0); task 4484 arch/ia64/kernel/perfmon.c DPRINT(("disconnected [%d] from context\n", task_pid_nr(task))); task 4495 arch/ia64/kernel/perfmon.c pfm_exit_thread(struct task_struct *task) task 4499 arch/ia64/kernel/perfmon.c struct pt_regs *regs = task_pt_regs(task); task 4503 arch/ia64/kernel/perfmon.c ctx = PFM_GET_CTX(task); task 4507 arch/ia64/kernel/perfmon.c DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task))); task 4516 arch/ia64/kernel/perfmon.c printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task)); task 4522 arch/ia64/kernel/perfmon.c printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret); task 4531 arch/ia64/kernel/perfmon.c printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret); task 4536 arch/ia64/kernel/perfmon.c printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task), state); task 4605 arch/ia64/kernel/perfmon.c struct task_struct *task; task 4610 arch/ia64/kernel/perfmon.c task = ctx->ctx_task; task 4612 arch/ia64/kernel/perfmon.c if (task == NULL) { task 4620 arch/ia64/kernel/perfmon.c task_pid_nr(task), task 4621 arch/ia64/kernel/perfmon.c task->state, PFM_CMD_STOPPED(cmd))); task 4630 arch/ia64/kernel/perfmon.c if (task == current || ctx->ctx_fl_system) return 0; task 4666 arch/ia64/kernel/perfmon.c if (!task_is_stopped_or_traced(task)) { task 4667 arch/ia64/kernel/perfmon.c DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task))); task 4688 arch/ia64/kernel/perfmon.c wait_task_inactive(task, 0); task 5120 arch/ia64/kernel/perfmon.c static void pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, task 5147 arch/ia64/kernel/perfmon.c task ? task_pid_nr(task): -1, task 5250 arch/ia64/kernel/perfmon.c ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp); task 5320 arch/ia64/kernel/perfmon.c PFM_SET_WORK_PENDING(task, 1); task 5326 arch/ia64/kernel/perfmon.c set_notify_resume(task); task 5337 arch/ia64/kernel/perfmon.c PFM_GET_WORK_PENDING(task), task 5346 arch/ia64/kernel/perfmon.c pfm_mask_monitoring(task); task 5361 arch/ia64/kernel/perfmon.c task ? task_pid_nr(task) : -1, task 5394 arch/ia64/kernel/perfmon.c DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? 
task_pid_nr(task): -1)); task 5404 arch/ia64/kernel/perfmon.c struct task_struct *task; task 5418 arch/ia64/kernel/perfmon.c task = GET_PMU_OWNER(); task 5425 arch/ia64/kernel/perfmon.c if (PMC0_HAS_OVFL(pmc0) && task) { task 5433 arch/ia64/kernel/perfmon.c if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0) task 5438 arch/ia64/kernel/perfmon.c pfm_overflow_handler(task, ctx, pmc0, regs); task 5455 arch/ia64/kernel/perfmon.c this_cpu, task_pid_nr(task)); task 5461 arch/ia64/kernel/perfmon.c task_pid_nr(task)); task 5669 arch/ia64/kernel/perfmon.c pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin) task 5681 arch/ia64/kernel/perfmon.c if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) { task 5682 arch/ia64/kernel/perfmon.c regs = task_pt_regs(task); task 5719 arch/ia64/kernel/perfmon.c struct task_struct *task = ctx->ctx_task; task 5724 arch/ia64/kernel/perfmon.c if (GET_PMU_OWNER() == task) { task 5733 arch/ia64/kernel/perfmon.c PFM_SET_WORK_PENDING(task, 0); task 5735 arch/ia64/kernel/perfmon.c task->thread.pfm_context = NULL; task 5736 arch/ia64/kernel/perfmon.c task->thread.flags &= ~IA64_THREAD_PM_VALID; task 5738 arch/ia64/kernel/perfmon.c DPRINT(("force cleanup for [%d]\n", task_pid_nr(task))); task 5746 arch/ia64/kernel/perfmon.c pfm_save_regs(struct task_struct *task) task 5753 arch/ia64/kernel/perfmon.c ctx = PFM_GET_CTX(task); task 5764 arch/ia64/kernel/perfmon.c struct pt_regs *regs = task_pt_regs(task); task 5835 arch/ia64/kernel/perfmon.c pfm_save_regs(struct task_struct *task) task 5840 arch/ia64/kernel/perfmon.c ctx = PFM_GET_CTX(task); task 5866 arch/ia64/kernel/perfmon.c pfm_lazy_save_regs (struct task_struct *task) task 5875 arch/ia64/kernel/perfmon.c ctx = PFM_GET_CTX(task); task 5928 arch/ia64/kernel/perfmon.c pfm_load_regs (struct task_struct *task) task 5936 arch/ia64/kernel/perfmon.c ctx = PFM_GET_CTX(task); task 5944 arch/ia64/kernel/perfmon.c if (unlikely((task->thread.flags & IA64_THREAD_PM_VALID) == 0)) return; task 5960 arch/ia64/kernel/perfmon.c struct pt_regs *regs = task_pt_regs(task); task 6070 arch/ia64/kernel/perfmon.c SET_PMU_OWNER(task, ctx); task 6091 arch/ia64/kernel/perfmon.c pfm_load_regs (struct task_struct *task) task 6100 arch/ia64/kernel/perfmon.c ctx = PFM_GET_CTX(task); task 6133 arch/ia64/kernel/perfmon.c if (likely(owner == task)) { task 6191 arch/ia64/kernel/perfmon.c SET_PMU_OWNER(task, ctx); task 6207 arch/ia64/kernel/perfmon.c pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx) task 6218 arch/ia64/kernel/perfmon.c is_self = ctx->ctx_task == task ? 1 : 0; task 6227 arch/ia64/kernel/perfmon.c can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id()); task 6282 arch/ia64/kernel/perfmon.c task_pid_nr(task), task 6304 arch/ia64/kernel/perfmon.c DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i)); task 6308 arch/ia64/kernel/perfmon.c DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val)); task 6608 arch/ia64/kernel/perfmon.c struct task_struct *task; task 6633 arch/ia64/kernel/perfmon.c task = GET_PMU_OWNER(); task 6636 arch/ia64/kernel/perfmon.c printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? 
task_pid_nr(task) : -1, ctx); task 6680 arch/ia64/kernel/perfmon.c pfm_inherit(struct task_struct *task, struct pt_regs *regs) task 6684 arch/ia64/kernel/perfmon.c DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task))); task 6686 arch/ia64/kernel/perfmon.c thread = &task->thread; task 6693 arch/ia64/kernel/perfmon.c PFM_SET_WORK_PENDING(task, 0); task 42 arch/ia64/kernel/perfmon_default_smpl.c default_validate(struct task_struct *task, unsigned int flags, int cpu, void *data) task 48 arch/ia64/kernel/perfmon_default_smpl.c DPRINT(("[%d] no argument passed\n", task_pid_nr(task))); task 52 arch/ia64/kernel/perfmon_default_smpl.c DPRINT(("[%d] validate flags=0x%x CPU%d\n", task_pid_nr(task), flags, cpu)); task 65 arch/ia64/kernel/perfmon_default_smpl.c default_get_size(struct task_struct *task, unsigned int flags, int cpu, void *data, unsigned long *size) task 78 arch/ia64/kernel/perfmon_default_smpl.c default_init(struct task_struct *task, void *buf, unsigned int flags, int cpu, void *data) task 92 arch/ia64/kernel/perfmon_default_smpl.c task_pid_nr(task), task 103 arch/ia64/kernel/perfmon_default_smpl.c default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs, unsigned long stamp) task 113 arch/ia64/kernel/perfmon_default_smpl.c if (unlikely(buf == NULL || arg == NULL|| regs == NULL || task == NULL)) { task 114 arch/ia64/kernel/perfmon_default_smpl.c DPRINT(("[%d] invalid arguments buf=%p arg=%p\n", task->pid, buf, arg)); task 143 arch/ia64/kernel/perfmon_default_smpl.c task->pid, task 231 arch/ia64/kernel/perfmon_default_smpl.c default_restart(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs) task 247 arch/ia64/kernel/perfmon_default_smpl.c default_exit(struct task_struct *task, void *buf, struct pt_regs *regs) task 249 arch/ia64/kernel/perfmon_default_smpl.c DPRINT(("[%d] exit(%p)\n", task_pid_nr(task), buf)); task 9 arch/ia64/kernel/perfmon_itanium.h static int pfm_ita_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs); task 52 arch/ia64/kernel/perfmon_itanium.h pfm_ita_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs) task 71 arch/ia64/kernel/perfmon_itanium.h if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL; task 90 arch/ia64/kernel/perfmon_itanium.h if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL; task 9 arch/ia64/kernel/perfmon_mckinley.h static int pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs); task 78 arch/ia64/kernel/perfmon_mckinley.h pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs) task 110 arch/ia64/kernel/perfmon_mckinley.h if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL; task 128 arch/ia64/kernel/perfmon_mckinley.h if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL; task 9 arch/ia64/kernel/perfmon_montecito.h static int pfm_mont_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs); task 156 arch/ia64/kernel/perfmon_montecito.h pfm_mont_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs) task 194 arch/ia64/kernel/perfmon_montecito.h if (task && 
(task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL; task 214 arch/ia64/kernel/perfmon_montecito.h if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL; task 87 arch/ia64/kernel/process.c show_stack (struct task_struct *task, unsigned long *sp) task 89 arch/ia64/kernel/process.c if (!task) task 94 arch/ia64/kernel/process.c unw_init_from_blocked_task(&info, task); task 264 arch/ia64/kernel/process.c ia64_save_extra (struct task_struct *task) task 270 arch/ia64/kernel/process.c if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) task 271 arch/ia64/kernel/process.c ia64_save_debug_regs(&task->thread.dbr[0]); task 274 arch/ia64/kernel/process.c if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0) task 275 arch/ia64/kernel/process.c pfm_save_regs(task); task 279 arch/ia64/kernel/process.c pfm_syst_wide_update_task(task, info, 0); task 284 arch/ia64/kernel/process.c ia64_load_extra (struct task_struct *task) task 290 arch/ia64/kernel/process.c if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) task 291 arch/ia64/kernel/process.c ia64_load_debug_regs(&task->thread.dbr[0]); task 294 arch/ia64/kernel/process.c if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0) task 295 arch/ia64/kernel/process.c pfm_load_regs(task); task 299 arch/ia64/kernel/process.c pfm_syst_wide_update_task(task, info, 1); task 444 arch/ia64/kernel/process.c do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *arg) task 461 arch/ia64/kernel/process.c urbs_end = ia64_get_user_rbs_end(task, pt, &cfm); task 463 arch/ia64/kernel/process.c if (ia64_sync_user_rbs(task, info->sw, pt->ar_bspstore, urbs_end) < 0) task 466 arch/ia64/kernel/process.c ia64_peek(task, info->sw, urbs_end, (long) ia64_rse_rnat_addr((long *) urbs_end), task 517 arch/ia64/kernel/process.c do_dump_task_fpu (struct task_struct *task, struct unw_frame_info *info, void *arg) task 532 arch/ia64/kernel/process.c ia64_flush_fph(task); task 533 arch/ia64/kernel/process.c if ((task->thread.flags & IA64_THREAD_FPH_VALID) != 0) task 534 arch/ia64/kernel/process.c memcpy(dst + 32, task->thread.fph, 96*16); task 252 arch/ia64/kernel/ptrace.c get_rnat (struct task_struct *task, struct switch_stack *sw, task 262 arch/ia64/kernel/ptrace.c pt = task_pt_regs(task); task 312 arch/ia64/kernel/ptrace.c put_rnat (struct task_struct *task, struct switch_stack *sw, task 322 arch/ia64/kernel/ptrace.c pt = task_pt_regs(task); task 590 arch/ia64/kernel/ptrace.c pt = task_pt_regs(info->task); task 591 arch/ia64/kernel/ptrace.c urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL); task 593 arch/ia64/kernel/ptrace.c fn(info->task, info->sw, pt->ar_bspstore, urbs_end); task 685 arch/ia64/kernel/ptrace.c ia64_flush_fph (struct task_struct *task) task 687 arch/ia64/kernel/ptrace.c struct ia64_psr *psr = ia64_psr(task_pt_regs(task)); task 694 arch/ia64/kernel/ptrace.c if (ia64_is_local_fpu_owner(task) && psr->mfh) { task 696 arch/ia64/kernel/ptrace.c task->thread.flags |= IA64_THREAD_FPH_VALID; task 697 arch/ia64/kernel/ptrace.c ia64_save_fpu(&task->thread.fph[0]); task 711 arch/ia64/kernel/ptrace.c ia64_sync_fph (struct task_struct *task) task 713 arch/ia64/kernel/ptrace.c struct ia64_psr *psr = ia64_psr(task_pt_regs(task)); task 715 arch/ia64/kernel/ptrace.c ia64_flush_fph(task); task 716 arch/ia64/kernel/ptrace.c if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) { task 717 arch/ia64/kernel/ptrace.c task->thread.flags |= IA64_THREAD_FPH_VALID; task 718 arch/ia64/kernel/ptrace.c memset(&task->thread.fph, 0, 
task 718 arch/ia64/kernel/ptrace.c memset(&task->thread.fph, 0, sizeof(task->thread.fph)); task 720 arch/ia64/kernel/ptrace.c ia64_drop_fpu(task); task 1695 arch/ia64/kernel/ptrace.c struct task_struct *task = dst->target; task 1735 arch/ia64/kernel/ptrace.c if (task->thread.flags & IA64_THREAD_FPH_VALID) task 2157 arch/ia64/kernel/ptrace.c krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8; task 2181 arch/ia64/kernel/ptrace.c void ia64_syscall_get_set_arguments(struct task_struct *task, task 2192 arch/ia64/kernel/ptrace.c if (task == current) task 2197 arch/ia64/kernel/ptrace.c unw_init_from_blocked_task(&ufi, task); task 473 arch/ia64/kernel/unwind.c struct task_struct *t = info->task; task 1976 arch/ia64/kernel/unwind.c if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp) task 2035 arch/ia64/kernel/unwind.c info->task = t; task 20 arch/ia64/oprofile/perfmon.c perfmon_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, task 22 arch/m68k/include/asm/current.h return(current_thread_info()->task); task 97 arch/m68k/include/asm/mmu_context.h static inline void load_ksp_mmu(struct task_struct *task) task 108 arch/m68k/include/asm/mmu_context.h mmuar = task->thread.ksp; task 119 arch/m68k/include/asm/mmu_context.h pr_info("load_ksp_mmu: non-kernel mm found: 0x%p\n", task->mm); task 120 arch/m68k/include/asm/mmu_context.h mm = task->mm; task 7 arch/m68k/include/asm/syscall.h static inline int syscall_get_arch(struct task_struct *task) task 28 arch/m68k/include/asm/thread_info.h struct task_struct *task; /* main task structure */ task 39 arch/m68k/include/asm/thread_info.h .task = &tsk, \ task 75 arch/m68k/kernel/ptrace.c static inline long get_reg(struct task_struct *task, int regno) task 80 arch/m68k/kernel/ptrace.c addr = &task->thread.usp; task 82 arch/m68k/kernel/ptrace.c addr = (unsigned long *)(task->thread.esp0 + regoff[regno]); task 87 arch/m68k/kernel/ptrace.c long stkadj = *(long *)(task->thread.esp0 + PT_REG(stkadj)); task 99 arch/m68k/kernel/ptrace.c static inline int put_reg(struct task_struct *task, int regno, task 105 arch/m68k/kernel/ptrace.c addr = &task->thread.usp; task 107 arch/m68k/kernel/ptrace.c addr = (unsigned long *)(task->thread.esp0 + regoff[regno]); task 112 arch/m68k/kernel/ptrace.c long stkadj = *(long *)(task->thread.esp0 + PT_REG(stkadj)); task 938 arch/m68k/kernel/traps.c void show_stack(struct task_struct *task, unsigned long *stack) task 945 arch/m68k/kernel/traps.c if (task) task 946 arch/m68k/kernel/traps.c stack = (unsigned long *)task->thread.esp0; task 116 arch/microblaze/include/asm/processor.h # define task_tos(task) ((unsigned long)(task) + KERNEL_STACK_SIZE) task 117 arch/microblaze/include/asm/processor.h # define task_regs(task) ((struct pt_regs *)task_tos(task) - 1) task 122 arch/microblaze/include/asm/processor.h # define task_sp(task) (task_regs(task)->r1) task 123 arch/microblaze/include/asm/processor.h # define task_pc(task) (task_regs(task)->pc) task 125 arch/microblaze/include/asm/processor.h # define KSTK_EIP(task) (task_pc(task)) task 126 arch/microblaze/include/asm/processor.h # define KSTK_ESP(task) (task_sp(task)) task 11 arch/microblaze/include/asm/syscall.h static inline long syscall_get_nr(struct task_struct *task, task 17 arch/microblaze/include/asm/syscall.h static inline void syscall_rollback(struct task_struct *task, task 23 arch/microblaze/include/asm/syscall.h static inline long syscall_get_error(struct task_struct *task, task 29 arch/microblaze/include/asm/syscall.h static inline long syscall_get_return_value(struct task_struct *task, task 35
arch/microblaze/include/asm/syscall.h static inline void syscall_set_return_value(struct task_struct *task, task 83 arch/microblaze/include/asm/syscall.h static inline void syscall_get_arguments(struct task_struct *task, task 94 arch/microblaze/include/asm/syscall.h static inline void syscall_set_arguments(struct task_struct *task, task 108 arch/microblaze/include/asm/syscall.h static inline int syscall_get_arch(struct task_struct *task) task 67 arch/microblaze/include/asm/thread_info.h struct task_struct *task; /* main task structure */ task 82 arch/microblaze/include/asm/thread_info.h .task = &tsk, \ task 26 arch/microblaze/include/asm/unwind.h void microblaze_unwind(struct task_struct *task, struct stack_trace *trace); task 90 arch/microblaze/kernel/asm-offsets.c DEFINE(TI_TASK, offsetof(struct thread_info, task)); task 34 arch/microblaze/kernel/traps.c void show_stack(struct task_struct *task, unsigned long *sp) task 40 arch/microblaze/kernel/traps.c if (task) { task 42 arch/microblaze/kernel/traps.c (task->stack))->cpu_context.r1; task 71 arch/microblaze/kernel/traps.c microblaze_unwind(task, NULL); task 74 arch/microblaze/kernel/traps.c if (!task) task 75 arch/microblaze/kernel/traps.c task = current; task 77 arch/microblaze/kernel/traps.c debug_show_held_locks(task); task 154 arch/microblaze/kernel/unwind.c static void microblaze_unwind_inner(struct task_struct *task, task 164 arch/microblaze/kernel/unwind.c static inline void unwind_trap(struct task_struct *task, unsigned long pc, task 170 arch/microblaze/kernel/unwind.c static inline void unwind_trap(struct task_struct *task, unsigned long pc, task 174 arch/microblaze/kernel/unwind.c microblaze_unwind_inner(task, regs->pc, regs->r1, regs->r15, trace); task 188 arch/microblaze/kernel/unwind.c static void microblaze_unwind_inner(struct task_struct *task, task 219 arch/microblaze/kernel/unwind.c microblaze_unwind_inner(task, regs->r17 - 4, task 232 arch/microblaze/kernel/unwind.c unwind_trap(task, pc, fp, trace); task 250 arch/microblaze/kernel/unwind.c if (unlikely(pc == task_pt_regs(task)->pc)) { task 253 arch/microblaze/kernel/unwind.c (unsigned long) task->pid, task 254 arch/microblaze/kernel/unwind.c task->comm); task 286 arch/microblaze/kernel/unwind.c void microblaze_unwind(struct task_struct *task, struct stack_trace *trace) task 288 arch/microblaze/kernel/unwind.c if (task) { task 289 arch/microblaze/kernel/unwind.c if (task == current) { task 290 arch/microblaze/kernel/unwind.c const struct pt_regs *regs = task_pt_regs(task); task 291 arch/microblaze/kernel/unwind.c microblaze_unwind_inner(task, regs->pc, regs->r1, task 295 arch/microblaze/kernel/unwind.c (struct thread_info *)(task->stack); task 299 arch/microblaze/kernel/unwind.c microblaze_unwind_inner(task, task 431 arch/mips/include/asm/processor.h extern int mips_get_process_fp_mode(struct task_struct *task); task 432 arch/mips/include/asm/processor.h extern int mips_set_process_fp_mode(struct task_struct *task, task 435 arch/mips/include/asm/processor.h #define GET_FP_MODE(task) mips_get_process_fp_mode(task) task 436 arch/mips/include/asm/processor.h #define SET_FP_MODE(task,value) mips_set_process_fp_mode(task, value) task 11 arch/mips/include/asm/stacktrace.h extern unsigned long unwind_stack(struct task_struct *task, unsigned long *sp, task 19 arch/mips/include/asm/stacktrace.h static inline unsigned long unwind_stack(struct task_struct *task, task 29 arch/mips/include/asm/syscall.h static inline bool mips_syscall_is_indirect(struct task_struct *task, task 34 
arch/mips/include/asm/syscall.h test_tsk_thread_flag(task, TIF_32BIT_REGS)) && task 38 arch/mips/include/asm/syscall.h static inline long syscall_get_nr(struct task_struct *task, task 44 arch/mips/include/asm/syscall.h static inline void mips_syscall_update_nr(struct task_struct *task, task 51 arch/mips/include/asm/syscall.h if (mips_syscall_is_indirect(task, regs)) task 52 arch/mips/include/asm/syscall.h task_thread_info(task)->syscall = regs->regs[4]; task 54 arch/mips/include/asm/syscall.h task_thread_info(task)->syscall = regs->regs[2]; task 58 arch/mips/include/asm/syscall.h struct task_struct *task, struct pt_regs *regs, unsigned int n) task 77 arch/mips/include/asm/syscall.h if (test_tsk_thread_flag(task, TIF_32BIT_REGS)) task 93 arch/mips/include/asm/syscall.h static inline long syscall_get_error(struct task_struct *task, task 99 arch/mips/include/asm/syscall.h static inline long syscall_get_return_value(struct task_struct *task, task 105 arch/mips/include/asm/syscall.h static inline void syscall_rollback(struct task_struct *task, task 111 arch/mips/include/asm/syscall.h static inline void syscall_set_return_value(struct task_struct *task, task 124 arch/mips/include/asm/syscall.h static inline void syscall_get_arguments(struct task_struct *task, task 132 arch/mips/include/asm/syscall.h if (mips_syscall_is_indirect(task, regs)) task 136 arch/mips/include/asm/syscall.h mips_get_syscall_arg(args++, task, regs, i++); task 143 arch/mips/include/asm/syscall.h static inline int syscall_get_arch(struct task_struct *task) task 147 arch/mips/include/asm/syscall.h if (!test_tsk_thread_flag(task, TIF_32BIT_REGS)) { task 150 arch/mips/include/asm/syscall.h if (test_tsk_thread_flag(task, TIF_32BIT_ADDR)) task 26 arch/mips/include/asm/thread_info.h struct task_struct *task; /* main task structure */ task 45 arch/mips/include/asm/thread_info.h .task = &tsk, \ task 21 arch/mips/include/asm/watch.h #define __restore_watch(task) do { \ task 23 arch/mips/include/asm/watch.h &task_thread_info(task)->flags))) { \ task 24 arch/mips/include/asm/watch.h mips_install_watch_registers(task); \ task 29 arch/mips/include/asm/watch.h #define __restore_watch(task) do {} while (0) task 96 arch/mips/kernel/asm-offsets.c OFFSET(TI_TASK, thread_info, task); task 514 arch/mips/kernel/kprobes.c if (ri->task != current) task 600 arch/mips/kernel/process.c unsigned long unwind_stack(struct task_struct *task, unsigned long *sp, task 614 arch/mips/kernel/process.c stack_page = (unsigned long)task_stack_page(task); task 623 arch/mips/kernel/process.c unsigned long get_wchan(struct task_struct *task) task 631 arch/mips/kernel/process.c if (!task || task == current || task->state == TASK_RUNNING) task 633 arch/mips/kernel/process.c if (!task_stack_page(task)) task 636 arch/mips/kernel/process.c pc = thread_saved_pc(task); task 639 arch/mips/kernel/process.c sp = task->thread.reg29 + schedule_mfi.frame_size; task 642 arch/mips/kernel/process.c pc = unwind_stack(task, &sp, pc, &ra); task 722 arch/mips/kernel/process.c int mips_get_process_fp_mode(struct task_struct *task) task 726 arch/mips/kernel/process.c if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS)) task 728 arch/mips/kernel/process.c if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS)) task 746 arch/mips/kernel/process.c int mips_set_process_fp_mode(struct task_struct *task, unsigned int value) task 754 arch/mips/kernel/process.c if (value == mips_get_process_fp_mode(task)) task 785 arch/mips/kernel/process.c for_each_thread(task, t) {
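
The mips get_wchan/unwind_stack entries just listed show the usual shape of get_wchan(): refuse current and running tasks, seed the walk from the task's saved thread state, then unwind frame by frame until the pc leaves the scheduler. Below is a rough userspace sketch of that control flow; the flattened call chain and the mock_task/in_sched_functions stand-ins are hypothetical, not the real frame walk.

    #include <stdio.h>

    #define TASK_RUNNING       0
    #define TASK_INTERRUPTIBLE 1

    /* Hypothetical flattened call chain: each entry is a saved pc,
     * innermost frame first. */
    struct mock_task {
        int state;
        unsigned long saved_pc[4];
        int depth;
    };

    /* Stand-in for in_sched_functions(): pretend low addresses
     * belong to the scheduler. */
    static int in_sched_functions(unsigned long pc) { return pc < 0x2000; }

    /* Loosely modelled on mips get_wchan(): bail out for current or
     * running tasks, then unwind until the pc leaves the scheduler. */
    static unsigned long get_wchan(const struct mock_task *task,
                                   const struct mock_task *current_task)
    {
        int i;

        if (!task || task == current_task || task->state == TASK_RUNNING)
            return 0;
        for (i = 0; i < task->depth; i++)
            if (!in_sched_functions(task->saved_pc[i]))
                return task->saved_pc[i];
        return 0;
    }

    int main(void)
    {
        struct mock_task cur = { .state = TASK_RUNNING };
        struct mock_task blocked = {
            .state = TASK_INTERRUPTIBLE,
            .saved_pc = { 0x1000, 0x1800, 0x4020, 0x7fff }, /* sched, sched, caller... */
            .depth = 4,
        };
        printf("wchan=%#lx\n", get_wchan(&blocked, &cur)); /* prints 0x4020 */
        return 0;
    }
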
task 812 arch/mips/kernel/process.c for_each_thread(task, t) task 1147 arch/mips/kernel/ptrace.c const struct user_regset_view *task_user_regset_view(struct task_struct *task) task 1153 arch/mips/kernel/ptrace.c if (test_tsk_thread_flag(task, TIF_32BIT_REGS)) task 1157 arch/mips/kernel/ptrace.c if (test_tsk_thread_flag(task, TIF_32BIT_ADDR)) task 141 arch/mips/kernel/traps.c static void show_backtrace(struct task_struct *task, const struct pt_regs *regs) task 147 arch/mips/kernel/traps.c if (!task) task 148 arch/mips/kernel/traps.c task = current; task 157 arch/mips/kernel/traps.c pc = unwind_stack(task, &sp, pc, &ra); task 166 arch/mips/kernel/traps.c static void show_stacktrace(struct task_struct *task, task 195 arch/mips/kernel/traps.c show_backtrace(task, regs); task 198 arch/mips/kernel/traps.c void show_stack(struct task_struct *task, unsigned long *sp) task 209 arch/mips/kernel/traps.c if (task && task != current) { task 210 arch/mips/kernel/traps.c regs.regs[29] = task->thread.reg29; task 212 arch/mips/kernel/traps.c regs.cp0_epc = task->thread.reg31; task 227 arch/mips/kernel/traps.c show_stacktrace(task, &regs); task 90 arch/nds32/include/asm/processor.h #define task_pt_regs(task) \ task 91 arch/nds32/include/asm/processor.h ((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE \ task 30 arch/nds32/include/asm/syscall.h syscall_get_nr(struct task_struct *task, struct pt_regs *regs) task 52 arch/nds32/include/asm/syscall.h syscall_rollback(struct task_struct *task, struct pt_regs *regs) task 68 arch/nds32/include/asm/syscall.h syscall_get_error(struct task_struct *task, struct pt_regs *regs) task 86 arch/nds32/include/asm/syscall.h syscall_get_return_value(struct task_struct *task, struct pt_regs *regs) task 107 arch/nds32/include/asm/syscall.h syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, task 127 arch/nds32/include/asm/syscall.h syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, task 148 arch/nds32/include/asm/syscall.h syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, task 158 arch/nds32/include/asm/syscall.h syscall_get_arch(struct task_struct *task) task 55 arch/nds32/kernel/ptrace.c const struct user_regset_view *task_user_regset_view(struct task_struct *task) task 13 arch/nios2/include/asm/syscall.h static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) task 18 arch/nios2/include/asm/syscall.h static inline void syscall_rollback(struct task_struct *task, task 25 arch/nios2/include/asm/syscall.h static inline long syscall_get_error(struct task_struct *task, task 31 arch/nios2/include/asm/syscall.h static inline long syscall_get_return_value(struct task_struct *task, task 37 arch/nios2/include/asm/syscall.h static inline void syscall_set_return_value(struct task_struct *task, task 50 arch/nios2/include/asm/syscall.h static inline void syscall_get_arguments(struct task_struct *task, task 61 arch/nios2/include/asm/syscall.h static inline void syscall_set_arguments(struct task_struct *task, task 72 arch/nios2/include/asm/syscall.h static inline int syscall_get_arch(struct task_struct *task) task 41 arch/nios2/include/asm/thread_info.h struct task_struct *task; /* main task structure */ task 59 arch/nios2/include/asm/thread_info.h .task = &tsk, \ task 137 arch/nios2/kernel/ptrace.c const struct user_regset_view *task_user_regset_view(struct task_struct *task) task 60 arch/nios2/kernel/traps.c void show_stack(struct task_struct *task, unsigned long *stack) task 66 arch/nios2/kernel/traps.c if (task) task 67
arch/nios2/kernel/traps.c stack = (unsigned long *)task->thread.ksp; task 63 arch/openrisc/include/asm/processor.h #define task_pt_regs(task) user_regs(task_thread_info(task)) task 23 arch/openrisc/include/asm/syscall.h syscall_get_nr(struct task_struct *task, struct pt_regs *regs) task 29 arch/openrisc/include/asm/syscall.h syscall_rollback(struct task_struct *task, struct pt_regs *regs) task 35 arch/openrisc/include/asm/syscall.h syscall_get_error(struct task_struct *task, struct pt_regs *regs) task 41 arch/openrisc/include/asm/syscall.h syscall_get_return_value(struct task_struct *task, struct pt_regs *regs) task 47 arch/openrisc/include/asm/syscall.h syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, task 54 arch/openrisc/include/asm/syscall.h syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, task 61 arch/openrisc/include/asm/syscall.h syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, task 67 arch/openrisc/include/asm/syscall.h static inline int syscall_get_arch(struct task_struct *task) task 46 arch/openrisc/include/asm/thread_info.h struct task_struct *task; /* main task structure */ task 70 arch/openrisc/include/asm/thread_info.h .task = &tsk, \ task 82 arch/openrisc/include/asm/thread_info.h #define get_thread_info(ti) get_task_struct((ti)->task) task 83 arch/openrisc/include/asm/thread_info.h #define put_thread_info(ti) put_task_struct((ti)->task) task 49 arch/openrisc/kernel/asm-offsets.c DEFINE(TI_TASK, offsetof(struct thread_info, task)); task 254 arch/openrisc/kernel/process.c last = (_switch(old_ti, new_ti))->task; task 130 arch/openrisc/kernel/ptrace.c const struct user_regset_view *task_user_regset_view(struct task_struct *task) task 49 arch/openrisc/kernel/traps.c void show_stack(struct task_struct *task, unsigned long *esp) task 131 arch/parisc/include/asm/processor.h #define SET_UNALIGN_CTL(task,value) \ task 133 arch/parisc/include/asm/processor.h (task)->thread.flags = (((task)->thread.flags & ~PARISC_UAC_MASK) \ task 139 arch/parisc/include/asm/processor.h #define GET_UNALIGN_CTL(task,addr) \ task 141 arch/parisc/include/asm/processor.h put_user(((task)->thread.flags & PARISC_UAC_MASK) \ task 159 arch/parisc/include/asm/processor.h void show_trace(struct task_struct *task, unsigned long *stack); task 97 arch/parisc/include/asm/psw.h #define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW + 4)) task 99 arch/parisc/include/asm/psw.h #define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW)) task 11 arch/parisc/include/asm/ptrace.h #define task_regs(task) ((struct pt_regs *) ((char *)(task) + TASK_REGS)) task 32 arch/parisc/include/asm/syscall.h static inline long syscall_get_error(struct task_struct *task, task 39 arch/parisc/include/asm/syscall.h static inline long syscall_get_return_value(struct task_struct *task, task 45 arch/parisc/include/asm/syscall.h static inline void syscall_set_return_value(struct task_struct *task, task 52 arch/parisc/include/asm/syscall.h static inline void syscall_rollback(struct task_struct *task, task 58 arch/parisc/include/asm/syscall.h static inline int syscall_get_arch(struct task_struct *task) task 62 arch/parisc/include/asm/syscall.h if (!__is_compat_task(task)) task 12 arch/parisc/include/asm/thread_info.h struct task_struct *task; /* main task structure */ task 21 arch/parisc/include/asm/thread_info.h .task = &tsk, \ task 79 arch/parisc/include/asm/unwind.h struct task_struct *task, struct pt_regs *regs); task 231 
arch/parisc/kernel/asm-offsets.c DEFINE(TI_TASK, offsetof(struct thread_info, task)); task 126 arch/parisc/kernel/kgdb.c struct task_struct *task) task 128 arch/parisc/kernel/kgdb.c struct pt_regs *regs = task_pt_regs(task); task 218 arch/parisc/kernel/kprobes.c if (ri->task != current) task 237 arch/parisc/kernel/kprobes.c if (ri->task != current) task 52 arch/parisc/kernel/ptrace.c void ptrace_disable(struct task_struct *task) task 54 arch/parisc/kernel/ptrace.c clear_tsk_thread_flag(task, TIF_SINGLESTEP); task 55 arch/parisc/kernel/ptrace.c clear_tsk_thread_flag(task, TIF_BLOCKSTEP); task 58 arch/parisc/kernel/ptrace.c pa_psw(task)->r = 0; task 59 arch/parisc/kernel/ptrace.c pa_psw(task)->t = 0; task 60 arch/parisc/kernel/ptrace.c pa_psw(task)->h = 0; task 61 arch/parisc/kernel/ptrace.c pa_psw(task)->l = 0; task 68 arch/parisc/kernel/ptrace.c void user_disable_single_step(struct task_struct *task) task 70 arch/parisc/kernel/ptrace.c ptrace_disable(task); task 73 arch/parisc/kernel/ptrace.c void user_enable_single_step(struct task_struct *task) task 75 arch/parisc/kernel/ptrace.c clear_tsk_thread_flag(task, TIF_BLOCKSTEP); task 76 arch/parisc/kernel/ptrace.c set_tsk_thread_flag(task, TIF_SINGLESTEP); task 78 arch/parisc/kernel/ptrace.c if (pa_psw(task)->n) { task 80 arch/parisc/kernel/ptrace.c task_regs(task)->iaoq[0] = task_regs(task)->iaoq[1]; task 81 arch/parisc/kernel/ptrace.c task_regs(task)->iasq[0] = task_regs(task)->iasq[1]; task 82 arch/parisc/kernel/ptrace.c task_regs(task)->iaoq[1] = task_regs(task)->iaoq[0] + 4; task 83 arch/parisc/kernel/ptrace.c pa_psw(task)->n = 0; task 84 arch/parisc/kernel/ptrace.c pa_psw(task)->x = 0; task 85 arch/parisc/kernel/ptrace.c pa_psw(task)->y = 0; task 86 arch/parisc/kernel/ptrace.c pa_psw(task)->z = 0; task 87 arch/parisc/kernel/ptrace.c pa_psw(task)->b = 0; task 88 arch/parisc/kernel/ptrace.c ptrace_disable(task); task 92 arch/parisc/kernel/ptrace.c (void __user *) (task_regs(task)->iaoq[0] & ~3), task 93 arch/parisc/kernel/ptrace.c task); task 106 arch/parisc/kernel/ptrace.c pa_psw(task)->r = 1; task 107 arch/parisc/kernel/ptrace.c pa_psw(task)->t = 0; task 108 arch/parisc/kernel/ptrace.c pa_psw(task)->h = 0; task 109 arch/parisc/kernel/ptrace.c pa_psw(task)->l = 0; task 112 arch/parisc/kernel/ptrace.c void user_enable_block_step(struct task_struct *task) task 114 arch/parisc/kernel/ptrace.c clear_tsk_thread_flag(task, TIF_SINGLESTEP); task 115 arch/parisc/kernel/ptrace.c set_tsk_thread_flag(task, TIF_BLOCKSTEP); task 118 arch/parisc/kernel/ptrace.c pa_psw(task)->r = 0; task 119 arch/parisc/kernel/ptrace.c pa_psw(task)->t = 1; task 120 arch/parisc/kernel/ptrace.c pa_psw(task)->h = 0; task 121 arch/parisc/kernel/ptrace.c pa_psw(task)->l = 0; task 691 arch/parisc/kernel/ptrace.c const struct user_regset_view *task_user_regset_view(struct task_struct *task) task 16 arch/parisc/kernel/stacktrace.c static void dump_trace(struct task_struct *task, struct stack_trace *trace) task 20 arch/parisc/kernel/stacktrace.c unwind_frame_init_task(&info, task, NULL); task 51 arch/parisc/kernel/traps.c static void parisc_show_stack(struct task_struct *task, task 191 arch/parisc/kernel/traps.c static void parisc_show_stack(struct task_struct *task, task 196 arch/parisc/kernel/traps.c unwind_frame_init_task(&info, task, regs); task 415 arch/parisc/kernel/unwind.c struct task_struct *task, struct pt_regs *regs)
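
The parisc ptrace entries above encode a small state machine: single step and block step are mutually exclusive, each enable clears the other's TIF flag and sets its own PSW trap bit (recovery counter r for single step, taken-branch trap t for block step), and ptrace_disable clears everything. Here is a compact sketch of that flag discipline; mock_psw and mock_task are hypothetical types, not the real pa_psw layout.

    #include <stdio.h>

    /* Hypothetical per-task stand-ins for the TIF flags and PSW bits. */
    struct mock_psw { unsigned r : 1; unsigned t : 1; };
    struct mock_task {
        int tif_singlestep;
        int tif_blockstep;
        struct mock_psw psw;
    };

    /* Mirrors the structure of parisc ptrace_disable(): clear both
     * step flags and the corresponding trap-enable bits. */
    static void step_disable(struct mock_task *t)
    {
        t->tif_singlestep = t->tif_blockstep = 0;
        t->psw.r = 0;
        t->psw.t = 0;
    }

    /* Single step: recovery-counter trap on, taken-branch trap off. */
    static void enable_single_step(struct mock_task *t)
    {
        t->tif_blockstep = 0;
        t->tif_singlestep = 1;
        t->psw.r = 1;
        t->psw.t = 0;
    }

    /* Block step: taken-branch trap on, recovery counter off. */
    static void enable_block_step(struct mock_task *t)
    {
        t->tif_singlestep = 0;
        t->tif_blockstep = 1;
        t->psw.r = 0;
        t->psw.t = 1;
    }

    int main(void)
    {
        struct mock_task t = { 0 };
        enable_single_step(&t);
        enable_block_step(&t);   /* replaces, never stacks with, single step */
        printf("single=%d block=%d r=%u t=%u\n",
               t.tif_singlestep, t.tif_blockstep, t.psw.r, t.psw.t);
        step_disable(&t);
        return 0;
    }
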
task 417 arch/parisc/kernel/unwind.c task = task ? task : current; task 419 arch/parisc/kernel/unwind.c if (task == current) { task 429 arch/parisc/kernel/unwind.c unwind_frame_init(info, task, regs); task 431 arch/parisc/kernel/unwind.c unwind_frame_init_from_blocked_task(info, task); task 17 arch/powerpc/include/asm/current.h struct task_struct *task; task 21 arch/powerpc/include/asm/current.h : "=r" (task) task 24 arch/powerpc/include/asm/current.h return task; task 172 arch/powerpc/include/asm/ptrace.h extern int ptrace_get_reg(struct task_struct *task, int regno, task 174 arch/powerpc/include/asm/ptrace.h extern int ptrace_put_reg(struct task_struct *task, int regno, task 402 arch/powerpc/include/asm/reg_booke.h #define dbcr_iac_range(task) ((task)->thread.debug.dbcr0) task 416 arch/powerpc/include/asm/reg_booke.h #define dbcr_dac(task) ((task)->thread.debug.dbcr1) task 462 arch/powerpc/include/asm/reg_booke.h #define dbcr_dac(task) ((task)->thread.debug.dbcr0) task 496 arch/powerpc/include/asm/reg_booke.h #define dbcr_iac_range(task) ((task)->thread.debug.dbcr1) task 21 arch/powerpc/include/asm/syscall.h static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) task 32 arch/powerpc/include/asm/syscall.h static inline void syscall_rollback(struct task_struct *task, task 38 arch/powerpc/include/asm/syscall.h static inline long syscall_get_error(struct task_struct *task, task 48 arch/powerpc/include/asm/syscall.h static inline long syscall_get_return_value(struct task_struct *task, task 54 arch/powerpc/include/asm/syscall.h static inline void syscall_set_return_value(struct task_struct *task, task 73 arch/powerpc/include/asm/syscall.h static inline void syscall_get_arguments(struct task_struct *task, task 81 arch/powerpc/include/asm/syscall.h if (test_tsk_thread_flag(task, TIF_32BIT)) task 94 arch/powerpc/include/asm/syscall.h static inline void syscall_set_arguments(struct task_struct *task, task 104 arch/powerpc/include/asm/syscall.h static inline int syscall_get_arch(struct task_struct *task) task 108 arch/powerpc/include/asm/syscall.h if (IS_ENABLED(CONFIG_PPC64) && !test_tsk_thread_flag(task, TIF_32BIT)) task 105 arch/powerpc/kernel/hw_breakpoint.c if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L)) task 106 arch/powerpc/kernel/hw_breakpoint.c bp->ctx->task->thread.last_hit_ubp = NULL; task 422 arch/powerpc/kernel/kprobes.c if (ri->task != current) task 207 arch/powerpc/kernel/ptrace.c static unsigned long get_user_msr(struct task_struct *task) task 209 arch/powerpc/kernel/ptrace.c return task->thread.regs->msr | task->thread.fpexc_mode; task 212 arch/powerpc/kernel/ptrace.c static int set_user_msr(struct task_struct *task, unsigned long msr) task 214 arch/powerpc/kernel/ptrace.c task->thread.regs->msr &= ~MSR_DEBUGCHANGE; task 215 arch/powerpc/kernel/ptrace.c task->thread.regs->msr |= msr & MSR_DEBUGCHANGE; task 220 arch/powerpc/kernel/ptrace.c static unsigned long get_user_ckpt_msr(struct task_struct *task) task 222 arch/powerpc/kernel/ptrace.c return task->thread.ckpt_regs.msr | task->thread.fpexc_mode; task 225 arch/powerpc/kernel/ptrace.c static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr) task 227 arch/powerpc/kernel/ptrace.c task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE; task 228 arch/powerpc/kernel/ptrace.c task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE; task 232 arch/powerpc/kernel/ptrace.c static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap) task 234 arch/powerpc/kernel/ptrace.c task->thread.ckpt_regs.trap = trap & 0xfff0; task 240
arch/powerpc/kernel/ptrace.c static int get_user_dscr(struct task_struct *task, unsigned long *data) task 242 arch/powerpc/kernel/ptrace.c *data = task->thread.dscr; task 246 arch/powerpc/kernel/ptrace.c static int set_user_dscr(struct task_struct *task, unsigned long dscr) task 248 arch/powerpc/kernel/ptrace.c task->thread.dscr = dscr; task 249 arch/powerpc/kernel/ptrace.c task->thread.dscr_inherit = 1; task 253 arch/powerpc/kernel/ptrace.c static int get_user_dscr(struct task_struct *task, unsigned long *data) task 258 arch/powerpc/kernel/ptrace.c static int set_user_dscr(struct task_struct *task, unsigned long dscr) task 268 arch/powerpc/kernel/ptrace.c static int set_user_trap(struct task_struct *task, unsigned long trap) task 270 arch/powerpc/kernel/ptrace.c task->thread.regs->trap = trap & 0xfff0; task 277 arch/powerpc/kernel/ptrace.c int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data) task 281 arch/powerpc/kernel/ptrace.c if ((task->thread.regs == NULL) || !data) task 285 arch/powerpc/kernel/ptrace.c *data = get_user_msr(task); task 290 arch/powerpc/kernel/ptrace.c return get_user_dscr(task, data); task 307 arch/powerpc/kernel/ptrace.c *data = ((unsigned long *)task->thread.regs)[regno]; task 317 arch/powerpc/kernel/ptrace.c int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data) task 319 arch/powerpc/kernel/ptrace.c if (task->thread.regs == NULL) task 323 arch/powerpc/kernel/ptrace.c return set_user_msr(task, data); task 325 arch/powerpc/kernel/ptrace.c return set_user_trap(task, data); task 327 arch/powerpc/kernel/ptrace.c return set_user_dscr(task, data); task 331 arch/powerpc/kernel/ptrace.c ((unsigned long *)task->thread.regs)[regno] = data; task 2292 arch/powerpc/kernel/ptrace.c const struct user_regset_view *task_user_regset_view(struct task_struct *task) task 2295 arch/powerpc/kernel/ptrace.c if (test_tsk_thread_flag(task, TIF_32BIT)) task 2302 arch/powerpc/kernel/ptrace.c void user_enable_single_step(struct task_struct *task) task 2304 arch/powerpc/kernel/ptrace.c struct pt_regs *regs = task->thread.regs; task 2308 arch/powerpc/kernel/ptrace.c task->thread.debug.dbcr0 &= ~DBCR0_BT; task 2309 arch/powerpc/kernel/ptrace.c task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC; task 2316 arch/powerpc/kernel/ptrace.c set_tsk_thread_flag(task, TIF_SINGLESTEP); task 2319 arch/powerpc/kernel/ptrace.c void user_enable_block_step(struct task_struct *task) task 2321 arch/powerpc/kernel/ptrace.c struct pt_regs *regs = task->thread.regs; task 2325 arch/powerpc/kernel/ptrace.c task->thread.debug.dbcr0 &= ~DBCR0_IC; task 2326 arch/powerpc/kernel/ptrace.c task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT; task 2333 arch/powerpc/kernel/ptrace.c set_tsk_thread_flag(task, TIF_SINGLESTEP); task 2336 arch/powerpc/kernel/ptrace.c void user_disable_single_step(struct task_struct *task) task 2338 arch/powerpc/kernel/ptrace.c struct pt_regs *regs = task->thread.regs; task 2348 arch/powerpc/kernel/ptrace.c task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT); task 2352 arch/powerpc/kernel/ptrace.c if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0, task 2353 arch/powerpc/kernel/ptrace.c task->thread.debug.dbcr1)) { task 2357 arch/powerpc/kernel/ptrace.c task->thread.debug.dbcr0 &= ~DBCR0_IDM; task 2364 arch/powerpc/kernel/ptrace.c clear_tsk_thread_flag(task, TIF_SINGLESTEP); task 2385 arch/powerpc/kernel/ptrace.c static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, task 2390 arch/powerpc/kernel/ptrace.c struct thread_struct *thread = 
&(task->thread); task 2464 arch/powerpc/kernel/ptrace.c ptrace_triggered, NULL, task); task 2474 arch/powerpc/kernel/ptrace.c task->thread.hw_brk = hw_brk; task 2482 arch/powerpc/kernel/ptrace.c task->thread.debug.dac1 = data & ~0x3UL; task 2484 arch/powerpc/kernel/ptrace.c if (task->thread.debug.dac1 == 0) { task 2485 arch/powerpc/kernel/ptrace.c dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W); task 2486 arch/powerpc/kernel/ptrace.c if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0, task 2487 arch/powerpc/kernel/ptrace.c task->thread.debug.dbcr1)) { task 2488 arch/powerpc/kernel/ptrace.c task->thread.regs->msr &= ~MSR_DE; task 2489 arch/powerpc/kernel/ptrace.c task->thread.debug.dbcr0 &= ~DBCR0_IDM; task 2501 arch/powerpc/kernel/ptrace.c task->thread.debug.dbcr0 |= DBCR0_IDM; task 2505 arch/powerpc/kernel/ptrace.c dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W); task 2507 arch/powerpc/kernel/ptrace.c dbcr_dac(task) |= DBCR_DAC1R; task 2509 arch/powerpc/kernel/ptrace.c dbcr_dac(task) |= DBCR_DAC1W; task 2510 arch/powerpc/kernel/ptrace.c task->thread.regs->msr |= MSR_DE; task 25 arch/powerpc/kernel/signal.h struct task_struct *task); task 27 arch/powerpc/kernel/signal.h struct task_struct *task); task 28 arch/powerpc/kernel/signal.h extern unsigned long copy_fpr_from_user(struct task_struct *task, task 30 arch/powerpc/kernel/signal.h extern unsigned long copy_ckfpr_from_user(struct task_struct *task, task 36 arch/powerpc/kernel/signal.h struct task_struct *task); task 38 arch/powerpc/kernel/signal.h struct task_struct *task); task 39 arch/powerpc/kernel/signal.h extern unsigned long copy_vsx_from_user(struct task_struct *task, task 41 arch/powerpc/kernel/signal.h extern unsigned long copy_ckvsx_from_user(struct task_struct *task, task 240 arch/powerpc/kernel/signal_32.c struct task_struct *task) task 247 arch/powerpc/kernel/signal_32.c buf[i] = task->thread.TS_FPR(i); task 248 arch/powerpc/kernel/signal_32.c buf[i] = task->thread.fp_state.fpscr; task 252 arch/powerpc/kernel/signal_32.c unsigned long copy_fpr_from_user(struct task_struct *task, task 261 arch/powerpc/kernel/signal_32.c task->thread.TS_FPR(i) = buf[i]; task 262 arch/powerpc/kernel/signal_32.c task->thread.fp_state.fpscr = buf[i]; task 268 arch/powerpc/kernel/signal_32.c struct task_struct *task) task 275 arch/powerpc/kernel/signal_32.c buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET]; task 279 arch/powerpc/kernel/signal_32.c unsigned long copy_vsx_from_user(struct task_struct *task, task 288 arch/powerpc/kernel/signal_32.c task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i]; task 294 arch/powerpc/kernel/signal_32.c struct task_struct *task) task 301 arch/powerpc/kernel/signal_32.c buf[i] = task->thread.TS_CKFPR(i); task 302 arch/powerpc/kernel/signal_32.c buf[i] = task->thread.ckfp_state.fpscr; task 306 arch/powerpc/kernel/signal_32.c unsigned long copy_ckfpr_from_user(struct task_struct *task, task 315 arch/powerpc/kernel/signal_32.c task->thread.TS_CKFPR(i) = buf[i]; task 316 arch/powerpc/kernel/signal_32.c task->thread.ckfp_state.fpscr = buf[i]; task 322 arch/powerpc/kernel/signal_32.c struct task_struct *task) task 329 arch/powerpc/kernel/signal_32.c buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET]; task 333 arch/powerpc/kernel/signal_32.c unsigned long copy_ckvsx_from_user(struct task_struct *task, task 342 arch/powerpc/kernel/signal_32.c task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i]; task 348 arch/powerpc/kernel/signal_32.c struct task_struct *task) task 350 arch/powerpc/kernel/signal_32.c return 
__copy_to_user(to, task->thread.fp_state.fpr, task 354 arch/powerpc/kernel/signal_32.c inline unsigned long copy_fpr_from_user(struct task_struct *task, task 357 arch/powerpc/kernel/signal_32.c return __copy_from_user(task->thread.fp_state.fpr, from, task 363 arch/powerpc/kernel/signal_32.c struct task_struct *task) task 365 arch/powerpc/kernel/signal_32.c return __copy_to_user(to, task->thread.ckfp_state.fpr, task 369 arch/powerpc/kernel/signal_32.c inline unsigned long copy_ckfpr_from_user(struct task_struct *task, task 372 arch/powerpc/kernel/signal_32.c return __copy_from_user(task->thread.ckfp_state.fpr, from, task 375 arch/powerpc/perf/core-book3s.c if (event->ctx->task && cpuhw->bhrb_context != event->ctx) { task 98 arch/powerpc/perf/perf_regs.c u64 perf_reg_abi(struct task_struct *task) task 101 arch/powerpc/perf/perf_regs.c if (!test_tsk_thread_flag(task, TIF_32BIT)) task 899 arch/powerpc/platforms/ps3/device-init.c struct task_struct *task; task 923 arch/powerpc/platforms/ps3/device-init.c task = kthread_run(ps3_probe_thread, (void *)repo.bus_id, task 926 arch/powerpc/platforms/ps3/device-init.c if (IS_ERR(task)) { task 927 arch/powerpc/platforms/ps3/device-init.c result = PTR_ERR(task); task 933 arch/powerpc/platforms/ps3/device-init.c probe_task = task; task 17 arch/riscv/include/asm/mmu_context.h struct task_struct *task) task 22 arch/riscv/include/asm/mmu_context.h static inline int init_new_context(struct task_struct *task, task 33 arch/riscv/include/asm/mmu_context.h struct task_struct *task); task 41 arch/riscv/include/asm/mmu_context.h static inline void deactivate_mm(struct task_struct *task, task 23 arch/riscv/include/asm/switch_to.h static inline void fstate_off(struct task_struct *task, task 29 arch/riscv/include/asm/switch_to.h static inline void fstate_save(struct task_struct *task, task 33 arch/riscv/include/asm/switch_to.h __fstate_save(task); task 38 arch/riscv/include/asm/switch_to.h static inline void fstate_restore(struct task_struct *task, task 42 arch/riscv/include/asm/switch_to.h __fstate_restore(task); task 61 arch/riscv/include/asm/switch_to.h #define fstate_save(task, regs) do { } while (0) task 62 arch/riscv/include/asm/switch_to.h #define fstate_restore(task, regs) do { } while (0) task 25 arch/riscv/include/asm/syscall.h static inline int syscall_get_nr(struct task_struct *task, task 31 arch/riscv/include/asm/syscall.h static inline void syscall_set_nr(struct task_struct *task, task 38 arch/riscv/include/asm/syscall.h static inline void syscall_rollback(struct task_struct *task, task 44 arch/riscv/include/asm/syscall.h static inline long syscall_get_error(struct task_struct *task, task 52 arch/riscv/include/asm/syscall.h static inline long syscall_get_return_value(struct task_struct *task, task 58 arch/riscv/include/asm/syscall.h static inline void syscall_set_return_value(struct task_struct *task, task 65 arch/riscv/include/asm/syscall.h static inline void syscall_get_arguments(struct task_struct *task, task 74 arch/riscv/include/asm/syscall.h static inline void syscall_set_arguments(struct task_struct *task, task 83 arch/riscv/include/asm/syscall.h static inline int syscall_get_arch(struct task_struct *task) task 82 arch/riscv/kernel/perf_callchain.c void notrace walk_stackframe(struct task_struct *task, task 29 arch/riscv/kernel/perf_regs.c u64 perf_reg_abi(struct task_struct *task) task 123 arch/riscv/kernel/ptrace.c const struct user_regset_view *task_user_regset_view(struct task_struct *task) task 77 arch/riscv/kernel/signal.c #define 
save_fp_state(task, regs) (0) task 78 arch/riscv/kernel/signal.c #define restore_fp_state(task, regs) (0) task 97 arch/riscv/kernel/signal.c struct task_struct *task; task 122 arch/riscv/kernel/signal.c task = current; task 126 arch/riscv/kernel/signal.c task->comm, task_pid_nr(task), __func__, task 22 arch/riscv/kernel/stacktrace.c void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, task 31 arch/riscv/kernel/stacktrace.c } else if (task == NULL || task == current) { task 38 arch/riscv/kernel/stacktrace.c fp = task->thread.s[0]; task 39 arch/riscv/kernel/stacktrace.c sp = task->thread.sp; task 40 arch/riscv/kernel/stacktrace.c pc = task->thread.ra; task 66 arch/riscv/kernel/stacktrace.c void notrace walk_stackframe(struct task_struct *task, task 75 arch/riscv/kernel/stacktrace.c } else if (task == NULL || task == current) { task 81 arch/riscv/kernel/stacktrace.c sp = task->thread.sp; task 82 arch/riscv/kernel/stacktrace.c pc = task->thread.ra; task 105 arch/riscv/kernel/stacktrace.c void show_stack(struct task_struct *task, unsigned long *sp) task 108 arch/riscv/kernel/stacktrace.c walk_stackframe(task, NULL, print_trace_address, NULL); task 122 arch/riscv/kernel/stacktrace.c unsigned long get_wchan(struct task_struct *task) task 126 arch/riscv/kernel/stacktrace.c if (likely(task && task != current && task->state != TASK_RUNNING)) task 127 arch/riscv/kernel/stacktrace.c walk_stackframe(task, NULL, save_wchan, &pc); task 44 arch/riscv/mm/context.c struct task_struct *task) task 23 arch/s390/include/asm/stacktrace.h int get_stack_info(unsigned long sp, struct task_struct *task, task 36 arch/s390/include/asm/stacktrace.h static inline unsigned long get_stack_pointer(struct task_struct *task, task 41 arch/s390/include/asm/stacktrace.h if (task == current) task 43 arch/s390/include/asm/stacktrace.h return (unsigned long) task->thread.ksp; task 17 arch/s390/include/asm/switch_to.h extern void update_cr_regs(struct task_struct *task); task 20 arch/s390/include/asm/syscall.h static inline long syscall_get_nr(struct task_struct *task, task 27 arch/s390/include/asm/syscall.h static inline void syscall_rollback(struct task_struct *task, task 33 arch/s390/include/asm/syscall.h static inline long syscall_get_error(struct task_struct *task, task 39 arch/s390/include/asm/syscall.h static inline long syscall_get_return_value(struct task_struct *task, task 45 arch/s390/include/asm/syscall.h static inline void syscall_set_return_value(struct task_struct *task, task 52 arch/s390/include/asm/syscall.h static inline void syscall_get_arguments(struct task_struct *task, task 60 arch/s390/include/asm/syscall.h if (test_tsk_thread_flag(task, TIF_31BIT)) task 70 arch/s390/include/asm/syscall.h static inline void syscall_set_arguments(struct task_struct *task, task 82 arch/s390/include/asm/syscall.h static inline int syscall_get_arch(struct task_struct *task) task 85 arch/s390/include/asm/syscall.h if (test_tsk_thread_flag(task, TIF_31BIT)) task 35 arch/s390/include/asm/unwind.h struct task_struct *task; task 44 arch/s390/include/asm/unwind.h void __unwind_start(struct unwind_state *state, struct task_struct *task, task 60 arch/s390/include/asm/unwind.h struct task_struct *task,
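
The riscv walk_stackframe entries show a plain frame-pointer walk: seed fp/pc from pt_regs, the live registers, or a sleeping task's thread struct (thread.s[0], thread.ra), then follow saved return-address/frame-pointer pairs, handing each pc to a callback that can stop the walk. A self-contained sketch of that loop follows; struct frame_record is a hypothetical layout, not the actual riscv stack frame.

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical frame record, matching the idea (not the exact
     * layout) of a saved return address plus saved frame pointer. */
    struct frame_record {
        const struct frame_record *prev_fp;
        uintptr_t ra;
    };

    /* Walk the chain like riscv walk_stackframe(): report each pc and
     * stop when the callback returns nonzero or the chain ends. */
    static void walk(const struct frame_record *fp, uintptr_t pc,
                     int (*fn)(uintptr_t pc, void *arg), void *arg)
    {
        for (;;) {
            if (fn(pc, arg))
                break;
            if (!fp)
                break;
            pc = fp->ra;
            fp = fp->prev_fp;
        }
    }

    static int print_pc(uintptr_t pc, void *arg)
    {
        (void)arg;
        printf("pc=%#lx\n", (unsigned long)pc);
        return 0;  /* keep walking */
    }

    int main(void)
    {
        struct frame_record outer = { NULL, 0x1000 };
        struct frame_record inner = { &outer, 0x2000 };
        walk(&inner, 0x3000, print_pc, NULL);  /* 0x3000, 0x2000, 0x1000 */
        return 0;
    }
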
task 64 arch/s390/include/asm/unwind.h sp = sp ? : get_stack_pointer(task, regs); task 65 arch/s390/include/asm/unwind.h __unwind_start(state, task, regs, sp); task 73 arch/s390/include/asm/unwind.h #define unwind_for_each_frame(state, task, regs, first_frame) \ task 74 arch/s390/include/asm/unwind.h for (unwind_start(state, task, regs, first_frame); \ task 54 arch/s390/kernel/dumpstack.c static bool in_task_stack(unsigned long sp, struct task_struct *task, task 59 arch/s390/kernel/dumpstack.c stack = (unsigned long) task_stack_page(task); task 90 arch/s390/kernel/dumpstack.c int get_stack_info(unsigned long sp, struct task_struct *task, task 96 arch/s390/kernel/dumpstack.c task = task ? : current; task 99 arch/s390/kernel/dumpstack.c if (in_task_stack(sp, task, info)) task 102 arch/s390/kernel/dumpstack.c if (task != current) task 126 arch/s390/kernel/dumpstack.c void show_stack(struct task_struct *task, unsigned long *stack) task 131 arch/s390/kernel/dumpstack.c if (!task) task 132 arch/s390/kernel/dumpstack.c task = current; task 133 arch/s390/kernel/dumpstack.c unwind_for_each_frame(&state, task, NULL, (unsigned long) stack) task 137 arch/s390/kernel/dumpstack.c debug_show_held_locks(task ? : current); task 395 arch/s390/kernel/kprobes.c if (ri->task != current) task 414 arch/s390/kernel/kprobes.c if (ri->task != current) task 47 arch/s390/kernel/perf_regs.c u64 perf_reg_abi(struct task_struct *task) task 49 arch/s390/kernel/perf_regs.c if (test_tsk_thread_flag(task, TIF_31BIT)) task 45 arch/s390/kernel/ptrace.c void update_cr_regs(struct task_struct *task) task 47 arch/s390/kernel/ptrace.c struct pt_regs *regs = task_pt_regs(task); task 48 arch/s390/kernel/ptrace.c struct thread_struct *thread = &task->thread; task 62 arch/s390/kernel/ptrace.c if (task->thread.per_flags & PER_FLAG_NO_TE) task 66 arch/s390/kernel/ptrace.c if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) { task 67 arch/s390/kernel/ptrace.c if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND) task 76 arch/s390/kernel/ptrace.c if (task->thread.gs_cb) task 92 arch/s390/kernel/ptrace.c if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) || task 93 arch/s390/kernel/ptrace.c test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) { task 94 arch/s390/kernel/ptrace.c if (test_tsk_thread_flag(task, TIF_BLOCK_STEP)) task 100 arch/s390/kernel/ptrace.c if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) task 117 arch/s390/kernel/ptrace.c void user_enable_single_step(struct task_struct *task) task 119 arch/s390/kernel/ptrace.c clear_tsk_thread_flag(task, TIF_BLOCK_STEP); task 120 arch/s390/kernel/ptrace.c set_tsk_thread_flag(task, TIF_SINGLE_STEP); task 123 arch/s390/kernel/ptrace.c void user_disable_single_step(struct task_struct *task) task 125 arch/s390/kernel/ptrace.c clear_tsk_thread_flag(task, TIF_BLOCK_STEP); task 126 arch/s390/kernel/ptrace.c clear_tsk_thread_flag(task, TIF_SINGLE_STEP); task 129 arch/s390/kernel/ptrace.c void user_enable_block_step(struct task_struct *task) task 131 arch/s390/kernel/ptrace.c set_tsk_thread_flag(task, TIF_SINGLE_STEP); task 132 arch/s390/kernel/ptrace.c set_tsk_thread_flag(task, TIF_BLOCK_STEP); task 140 arch/s390/kernel/ptrace.c void ptrace_disable(struct task_struct *task) task 142 arch/s390/kernel/ptrace.c memset(&task->thread.per_user, 0, sizeof(task->thread.per_user)); task 143 arch/s390/kernel/ptrace.c memset(&task->thread.per_event, 0, sizeof(task->thread.per_event)); task 144 arch/s390/kernel/ptrace.c clear_tsk_thread_flag(task, TIF_SINGLE_STEP); task 145 arch/s390/kernel/ptrace.c clear_pt_regs_flag(task_pt_regs(task),
PIF_PER_TRAP); task 146 arch/s390/kernel/ptrace.c task->thread.per_flags = 0; task 1678 arch/s390/kernel/ptrace.c const struct user_regset_view *task_user_regset_view(struct task_struct *task) task 1681 arch/s390/kernel/ptrace.c if (test_tsk_thread_flag(task, TIF_31BIT)) task 33 arch/s390/kernel/runtime_instr.c struct task_struct *task = current; task 36 arch/s390/kernel/runtime_instr.c if (!task->thread.ri_cb) task 38 arch/s390/kernel/runtime_instr.c regs = task_pt_regs(task); task 41 arch/s390/kernel/runtime_instr.c kfree(task->thread.ri_cb); task 42 arch/s390/kernel/runtime_instr.c task->thread.ri_cb = NULL; task 14 arch/s390/kernel/stacktrace.c struct task_struct *task, struct pt_regs *regs) task 19 arch/s390/kernel/stacktrace.c unwind_for_each_frame(&state, task, regs, 0) { task 32 arch/s390/kernel/unwind_bc.c if (get_stack_info(sp, state->task, info, mask) != 0 || task 96 arch/s390/kernel/unwind_bc.c ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, task 115 arch/s390/kernel/unwind_bc.c void __unwind_start(struct unwind_state *state, struct task_struct *task, task 125 arch/s390/kernel/unwind_bc.c state->task = task; task 135 arch/s390/kernel/unwind_bc.c if (get_stack_info(sp, task, info, mask) != 0 || task 158 arch/s390/kernel/unwind_bc.c ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, task 126 arch/sh/include/asm/ptrace.h #define task_pt_regs(task) \ task 127 arch/sh/include/asm/ptrace.h ((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE) - 1) task 12 arch/sh/include/asm/syscall_32.h static inline long syscall_get_nr(struct task_struct *task, task 18 arch/sh/include/asm/syscall_32.h static inline void syscall_rollback(struct task_struct *task, task 27 arch/sh/include/asm/syscall_32.h static inline long syscall_get_error(struct task_struct *task, task 33 arch/sh/include/asm/syscall_32.h static inline long syscall_get_return_value(struct task_struct *task, task 39 arch/sh/include/asm/syscall_32.h static inline void syscall_set_return_value(struct task_struct *task, task 49 arch/sh/include/asm/syscall_32.h static inline void syscall_get_arguments(struct task_struct *task, task 63 arch/sh/include/asm/syscall_32.h static inline void syscall_set_arguments(struct task_struct *task, task 75 arch/sh/include/asm/syscall_32.h static inline int syscall_get_arch(struct task_struct *task) task 11 arch/sh/include/asm/syscall_64.h static inline long syscall_get_nr(struct task_struct *task, task 17 arch/sh/include/asm/syscall_64.h static inline void syscall_rollback(struct task_struct *task, task 26 arch/sh/include/asm/syscall_64.h static inline long syscall_get_error(struct task_struct *task, task 32 arch/sh/include/asm/syscall_64.h static inline long syscall_get_return_value(struct task_struct *task, task 38 arch/sh/include/asm/syscall_64.h static inline void syscall_set_return_value(struct task_struct *task, task 48 arch/sh/include/asm/syscall_64.h static inline void syscall_get_arguments(struct task_struct *task, task 55 arch/sh/include/asm/syscall_64.h static inline void syscall_set_arguments(struct task_struct *task, task 62 arch/sh/include/asm/syscall_64.h static inline int syscall_get_arch(struct task_struct *task) task 30 arch/sh/include/asm/thread_info.h struct task_struct *task; /* main task structure */ task 58 arch/sh/include/asm/thread_info.h .task = &tsk, \ task 24 arch/sh/kernel/asm-offsets.c DEFINE(TI_TASK, offsetof(struct thread_info, task)); task 295 arch/sh/kernel/cpu/sh5/unwind.c static void sh64_unwinder_dump(struct task_struct *task, task 58 
arch/sh/kernel/dumpstack.c struct task_struct *task = tinfo->task; task 65 arch/sh/kernel/dumpstack.c if (!task->ret_stack) task 68 arch/sh/kernel/dumpstack.c ret_stack = ftrace_graph_get_ret_stack(task, *graph); task 87 arch/sh/kernel/dumpstack.c stack_reader_dump(struct task_struct *task, struct pt_regs *regs, task 957 arch/sh/kernel/dwarf.c static void dwarf_unwinder_dump(struct task_struct *task, task 88 arch/sh/kernel/irq.c irqctx->tinfo.task = curctx->tinfo.task; task 127 arch/sh/kernel/irq.c irqctx->tinfo.task = NULL; task 135 arch/sh/kernel/irq.c irqctx->tinfo.task = NULL; task 159 arch/sh/kernel/irq.c irqctx->tinfo.task = curctx->task; task 328 arch/sh/kernel/kprobes.c if (ri->task != current) task 40 arch/sh/kernel/ptrace_32.c static inline int get_stack_long(struct task_struct *task, int offset) task 44 arch/sh/kernel/ptrace_32.c stack = (unsigned char *)task_pt_regs(task); task 52 arch/sh/kernel/ptrace_32.c static inline int put_stack_long(struct task_struct *task, int offset, task 57 arch/sh/kernel/ptrace_32.c stack = (unsigned char *)task_pt_regs(task); task 363 arch/sh/kernel/ptrace_32.c const struct user_regset_view *task_user_regset_view(struct task_struct *task) task 56 arch/sh/kernel/ptrace_64.c static inline int get_stack_long(struct task_struct *task, int offset) task 60 arch/sh/kernel/ptrace_64.c stack = (unsigned char *)(task->thread.uregs); task 66 arch/sh/kernel/ptrace_64.c get_fpu_long(struct task_struct *task, unsigned long addr) task 70 arch/sh/kernel/ptrace_64.c regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1; task 72 arch/sh/kernel/ptrace_64.c if (!tsk_used_math(task)) { task 81 arch/sh/kernel/ptrace_64.c if (last_task_used_math == task) { task 83 arch/sh/kernel/ptrace_64.c save_fpu(task); task 89 arch/sh/kernel/ptrace_64.c tmp = ((long *)task->thread.xstate)[addr / sizeof(unsigned long)]; task 96 arch/sh/kernel/ptrace_64.c static inline int put_stack_long(struct task_struct *task, int offset, task 101 arch/sh/kernel/ptrace_64.c stack = (unsigned char *)(task->thread.uregs); task 108 arch/sh/kernel/ptrace_64.c put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data) task 112 arch/sh/kernel/ptrace_64.c regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1; task 114 arch/sh/kernel/ptrace_64.c if (!tsk_used_math(task)) { task 115 arch/sh/kernel/ptrace_64.c init_fpu(task); task 116 arch/sh/kernel/ptrace_64.c } else if (last_task_used_math == task) { task 118 arch/sh/kernel/ptrace_64.c save_fpu(task); task 124 arch/sh/kernel/ptrace_64.c ((long *)task->thread.xstate)[addr / sizeof(unsigned long)] = data; task 379 arch/sh/kernel/ptrace_64.c const struct user_regset_view *task_user_regset_view(struct task_struct *task) task 132 arch/sh/kernel/unwinder.c void unwind_stack(struct task_struct *task, struct pt_regs *regs, task 163 arch/sh/kernel/unwinder.c curr_unwinder->dump(task, regs, sp, ops, data); task 30 arch/sparc/include/asm/current.h return current_thread_info()->task; task 186 arch/sparc/include/asm/processor_64.h unsigned long get_wchan(struct task_struct *task); task 20 arch/sparc/include/asm/syscall.h static inline long syscall_get_nr(struct task_struct *task, task 28 arch/sparc/include/asm/syscall.h static inline void syscall_rollback(struct task_struct *task, task 68 arch/sparc/include/asm/syscall.h static inline long syscall_get_error(struct task_struct *task, task 76 arch/sparc/include/asm/syscall.h static inline long syscall_get_return_value(struct task_struct *task, task 84 
arch/sparc/include/asm/syscall.h static inline void syscall_set_return_value(struct task_struct *task, task 97 arch/sparc/include/asm/syscall.h static inline void syscall_get_arguments(struct task_struct *task, task 106 arch/sparc/include/asm/syscall.h if (test_tsk_thread_flag(task, TIF_32BIT)) task 120 arch/sparc/include/asm/syscall.h static inline void syscall_set_arguments(struct task_struct *task, task 130 arch/sparc/include/asm/syscall.h static inline int syscall_get_arch(struct task_struct *task) task 133 arch/sparc/include/asm/syscall.h return test_tsk_thread_flag(task, TIF_32BIT) task 30 arch/sparc/include/asm/thread_info_32.h struct task_struct *task; /* main task structure */ task 60 arch/sparc/include/asm/thread_info_32.h .task = &tsk, \ task 38 arch/sparc/include/asm/thread_info_64.h struct task_struct *task; task 118 arch/sparc/include/asm/thread_info_64.h .task = &tsk, \ task 491 arch/sparc/kernel/kprobes.c if (ri->task != current) task 461 arch/sparc/kernel/process_32.c unsigned long get_wchan(struct task_struct *task) task 464 arch/sparc/kernel/process_32.c unsigned long task_base = (unsigned long) task; task 469 arch/sparc/kernel/process_32.c if (!task || task == current || task 470 arch/sparc/kernel/process_32.c task->state == TASK_RUNNING) task 473 arch/sparc/kernel/process_32.c fp = task_thread_info(task)->ksp + bias; task 287 arch/sparc/kernel/process_64.c ((tp && tp->task) ? tp->task->comm : "NULL"), task 288 arch/sparc/kernel/process_64.c ((tp && tp->task) ? tp->task->pid : -1)); task 428 arch/sparc/kernel/process_64.c mm = t->task->mm; task 770 arch/sparc/kernel/process_64.c unsigned long get_wchan(struct task_struct *task) task 778 arch/sparc/kernel/process_64.c if (!task || task == current || task 779 arch/sparc/kernel/process_64.c task->state == TASK_RUNNING) task 782 arch/sparc/kernel/process_64.c tp = task_thread_info(task); task 784 arch/sparc/kernel/process_64.c fp = task_thread_info(task)->ksp + bias; task 324 arch/sparc/kernel/ptrace_32.c const struct user_regset_view *task_user_regset_view(struct task_struct *task) task 888 arch/sparc/kernel/ptrace_64.c const struct user_regset_view *task_user_regset_view(struct task_struct *task) task 891 arch/sparc/kernel/ptrace_64.c if (test_tsk_thread_flag(task, TIF_32BIT)) task 32 arch/sparc/kernel/stacktrace.c t = tp->task; task 374 arch/sparc/kernel/traps_32.c TI_TASK != offsetof(struct thread_info, task) || task 2851 arch/sparc/kernel/traps_64.c BUILD_BUG_ON(TI_TASK != offsetof(struct thread_info, task) || task 133 arch/um/drivers/chan_kern.c struct line *line = container_of(work, struct line, task.work); task 145 arch/um/drivers/chan_kern.c INIT_DELAYED_WORK(&line->task, line_timer_cb); task 550 arch/um/drivers/chan_kern.c schedule_delayed_work(&line->task, 1); task 55 arch/um/drivers/line.h struct delayed_work task; task 643 arch/um/drivers/mconsole_kern.c struct task_struct *task = arg; task 645 arch/um/drivers/mconsole_kern.c show_stack(task, NULL); task 78 arch/um/include/asm/mmu_context.h extern int init_new_context(struct task_struct *task, struct mm_struct *mm); task 57 arch/um/include/asm/processor-generic.h static inline void release_thread(struct task_struct *task) task 40 arch/um/include/asm/ptrace-generic.h extern void clear_flushed_tls(struct task_struct *task); task 19 arch/um/include/asm/stacktrace.h get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs) task 21 arch/um/include/asm/stacktrace.h if (!task || task == current) task 23 arch/um/include/asm/stacktrace.h return 
KSTK_EBP(task); task 27 arch/um/include/asm/stacktrace.h get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs) task 34 arch/um/include/asm/stacktrace.h *get_stack_pointer(struct task_struct *task, struct pt_regs *segv_regs) task 36 arch/um/include/asm/stacktrace.h if (!task || task == current) task 38 arch/um/include/asm/stacktrace.h return (unsigned long *)KSTK_ESP(task); task 18 arch/um/include/asm/syscall-generic.h static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) task 24 arch/um/include/asm/syscall-generic.h static inline void syscall_rollback(struct task_struct *task, task 30 arch/um/include/asm/syscall-generic.h static inline long syscall_get_error(struct task_struct *task, task 38 arch/um/include/asm/syscall-generic.h static inline long syscall_get_return_value(struct task_struct *task, task 44 arch/um/include/asm/syscall-generic.h static inline void syscall_set_return_value(struct task_struct *task, task 51 arch/um/include/asm/syscall-generic.h static inline void syscall_get_arguments(struct task_struct *task, task 65 arch/um/include/asm/syscall-generic.h static inline void syscall_set_arguments(struct task_struct *task, task 6 arch/um/include/asm/sysrq.h extern void show_trace(struct task_struct* task, unsigned long *stack); task 20 arch/um/include/asm/thread_info.h struct task_struct *task; /* main task structure */ task 35 arch/um/include/asm/thread_info.h .task = &tsk, \ task 42 arch/um/include/shared/as-layout.h void *task; task 566 arch/um/kernel/irq.c struct task_struct *task; task 569 arch/um/kernel/irq.c task = cpu_tasks[ti->cpu].task; task 570 arch/um/kernel/irq.c tti = task_thread_info(task); task 574 arch/um/kernel/irq.c task->stack = ti; task 78 arch/um/kernel/process.c static inline void set_current(struct task_struct *task) task 80 arch/um/kernel/process.c cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task) task 81 arch/um/kernel/process.c { external_pid(), task }); task 381 arch/um/kernel/process.c struct task_struct *task = t ? 
t : current;
task 383 arch/um/kernel/process.c if (!(task->ptrace & PT_DTRACE))
task 386 arch/um/kernel/process.c if (task->thread.singlestep_syscall)
task 51 arch/um/kernel/skas/mmu.c int init_new_context(struct task_struct *task, struct mm_struct *mm)
task 26 arch/um/kernel/skas/process.c cpu_tasks[0].task = current;
task 28 arch/um/kernel/sysrq.c void show_stack(struct task_struct *task, unsigned long *stack)
task 41 arch/um/kernel/sysrq.c sp = get_stack_pointer(task, segv_regs);
task 513 arch/um/kernel/tlb.c pte_t *addr_pte(struct task_struct *task, unsigned long addr)
task 515 arch/um/kernel/tlb.c pgd_t *pgd = pgd_offset(task->mm, addr);
task 7 arch/unicore32/include/asm/syscall.h static inline int syscall_get_arch(struct task_struct *task)
task 69 arch/unicore32/include/asm/thread_info.h struct task_struct *task; /* main task structure */
task 81 arch/unicore32/include/asm/thread_info.h .task = &tsk, \
task 41 arch/unicore32/kernel/asm-offsets.c DEFINE(TI_TASK, offsetof(struct thread_info, task));
task 23 arch/unicore32/kernel/ptrace.c static inline long get_user_reg(struct task_struct *task, int offset)
task 25 arch/unicore32/kernel/ptrace.c return task_pt_regs(task)->uregs[offset];
task 35 arch/unicore32/kernel/ptrace.c put_user_reg(struct task_struct *task, int offset, long data)
task 37 arch/unicore32/kernel/ptrace.c struct pt_regs newregs, *regs = task_pt_regs(task);
task 182 arch/unicore32/kernel/traps.c struct task_struct *tsk = thread->task;
task 17 arch/x86/include/asm/fsgsbase.h extern unsigned long x86_fsbase_read_task(struct task_struct *task);
task 18 arch/x86/include/asm/fsgsbase.h extern unsigned long x86_gsbase_read_task(struct task_struct *task);
task 19 arch/x86/include/asm/fsgsbase.h extern void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase);
task 20 arch/x86/include/asm/fsgsbase.h extern void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase);
task 768 arch/x86/include/asm/processor.h extern void set_task_blockstep(struct task_struct *task, bool on);
task 820 arch/x86/include/asm/processor.h #define task_top_of_stack(task) ((unsigned long)(task_pt_regs(task) + 1))
task 822 arch/x86/include/asm/processor.h #define task_pt_regs(task) \
task 824 arch/x86/include/asm/processor.h unsigned long __ptr = (unsigned long)task_stack_page(task); \
task 848 arch/x86/include/asm/processor.h #define KSTK_ESP(task) (task_pt_regs(task)->sp)
task 893 arch/x86/include/asm/processor.h extern unsigned long KSTK_ESP(struct task_struct *task);
task 907 arch/x86/include/asm/processor.h #define KSTK_EIP(task) (task_pt_regs(task)->ip)
task 13 arch/x86/include/asm/proto.h long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2);
task 38 arch/x86/include/asm/proto.h long do_arch_prctl_common(struct task_struct *task, int option,
task 31 arch/x86/include/asm/stacktrace.h bool in_task_stack(unsigned long *stack, struct task_struct *task,
task 36 arch/x86/include/asm/stacktrace.h int get_stack_info(unsigned long *stack, struct task_struct *task,
task 59 arch/x86/include/asm/stacktrace.h get_frame_pointer(struct task_struct *task, struct pt_regs *regs)
task 64 arch/x86/include/asm/stacktrace.h if (task == current)
task 67 arch/x86/include/asm/stacktrace.h return &((struct inactive_task_frame *)task->thread.sp)->bp;
task 71 arch/x86/include/asm/stacktrace.h get_frame_pointer(struct task_struct *task, struct pt_regs *regs)
task 78 arch/x86/include/asm/stacktrace.h get_stack_pointer(struct task_struct *task, struct pt_regs *regs)
task 83 arch/x86/include/asm/stacktrace.h if (task == current)
task 86 arch/x86/include/asm/stacktrace.h return (unsigned long *)task->thread.sp;
task 89 arch/x86/include/asm/stacktrace.h void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
task 88 arch/x86/include/asm/switch_to.h static inline void update_task_stack(struct task_struct *task)
task 93 arch/x86/include/asm/switch_to.h load_sp0(task->thread.sp0);
task 95 arch/x86/include/asm/switch_to.h this_cpu_write(cpu_tss_rw.x86_tss.sp1, task->thread.sp0);
task 104 arch/x86/include/asm/switch_to.h load_sp0(task_top_of_stack(task));
task 48 arch/x86/include/asm/syscall.h static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
task 53 arch/x86/include/asm/syscall.h static inline void syscall_rollback(struct task_struct *task,
task 59 arch/x86/include/asm/syscall.h static inline long syscall_get_error(struct task_struct *task,
task 68 arch/x86/include/asm/syscall.h if (task->thread_info.status & (TS_COMPAT|TS_I386_REGS_POKED))
task 78 arch/x86/include/asm/syscall.h static inline long syscall_get_return_value(struct task_struct *task,
task 84 arch/x86/include/asm/syscall.h static inline void syscall_set_return_value(struct task_struct *task,
task 93 arch/x86/include/asm/syscall.h static inline void syscall_get_arguments(struct task_struct *task,
task 100 arch/x86/include/asm/syscall.h static inline void syscall_set_arguments(struct task_struct *task,
task 109 arch/x86/include/asm/syscall.h static inline int syscall_get_arch(struct task_struct *task)
task 116 arch/x86/include/asm/syscall.h static inline void syscall_get_arguments(struct task_struct *task,
task 121 arch/x86/include/asm/syscall.h if (task->thread_info.status & TS_COMPAT) {
task 140 arch/x86/include/asm/syscall.h static inline void syscall_set_arguments(struct task_struct *task,
task 145 arch/x86/include/asm/syscall.h if (task->thread_info.status & TS_COMPAT) {
task 164 arch/x86/include/asm/syscall.h static inline int syscall_get_arch(struct task_struct *task)
task 168 arch/x86/include/asm/syscall.h task->thread_info.status & TS_COMPAT)
task 16 arch/x86/include/asm/unwind.h struct task_struct *task;
task 38 arch/x86/include/asm/unwind.h void __unwind_start(struct unwind_state *state, struct task_struct *task,
task 55 arch/x86/include/asm/unwind.h void unwind_start(struct unwind_state *state, struct task_struct *task,
task 58 arch/x86/include/asm/unwind.h first_frame = first_frame ? : get_stack_pointer(task, regs);
task 60 arch/x86/include/asm/unwind.h __unwind_start(state, task, regs, first_frame);
task 107 arch/x86/include/asm/unwind.h #define READ_ONCE_TASK_STACK(task, x) \
task 110 arch/x86/include/asm/unwind.h if (task == current) \
task 117 arch/x86/include/asm/unwind.h static inline bool task_on_another_cpu(struct task_struct *task)
task 120 arch/x86/include/asm/unwind.h return task != current && task->on_cpu;
task 1216 arch/x86/kernel/cpu/bugs.c static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
task 1225 arch/x86/kernel/cpu/bugs.c if (task_spec_ssb_force_disable(task))
task 1227 arch/x86/kernel/cpu/bugs.c task_clear_spec_ssb_disable(task);
task 1228 arch/x86/kernel/cpu/bugs.c task_clear_spec_ssb_noexec(task);
task 1229 arch/x86/kernel/cpu/bugs.c task_update_spec_tif(task);
task 1232 arch/x86/kernel/cpu/bugs.c task_set_spec_ssb_disable(task);
task 1233 arch/x86/kernel/cpu/bugs.c task_clear_spec_ssb_noexec(task);
task 1234 arch/x86/kernel/cpu/bugs.c task_update_spec_tif(task);
task 1237 arch/x86/kernel/cpu/bugs.c task_set_spec_ssb_disable(task);
task 1238 arch/x86/kernel/cpu/bugs.c task_set_spec_ssb_force_disable(task);
task 1239 arch/x86/kernel/cpu/bugs.c task_clear_spec_ssb_noexec(task);
task 1240 arch/x86/kernel/cpu/bugs.c task_update_spec_tif(task);
task 1243 arch/x86/kernel/cpu/bugs.c if (task_spec_ssb_force_disable(task))
task 1245 arch/x86/kernel/cpu/bugs.c task_set_spec_ssb_disable(task);
task 1246 arch/x86/kernel/cpu/bugs.c task_set_spec_ssb_noexec(task);
task 1247 arch/x86/kernel/cpu/bugs.c task_update_spec_tif(task);
task 1255 arch/x86/kernel/cpu/bugs.c static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
task 1271 arch/x86/kernel/cpu/bugs.c task_spec_ib_force_disable(task))
task 1273 arch/x86/kernel/cpu/bugs.c task_clear_spec_ib_disable(task);
task 1274 arch/x86/kernel/cpu/bugs.c task_update_spec_tif(task);
task 1289 arch/x86/kernel/cpu/bugs.c task_set_spec_ib_disable(task);
task 1291 arch/x86/kernel/cpu/bugs.c task_set_spec_ib_force_disable(task);
task 1292 arch/x86/kernel/cpu/bugs.c task_update_spec_tif(task);
task 1300 arch/x86/kernel/cpu/bugs.c int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
task 1305 arch/x86/kernel/cpu/bugs.c return ssb_prctl_set(task, ctrl);
task 1307 arch/x86/kernel/cpu/bugs.c return ib_prctl_set(task, ctrl);
task 1314 arch/x86/kernel/cpu/bugs.c void arch_seccomp_spec_mitigate(struct task_struct *task)
task 1317 arch/x86/kernel/cpu/bugs.c ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
task 1320 arch/x86/kernel/cpu/bugs.c ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
task 1324 arch/x86/kernel/cpu/bugs.c static int ssb_prctl_get(struct task_struct *task)
task 1331 arch/x86/kernel/cpu/bugs.c if (task_spec_ssb_force_disable(task))
task 1333 arch/x86/kernel/cpu/bugs.c if (task_spec_ssb_noexec(task))
task 1335 arch/x86/kernel/cpu/bugs.c if (task_spec_ssb_disable(task))
task 1345 arch/x86/kernel/cpu/bugs.c static int ib_prctl_get(struct task_struct *task)
task 1361 arch/x86/kernel/cpu/bugs.c if (task_spec_ib_force_disable(task))
task 1363 arch/x86/kernel/cpu/bugs.c if (task_spec_ib_disable(task))
task 1370 arch/x86/kernel/cpu/bugs.c int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
task 1374 arch/x86/kernel/cpu/bugs.c return ssb_prctl_get(task);
task 1376 arch/x86/kernel/cpu/bugs.c return ib_prctl_get(task);
task 617 arch/x86/kernel/cpu/resctrl/rdtgroup.c static int rdtgroup_task_write_permission(struct task_struct *task,
task 620 arch/x86/kernel/cpu/resctrl/rdtgroup.c const struct cred *tcred = get_task_cred(task);
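The arch/x86/kernel/cpu/bugs.c entries above are the per-task backends of the speculation-control prctl() interface. A minimal userspace sketch of that uAPI (PR_GET/SET_SPECULATION_CTRL are the real prctl options; the program itself is illustrative):

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* Read the current speculative-store-bypass state for this task;
	 * served by ssb_prctl_get() indexed above. */
	long state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
	if (state < 0) {
		perror("PR_GET_SPECULATION_CTRL");
		return 1;
	}
	printf("SSB state: 0x%lx\n", state);

	/* PR_SPEC_FORCE_DISABLE takes the task_set_spec_ssb_force_disable()
	 * branch in ssb_prctl_set() and cannot be undone for this task. */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_FORCE_DISABLE, 0, 0))
		perror("PR_SET_SPECULATION_CTRL");
	return 0;
}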
task 631 arch/x86/kernel/cpu/resctrl/rdtgroup.c rdt_last_cmd_printf("No permission to move task %d\n", task->pid);
task 32 arch/x86/kernel/dumpstack.c bool in_task_stack(unsigned long *stack, struct task_struct *task,
task 35 arch/x86/kernel/dumpstack.c unsigned long *begin = task_stack_page(task);
task 36 arch/x86/kernel/dumpstack.c unsigned long *end = task_stack_page(task) + THREAD_SIZE;
task 162 arch/x86/kernel/dumpstack.c void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
task 173 arch/x86/kernel/dumpstack.c unwind_start(&state, task, regs, stack);
task 174 arch/x86/kernel/dumpstack.c stack = stack ? : get_stack_pointer(task, regs);
task 196 arch/x86/kernel/dumpstack.c if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
task 204 arch/x86/kernel/dumpstack.c if (get_stack_info(stack, task, &stack_info, &visit_mask))
task 254 arch/x86/kernel/dumpstack.c real_addr = ftrace_graph_ret_addr(task, &graph_idx,
task 282 arch/x86/kernel/dumpstack.c void show_stack(struct task_struct *task, unsigned long *sp)
task 284 arch/x86/kernel/dumpstack.c task = task ? : current;
task 290 arch/x86/kernel/dumpstack.c if (!sp && task == current)
task 293 arch/x86/kernel/dumpstack.c show_trace_log_lvl(task, NULL, sp, KERN_DEFAULT);
task 85 arch/x86/kernel/dumpstack_32.c int get_stack_info(unsigned long *stack, struct task_struct *task,
task 91 arch/x86/kernel/dumpstack_32.c task = task ? : current;
task 93 arch/x86/kernel/dumpstack_32.c if (in_task_stack(stack, task, info))
task 96 arch/x86/kernel/dumpstack_32.c if (task != current)
task 153 arch/x86/kernel/dumpstack_64.c int get_stack_info(unsigned long *stack, struct task_struct *task,
task 159 arch/x86/kernel/dumpstack_64.c task = task ? : current;
task 161 arch/x86/kernel/dumpstack_64.c if (in_task_stack(stack, task, info))
task 164 arch/x86/kernel/dumpstack_64.c if (task != current)
task 1252 arch/x86/kernel/fpu/xstate.c static void avx512_status(struct seq_file *m, struct task_struct *task)
task 1254 arch/x86/kernel/fpu/xstate.c unsigned long timestamp = READ_ONCE(task->thread.fpu.avx512_timestamp);
task 1280 arch/x86/kernel/fpu/xstate.c struct pid *pid, struct task_struct *task)
task 1286 arch/x86/kernel/fpu/xstate.c avx512_status(m, task);
task 806 arch/x86/kernel/kprobes/core.c if (ri->task != current)
task 842 arch/x86/kernel/kprobes/core.c if (ri->task != current)
task 98 arch/x86/kernel/perf_regs.c u64 perf_reg_abi(struct task_struct *task)
task 124 arch/x86/kernel/perf_regs.c u64 perf_reg_abi(struct task_struct *task)
task 126 arch/x86/kernel/perf_regs.c if (test_tsk_thread_flag(task, TIF_IA32))
task 237 arch/x86/kernel/process.c static int set_cpuid_mode(struct task_struct *task, unsigned long cpuid_enabled)
task 858 arch/x86/kernel/process.c long do_arch_prctl_common(struct task_struct *task, int option,
task 865 arch/x86/kernel/process.c return set_cpuid_mode(task, cpuid_enabled);
task 199 arch/x86/kernel/process_64.c static __always_inline void save_fsgs(struct task_struct *task)
task 201 arch/x86/kernel/process_64.c savesegment(fs, task->thread.fsindex);
task 202 arch/x86/kernel/process_64.c savesegment(gs, task->thread.gsindex);
task 203 arch/x86/kernel/process_64.c save_base_legacy(task, task->thread.fsindex, FS);
task 204 arch/x86/kernel/process_64.c save_base_legacy(task, task->thread.gsindex, GS);
task 289 arch/x86/kernel/process_64.c static unsigned long x86_fsgsbase_read_task(struct task_struct *task,
task 307 arch/x86/kernel/process_64.c base = get_desc_base(&task->thread.tls_array[idx]);
task 317 arch/x86/kernel/process_64.c mutex_lock(&task->mm->context.lock);
task 318 arch/x86/kernel/process_64.c ldt = task->mm->context.ldt;
task 323 arch/x86/kernel/process_64.c mutex_unlock(&task->mm->context.lock);
task 332 arch/x86/kernel/process_64.c unsigned long x86_fsbase_read_task(struct task_struct *task)
task 336 arch/x86/kernel/process_64.c if (task == current)
task 338 arch/x86/kernel/process_64.c else if (task->thread.fsindex == 0)
task 339 arch/x86/kernel/process_64.c fsbase = task->thread.fsbase;
task 341 arch/x86/kernel/process_64.c fsbase = x86_fsgsbase_read_task(task, task->thread.fsindex);
task 346 arch/x86/kernel/process_64.c unsigned long x86_gsbase_read_task(struct task_struct *task)
task 350 arch/x86/kernel/process_64.c if (task == current)
task 352 arch/x86/kernel/process_64.c else if (task->thread.gsindex == 0)
task 353 arch/x86/kernel/process_64.c gsbase = task->thread.gsbase;
task 355 arch/x86/kernel/process_64.c gsbase = x86_fsgsbase_read_task(task, task->thread.gsindex);
task 360 arch/x86/kernel/process_64.c void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
task 362 arch/x86/kernel/process_64.c WARN_ON_ONCE(task == current);
task 364 arch/x86/kernel/process_64.c task->thread.fsbase = fsbase;
task 367 arch/x86/kernel/process_64.c void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
task 369 arch/x86/kernel/process_64.c WARN_ON_ONCE(task == current);
task 371 arch/x86/kernel/process_64.c task->thread.gsbase = gsbase;
task 703 arch/x86/kernel/process_64.c long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
task 719 arch/x86/kernel/process_64.c if (task == current) {
task 727 arch/x86/kernel/process_64.c task->thread.gsbase = arg2;
task 730 arch/x86/kernel/process_64.c task->thread.gsindex = 0;
task 731 arch/x86/kernel/process_64.c x86_gsbase_write_task(task, arg2);
task 749 arch/x86/kernel/process_64.c if (task == current) {
task 757 arch/x86/kernel/process_64.c task->thread.fsbase = arg2;
task 759 arch/x86/kernel/process_64.c task->thread.fsindex = 0;
task 760 arch/x86/kernel/process_64.c x86_fsbase_write_task(task, arg2);
task 766 arch/x86/kernel/process_64.c unsigned long base = x86_fsbase_read_task(task);
task 772 arch/x86/kernel/process_64.c unsigned long base = x86_gsbase_read_task(task);
task 817 arch/x86/kernel/process_64.c unsigned long KSTK_ESP(struct task_struct *task)
task 819 arch/x86/kernel/process_64.c return task_pt_regs(task)->sp;
task 164 arch/x86/kernel/ptrace.c static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
task 171 arch/x86/kernel/ptrace.c retval = *pt_regs_access(task_pt_regs(task), offset);
task 173 arch/x86/kernel/ptrace.c if (task == current)
task 174 arch/x86/kernel/ptrace.c retval = get_user_gs(task_pt_regs(task));
task 176 arch/x86/kernel/ptrace.c retval = task_user_gs(task);
task 181 arch/x86/kernel/ptrace.c static int set_segment_reg(struct task_struct *task,
task 207 arch/x86/kernel/ptrace.c *pt_regs_access(task_pt_regs(task), offset) = value;
task 211 arch/x86/kernel/ptrace.c if (task == current)
task 212 arch/x86/kernel/ptrace.c set_user_gs(task_pt_regs(task), value);
task 214 arch/x86/kernel/ptrace.c task_user_gs(task) = value;
task 230 arch/x86/kernel/ptrace.c static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
task 239 arch/x86/kernel/ptrace.c if (task == current) {
task 244 arch/x86/kernel/ptrace.c return task->thread.fsindex;
task 246 arch/x86/kernel/ptrace.c if (task == current) {
task 250 arch/x86/kernel/ptrace.c return task->thread.gsindex;
task 252 arch/x86/kernel/ptrace.c if (task == current) {
task 256 arch/x86/kernel/ptrace.c return task->thread.ds;
task 258 arch/x86/kernel/ptrace.c if (task == current) {
task 262 arch/x86/kernel/ptrace.c return task->thread.es;
task 268 arch/x86/kernel/ptrace.c return *pt_regs_access(task_pt_regs(task), offset);
task 271 arch/x86/kernel/ptrace.c static int set_segment_reg(struct task_struct *task,
task 282 arch/x86/kernel/ptrace.c task->thread.fsindex = value;
task 283 arch/x86/kernel/ptrace.c if (task == current)
task 284 arch/x86/kernel/ptrace.c loadsegment(fs, task->thread.fsindex);
task 287 arch/x86/kernel/ptrace.c task->thread.gsindex = value;
task 288 arch/x86/kernel/ptrace.c if (task == current)
task 289 arch/x86/kernel/ptrace.c load_gs_index(task->thread.gsindex);
task 292 arch/x86/kernel/ptrace.c task->thread.ds = value;
task 293 arch/x86/kernel/ptrace.c if (task == current)
task 294 arch/x86/kernel/ptrace.c loadsegment(ds, task->thread.ds);
task 297 arch/x86/kernel/ptrace.c task->thread.es = value;
task 298 arch/x86/kernel/ptrace.c if (task == current)
task 299 arch/x86/kernel/ptrace.c loadsegment(es, task->thread.es);
task 308 arch/x86/kernel/ptrace.c task_pt_regs(task)->cs = value;
task 313 arch/x86/kernel/ptrace.c task_pt_regs(task)->ss = value;
task 322 arch/x86/kernel/ptrace.c static unsigned long get_flags(struct task_struct *task)
task 324 arch/x86/kernel/ptrace.c unsigned long retval = task_pt_regs(task)->flags;
task 329 arch/x86/kernel/ptrace.c if (test_tsk_thread_flag(task, TIF_FORCED_TF))
task 335 arch/x86/kernel/ptrace.c static int set_flags(struct task_struct *task, unsigned long value)
task 337 arch/x86/kernel/ptrace.c struct pt_regs *regs = task_pt_regs(task);
task 345 arch/x86/kernel/ptrace.c clear_tsk_thread_flag(task, TIF_FORCED_TF);
task 346 arch/x86/kernel/ptrace.c else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
task 397 arch/x86/kernel/ptrace.c static unsigned long getreg(struct task_struct *task, unsigned long offset)
task 406 arch/x86/kernel/ptrace.c return get_segment_reg(task, offset);
task 409 arch/x86/kernel/ptrace.c return get_flags(task);
task 413 arch/x86/kernel/ptrace.c return x86_fsbase_read_task(task);
task 415 arch/x86/kernel/ptrace.c return x86_gsbase_read_task(task);
task 419 arch/x86/kernel/ptrace.c return *pt_regs_access(task_pt_regs(task), offset);
task 1322 arch/x86/kernel/ptrace.c const struct user_regset_view *task_user_regset_view(struct task_struct *task)
task 1325 arch/x86/kernel/ptrace.c if (!user_64bit_mode(task_pt_regs(task)))
task 16 arch/x86/kernel/stacktrace.c struct task_struct *task, struct pt_regs *regs)
task 24 arch/x86/kernel/stacktrace.c for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
task 39 arch/x86/kernel/stacktrace.c void *cookie, struct task_struct *task)
task 45 arch/x86/kernel/stacktrace.c for (unwind_start(&state, task, NULL, NULL);
task 85 arch/x86/kernel/stacktrace.c if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
task 167 arch/x86/kernel/step.c void set_task_blockstep(struct task_struct *task, bool on)
task 185 arch/x86/kernel/step.c set_tsk_thread_flag(task, TIF_BLOCKSTEP);
task 188 arch/x86/kernel/step.c clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
task 190 arch/x86/kernel/step.c if (task == current)
task 820 arch/x86/kernel/traps.c struct task_struct *task = current;
task 821 arch/x86/kernel/traps.c struct fpu *fpu = &task->thread.fpu;
task 832 arch/x86/kernel/traps.c task->thread.error_code = error_code;
task 833 arch/x86/kernel/traps.c task->thread.trap_nr = trapnr;
task 846 arch/x86/kernel/traps.c task->thread.trap_nr = trapnr;
task 847 arch/x86/kernel/traps.c task->thread.error_code = error_code;
task 50 arch/x86/kernel/unwind_frame.c if (get_stack_info(sp, state->task, &stack_info, &visit_mask))
task 88 arch/x86/kernel/unwind_frame.c return (unsigned long *)task_pt_regs(state->task) - 2;
task 225 arch/x86/kernel/unwind_frame.c if (get_stack_info(info->next_sp, state->task, info,
task 248 arch/x86/kernel/unwind_frame.c addr = READ_ONCE_TASK_STACK(state->task, *addr_p);
task 249 arch/x86/kernel/unwind_frame.c state->ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
task 273 arch/x86/kernel/unwind_frame.c regs = task_pt_regs(state->task);
task 307 arch/x86/kernel/unwind_frame.c next_bp = (unsigned long *)READ_ONCE_TASK_STACK(state->task, *state->bp);
task 326 arch/x86/kernel/unwind_frame.c if (state->task != current)
task 337 arch/x86/kernel/unwind_frame.c state->regs->sp < (unsigned long)task_pt_regs(state->task))
task 350 arch/x86/kernel/unwind_frame.c state->regs, state->task->comm,
task 351 arch/x86/kernel/unwind_frame.c state->task->pid, next_bp);
task 356 arch/x86/kernel/unwind_frame.c state->bp, state->task->comm,
task 357 arch/x86/kernel/unwind_frame.c state->task->pid, next_bp);
task 366 arch/x86/kernel/unwind_frame.c void __unwind_start(struct unwind_state *state, struct task_struct *task,
task 372 arch/x86/kernel/unwind_frame.c state->task = task;
task 381 arch/x86/kernel/unwind_frame.c bp = get_frame_pointer(task, regs);
task 398 arch/x86/kernel/unwind_frame.c get_stack_info(bp, state->task, &state->stack_info,
task 18 arch/x86/kernel/unwind_guess.c return ftrace_graph_ret_addr(state->task, &state->graph_idx,
task 45 arch/x86/kernel/unwind_guess.c } while (!get_stack_info(state->sp, state->task, info,
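The unwind entries above (unwind.h, stacktrace.c, unwind_frame.c) all drive the same loop. A minimal sketch of that pattern, modeled on the for-loop indexed from arch/x86/kernel/stacktrace.c; the real callers additionally pin the task and handle entry registers:

#include <asm/unwind.h>
#include <linux/sched.h>
#include <linux/printk.h>

/* Walk @task's kernel stack and print each return address. */
static void print_task_stack(struct task_struct *task, struct pt_regs *regs)
{
	struct unwind_state state;

	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		unsigned long addr = unwind_get_return_address(&state);

		if (!addr)
			break;
		pr_info("  %pS\n", (void *)addr);
	}
}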
task 52 arch/x86/kernel/unwind_guess.c void __unwind_start(struct unwind_state *state, struct task_struct *task,
task 57 arch/x86/kernel/unwind_guess.c state->task = task;
task 60 arch/x86/kernel/unwind_guess.c get_stack_info(first_frame, state->task, &state->stack_info,
task 314 arch/x86/kernel/unwind_orc.c struct task_struct *task = state->task;
task 322 arch/x86/kernel/unwind_orc.c if (task != current && state->sp == task->thread.sp) {
task 323 arch/x86/kernel/unwind_orc.c struct inactive_task_frame *frame = (void *)task->thread.sp;
task 340 arch/x86/kernel/unwind_orc.c (get_stack_info(addr, state->task, info, &state->stack_mask)))
task 528 arch/x86/kernel/unwind_orc.c state->ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
task 615 arch/x86/kernel/unwind_orc.c void __unwind_start(struct unwind_state *state, struct task_struct *task,
task 619 arch/x86/kernel/unwind_orc.c state->task = task;
task 629 arch/x86/kernel/unwind_orc.c if (task_on_another_cpu(task))
task 643 arch/x86/kernel/unwind_orc.c } else if (task == current) {
task 651 arch/x86/kernel/unwind_orc.c struct inactive_task_frame *frame = (void *)task->thread.sp;
task 653 arch/x86/kernel/unwind_orc.c state->sp = task->thread.sp;
task 658 arch/x86/kernel/unwind_orc.c if (get_stack_info((unsigned long *)state->sp, state->task,
task 668 arch/x86/kernel/unwind_orc.c if (get_stack_info(next_page, state->task, &state->stack_info,
task 812 arch/x86/kernel/vm86_32.c void release_vm86_irqs(struct task_struct *task)
task 816 arch/x86/kernel/vm86_32.c if (vm86_irqs[i].tsk == task)
task 82 arch/x86/um/asm/ptrace.h extern long arch_prctl(struct task_struct *task, int option,
task 12 arch/x86/um/asm/syscall.h static inline int syscall_get_arch(struct task_struct *task)
task 15 arch/x86/um/syscalls_64.c long arch_prctl(struct task_struct *task, int option,
task 20 arch/x86/um/syscalls_64.c int pid = task->mm->context.id.u.pid;
task 63 arch/x86/um/tls_32.c static int get_free_idx(struct task_struct* task)
task 65 arch/x86/um/tls_32.c struct thread_struct *t = &task->thread;
task 132 arch/x86/um/tls_32.c static inline int needs_TLS_update(struct task_struct *task)
task 139 arch/x86/um/tls_32.c &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
task 157 arch/x86/um/tls_32.c void clear_flushed_tls(struct task_struct *task)
task 163 arch/x86/um/tls_32.c &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
task 203 arch/x86/um/tls_32.c static int set_tls_entry(struct task_struct* task, struct user_desc *info,
task 206 arch/x86/um/tls_32.c struct thread_struct *t = &task->thread;
task 238 arch/x86/um/tls_32.c static int get_tls_entry(struct task_struct *task, struct user_desc *info,
task 241 arch/x86/um/tls_32.c struct thread_struct *t = &task->thread;
task 259 arch/x86/um/tls_32.c if (unlikely(task == current &&
task 5 arch/x86/um/tls_64.c void clear_flushed_tls(struct task_struct *task)
task 24 arch/xtensa/include/asm/current.h return current_thread_info()->task;
task 20 arch/xtensa/include/asm/stacktrace.h static __always_inline unsigned long *stack_pointer(struct task_struct *task)
task 24 arch/xtensa/include/asm/stacktrace.h if (!task || task == current)
task 27 arch/xtensa/include/asm/stacktrace.h sp = (unsigned long *)task->thread.sp;
task 17 arch/xtensa/include/asm/syscall.h static inline int syscall_get_arch(struct task_struct *task)
task 25 arch/xtensa/include/asm/syscall.h static inline long syscall_get_nr(struct task_struct *task,
task 31 arch/xtensa/include/asm/syscall.h static inline void syscall_rollback(struct task_struct *task,
task 37 arch/xtensa/include/asm/syscall.h static inline long syscall_get_error(struct task_struct *task,
task 44 arch/xtensa/include/asm/syscall.h static inline long syscall_get_return_value(struct task_struct *task,
task 50 arch/xtensa/include/asm/syscall.h static inline void syscall_set_return_value(struct task_struct *task,
task 60 arch/xtensa/include/asm/syscall.h static inline void syscall_get_arguments(struct task_struct *task,
task 71 arch/xtensa/include/asm/syscall.h static inline void syscall_set_arguments(struct task_struct *task,
task 49 arch/xtensa/include/asm/thread_info.h struct task_struct *task; /* main task structure */
task 76 arch/xtensa/include/asm/thread_info.h .task = &tsk, \
task 85 arch/xtensa/kernel/asm-offsets.c OFFSET(TI_TASK, thread_info, task);
task 221 arch/xtensa/kernel/ptrace.c const struct user_regset_view *task_user_regset_view(struct task_struct *task)
task 222 arch/xtensa/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
task 228 arch/xtensa/kernel/stacktrace.c walk_stackframe(stack_pointer(task), stack_trace_cb, &trace_data);
task 487 arch/xtensa/kernel/traps.c void show_trace(struct task_struct *task, unsigned long *sp)
task 490 arch/xtensa/kernel/traps.c sp = stack_pointer(task);
task 501 arch/xtensa/kernel/traps.c void show_stack(struct task_struct *task, unsigned long *sp)
task 507 arch/xtensa/kernel/traps.c sp = stack_pointer(task);
task 519 arch/xtensa/kernel/traps.c show_trace(task, stack);
task 169 arch/xtensa/mm/tlb.c struct task_struct *task = get_current();
task 170 arch/xtensa/mm/tlb.c struct mm_struct *mm = task->mm;
task 176 arch/xtensa/mm/tlb.c mm = task->active_mm;
task 1282 block/blk-cgroup.c struct task_struct *task;
task 1288 block/blk-cgroup.c cgroup_taskset_for_each(task, dst_css, tset) {
task 1289 block/blk-cgroup.c task_lock(task);
task 1290 block/blk-cgroup.c ioc = task->io_context;
task 1293 block/blk-cgroup.c task_unlock(task);
task 199 block/blk-ioc.c void exit_io_context(struct task_struct *task)
task 203 block/blk-ioc.c task_lock(task);
task 204 block/blk-ioc.c ioc = task->io_context;
task 205 block/blk-ioc.c task->io_context = NULL;
task 206 block/blk-ioc.c task_unlock(task);
task 250 block/blk-ioc.c int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
task 276 block/blk-ioc.c task_lock(task);
task 277 block/blk-ioc.c if (!task->io_context &&
task 278 block/blk-ioc.c (task == current || !(task->flags & PF_EXITING)))
task 279 block/blk-ioc.c task->io_context = ioc;
task 283 block/blk-ioc.c ret = task->io_context ? 0 : -EBUSY;
task 285 block/blk-ioc.c task_unlock(task);
task 303 block/blk-ioc.c struct io_context *get_task_io_context(struct task_struct *task,
task 311 block/blk-ioc.c task_lock(task);
task 312 block/blk-ioc.c ioc = task->io_context;
task 315 block/blk-ioc.c task_unlock(task);
task 318 block/blk-ioc.c task_unlock(task);
task 319 block/blk-ioc.c } while (!create_task_io_context(task, gfp_flags, node));
task 3452 block/blk-mq.c if (hs.task)
task 3456 block/blk-mq.c } while (hs.task && !signal_pending(current));
task 205 block/blk-rq-qos.c struct task_struct *task;
task 229 block/blk-rq-qos.c wake_up_process(data->task);
task 258 block/blk-rq-qos.c .task = current,
task 286 block/blk.h int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
task 36 block/ioprio.c int set_task_ioprio(struct task_struct *task, int ioprio)
task 43 block/ioprio.c tcred = __task_cred(task);
task 51 block/ioprio.c err = security_task_setioprio(task, ioprio);
task 55 block/ioprio.c ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
task 460 crypto/crypto_engine.c sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
task 885 drivers/atm/eni.c tasklet_disable(&eni_dev->task);
task 888 drivers/atm/eni.c tasklet_enable(&eni_dev->task);
task 1408 drivers/atm/eni.c tasklet_disable(&eni_dev->task);
task 1410 drivers/atm/eni.c tasklet_enable(&eni_dev->task);
task 1523 drivers/atm/eni.c tasklet_schedule(&eni_dev->task);
task 1845 drivers/atm/eni.c tasklet_init(&eni_dev->task,eni_tasklet,(unsigned long) dev);
task 1982 drivers/atm/eni.c tasklet_disable(&eni_dev->task);
task 1992 drivers/atm/eni.c tasklet_enable(&eni_dev->task);
task 2076 drivers/atm/eni.c tasklet_disable(&ENI_DEV(vcc->dev)->task);
task 2078 drivers/atm/eni.c tasklet_enable(&ENI_DEV(vcc->dev)->task);
task 2082 drivers/atm/eni.c tasklet_schedule(&ENI_DEV(vcc->dev)->task);
task 72 drivers/atm/eni.h struct tasklet_struct task; /* tasklet for interrupt work */
task 199 drivers/block/aoe/aoe.h struct task_struct *task;
task 1258 drivers/block/aoe/aoecmd.c kthread_stop(k->task);
task 1265 drivers/block/aoe/aoecmd.c struct task_struct *task;
task 1268 drivers/block/aoe/aoecmd.c task = kthread_run(kthread, k, "%s", k->name);
task 1269 drivers/block/aoe/aoecmd.c if (task == NULL || IS_ERR(task))
task 1271 drivers/block/aoe/aoecmd.c k->task = task;
task 237 drivers/block/drbd/drbd_int.h struct task_struct *task;
task 1961 drivers/block/drbd/drbd_int.h struct task_struct *task = connection->ack_receiver.task;
task 1962 drivers/block/drbd/drbd_int.h if (task && get_t_state(&connection->ack_receiver) == RUNNING)
task 1963 drivers/block/drbd/drbd_int.h send_sig(SIGXCPU, task, 1);
task 349 drivers/block/drbd/drbd_main.c thi->task = NULL;
task 370 drivers/block/drbd/drbd_main.c thi->task = NULL;
task 423 drivers/block/drbd/drbd_main.c thi->task = nt;
task 461 drivers/block/drbd/drbd_main.c if (thi->task == NULL) {
task 469 drivers/block/drbd/drbd_main.c if (thi->task != current)
task 470 drivers/block/drbd/drbd_main.c send_sig(DRBD_SIGKILL, thi->task, 1);
task 1481 drivers/block/drbd/drbd_main.c || !connection->ack_receiver.task
task 3620 drivers/block/drbd/drbd_main.c D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);
task 3662 drivers/block/drbd/drbd_main.c D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
task 369 drivers/block/drbd/drbd_nl.c if (current == connection->worker.task)
task 398 drivers/block/drbd/drbd_nl.c if (current == connection->worker.task)
task 625 drivers/block/drbd/drbd_state.c D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
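The aoecmd.c, drbd, and btmrvl entries above all follow the standard kthread lifecycle. A minimal sketch of that pattern; the function and variable names here (my_worker_fn, my_task) are hypothetical:

#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/sched.h>

/* Worker loop: runs until kthread_stop() is called on the thread. */
static int my_worker_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* ... do one unit of work, or sleep until woken ... */
		schedule();
	}
	return 0;
}

static struct task_struct *my_task;

static int my_worker_start(void)
{
	/* kthread_run() creates and immediately wakes the thread. */
	my_task = kthread_run(my_worker_fn, NULL, "my_worker");
	if (IS_ERR(my_task))
		return PTR_ERR(my_task);
	return 0;
}

static void my_worker_stop(void)
{
	if (my_task)
		kthread_stop(my_task);	/* blocks until my_worker_fn() returns */
}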
task 1526 drivers/block/drbd/drbd_state.c D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);
task 1780 drivers/block/drbd/drbd_worker.c if (current == connection->worker.task) {
task 62 drivers/bluetooth/btmrvl_drv.h struct task_struct *task;
task 710 drivers/bluetooth/btmrvl_main.c kthread_stop(priv->main_thread.task);
task 742 drivers/bluetooth/btmrvl_main.c priv->main_thread.task = kthread_run(btmrvl_service_main_thread,
task 744 drivers/bluetooth/btmrvl_main.c if (IS_ERR(priv->main_thread.task))
task 772 drivers/bluetooth/btmrvl_main.c kthread_stop(priv->main_thread.task);
task 175 drivers/char/pcmcia/synclink_cs.c struct work_struct task; /* task structure for scheduling bh */
task 531 drivers/char/pcmcia/synclink_cs.c INIT_WORK(&info->task, bh_handler);
task 770 drivers/char/pcmcia/synclink_cs.c MGSLPC_INFO *info = container_of(work, MGSLPC_INFO, task);
task 1252 drivers/char/pcmcia/synclink_cs.c schedule_work(&info->task);
task 62 drivers/connector/cn_proc.c void proc_fork_connector(struct task_struct *task)
task 78 drivers/connector/cn_proc.c parent = rcu_dereference(task->real_parent);
task 82 drivers/connector/cn_proc.c ev->event_data.fork.child_pid = task->pid;
task 83 drivers/connector/cn_proc.c ev->event_data.fork.child_tgid = task->tgid;
task 92 drivers/connector/cn_proc.c void proc_exec_connector(struct task_struct *task)
task 106 drivers/connector/cn_proc.c ev->event_data.exec.process_pid = task->pid;
task 107 drivers/connector/cn_proc.c ev->event_data.exec.process_tgid = task->tgid;
task 116 drivers/connector/cn_proc.c void proc_id_connector(struct task_struct *task, int which_id)
task 130 drivers/connector/cn_proc.c ev->event_data.id.process_pid = task->pid;
task 131 drivers/connector/cn_proc.c ev->event_data.id.process_tgid = task->tgid;
task 133 drivers/connector/cn_proc.c cred = __task_cred(task);
task 154 drivers/connector/cn_proc.c void proc_sid_connector(struct task_struct *task)
task 168 drivers/connector/cn_proc.c ev->event_data.sid.process_pid = task->pid;
task 169 drivers/connector/cn_proc.c ev->event_data.sid.process_tgid = task->tgid;
task 178 drivers/connector/cn_proc.c void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
task 192 drivers/connector/cn_proc.c ev->event_data.ptrace.process_pid = task->pid;
task 193 drivers/connector/cn_proc.c ev->event_data.ptrace.process_tgid = task->tgid;
task 210 drivers/connector/cn_proc.c void proc_comm_connector(struct task_struct *task)
task 224 drivers/connector/cn_proc.c ev->event_data.comm.process_pid = task->pid;
task 225 drivers/connector/cn_proc.c ev->event_data.comm.process_tgid = task->tgid;
task 226 drivers/connector/cn_proc.c get_task_comm(ev->event_data.comm.comm, task);
task 235 drivers/connector/cn_proc.c void proc_coredump_connector(struct task_struct *task)
task 250 drivers/connector/cn_proc.c ev->event_data.coredump.process_pid = task->pid;
task 251 drivers/connector/cn_proc.c ev->event_data.coredump.process_tgid = task->tgid;
task 254 drivers/connector/cn_proc.c if (pid_alive(task)) {
task 255 drivers/connector/cn_proc.c parent = rcu_dereference(task->real_parent);
task 268 drivers/connector/cn_proc.c void proc_exit_connector(struct task_struct *task)
task 283 drivers/connector/cn_proc.c ev->event_data.exit.process_pid = task->pid;
task 284 drivers/connector/cn_proc.c ev->event_data.exit.process_tgid = task->tgid;
task 285 drivers/connector/cn_proc.c ev->event_data.exit.exit_code = task->exit_code;
task 286 drivers/connector/cn_proc.c ev->event_data.exit.exit_signal = task->exit_signal;
task 289 drivers/connector/cn_proc.c if (pid_alive(task)) {
task 290 drivers/connector/cn_proc.c parent = rcu_dereference(task->real_parent);
task 263 drivers/crypto/axis/artpec6_crypto.c struct tasklet_struct task;
task 2080 drivers/crypto/axis/artpec6_crypto.c tasklet_schedule(&ac->task);
task 2620 drivers/crypto/axis/artpec6_crypto.c tasklet_schedule(&ac->task);
task 2904 drivers/crypto/axis/artpec6_crypto.c tasklet_init(&ac->task, artpec6_crypto_task,
task 2970 drivers/crypto/axis/artpec6_crypto.c tasklet_disable(&ac->task);
task 2972 drivers/crypto/axis/artpec6_crypto.c tasklet_kill(&ac->task);
task 142 drivers/crypto/chelsio/chtls/chtls_cm.h #define DECLARE_TASK_FUNC(task, task_param) \
task 143 drivers/crypto/chelsio/chtls/chtls_cm.h static void task(struct work_struct *task_param)
task 434 drivers/dma-buf/dma-fence.c struct task_struct *task;
task 443 drivers/dma-buf/dma-fence.c wake_up_state(wait->task, TASK_NORMAL);
task 497 drivers/dma-buf/dma-fence.c cb.task = current;
task 592 drivers/dma-buf/dma-fence.c cb[i].task = current;
task 38 drivers/dma-buf/st-dma-fence.c struct task_struct *task;
task 43 drivers/dma-buf/st-dma-fence.c wake_up_process(container_of(cb, struct wait_cb, cb)->task);
task 49 drivers/dma-buf/st-dma-fence.c struct wait_cb cb = { .task = current };
task 433 drivers/dma-buf/st-dma-fence.c struct task_struct *task;
task 522 drivers/dma-buf/st-dma-fence.c t[i].task = kthread_run(thread_signal_callback, &t[i],
task 524 drivers/dma-buf/st-dma-fence.c get_task_struct(t[i].task);
task 532 drivers/dma-buf/st-dma-fence.c err = kthread_stop(t[i].task);
task 536 drivers/dma-buf/st-dma-fence.c put_task_struct(t[i].task);
task 854 drivers/dma/bcm2835-dma.c tasklet_kill(&c->vc.task);
task 140 drivers/dma/bestcomm/bestcomm.c bcom_load_image(int task, u32 *task_image)
task 154 drivers/dma/bestcomm/bestcomm.c if ((task < 0) || (task >= BCOM_MAX_TASKS)) {
task 156 drivers/dma/bestcomm/bestcomm.c ": Trying to load invalid task %d\n", task);
task 161 drivers/dma/bestcomm/bestcomm.c tdt = &bcom_eng->tdt[task];
task 164 drivers/dma/bestcomm/bestcomm.c desc = bcom_task_desc(task);
task 165 drivers/dma/bestcomm/bestcomm.c if (hdr->desc_size != bcom_task_num_descs(task)) {
task 169 drivers/dma/bestcomm/bestcomm.c task,
task 171 drivers/dma/bestcomm/bestcomm.c bcom_task_num_descs(task));
task 185 drivers/dma/bestcomm/bestcomm.c var = bcom_task_var(task);
task 186 drivers/dma/bestcomm/bestcomm.c inc = bcom_task_inc(task);
task 205 drivers/dma/bestcomm/bestcomm.c bcom_set_initiator(int task, int initiator)
task 212 drivers/dma/bestcomm/bestcomm.c bcom_set_tcr_initiator(task, initiator);
task 218 drivers/dma/bestcomm/bestcomm.c desc = bcom_task_desc(task);
task 220 drivers/dma/bestcomm/bestcomm.c num_descs = bcom_task_num_descs(task);
task 279 drivers/dma/bestcomm/bestcomm.c int task;
task 314 drivers/dma/bestcomm/bestcomm.c for (task=0; task<BCOM_MAX_TASKS; task++)
task 316 drivers/dma/bestcomm/bestcomm.c out_be16(&bcom_eng->regs->tcr[task], 0);
task 317 drivers/dma/bestcomm/bestcomm.c out_8(&bcom_eng->regs->ipr[task], 0);
task 319 drivers/dma/bestcomm/bestcomm.c bcom_eng->tdt[task].context = ctx_pa;
task 320 drivers/dma/bestcomm/bestcomm.c bcom_eng->tdt[task].var = var_pa;
task 321 drivers/dma/bestcomm/bestcomm.c bcom_eng->tdt[task].fdt = fdt_pa;
task 345 drivers/dma/bestcomm/bestcomm.c int task;
task 348 drivers/dma/bestcomm/bestcomm.c for (task=0; task<BCOM_MAX_TASKS; task++)
task 350 drivers/dma/bestcomm/bestcomm.c out_be16(&bcom_eng->regs->tcr[task], 0);
task 351 drivers/dma/bestcomm/bestcomm.c out_8(&bcom_eng->regs->ipr[task], 0);
task 952 drivers/dma/dma-axi-dmac.c tasklet_kill(&dmac->chan.vchan.task);
task 993 drivers/dma/dma-jz4780.c tasklet_kill(&jzdma->chan[i].vchan.task);
task 215 drivers/dma/dmatest.c struct task_struct *task;
task 920 drivers/dma/dmatest.c ret = kthread_stop(thread->task);
task 922 drivers/dma/dmatest.c thread->task->comm, ret);
task 924 drivers/dma/dmatest.c put_task_struct(thread->task);
task 967 drivers/dma/dmatest.c thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
task 969 drivers/dma/dmatest.c if (IS_ERR(thread->task)) {
task 977 drivers/dma/dmatest.c get_task_struct(thread->task);
task 1106 drivers/dma/dmatest.c wake_up_process(thread->task);
task 978 drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c tasklet_kill(&chan->vc.task);
task 915 drivers/dma/dw-edma/dw-edma-core.c tasklet_kill(&chan->vc.task);
task 921 drivers/dma/dw-edma/dw-edma-core.c tasklet_kill(&chan->vc.task);
task 656 drivers/dma/fsl-edma-common.c tasklet_kill(&chan->vchan.task);
task 1214 drivers/dma/fsl-qdma.c tasklet_kill(&chan->vchan.task);
task 492 drivers/dma/hsu/hsu.c tasklet_kill(&hsuc->vchan.task);
task 619 drivers/dma/idma64.c tasklet_kill(&idma64c->vchan.task);
task 1037 drivers/dma/img-mdc-dma.c tasklet_kill(&mchan->vc.task);
task 2197 drivers/dma/imx-sdma.c tasklet_kill(&sdmac->vc.task);
task 102 drivers/dma/k3dma.c struct tasklet_struct task;
task 257 drivers/dma/k3dma.c tasklet_schedule(&d->task);
task 438 drivers/dma/k3dma.c tasklet_schedule(&d->task);
task 970 drivers/dma/k3dma.c tasklet_init(&d->task, k3_dma_tasklet, (unsigned long)d);
task 995 drivers/dma/k3dma.c tasklet_kill(&c->vc.task);
task 997 drivers/dma/k3dma.c tasklet_kill(&d->task);
task 914 drivers/dma/mediatek/mtk-cqdma.c tasklet_kill(&vc->vc.task);
task 1026 drivers/dma/mediatek/mtk-hsdma.c tasklet_kill(&vc->vc.task);
task 312 drivers/dma/mediatek/mtk-uart-apdma.c tasklet_kill(&c->vc.task);
task 463 drivers/dma/mediatek/mtk-uart-apdma.c tasklet_kill(&c->vc.task);
task 1017 drivers/dma/owl-dma.c tasklet_kill(&vchan->vc.task);
task 414 drivers/dma/pl330.c struct tasklet_struct task;
task 1565 drivers/dma/pl330.c tasklet_schedule(&pch->task);
task 2052 drivers/dma/pl330.c tasklet_schedule(&pch->task);
task 2167 drivers/dma/pl330.c tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
task 2347 drivers/dma/pl330.c tasklet_kill(&pch->task);
task 1229 drivers/dma/pxa_dma.c tasklet_kill(&c->vc.task);
task 399 drivers/dma/qcom/bam_dma.c struct tasklet_struct task;
task 875 drivers/dma/qcom/bam_dma.c tasklet_schedule(&bdev->task);
task 1296 drivers/dma/qcom/bam_dma.c tasklet_init(&bdev->task, dma_tasklet, (unsigned long)bdev);
task 1377 drivers/dma/qcom/bam_dma.c tasklet_kill(&bdev->channels[i].vc.task);
task 1379 drivers/dma/qcom/bam_dma.c tasklet_kill(&bdev->task);
task 1403 drivers/dma/qcom/bam_dma.c tasklet_kill(&bdev->channels[i].vc.task);
task 1413 drivers/dma/qcom/bam_dma.c tasklet_kill(&bdev->task);
task 260 drivers/dma/qcom/hidma.c tasklet_schedule(&dmadev->task);
task 888 drivers/dma/qcom/hidma.c tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
task 936 drivers/dma/qcom/hidma.c tasklet_kill(&dmadev->task);
task 72 drivers/dma/qcom/hidma.h struct tasklet_struct task; /* task delivering notifications */
task 132 drivers/dma/qcom/hidma.h struct tasklet_struct task;
task 226 drivers/dma/qcom/hidma_ll.c tasklet_schedule(&lldev->task);
task 795 drivers/dma/qcom/hidma_ll.c tasklet_init(&lldev->task, hidma_ll_tre_complete, (unsigned long)lldev);
task 816 drivers/dma/qcom/hidma_ll.c tasklet_kill(&lldev->task);
task 1140 drivers/dma/s3c24xx-dma.c tasklet_kill(&chan->vc.task);
task 121 drivers/dma/sa11x0-dma.c struct tasklet_struct task;
task 235 drivers/dma/sa11x0-dma.c tasklet_schedule(&p->dev->task);
task 512 drivers/dma/sa11x0-dma.c tasklet_schedule(&d->task);
task 787 drivers/dma/sa11x0-dma.c tasklet_schedule(&d->task);
task 896 drivers/dma/sa11x0-dma.c tasklet_kill(&c->vc.task);
task 931 drivers/dma/sa11x0-dma.c tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d);
task 979 drivers/dma/sa11x0-dma.c tasklet_kill(&d->task);
task 997 drivers/dma/sa11x0-dma.c tasklet_kill(&d->task);
task 1217 drivers/dma/sprd-dma.c tasklet_kill(&c->vc.task);
task 734 drivers/dma/st_fdma.c tasklet_kill(&fchan->vchan.task);
task 193 drivers/dma/sun6i-dma.c struct tasklet_struct task;
task 571 drivers/dma/sun6i-dma.c tasklet_schedule(&sdev->task);
task 978 drivers/dma/sun6i-dma.c tasklet_schedule(&sdev->task);
task 1039 drivers/dma/sun6i-dma.c tasklet_kill(&sdev->task);
task 1050 drivers/dma/sun6i-dma.c tasklet_kill(&vchan->vc.task);
task 1346 drivers/dma/sun6i-dma.c tasklet_init(&sdc->task, sun6i_dma_tasklet, (unsigned long)sdc);
task 676 drivers/dma/tegra210-adma.c tasklet_kill(&tdc->vc.task);
task 2488 drivers/dma/ti/edma.c tasklet_kill(&echan->vchan.task);
task 1451 drivers/dma/ti/omap-dma.c tasklet_kill(&c->vc.task);
task 138 drivers/dma/virt-dma.c tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);
task 24 drivers/dma/virt-dma.h struct tasklet_struct task;
task 105 drivers/dma/virt-dma.h tasklet_schedule(&vc->task);
task 131 drivers/dma/virt-dma.h tasklet_schedule(&vc->task);
task 212 drivers/dma/virt-dma.h tasklet_kill(&vc->task);
task 280 drivers/dma/zx_dma.c u32 i, irq_chan = 0, task = 0;
task 296 drivers/dma/zx_dma.c task = 1;
task 312 drivers/dma/zx_dma.c if (task)
task 141 drivers/firmware/stratix10-svc.c struct task_struct *task;
task 810 drivers/firmware/stratix10-svc.c if (!chan->ctrl->task) {
task 811 drivers/firmware/stratix10-svc.c chan->ctrl->task =
task 816 drivers/firmware/stratix10-svc.c if (IS_ERR(chan->ctrl->task)) {
task 822 drivers/firmware/stratix10-svc.c kthread_bind(chan->ctrl->task, cpu);
task 823 drivers/firmware/stratix10-svc.c wake_up_process(chan->ctrl->task);
task 878 drivers/firmware/stratix10-svc.c if (chan->ctrl->task && chan->ctrl->num_active_client <= 1) {
task 880 drivers/firmware/stratix10-svc.c kthread_stop(chan->ctrl->task);
task 881 drivers/firmware/stratix10-svc.c chan->ctrl->task = NULL;
task 1002 drivers/firmware/stratix10-svc.c controller->task = NULL;
task 1058 drivers/firmware/stratix10-svc.c if (ctrl->task) {
task 1059 drivers/firmware/stratix10-svc.c kthread_stop(ctrl->task);
task 1060 drivers/firmware/stratix10-svc.c ctrl->task = NULL;
task 859 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c struct task_struct *task;
task 868 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c task = pid_task(file->pid, PIDTYPE_PID);
task 870 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c task ? task->comm : "<unknown>");
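The DMA drivers above (eni, k3dma, pl330, bam_dma, sa11x0, sun6i, virt-dma, ...) all use the same tasklet lifecycle: tasklet_init() at probe, tasklet_schedule() from the interrupt handler, tasklet_kill() at teardown. A minimal sketch under that era's API (the unsigned-long-argument form of tasklet_init() matching the entries above); struct my_dev and the my_dev_* names are hypothetical:

#include <linux/interrupt.h>

struct my_dev {
	struct tasklet_struct task;
	/* ... device state ... */
};

/* Completion processing deferred out of hard-IRQ context. */
static void my_dev_tasklet(unsigned long arg)
{
	struct my_dev *d = (struct my_dev *)arg;
	/* ... reap finished descriptors, invoke callbacks ... */
	(void)d;
}

static void my_dev_setup(struct my_dev *d)
{
	tasklet_init(&d->task, my_dev_tasklet, (unsigned long)d);
}

static irqreturn_t my_dev_irq(int irq, void *dev_id)
{
	struct my_dev *d = dev_id;

	tasklet_schedule(&d->task);	/* run my_dev_tasklet() soon, in softirq */
	return IRQ_HANDLED;
}

static void my_dev_teardown(struct my_dev *d)
{
	tasklet_kill(&d->task);		/* wait for any pending run to finish */
}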
task 336 drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c struct SMU_Task *task = &toc->tasks[smu8_smu->toc_entry_used_count++];
task 338 drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c task->type = type;
task 339 drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c task->arg = smu8_translate_firmware_enum_to_arg(hwmgr, fw_enum);
task 340 drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c task->next = is_last ? END_OF_TASK_LIST : smu8_smu->toc_entry_used_count;
task 351 drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c task->addr.low = lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr);
task 352 drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c task->addr.high = upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr);
task 353 drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c task->size_bytes = smu8_smu->scratch_buffer[i].data_size;
task 373 drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c struct SMU_Task *task = &toc->tasks[smu8_smu->toc_entry_used_count++];
task 375 drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c task->type = TASK_TYPE_UCODE_LOAD;
task 376 drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c task->arg = smu8_translate_firmware_enum_to_arg(hwmgr, fw_enum);
task 377 drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c task->next = is_last ? END_OF_TASK_LIST : smu8_smu->toc_entry_used_count;
task 388 drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c task->addr.low = lower_32_bits(smu8_smu->driver_buffer[i].mc_addr);
task 389 drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c task->addr.high = upper_32_bits(smu8_smu->driver_buffer[i].mc_addr);
task 390 drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c task->size_bytes = smu8_smu->driver_buffer[i].data_size;
task 93 drivers/gpu/drm/drm_debugfs.c struct task_struct *task;
task 96 drivers/gpu/drm/drm_debugfs.c task = pid_task(priv->pid, PIDTYPE_PID);
task 97 drivers/gpu/drm/drm_debugfs.c uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID;
task 99 drivers/gpu/drm/drm_debugfs.c task ? task->comm : "<unknown>",
task 39 drivers/gpu/drm/drm_flip_work.c struct drm_flip_task *task;
task 41 drivers/gpu/drm/drm_flip_work.c task = kzalloc(sizeof(*task), flags);
task 42 drivers/gpu/drm/drm_flip_work.c if (task)
task 43 drivers/gpu/drm/drm_flip_work.c task->data = data;
task 45 drivers/gpu/drm/drm_flip_work.c return task;
task 58 drivers/gpu/drm/drm_flip_work.c struct drm_flip_task *task)
task 63 drivers/gpu/drm/drm_flip_work.c list_add_tail(&task->node, &work->queued);
task 78 drivers/gpu/drm/drm_flip_work.c struct drm_flip_task *task;
task 80 drivers/gpu/drm/drm_flip_work.c task = drm_flip_work_allocate_task(val,
task 82 drivers/gpu/drm/drm_flip_work.c if (task) {
task 83 drivers/gpu/drm/drm_flip_work.c drm_flip_work_queue_task(work, task);
task 121 drivers/gpu/drm/drm_flip_work.c struct drm_flip_task *task, *tmp;
task 132 drivers/gpu/drm/drm_flip_work.c list_for_each_entry_safe(task, tmp, &tasks, node) {
task 133 drivers/gpu/drm/drm_flip_work.c work->func(work, task->data);
task 134 drivers/gpu/drm/drm_flip_work.c kfree(task);
task 143 drivers/gpu/drm/drm_syncobj.c struct task_struct *task;
task 344 drivers/gpu/drm/drm_syncobj.c wait.task = current;
task 844 drivers/gpu/drm/drm_syncobj.c wake_up_process(wait->task);
task 865 drivers/gpu/drm/drm_syncobj.c wake_up_process(wait->task);
task 908 drivers/gpu/drm/drm_syncobj.c entries[i].task = current;
task 102 drivers/gpu/drm/exynos/exynos_drm_fimc.c struct exynos_drm_ipp_task *task;
task 968 drivers/gpu/drm/exynos/exynos_drm_fimc.c if (ctx->task) {
task 969 drivers/gpu/drm/exynos/exynos_drm_fimc.c struct exynos_drm_ipp_task *task = ctx->task;
task 971 drivers/gpu/drm/exynos/exynos_drm_fimc.c ctx->task = NULL;
task 974 drivers/gpu/drm/exynos/exynos_drm_fimc.c exynos_drm_ipp_task_done(task, 0);
task 1084 drivers/gpu/drm/exynos/exynos_drm_fimc.c struct exynos_drm_ipp_task *task)
task 1090 drivers/gpu/drm/exynos/exynos_drm_fimc.c ctx->task = task;
task 1092 drivers/gpu/drm/exynos/exynos_drm_fimc.c fimc_src_set_fmt(ctx, task->src.buf.fourcc, task->src.buf.modifier);
task 1093 drivers/gpu/drm/exynos/exynos_drm_fimc.c fimc_src_set_size(ctx, &task->src);
task 1095 drivers/gpu/drm/exynos/exynos_drm_fimc.c fimc_src_set_addr(ctx, &task->src);
task 1096 drivers/gpu/drm/exynos/exynos_drm_fimc.c fimc_dst_set_fmt(ctx, task->dst.buf.fourcc, task->dst.buf.modifier);
task 1097 drivers/gpu/drm/exynos/exynos_drm_fimc.c fimc_dst_set_transf(ctx, task->transform.rotation);
task 1098 drivers/gpu/drm/exynos/exynos_drm_fimc.c fimc_dst_set_size(ctx, &task->dst);
task 1099 drivers/gpu/drm/exynos/exynos_drm_fimc.c fimc_dst_set_addr(ctx, &task->dst);
task 1100 drivers/gpu/drm/exynos/exynos_drm_fimc.c fimc_set_prescaler(ctx, &ctx->sc, &task->src.rect, &task->dst.rect);
task 1107 drivers/gpu/drm/exynos/exynos_drm_fimc.c struct exynos_drm_ipp_task *task)
task 1114 drivers/gpu/drm/exynos/exynos_drm_fimc.c if (ctx->task) {
task 1115 drivers/gpu/drm/exynos/exynos_drm_fimc.c struct exynos_drm_ipp_task *task = ctx->task;
task 1117 drivers/gpu/drm/exynos/exynos_drm_fimc.c ctx->task = NULL;
task 1120 drivers/gpu/drm/exynos/exynos_drm_fimc.c exynos_drm_ipp_task_done(task, -EIO);
task 102 drivers/gpu/drm/exynos/exynos_drm_gsc.c struct exynos_drm_ipp_task *task;
task 1052 drivers/gpu/drm/exynos/exynos_drm_gsc.c if (ctx->task) {
task 1053 drivers/gpu/drm/exynos/exynos_drm_gsc.c struct exynos_drm_ipp_task *task = ctx->task;
task 1055 drivers/gpu/drm/exynos/exynos_drm_gsc.c ctx->task = NULL;
task 1058 drivers/gpu/drm/exynos/exynos_drm_gsc.c exynos_drm_ipp_task_done(task, err);
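The drm_flip_work.c entries above implement DRM's helper for deferring work from atomic (e.g. vblank) context. A minimal sketch of a consumer of that helper, assuming the documented drm_flip_work_init()/queue()/commit() interface; my_unref_worker and the surrounding names are hypothetical:

#include <drm/drm_flip_work.h>
#include <linux/workqueue.h>

/* Called later, from process context, once per committed value. */
static void my_unref_worker(struct drm_flip_work *work, void *val)
{
	/* ... drop the reference on the old framebuffer ... */
}

static struct drm_flip_work unref_work;

static void my_plane_init(void)
{
	drm_flip_work_init(&unref_work, "unref", my_unref_worker);
}

static void my_vblank_handler(void *old_fb)
{
	/* Safe from atomic context: queue now, process on a workqueue. */
	drm_flip_work_queue(&unref_work, old_fb);
	drm_flip_work_commit(&unref_work, system_unbound_wq);
}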
task 1115 drivers/gpu/drm/exynos/exynos_drm_gsc.c struct exynos_drm_ipp_task *task)
task 1121 drivers/gpu/drm/exynos/exynos_drm_gsc.c ctx->task = task;
task 1126 drivers/gpu/drm/exynos/exynos_drm_gsc.c ctx->task = NULL;
task 1130 drivers/gpu/drm/exynos/exynos_drm_gsc.c gsc_src_set_fmt(ctx, task->src.buf.fourcc, task->src.buf.modifier);
task 1131 drivers/gpu/drm/exynos/exynos_drm_gsc.c gsc_src_set_transf(ctx, task->transform.rotation);
task 1132 drivers/gpu/drm/exynos/exynos_drm_gsc.c gsc_src_set_size(ctx, &task->src);
task 1133 drivers/gpu/drm/exynos/exynos_drm_gsc.c gsc_src_set_addr(ctx, 0, &task->src);
task 1134 drivers/gpu/drm/exynos/exynos_drm_gsc.c gsc_dst_set_fmt(ctx, task->dst.buf.fourcc, task->dst.buf.modifier);
task 1135 drivers/gpu/drm/exynos/exynos_drm_gsc.c gsc_dst_set_size(ctx, &task->dst);
task 1136 drivers/gpu/drm/exynos/exynos_drm_gsc.c gsc_dst_set_addr(ctx, 0, &task->dst);
task 1137 drivers/gpu/drm/exynos/exynos_drm_gsc.c gsc_set_prescaler(ctx, &ctx->sc, &task->src.rect, &task->dst.rect);
task 1144 drivers/gpu/drm/exynos/exynos_drm_gsc.c struct exynos_drm_ipp_task *task)
task 1150 drivers/gpu/drm/exynos/exynos_drm_gsc.c if (ctx->task) {
task 1151 drivers/gpu/drm/exynos/exynos_drm_gsc.c struct exynos_drm_ipp_task *task = ctx->task;
task 1153 drivers/gpu/drm/exynos/exynos_drm_gsc.c ctx->task = NULL;
task 1156 drivers/gpu/drm/exynos/exynos_drm_gsc.c exynos_drm_ipp_task_done(task, -EIO);
task 85 drivers/gpu/drm/exynos/exynos_drm_ipp.c WARN_ON(ipp->task);
task 259 drivers/gpu/drm/exynos/exynos_drm_ipp.c struct exynos_drm_ipp_task *task;
task 261 drivers/gpu/drm/exynos/exynos_drm_ipp.c task = kzalloc(sizeof(*task), GFP_KERNEL);
task 262 drivers/gpu/drm/exynos/exynos_drm_ipp.c if (!task)
task 265 drivers/gpu/drm/exynos/exynos_drm_ipp.c task->dev = ipp->dev;
task 266 drivers/gpu/drm/exynos/exynos_drm_ipp.c task->ipp = ipp;
task 269 drivers/gpu/drm/exynos/exynos_drm_ipp.c task->src.rect.w = task->dst.rect.w = UINT_MAX;
task 270 drivers/gpu/drm/exynos/exynos_drm_ipp.c task->src.rect.h = task->dst.rect.h = UINT_MAX;
task 271 drivers/gpu/drm/exynos/exynos_drm_ipp.c task->transform.rotation = DRM_MODE_ROTATE_0;
task 273 drivers/gpu/drm/exynos/exynos_drm_ipp.c DRM_DEV_DEBUG_DRIVER(task->dev, "Allocated task %pK\n", task);
task 275 drivers/gpu/drm/exynos/exynos_drm_ipp.c return task;
task 312 drivers/gpu/drm/exynos/exynos_drm_ipp.c static int exynos_drm_ipp_task_set(struct exynos_drm_ipp_task *task,
task 332 drivers/gpu/drm/exynos/exynos_drm_ipp.c if (copy_from_user((void *)task + map[i].offset, params,
task 340 drivers/gpu/drm/exynos/exynos_drm_ipp.c DRM_DEV_DEBUG_DRIVER(task->dev,
task 342 drivers/gpu/drm/exynos/exynos_drm_ipp.c task);
task 394 drivers/gpu/drm/exynos/exynos_drm_ipp.c struct exynos_drm_ipp_task *task)
task 396 drivers/gpu/drm/exynos/exynos_drm_ipp.c DRM_DEV_DEBUG_DRIVER(task->dev, "Freeing task %pK\n", task);
task 398 drivers/gpu/drm/exynos/exynos_drm_ipp.c exynos_drm_ipp_task_release_buf(&task->src);
task 399 drivers/gpu/drm/exynos/exynos_drm_ipp.c exynos_drm_ipp_task_release_buf(&task->dst);
task 400 drivers/gpu/drm/exynos/exynos_drm_ipp.c if (task->event)
task 401 drivers/gpu/drm/exynos/exynos_drm_ipp.c drm_event_cancel_free(ipp->drm_dev, &task->event->base);
task 402 drivers/gpu/drm/exynos/exynos_drm_ipp.c kfree(task);
task 547 drivers/gpu/drm/exynos/exynos_drm_ipp.c static int exynos_drm_ipp_check_format(struct exynos_drm_ipp_task *task,
task 556 drivers/gpu/drm/exynos/exynos_drm_ipp.c fmt = __ipp_format_get(task->ipp, buf->buf.fourcc, buf->buf.modifier,
task 560 drivers/gpu/drm/exynos/exynos_drm_ipp.c DRM_DEV_DEBUG_DRIVER(task->dev,
task 562 drivers/gpu/drm/exynos/exynos_drm_ipp.c task, buf == src ? "src" : "dst");
task 601 drivers/gpu/drm/exynos/exynos_drm_ipp.c static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
task 603 drivers/gpu/drm/exynos/exynos_drm_ipp.c struct exynos_drm_ipp *ipp = task->ipp;
task 604 drivers/gpu/drm/exynos/exynos_drm_ipp.c struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
task 605 drivers/gpu/drm/exynos/exynos_drm_ipp.c unsigned int rotation = task->transform.rotation;
task 611 drivers/gpu/drm/exynos/exynos_drm_ipp.c DRM_DEV_DEBUG_DRIVER(task->dev, "Checking task %pK\n", task);
task 626 drivers/gpu/drm/exynos/exynos_drm_ipp.c DRM_DEV_DEBUG_DRIVER(task->dev,
task 628 drivers/gpu/drm/exynos/exynos_drm_ipp.c task);
task 644 drivers/gpu/drm/exynos/exynos_drm_ipp.c DRM_DEV_DEBUG_DRIVER(task->dev, "Task %pK: hw capabilities exceeded\n",
task 645 drivers/gpu/drm/exynos/exynos_drm_ipp.c task);
task 649 drivers/gpu/drm/exynos/exynos_drm_ipp.c ret = exynos_drm_ipp_check_format(task, src, src, dst, rotate, swap);
task 653 drivers/gpu/drm/exynos/exynos_drm_ipp.c ret = exynos_drm_ipp_check_format(task, dst, src, dst, false, swap);
task 658 drivers/gpu/drm/exynos/exynos_drm_ipp.c task);
task 663 drivers/gpu/drm/exynos/exynos_drm_ipp.c static int exynos_drm_ipp_task_setup_buffers(struct exynos_drm_ipp_task *task,
task 666 drivers/gpu/drm/exynos/exynos_drm_ipp.c struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
task 669 drivers/gpu/drm/exynos/exynos_drm_ipp.c DRM_DEV_DEBUG_DRIVER(task->dev, "Setting buffer for task %pK\n",
task 670 drivers/gpu/drm/exynos/exynos_drm_ipp.c task);
task 674 drivers/gpu/drm/exynos/exynos_drm_ipp.c DRM_DEV_DEBUG_DRIVER(task->dev,
task 676 drivers/gpu/drm/exynos/exynos_drm_ipp.c task);
task 681 drivers/gpu/drm/exynos/exynos_drm_ipp.c DRM_DEV_DEBUG_DRIVER(task->dev,
task 683 drivers/gpu/drm/exynos/exynos_drm_ipp.c task);
task 687 drivers/gpu/drm/exynos/exynos_drm_ipp.c DRM_DEV_DEBUG_DRIVER(task->dev, "Task %pK: buffers prepared.\n",
task 688 drivers/gpu/drm/exynos/exynos_drm_ipp.c task);
task 694 drivers/gpu/drm/exynos/exynos_drm_ipp.c static int exynos_drm_ipp_event_create(struct exynos_drm_ipp_task *task,
task 708 drivers/gpu/drm/exynos/exynos_drm_ipp.c ret = drm_event_reserve_init(task->ipp->drm_dev, file_priv, &e->base,
task 713 drivers/gpu/drm/exynos/exynos_drm_ipp.c task->event = e;
task 720 drivers/gpu/drm/exynos/exynos_drm_ipp.c static void exynos_drm_ipp_event_send(struct exynos_drm_ipp_task *task)
task 725 drivers/gpu/drm/exynos/exynos_drm_ipp.c task->event->event.tv_sec = now.tv_sec;
task 726 drivers/gpu/drm/exynos/exynos_drm_ipp.c task->event->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
task 727 drivers/gpu/drm/exynos/exynos_drm_ipp.c task->event->event.sequence = atomic_inc_return(&task->ipp->sequence);
task 729 drivers/gpu/drm/exynos/exynos_drm_ipp.c drm_send_event(task->ipp->drm_dev, &task->event->base);
task 732 drivers/gpu/drm/exynos/exynos_drm_ipp.c static int exynos_drm_ipp_task_cleanup(struct exynos_drm_ipp_task *task)
task 734 drivers/gpu/drm/exynos/exynos_drm_ipp.c int ret = task->ret;
task 736 drivers/gpu/drm/exynos/exynos_drm_ipp.c if (ret == 0 && task->event) {
task 737 drivers/gpu/drm/exynos/exynos_drm_ipp.c exynos_drm_ipp_event_send(task);
task 739 drivers/gpu/drm/exynos/exynos_drm_ipp.c task->event = NULL;
task 742 drivers/gpu/drm/exynos/exynos_drm_ipp.c exynos_drm_ipp_task_free(task->ipp, task);
task 748 drivers/gpu/drm/exynos/exynos_drm_ipp.c struct exynos_drm_ipp_task *task = container_of(work,
task 751 drivers/gpu/drm/exynos/exynos_drm_ipp.c exynos_drm_ipp_task_cleanup(task);
task 761 drivers/gpu/drm/exynos/exynos_drm_ipp.c void exynos_drm_ipp_task_done(struct exynos_drm_ipp_task *task, int ret)
task 763 drivers/gpu/drm/exynos/exynos_drm_ipp.c struct exynos_drm_ipp *ipp = task->ipp;
task 766 drivers/gpu/drm/exynos/exynos_drm_ipp.c DRM_DEV_DEBUG_DRIVER(task->dev, "ipp: %d, task %pK done: %d\n",
task 767 drivers/gpu/drm/exynos/exynos_drm_ipp.c ipp->id, task, ret);
task 770 drivers/gpu/drm/exynos/exynos_drm_ipp.c if (ipp->task == task)
task 771 drivers/gpu/drm/exynos/exynos_drm_ipp.c ipp->task = NULL;
task 772 drivers/gpu/drm/exynos/exynos_drm_ipp.c task->flags |= DRM_EXYNOS_IPP_TASK_DONE;
task 773 drivers/gpu/drm/exynos/exynos_drm_ipp.c task->ret = ret;
task 779 drivers/gpu/drm/exynos/exynos_drm_ipp.c if (task->flags & DRM_EXYNOS_IPP_TASK_ASYNC) {
task 780 drivers/gpu/drm/exynos/exynos_drm_ipp.c INIT_WORK(&task->cleanup_work, exynos_drm_ipp_cleanup_work);
task 781 drivers/gpu/drm/exynos/exynos_drm_ipp.c schedule_work(&task->cleanup_work);
task 787 drivers/gpu/drm/exynos/exynos_drm_ipp.c struct exynos_drm_ipp_task *task;
task 796 drivers/gpu/drm/exynos/exynos_drm_ipp.c if (ipp->task || list_empty(&ipp->todo_list)) {
task 801 drivers/gpu/drm/exynos/exynos_drm_ipp.c task = list_first_entry(&ipp->todo_list, struct exynos_drm_ipp_task,
task 803 drivers/gpu/drm/exynos/exynos_drm_ipp.c list_del_init(&task->head);
task 804 drivers/gpu/drm/exynos/exynos_drm_ipp.c ipp->task = task;
task 810 drivers/gpu/drm/exynos/exynos_drm_ipp.c task);
task 812 drivers/gpu/drm/exynos/exynos_drm_ipp.c ret = ipp->funcs->commit(ipp, task);
task 814 drivers/gpu/drm/exynos/exynos_drm_ipp.c exynos_drm_ipp_task_done(task, ret);
task 818 drivers/gpu/drm/exynos/exynos_drm_ipp.c struct exynos_drm_ipp_task *task)
task 823 drivers/gpu/drm/exynos/exynos_drm_ipp.c list_add(&task->head, &ipp->todo_list);
task 830 drivers/gpu/drm/exynos/exynos_drm_ipp.c struct exynos_drm_ipp_task *task)
task 835 drivers/gpu/drm/exynos/exynos_drm_ipp.c if (task->flags & DRM_EXYNOS_IPP_TASK_DONE) {
task 837 drivers/gpu/drm/exynos/exynos_drm_ipp.c exynos_drm_ipp_task_cleanup(task);
task 838 drivers/gpu/drm/exynos/exynos_drm_ipp.c } else if (ipp->task != task) {
task 840 drivers/gpu/drm/exynos/exynos_drm_ipp.c list_del_init(&task->head);
task 841 drivers/gpu/drm/exynos/exynos_drm_ipp.c exynos_drm_ipp_task_cleanup(task);
task 847 drivers/gpu/drm/exynos/exynos_drm_ipp.c task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC;
task 850 drivers/gpu/drm/exynos/exynos_drm_ipp.c ipp->funcs->abort(ipp, task);
task 875 drivers/gpu/drm/exynos/exynos_drm_ipp.c struct exynos_drm_ipp_task *task;
task 890 drivers/gpu/drm/exynos/exynos_drm_ipp.c task = exynos_drm_ipp_task_alloc(ipp);
task 891 drivers/gpu/drm/exynos/exynos_drm_ipp.c if (!task)
task 894 drivers/gpu/drm/exynos/exynos_drm_ipp.c ret = exynos_drm_ipp_task_set(task, arg);
task 898 drivers/gpu/drm/exynos/exynos_drm_ipp.c ret = exynos_drm_ipp_task_check(task);
task 902 drivers/gpu/drm/exynos/exynos_drm_ipp.c ret = exynos_drm_ipp_task_setup_buffers(task, file_priv);
task 907 drivers/gpu/drm/exynos/exynos_drm_ipp.c ret = exynos_drm_ipp_event_create(task, file_priv,
task 920 drivers/gpu/drm/exynos/exynos_drm_ipp.c ipp->id, task);
task 922 drivers/gpu/drm/exynos/exynos_drm_ipp.c task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC;
task 923 drivers/gpu/drm/exynos/exynos_drm_ipp.c exynos_drm_ipp_schedule_task(task->ipp, task);
task 927 drivers/gpu/drm/exynos/exynos_drm_ipp.c ipp->id, task);
task 928 drivers/gpu/drm/exynos/exynos_drm_ipp.c exynos_drm_ipp_schedule_task(ipp, task);
task 930 drivers/gpu/drm/exynos/exynos_drm_ipp.c task->flags & DRM_EXYNOS_IPP_TASK_DONE);
task 932 drivers/gpu/drm/exynos/exynos_drm_ipp.c exynos_drm_ipp_task_abort(ipp, task);
task 934 drivers/gpu/drm/exynos/exynos_drm_ipp.c ret = exynos_drm_ipp_task_cleanup(task);
task 938 drivers/gpu/drm/exynos/exynos_drm_ipp.c exynos_drm_ipp_task_free(ipp, task);
task 30 drivers/gpu/drm/exynos/exynos_drm_ipp.h struct exynos_drm_ipp_task *task);
task 44 drivers/gpu/drm/exynos/exynos_drm_ipp.h struct exynos_drm_ipp_task *task);
task 64 drivers/gpu/drm/exynos/exynos_drm_ipp.h struct exynos_drm_ipp_task *task;
task 134 drivers/gpu/drm/exynos/exynos_drm_ipp.h void exynos_drm_ipp_task_done(struct exynos_drm_ipp_task *task, int ret);
task 65 drivers/gpu/drm/exynos/exynos_drm_rotator.c struct exynos_drm_ipp_task *task;
task 106 drivers/gpu/drm/exynos/exynos_drm_rotator.c if (rot->task) {
task 107 drivers/gpu/drm/exynos/exynos_drm_rotator.c struct exynos_drm_ipp_task *task = rot->task;
task 109 drivers/gpu/drm/exynos/exynos_drm_rotator.c rot->task = NULL;
task 112 drivers/gpu/drm/exynos/exynos_drm_rotator.c exynos_drm_ipp_task_done(task,
task 218 drivers/gpu/drm/exynos/exynos_drm_rotator.c struct exynos_drm_ipp_task *task)
task 224 drivers/gpu/drm/exynos/exynos_drm_rotator.c rot->task = task;
task 226 drivers/gpu/drm/exynos/exynos_drm_rotator.c rotator_src_set_fmt(rot, task->src.buf.fourcc);
task 227 drivers/gpu/drm/exynos/exynos_drm_rotator.c rotator_src_set_buf(rot, &task->src);
task 228 drivers/gpu/drm/exynos/exynos_drm_rotator.c rotator_dst_set_transf(rot, task->transform.rotation);
task 229 drivers/gpu/drm/exynos/exynos_drm_rotator.c rotator_dst_set_buf(rot, &task->dst);
task 46 drivers/gpu/drm/exynos/exynos_drm_scaler.c struct exynos_drm_ipp_task *task;
task 357 drivers/gpu/drm/exynos/exynos_drm_scaler.c struct exynos_drm_ipp_task *task)
task 362 drivers/gpu/drm/exynos/exynos_drm_scaler.c struct drm_exynos_ipp_task_rect *src_pos = &task->src.rect;
task 363 drivers/gpu/drm/exynos/exynos_drm_scaler.c struct drm_exynos_ipp_task_rect *dst_pos = &task->dst.rect;
task 366 drivers/gpu/drm/exynos/exynos_drm_scaler.c src_fmt = scaler_get_format(task->src.buf.fourcc);
task 367 drivers/gpu/drm/exynos/exynos_drm_scaler.c dst_fmt = scaler_get_format(task->dst.buf.fourcc);
task 375 drivers/gpu/drm/exynos/exynos_drm_scaler.c scaler->task = task;
task 378 drivers/gpu/drm/exynos/exynos_drm_scaler.c scaler, src_fmt->internal_fmt, task->src.buf.modifier != 0);
task 379 drivers/gpu/drm/exynos/exynos_drm_scaler.c scaler_set_src_base(scaler, &task->src);
task 380 drivers/gpu/drm/exynos/exynos_drm_scaler.c scaler_set_src_span(scaler, &task->src);
task 385 drivers/gpu/drm/exynos/exynos_drm_scaler.c scaler_set_dst_base(scaler, &task->dst);
task 386 drivers/gpu/drm/exynos/exynos_drm_scaler.c scaler_set_dst_span(scaler, &task->dst);
task 390 drivers/gpu/drm/exynos/exynos_drm_scaler.c scaler_set_hv_ratio(scaler, task->transform.rotation, src_pos, dst_pos);
task 391 drivers/gpu/drm/exynos/exynos_drm_scaler.c scaler_set_rotation(scaler, task->transform.rotation);
task 393 drivers/gpu/drm/exynos/exynos_drm_scaler.c scaler_set_csc(scaler, task->src.format);
task 434 drivers/gpu/drm/exynos/exynos_drm_scaler.c if (scaler->task) {
task 435 drivers/gpu/drm/exynos/exynos_drm_scaler.c struct exynos_drm_ipp_task *task = scaler->task;
task 437 drivers/gpu/drm/exynos/exynos_drm_scaler.c scaler->task =
NULL; task 440 drivers/gpu/drm/exynos/exynos_drm_scaler.c exynos_drm_ipp_task_done(task, scaler_task_done(val)); task 850 drivers/gpu/drm/i915/gem/i915_gem_context.c void (*task)(void *data); task 858 drivers/gpu/drm/i915/gem/i915_gem_context.c if (cb->task) task 859 drivers/gpu/drm/i915/gem/i915_gem_context.c cb->task(cb->data); task 870 drivers/gpu/drm/i915/gem/i915_gem_context.c void (*task)(void *data), task 880 drivers/gpu/drm/i915/gem/i915_gem_context.c GEM_BUG_ON(!task); task 926 drivers/gpu/drm/i915/gem/i915_gem_context.c cb->task = err ? NULL : task; /* caller needs to unwind instead */ task 425 drivers/gpu/drm/i915/gem/i915_gem_userptr.c struct task_struct *task; task 497 drivers/gpu/drm/i915/gem/i915_gem_userptr.c (work->task, mm, task 535 drivers/gpu/drm/i915/gem/i915_gem_userptr.c put_task_struct(work->task); task 571 drivers/gpu/drm/i915/gem/i915_gem_userptr.c work->task = current; task 572 drivers/gpu/drm/i915/gem/i915_gem_userptr.c get_task_struct(work->task); task 678 drivers/gpu/drm/i915/gt/selftest_hangcheck.c struct task_struct *task; task 848 drivers/gpu/drm/i915/gt/selftest_hangcheck.c threads[tmp].task = tsk; task 946 drivers/gpu/drm/i915/gt/selftest_hangcheck.c if (!threads[tmp].task) task 949 drivers/gpu/drm/i915/gt/selftest_hangcheck.c ret = kthread_stop(threads[tmp].task); task 956 drivers/gpu/drm/i915/gt/selftest_hangcheck.c put_task_struct(threads[tmp].task); task 334 drivers/gpu/drm/i915/i915_debugfs.c struct task_struct *task; task 342 drivers/gpu/drm/i915/i915_debugfs.c task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID); task 344 drivers/gpu/drm/i915/i915_debugfs.c task ? task->comm : "<unknown>"); task 1585 drivers/gpu/drm/i915/i915_debugfs.c struct task_struct *task; task 1587 drivers/gpu/drm/i915/i915_debugfs.c task = get_pid_task(ctx->pid, PIDTYPE_PID); task 1588 drivers/gpu/drm/i915/i915_debugfs.c if (task) { task 1590 drivers/gpu/drm/i915/i915_debugfs.c task->comm, task->pid); task 1591 drivers/gpu/drm/i915/i915_debugfs.c put_task_struct(task); task 1255 drivers/gpu/drm/i915/i915_gpu_error.c struct task_struct *task; task 1258 drivers/gpu/drm/i915/i915_gpu_error.c task = pid_task(ctx->pid, PIDTYPE_PID); task 1259 drivers/gpu/drm/i915/i915_gpu_error.c if (task) { task 1260 drivers/gpu/drm/i915/i915_gpu_error.c strcpy(e->comm, task->comm); task 1261 drivers/gpu/drm/i915/i915_gpu_error.c e->pid = task->pid; task 95 drivers/gpu/drm/lima/lima_drv.c struct lima_sched_task *task; task 121 drivers/gpu/drm/lima/lima_drv.c task = kmem_cache_zalloc(pipe->task_slab, GFP_KERNEL); task 122 drivers/gpu/drm/lima/lima_drv.c if (!task) { task 127 drivers/gpu/drm/lima/lima_drv.c task->frame = task + 1; task 128 drivers/gpu/drm/lima/lima_drv.c if (copy_from_user(task->frame, u64_to_user_ptr(args->frame), args->frame_size)) { task 133 drivers/gpu/drm/lima/lima_drv.c err = pipe->task_validate(pipe, task); task 147 drivers/gpu/drm/lima/lima_drv.c submit.task = task; task 159 drivers/gpu/drm/lima/lima_drv.c kmem_cache_free(pipe->task_slab, task); task 36 drivers/gpu/drm/lima/lima_drv.h struct lima_sched_task *task; task 133 drivers/gpu/drm/lima/lima_gem.c static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo, task 148 drivers/gpu/drm/lima/lima_gem.c return drm_gem_fence_array_add_implicit(&task->deps, &bo->gem, write); task 221 drivers/gpu/drm/lima/lima_gem.c err = drm_gem_fence_array_add(&submit->task->deps, fence); task 276 drivers/gpu/drm/lima/lima_gem.c submit->task, submit->ctx->context + submit->pipe, task 287 drivers/gpu/drm/lima/lima_gem.c 
submit->task, bos[i], task 295 drivers/gpu/drm/lima/lima_gem.c submit->ctx->context + submit->pipe, submit->task); task 319 drivers/gpu/drm/lima/lima_gem.c lima_sched_task_fini(submit->task); task 92 drivers/gpu/drm/lima/lima_gp.c struct lima_sched_task *task) task 94 drivers/gpu/drm/lima/lima_gp.c struct drm_lima_gp_frame *frame = task->frame; task 116 drivers/gpu/drm/lima/lima_gp.c struct lima_sched_task *task) task 119 drivers/gpu/drm/lima/lima_gp.c struct drm_lima_gp_frame *frame = task->frame; task 54 drivers/gpu/drm/lima/lima_pp.c if (atomic_dec_and_test(&pipe->task)) task 97 drivers/gpu/drm/lima/lima_pp.c if (atomic_dec_and_test(&pipe->task)) task 279 drivers/gpu/drm/lima/lima_pp.c struct lima_sched_task *task) task 284 drivers/gpu/drm/lima/lima_pp.c struct drm_lima_m450_pp_frame *f = task->frame; task 291 drivers/gpu/drm/lima/lima_pp.c struct drm_lima_m400_pp_frame *f = task->frame; task 303 drivers/gpu/drm/lima/lima_pp.c struct lima_sched_task *task) task 306 drivers/gpu/drm/lima/lima_pp.c struct drm_lima_m450_pp_frame *frame = task->frame; task 312 drivers/gpu/drm/lima/lima_pp.c atomic_set(&pipe->task, frame->num_pp); task 338 drivers/gpu/drm/lima/lima_pp.c struct drm_lima_m400_pp_frame *frame = task->frame; task 341 drivers/gpu/drm/lima/lima_pp.c atomic_set(&pipe->task, frame->num_pp); task 388 drivers/gpu/drm/lima/lima_pp.c if (atomic_dec_and_test(&pipe->task)) task 108 drivers/gpu/drm/lima/lima_sched.c int lima_sched_task_init(struct lima_sched_task *task, task 115 drivers/gpu/drm/lima/lima_sched.c task->bos = kmemdup(bos, sizeof(*bos) * num_bos, GFP_KERNEL); task 116 drivers/gpu/drm/lima/lima_sched.c if (!task->bos) task 122 drivers/gpu/drm/lima/lima_sched.c err = drm_sched_job_init(&task->base, &context->base, vm); task 124 drivers/gpu/drm/lima/lima_sched.c kfree(task->bos); task 128 drivers/gpu/drm/lima/lima_sched.c task->num_bos = num_bos; task 129 drivers/gpu/drm/lima/lima_sched.c task->vm = lima_vm_get(vm); task 131 drivers/gpu/drm/lima/lima_sched.c xa_init_flags(&task->deps, XA_FLAGS_ALLOC); task 136 drivers/gpu/drm/lima/lima_sched.c void lima_sched_task_fini(struct lima_sched_task *task) task 142 drivers/gpu/drm/lima/lima_sched.c drm_sched_job_cleanup(&task->base); task 144 drivers/gpu/drm/lima/lima_sched.c xa_for_each(&task->deps, index, fence) { task 147 drivers/gpu/drm/lima/lima_sched.c xa_destroy(&task->deps); task 149 drivers/gpu/drm/lima/lima_sched.c if (task->bos) { task 150 drivers/gpu/drm/lima/lima_sched.c for (i = 0; i < task->num_bos; i++) task 151 drivers/gpu/drm/lima/lima_sched.c drm_gem_object_put_unlocked(&task->bos[i]->gem); task 152 drivers/gpu/drm/lima/lima_sched.c kfree(task->bos); task 155 drivers/gpu/drm/lima/lima_sched.c lima_vm_put(task->vm); task 174 drivers/gpu/drm/lima/lima_sched.c struct lima_sched_task *task) task 176 drivers/gpu/drm/lima/lima_sched.c struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished); task 178 drivers/gpu/drm/lima/lima_sched.c drm_sched_entity_push_job(&task->base, &context->base); task 185 drivers/gpu/drm/lima/lima_sched.c struct lima_sched_task *task = to_lima_task(job); task 187 drivers/gpu/drm/lima/lima_sched.c if (!xa_empty(&task->deps)) task 188 drivers/gpu/drm/lima/lima_sched.c return xa_erase(&task->deps, task->last_dep++); task 195 drivers/gpu/drm/lima/lima_sched.c struct lima_sched_task *task = to_lima_task(job); task 209 drivers/gpu/drm/lima/lima_sched.c task->fence = &fence->base; task 214 drivers/gpu/drm/lima/lima_sched.c ret = dma_fence_get(task->fence); task 216 
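The lima_sched entries around here keep a job's dma_fence dependencies in an XArray: fences are stored at submit time and drained one per call by the scheduler's dependency callback, mirroring the xa_init_flags()/xa_erase()/xa_for_each() calls listed from lima_sched.c. A hedged sketch under illustrative names (my_job and its helpers are not lima's real API):

    #include <linux/xarray.h>
    #include <linux/dma-fence.h>

    struct my_job {
        struct xarray deps;     /* dma_fence pointers at allocated indices */
        unsigned long last_dep; /* next dependency to hand out */
    };

    static void my_job_init(struct my_job *job)
    {
        xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
        job->last_dep = 0;
    }

    /* submit side: stash a fence the job must wait for */
    static int my_job_add_dep(struct my_job *job, struct dma_fence *fence)
    {
        u32 id;

        return xa_alloc(&job->deps, &id, fence, xa_limit_32b, GFP_KERNEL);
    }

    /* scheduler's dependency callback: hand back one fence per call */
    static struct dma_fence *my_job_next_dep(struct my_job *job)
    {
        if (xa_empty(&job->deps))
            return NULL;    /* all dependencies consumed */
        return xa_erase(&job->deps, job->last_dep++);
    }

    static void my_job_fini(struct my_job *job)
    {
        struct dma_fence *fence;
        unsigned long index;

        xa_for_each(&job->deps, index, fence)
            dma_fence_put(fence);    /* drop unconsumed references */
        xa_destroy(&job->deps);
    }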
drivers/gpu/drm/lima/lima_sched.c pipe->current_task = task; task 236 drivers/gpu/drm/lima/lima_sched.c if (task->vm != pipe->current_vm) { task 237 drivers/gpu/drm/lima/lima_sched.c vm = lima_vm_get(task->vm); task 239 drivers/gpu/drm/lima/lima_sched.c pipe->current_vm = task->vm; task 253 drivers/gpu/drm/lima/lima_sched.c pipe->task_run(pipe, task); task 255 drivers/gpu/drm/lima/lima_sched.c return task->fence; task 259 drivers/gpu/drm/lima/lima_sched.c struct lima_sched_task *task) task 261 drivers/gpu/drm/lima/lima_sched.c drm_sched_stop(&pipe->base, &task->base); task 263 drivers/gpu/drm/lima/lima_sched.c if (task) task 264 drivers/gpu/drm/lima/lima_sched.c drm_sched_increase_karma(&task->base); task 290 drivers/gpu/drm/lima/lima_sched.c struct lima_sched_task *task = to_lima_task(job); task 294 drivers/gpu/drm/lima/lima_sched.c lima_sched_handle_error_task(pipe, task); task 299 drivers/gpu/drm/lima/lima_sched.c struct lima_sched_task *task = to_lima_task(job); task 301 drivers/gpu/drm/lima/lima_sched.c struct lima_vm *vm = task->vm; task 302 drivers/gpu/drm/lima/lima_sched.c struct lima_bo **bos = task->bos; task 305 drivers/gpu/drm/lima/lima_sched.c dma_fence_put(task->fence); task 307 drivers/gpu/drm/lima/lima_sched.c for (i = 0; i < task->num_bos; i++) task 310 drivers/gpu/drm/lima/lima_sched.c lima_sched_task_fini(task); task 311 drivers/gpu/drm/lima/lima_sched.c kmem_cache_free(pipe->task_slab, task); task 325 drivers/gpu/drm/lima/lima_sched.c struct lima_sched_task *task = pipe->current_task; task 327 drivers/gpu/drm/lima/lima_sched.c lima_sched_handle_error_task(pipe, task); task 354 drivers/gpu/drm/lima/lima_sched.c struct lima_sched_task *task = pipe->current_task; task 357 drivers/gpu/drm/lima/lima_sched.c dma_fence_signal(task->fence); task 61 drivers/gpu/drm/lima/lima_sched.h atomic_t task; task 66 drivers/gpu/drm/lima/lima_sched.h int (*task_validate)(struct lima_sched_pipe *pipe, struct lima_sched_task *task); task 67 drivers/gpu/drm/lima/lima_sched.h void (*task_run)(struct lima_sched_pipe *pipe, struct lima_sched_task *task); task 75 drivers/gpu/drm/lima/lima_sched.h int lima_sched_task_init(struct lima_sched_task *task, task 79 drivers/gpu/drm/lima/lima_sched.h void lima_sched_task_fini(struct lima_sched_task *task); task 87 drivers/gpu/drm/lima/lima_sched.h struct lima_sched_task *task); task 435 drivers/gpu/drm/msm/msm_gpu.c struct task_struct *task; task 441 drivers/gpu/drm/msm/msm_gpu.c task = get_pid_task(submit->pid, PIDTYPE_PID); task 442 drivers/gpu/drm/msm/msm_gpu.c if (task) { task 443 drivers/gpu/drm/msm/msm_gpu.c comm = kstrdup(task->comm, GFP_KERNEL); task 444 drivers/gpu/drm/msm/msm_gpu.c cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL); task 445 drivers/gpu/drm/msm/msm_gpu.c put_task_struct(task); task 347 drivers/gpu/drm/msm/msm_rd.c struct task_struct *task; task 370 drivers/gpu/drm/msm/msm_rd.c task = pid_task(submit->pid, PIDTYPE_PID); task 371 drivers/gpu/drm/msm/msm_rd.c if (task) { task 373 drivers/gpu/drm/msm/msm_rd.c TASK_COMM_LEN, task->comm, task 1067 drivers/gpu/drm/radeon/radeon_fence.c struct task_struct *task; task 1076 drivers/gpu/drm/radeon/radeon_fence.c wake_up_process(wait->task); task 1086 drivers/gpu/drm/radeon/radeon_fence.c cb.task = current; task 156 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c struct task_struct *task; task 165 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c wake_up_process(wait->task); task 197 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c cb.task = current; task 139 drivers/gpu/ipu-v3/ipu-ic.c enum ipu_ic_task task; task 161 
drivers/gpu/ipu-v3/ipu-ic.c struct ipu_ic task[IC_NUM_TASKS]; task 335 drivers/gpu/ipu-v3/ipu-ic.c if (ic->task == IC_TASK_ENCODER) task 659 drivers/gpu/ipu-v3/ipu-ic.c struct ipu_ic *ipu_ic_get(struct ipu_soc *ipu, enum ipu_ic_task task) task 665 drivers/gpu/ipu-v3/ipu-ic.c if (task >= IC_NUM_TASKS) task 668 drivers/gpu/ipu-v3/ipu-ic.c ic = &priv->task[task]; task 722 drivers/gpu/ipu-v3/ipu-ic.c priv->task[i].task = i; task 723 drivers/gpu/ipu-v3/ipu-ic.c priv->task[i].priv = priv; task 724 drivers/gpu/ipu-v3/ipu-ic.c priv->task[i].reg = &ic_task_reg[i]; task 725 drivers/gpu/ipu-v3/ipu-ic.c priv->task[i].bit = &ic_task_bit[i]; task 139 drivers/iio/adc/ina2xx-adc.c struct task_struct *task; task 832 drivers/iio/adc/ina2xx-adc.c struct task_struct *task; task 842 drivers/iio/adc/ina2xx-adc.c task = kthread_create(ina2xx_capture_thread, (void *)indio_dev, task 845 drivers/iio/adc/ina2xx-adc.c if (IS_ERR(task)) task 846 drivers/iio/adc/ina2xx-adc.c return PTR_ERR(task); task 848 drivers/iio/adc/ina2xx-adc.c get_task_struct(task); task 849 drivers/iio/adc/ina2xx-adc.c wake_up_process(task); task 850 drivers/iio/adc/ina2xx-adc.c chip->task = task; task 859 drivers/iio/adc/ina2xx-adc.c if (chip->task) { task 860 drivers/iio/adc/ina2xx-adc.c kthread_stop(chip->task); task 861 drivers/iio/adc/ina2xx-adc.c put_task_struct(chip->task); task 862 drivers/iio/adc/ina2xx-adc.c chip->task = NULL; task 35 drivers/iio/trigger/iio-trig-loop.c struct task_struct *task; task 60 drivers/iio/trigger/iio-trig-loop.c loop_trig->task = kthread_run(iio_loop_thread, task 62 drivers/iio/trigger/iio-trig-loop.c if (IS_ERR(loop_trig->task)) { task 65 drivers/iio/trigger/iio-trig-loop.c return PTR_ERR(loop_trig->task); task 68 drivers/iio/trigger/iio-trig-loop.c kthread_stop(loop_trig->task); task 156 drivers/infiniband/core/counters.c if (task_pid_nr(counter->res.task) != task_pid_nr(qp->res.task)) task 259 drivers/infiniband/core/counters.c rdma_restrack_attach_task(&counter->res, qp->res.task); task 488 drivers/infiniband/core/counters.c if (counter->res.task != qp->res.task) { task 412 drivers/infiniband/core/nldev.c task_pid_vnr(res->task))) task 87 drivers/infiniband/core/restrack.c get_task_comm(buf, e->task); task 187 drivers/infiniband/core/restrack.c if (res->task) task 188 drivers/infiniband/core/restrack.c put_task_struct(res->task); task 190 drivers/infiniband/core/restrack.c res->task = current; task 200 drivers/infiniband/core/restrack.c struct task_struct *task) task 202 drivers/infiniband/core/restrack.c if (res->task) task 203 drivers/infiniband/core/restrack.c put_task_struct(res->task); task 204 drivers/infiniband/core/restrack.c get_task_struct(task); task 205 drivers/infiniband/core/restrack.c res->task = task; task 249 drivers/infiniband/core/restrack.c res->task = NULL; task 264 drivers/infiniband/core/restrack.c res->task = NULL; task 266 drivers/infiniband/core/restrack.c if (!res->task) task 343 drivers/infiniband/core/restrack.c if (res->task) { task 344 drivers/infiniband/core/restrack.c put_task_struct(res->task); task 345 drivers/infiniband/core/restrack.c res->task = NULL; task 362 drivers/infiniband/core/restrack.c return task_pid_vnr(res->task); task 29 drivers/infiniband/core/restrack.h struct task_struct *task); task 145 drivers/infiniband/sw/rxe/rxe_comp.c rxe_run_task(&qp->comp.task, 1); task 159 drivers/infiniband/sw/rxe/rxe_comp.c rxe_run_task(&qp->comp.task, must_sched); task 332 drivers/infiniband/sw/rxe/rxe_comp.c rxe_run_task(&qp->req.task, 0); task 466 
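The ina2xx-adc and iio-trig-loop entries above show the usual capture-kthread lifecycle: create the thread stopped, pin it with get_task_struct() so kthread_stop() still has a valid task even if the thread exits first, wake it, and on teardown stop it and drop the reference. A minimal sketch, with my_chip and my_capture_thread as illustrative names:

    #include <linux/kthread.h>
    #include <linux/delay.h>
    #include <linux/err.h>
    #include <linux/sched/task.h>

    struct my_chip {
        struct task_struct *task;
    };

    static int my_capture_thread(void *data)
    {
        while (!kthread_should_stop())
            msleep(100);    /* stand-in for the real sampling loop */
        return 0;
    }

    static int my_buffer_enable(struct my_chip *chip)
    {
        struct task_struct *task;

        task = kthread_create(my_capture_thread, chip, "my-capture");
        if (IS_ERR(task))
            return PTR_ERR(task);

        get_task_struct(task);    /* pin for the later kthread_stop() */
        wake_up_process(task);
        chip->task = task;
        return 0;
    }

    static void my_buffer_disable(struct my_chip *chip)
    {
        if (chip->task) {
            kthread_stop(chip->task);
            put_task_struct(chip->task);
            chip->task = NULL;
        }
    }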
drivers/infiniband/sw/rxe/rxe_comp.c rxe_run_task(&qp->req.task, 0); task 482 drivers/infiniband/sw/rxe/rxe_comp.c rxe_run_task(&qp->req.task, 0); task 528 drivers/infiniband/sw/rxe/rxe_comp.c rxe_run_task(&qp->req.task, 1); task 647 drivers/infiniband/sw/rxe/rxe_comp.c rxe_run_task(&qp->req.task, 1); task 728 drivers/infiniband/sw/rxe/rxe_comp.c rxe_run_task(&qp->req.task, 0); task 280 drivers/infiniband/sw/rxe/rxe_loc.h rxe_run_task(&qp->comp.task, 1); task 419 drivers/infiniband/sw/rxe/rxe_net.c rxe_run_task(&qp->req.task, 1); task 274 drivers/infiniband/sw/rxe/rxe_qp.c rxe_init_task(rxe, &qp->req.task, qp, task 276 drivers/infiniband/sw/rxe/rxe_qp.c rxe_init_task(rxe, &qp->comp.task, qp, task 325 drivers/infiniband/sw/rxe/rxe_qp.c rxe_init_task(rxe, &qp->resp.task, qp, task 503 drivers/infiniband/sw/rxe/rxe_qp.c rxe_disable_task(&qp->resp.task); task 508 drivers/infiniband/sw/rxe/rxe_qp.c rxe_disable_task(&qp->comp.task); task 509 drivers/infiniband/sw/rxe/rxe_qp.c rxe_disable_task(&qp->req.task); task 519 drivers/infiniband/sw/rxe/rxe_qp.c __rxe_do_task(&qp->resp.task); task 522 drivers/infiniband/sw/rxe/rxe_qp.c __rxe_do_task(&qp->comp.task); task 523 drivers/infiniband/sw/rxe/rxe_qp.c __rxe_do_task(&qp->req.task); task 546 drivers/infiniband/sw/rxe/rxe_qp.c rxe_enable_task(&qp->resp.task); task 550 drivers/infiniband/sw/rxe/rxe_qp.c rxe_enable_task(&qp->comp.task); task 552 drivers/infiniband/sw/rxe/rxe_qp.c rxe_enable_task(&qp->req.task); task 563 drivers/infiniband/sw/rxe/rxe_qp.c rxe_run_task(&qp->comp.task, 1); task 565 drivers/infiniband/sw/rxe/rxe_qp.c __rxe_do_task(&qp->comp.task); task 566 drivers/infiniband/sw/rxe/rxe_qp.c rxe_run_task(&qp->req.task, 1); task 579 drivers/infiniband/sw/rxe/rxe_qp.c rxe_run_task(&qp->resp.task, 1); task 582 drivers/infiniband/sw/rxe/rxe_qp.c rxe_run_task(&qp->comp.task, 1); task 584 drivers/infiniband/sw/rxe/rxe_qp.c __rxe_do_task(&qp->comp.task); task 585 drivers/infiniband/sw/rxe/rxe_qp.c rxe_run_task(&qp->req.task, 1); task 786 drivers/infiniband/sw/rxe/rxe_qp.c rxe_cleanup_task(&qp->resp.task); task 793 drivers/infiniband/sw/rxe/rxe_qp.c rxe_cleanup_task(&qp->req.task); task 794 drivers/infiniband/sw/rxe/rxe_qp.c rxe_cleanup_task(&qp->comp.task); task 797 drivers/infiniband/sw/rxe/rxe_qp.c __rxe_do_task(&qp->req.task); task 799 drivers/infiniband/sw/rxe/rxe_qp.c __rxe_do_task(&qp->comp.task); task 800 drivers/infiniband/sw/rxe/rxe_qp.c __rxe_do_task(&qp->req.task); task 129 drivers/infiniband/sw/rxe/rxe_req.c rxe_run_task(&qp->req.task, 1); task 660 drivers/infiniband/sw/rxe/rxe_req.c rxe_run_task(&qp->comp.task, 1); task 710 drivers/infiniband/sw/rxe/rxe_req.c __rxe_do_task(&qp->comp.task); task 745 drivers/infiniband/sw/rxe/rxe_req.c rxe_run_task(&qp->req.task, 1); task 759 drivers/infiniband/sw/rxe/rxe_req.c __rxe_do_task(&qp->comp.task); task 117 drivers/infiniband/sw/rxe/rxe_resp.c rxe_run_task(&qp->resp.task, must_sched); task 40 drivers/infiniband/sw/rxe/rxe_task.c int __rxe_do_task(struct rxe_task *task) task 45 drivers/infiniband/sw/rxe/rxe_task.c while ((ret = task->func(task->arg)) == 0) task 48 drivers/infiniband/sw/rxe/rxe_task.c task->ret = ret; task 63 drivers/infiniband/sw/rxe/rxe_task.c struct rxe_task *task = (struct rxe_task *)data; task 65 drivers/infiniband/sw/rxe/rxe_task.c spin_lock_irqsave(&task->state_lock, flags); task 66 drivers/infiniband/sw/rxe/rxe_task.c switch (task->state) { task 68 drivers/infiniband/sw/rxe/rxe_task.c task->state = TASK_STATE_BUSY; task 69 drivers/infiniband/sw/rxe/rxe_task.c 
spin_unlock_irqrestore(&task->state_lock, flags); task 73 drivers/infiniband/sw/rxe/rxe_task.c task->state = TASK_STATE_ARMED; task 76 drivers/infiniband/sw/rxe/rxe_task.c spin_unlock_irqrestore(&task->state_lock, flags); task 80 drivers/infiniband/sw/rxe/rxe_task.c spin_unlock_irqrestore(&task->state_lock, flags); task 81 drivers/infiniband/sw/rxe/rxe_task.c pr_warn("%s failed with bad state %d\n", __func__, task->state); task 87 drivers/infiniband/sw/rxe/rxe_task.c ret = task->func(task->arg); task 89 drivers/infiniband/sw/rxe/rxe_task.c spin_lock_irqsave(&task->state_lock, flags); task 90 drivers/infiniband/sw/rxe/rxe_task.c switch (task->state) { task 93 drivers/infiniband/sw/rxe/rxe_task.c task->state = TASK_STATE_START; task 103 drivers/infiniband/sw/rxe/rxe_task.c task->state = TASK_STATE_BUSY; task 109 drivers/infiniband/sw/rxe/rxe_task.c task->state); task 111 drivers/infiniband/sw/rxe/rxe_task.c spin_unlock_irqrestore(&task->state_lock, flags); task 114 drivers/infiniband/sw/rxe/rxe_task.c task->ret = ret; task 117 drivers/infiniband/sw/rxe/rxe_task.c int rxe_init_task(void *obj, struct rxe_task *task, task 120 drivers/infiniband/sw/rxe/rxe_task.c task->obj = obj; task 121 drivers/infiniband/sw/rxe/rxe_task.c task->arg = arg; task 122 drivers/infiniband/sw/rxe/rxe_task.c task->func = func; task 123 drivers/infiniband/sw/rxe/rxe_task.c snprintf(task->name, sizeof(task->name), "%s", name); task 124 drivers/infiniband/sw/rxe/rxe_task.c task->destroyed = false; task 126 drivers/infiniband/sw/rxe/rxe_task.c tasklet_init(&task->tasklet, rxe_do_task, (unsigned long)task); task 128 drivers/infiniband/sw/rxe/rxe_task.c task->state = TASK_STATE_START; task 129 drivers/infiniband/sw/rxe/rxe_task.c spin_lock_init(&task->state_lock); task 134 drivers/infiniband/sw/rxe/rxe_task.c void rxe_cleanup_task(struct rxe_task *task) task 143 drivers/infiniband/sw/rxe/rxe_task.c task->destroyed = true; task 146 drivers/infiniband/sw/rxe/rxe_task.c spin_lock_irqsave(&task->state_lock, flags); task 147 drivers/infiniband/sw/rxe/rxe_task.c idle = (task->state == TASK_STATE_START); task 148 drivers/infiniband/sw/rxe/rxe_task.c spin_unlock_irqrestore(&task->state_lock, flags); task 151 drivers/infiniband/sw/rxe/rxe_task.c tasklet_kill(&task->tasklet); task 154 drivers/infiniband/sw/rxe/rxe_task.c void rxe_run_task(struct rxe_task *task, int sched) task 156 drivers/infiniband/sw/rxe/rxe_task.c if (task->destroyed) task 160 drivers/infiniband/sw/rxe/rxe_task.c tasklet_schedule(&task->tasklet); task 162 drivers/infiniband/sw/rxe/rxe_task.c rxe_do_task((unsigned long)task); task 165 drivers/infiniband/sw/rxe/rxe_task.c void rxe_disable_task(struct rxe_task *task) task 167 drivers/infiniband/sw/rxe/rxe_task.c tasklet_disable(&task->tasklet); task 170 drivers/infiniband/sw/rxe/rxe_task.c void rxe_enable_task(struct rxe_task *task) task 172 drivers/infiniband/sw/rxe/rxe_task.c tasklet_enable(&task->tasklet); task 65 drivers/infiniband/sw/rxe/rxe_task.h int rxe_init_task(void *obj, struct rxe_task *task, task 69 drivers/infiniband/sw/rxe/rxe_task.h void rxe_cleanup_task(struct rxe_task *task); task 75 drivers/infiniband/sw/rxe/rxe_task.h int __rxe_do_task(struct rxe_task *task); task 88 drivers/infiniband/sw/rxe/rxe_task.h void rxe_run_task(struct rxe_task *task, int sched); task 91 drivers/infiniband/sw/rxe/rxe_task.h void rxe_disable_task(struct rxe_task *task); task 94 drivers/infiniband/sw/rxe/rxe_task.h void rxe_enable_task(struct rxe_task *task); task 711 drivers/infiniband/sw/rxe/rxe_verbs.c 
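The rxe_task.c entries above implement a small tasklet-driven state machine: rxe_init_task() binds a work function to a tasklet, rxe_run_task() either schedules it into softirq context or calls it inline, and rxe_cleanup_task() marks the task destroyed before killing the tasklet. A condensed sketch of that shape (the state-lock/ARMED handling is omitted; names prefixed my_ are illustrative):

    #include <linux/interrupt.h>

    struct my_swtask {
        struct tasklet_struct tasklet;
        int (*func)(void *arg);    /* returns nonzero when done */
        void *arg;
        bool destroyed;
    };

    static void my_do_task(unsigned long data)
    {
        struct my_swtask *task = (struct my_swtask *)data;

        while (task->func(task->arg) == 0)
            ;    /* run until the work function yields */
    }

    static void my_init_task(struct my_swtask *task,
                             int (*func)(void *), void *arg)
    {
        task->func = func;
        task->arg = arg;
        task->destroyed = false;
        tasklet_init(&task->tasklet, my_do_task, (unsigned long)task);
    }

    static void my_run_task(struct my_swtask *task, int sched)
    {
        if (task->destroyed)
            return;
        if (sched)
            tasklet_schedule(&task->tasklet);    /* defer to softirq */
        else
            my_do_task((unsigned long)task);     /* run inline */
    }

    static void my_cleanup_task(struct my_swtask *task)
    {
        task->destroyed = true;         /* refuse new runs */
        tasklet_kill(&task->tasklet);   /* wait out a pending run */
    }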
rxe_run_task(&qp->req.task, 1); task 713 drivers/infiniband/sw/rxe/rxe_verbs.c rxe_run_task(&qp->comp.task, 1); task 735 drivers/infiniband/sw/rxe/rxe_verbs.c rxe_run_task(&qp->req.task, 0); task 775 drivers/infiniband/sw/rxe/rxe_verbs.c rxe_run_task(&qp->resp.task, 1); task 153 drivers/infiniband/sw/rxe/rxe_verbs.h struct rxe_task task; task 164 drivers/infiniband/sw/rxe/rxe_verbs.h struct rxe_task task; task 232 drivers/infiniband/sw/rxe/rxe_verbs.h struct rxe_task task; task 161 drivers/infiniband/ulp/iser/iscsi_iser.c iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode) task 163 drivers/infiniband/ulp/iser/iscsi_iser.c struct iscsi_iser_task *iser_task = task->dd_data; task 165 drivers/infiniband/ulp/iser/iscsi_iser.c task->hdr = (struct iscsi_hdr *)&iser_task->desc.iscsi_header; task 166 drivers/infiniband/ulp/iser/iscsi_iser.c task->hdr_max = sizeof(iser_task->desc.iscsi_header); task 183 drivers/infiniband/ulp/iser/iscsi_iser.c iser_initialize_task_headers(struct iscsi_task *task, task 186 drivers/infiniband/ulp/iser/iscsi_iser.c struct iser_conn *iser_conn = task->conn->dd_data; task 188 drivers/infiniband/ulp/iser/iscsi_iser.c struct iscsi_iser_task *iser_task = task->dd_data; task 190 drivers/infiniband/ulp/iser/iscsi_iser.c const bool mgmt_task = !task->sc && !in_interrupt(); task 234 drivers/infiniband/ulp/iser/iscsi_iser.c iscsi_iser_task_init(struct iscsi_task *task) task 236 drivers/infiniband/ulp/iser/iscsi_iser.c struct iscsi_iser_task *iser_task = task->dd_data; task 239 drivers/infiniband/ulp/iser/iscsi_iser.c ret = iser_initialize_task_headers(task, &iser_task->desc); task 247 drivers/infiniband/ulp/iser/iscsi_iser.c if (!task->sc) task 252 drivers/infiniband/ulp/iser/iscsi_iser.c iser_task->sc = task->sc; task 269 drivers/infiniband/ulp/iser/iscsi_iser.c iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task) task 273 drivers/infiniband/ulp/iser/iscsi_iser.c iser_dbg("mtask xmit [cid %d itt 0x%x]\n", conn->id, task->itt); task 275 drivers/infiniband/ulp/iser/iscsi_iser.c error = iser_send_control(conn, task); task 288 drivers/infiniband/ulp/iser/iscsi_iser.c struct iscsi_task *task) task 290 drivers/infiniband/ulp/iser/iscsi_iser.c struct iscsi_r2t_info *r2t = &task->unsol_r2t; task 295 drivers/infiniband/ulp/iser/iscsi_iser.c while (iscsi_task_has_unsol_data(task)) { task 296 drivers/infiniband/ulp/iser/iscsi_iser.c iscsi_prep_data_out_pdu(task, r2t, &hdr); task 302 drivers/infiniband/ulp/iser/iscsi_iser.c error = iser_send_data_out(conn, task, &hdr); task 323 drivers/infiniband/ulp/iser/iscsi_iser.c iscsi_iser_task_xmit(struct iscsi_task *task) task 325 drivers/infiniband/ulp/iser/iscsi_iser.c struct iscsi_conn *conn = task->conn; task 326 drivers/infiniband/ulp/iser/iscsi_iser.c struct iscsi_iser_task *iser_task = task->dd_data; task 329 drivers/infiniband/ulp/iser/iscsi_iser.c if (!task->sc) task 330 drivers/infiniband/ulp/iser/iscsi_iser.c return iscsi_iser_mtask_xmit(conn, task); task 332 drivers/infiniband/ulp/iser/iscsi_iser.c if (task->sc->sc_data_direction == DMA_TO_DEVICE) { task 333 drivers/infiniband/ulp/iser/iscsi_iser.c BUG_ON(scsi_bufflen(task->sc) == 0); task 336 drivers/infiniband/ulp/iser/iscsi_iser.c task->itt, scsi_bufflen(task->sc), task 337 drivers/infiniband/ulp/iser/iscsi_iser.c task->imm_count, task->unsol_r2t.data_length); task 341 drivers/infiniband/ulp/iser/iscsi_iser.c conn->id, task->itt); task 345 drivers/infiniband/ulp/iser/iscsi_iser.c error = iser_send_command(conn, task); task 352 
drivers/infiniband/ulp/iser/iscsi_iser.c if (iscsi_task_has_unsol_data(task)) task 353 drivers/infiniband/ulp/iser/iscsi_iser.c error = iscsi_iser_task_xmit_unsol_data(conn, task); task 367 drivers/infiniband/ulp/iser/iscsi_iser.c static void iscsi_iser_cleanup_task(struct iscsi_task *task) task 369 drivers/infiniband/ulp/iser/iscsi_iser.c struct iscsi_iser_task *iser_task = task->dd_data; task 371 drivers/infiniband/ulp/iser/iscsi_iser.c struct iser_conn *iser_conn = task->conn->dd_data; task 385 drivers/infiniband/ulp/iser/iscsi_iser.c if (!task->sc) task 407 drivers/infiniband/ulp/iser/iscsi_iser.c iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector) task 409 drivers/infiniband/ulp/iser/iscsi_iser.c struct iscsi_iser_task *iser_task = task->dd_data; task 554 drivers/infiniband/ulp/iser/iscsi_iser.h struct iscsi_task *task); task 557 drivers/infiniband/ulp/iser/iscsi_iser.h struct iscsi_task *task); task 560 drivers/infiniband/ulp/iser/iscsi_iser.h struct iscsi_task *task, task 584 drivers/infiniband/ulp/iser/iscsi_iser.h void iser_task_rdma_init(struct iscsi_iser_task *task); task 586 drivers/infiniband/ulp/iser/iscsi_iser.h void iser_task_rdma_finalize(struct iscsi_iser_task *task); task 594 drivers/infiniband/ulp/iser/iscsi_iser.h int iser_reg_rdma_mem(struct iscsi_iser_task *task, task 597 drivers/infiniband/ulp/iser/iscsi_iser.h void iser_unreg_rdma_mem(struct iscsi_iser_task *task, task 624 drivers/infiniband/ulp/iser/iscsi_iser.h int iser_initialize_task_headers(struct iscsi_task *task, task 48 drivers/infiniband/ulp/iser/iser_initiator.c static int iser_prepare_read_cmd(struct iscsi_task *task) task 51 drivers/infiniband/ulp/iser/iser_initiator.c struct iscsi_iser_task *iser_task = task->dd_data; task 87 drivers/infiniband/ulp/iser/iser_initiator.c task->itt, mem_reg->rkey, task 99 drivers/infiniband/ulp/iser/iser_initiator.c iser_prepare_write_cmd(struct iscsi_task *task, task 104 drivers/infiniband/ulp/iser/iser_initiator.c struct iscsi_iser_task *iser_task = task->dd_data; task 146 drivers/infiniband/ulp/iser/iser_initiator.c task->itt, mem_reg->rkey, task 152 drivers/infiniband/ulp/iser/iser_initiator.c task->itt, imm_sz); task 363 drivers/infiniband/ulp/iser/iser_initiator.c struct iscsi_task *task) task 366 drivers/infiniband/ulp/iser/iser_initiator.c struct iscsi_iser_task *iser_task = task->dd_data; task 370 drivers/infiniband/ulp/iser/iser_initiator.c struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr; task 371 drivers/infiniband/ulp/iser/iser_initiator.c struct scsi_cmnd *sc = task->sc; task 404 drivers/infiniband/ulp/iser/iser_initiator.c err = iser_prepare_read_cmd(task); task 409 drivers/infiniband/ulp/iser/iser_initiator.c err = iser_prepare_write_cmd(task, task 410 drivers/infiniband/ulp/iser/iser_initiator.c task->imm_count, task 411 drivers/infiniband/ulp/iser/iser_initiator.c task->imm_count + task 412 drivers/infiniband/ulp/iser/iser_initiator.c task->unsol_r2t.data_length, task 426 drivers/infiniband/ulp/iser/iser_initiator.c iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err); task 434 drivers/infiniband/ulp/iser/iser_initiator.c struct iscsi_task *task, task 438 drivers/infiniband/ulp/iser/iser_initiator.c struct iscsi_iser_task *iser_task = task->dd_data; task 464 drivers/infiniband/ulp/iser/iser_initiator.c err = iser_initialize_task_headers(task, tx_desc); task 497 drivers/infiniband/ulp/iser/iser_initiator.c struct iscsi_task *task) task 500 drivers/infiniband/ulp/iser/iser_initiator.c struct 
iscsi_iser_task *iser_task = task->dd_data; task 513 drivers/infiniband/ulp/iser/iser_initiator.c data_seg_len = ntoh24(task->hdr->dlength); task 519 drivers/infiniband/ulp/iser/iser_initiator.c if (task != conn->login_task) { task 525 drivers/infiniband/ulp/iser/iser_initiator.c task->data_count, DMA_TO_DEVICE); task 527 drivers/infiniband/ulp/iser/iser_initiator.c memcpy(desc->req, task->data, task->data_count); task 530 drivers/infiniband/ulp/iser/iser_initiator.c task->data_count, DMA_TO_DEVICE); task 533 drivers/infiniband/ulp/iser/iser_initiator.c tx_dsg->length = task->data_count; task 538 drivers/infiniband/ulp/iser/iser_initiator.c if (task == conn->login_task) { task 540 drivers/infiniband/ulp/iser/iser_initiator.c task->hdr->opcode, data_seg_len); task 544 drivers/infiniband/ulp/iser/iser_initiator.c err = iser_post_rx_bufs(conn, task->hdr); task 612 drivers/infiniband/ulp/iser/iser_initiator.c struct iscsi_task *task; task 624 drivers/infiniband/ulp/iser/iser_initiator.c task = iscsi_itt_to_ctask(iser_conn->iscsi_conn, hdr->itt); task 625 drivers/infiniband/ulp/iser/iser_initiator.c if (likely(task)) { task 626 drivers/infiniband/ulp/iser/iser_initiator.c struct iscsi_iser_task *iser_task = task->dd_data; task 711 drivers/infiniband/ulp/iser/iser_initiator.c struct iscsi_task *task; task 719 drivers/infiniband/ulp/iser/iser_initiator.c task = (void *)desc - sizeof(struct iscsi_task); task 720 drivers/infiniband/ulp/iser/iser_initiator.c if (task->hdr->itt == RESERVED_ITT) task 721 drivers/infiniband/ulp/iser/iser_initiator.c iscsi_put_task(task); task 486 drivers/infiniband/ulp/iser/iser_memory.c iser_reg_data_sg(struct iscsi_iser_task *task, task 492 drivers/infiniband/ulp/iser/iser_memory.c struct iser_device *device = task->iser_conn->ib_conn.device; task 497 drivers/infiniband/ulp/iser/iser_memory.c return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg); task 500 drivers/infiniband/ulp/iser/iser_memory.c int iser_reg_rdma_mem(struct iscsi_iser_task *task, task 504 drivers/infiniband/ulp/iser/iser_memory.c struct ib_conn *ib_conn = &task->iser_conn->ib_conn; task 506 drivers/infiniband/ulp/iser/iser_memory.c struct iser_data_buf *mem = &task->data[dir]; task 507 drivers/infiniband/ulp/iser/iser_memory.c struct iser_mem_reg *reg = &task->rdma_reg[dir]; task 513 drivers/infiniband/ulp/iser/iser_memory.c scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL; task 520 drivers/infiniband/ulp/iser/iser_memory.c if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL) { task 521 drivers/infiniband/ulp/iser/iser_memory.c err = iser_reg_data_sg(task, mem, desc, use_dma_key, reg); task 525 drivers/infiniband/ulp/iser/iser_memory.c err = iser_reg_sig_mr(task, mem, &task->prot[dir], task 542 drivers/infiniband/ulp/iser/iser_memory.c void iser_unreg_rdma_mem(struct iscsi_iser_task *task, task 545 drivers/infiniband/ulp/iser/iser_memory.c struct iser_device *device = task->iser_conn->ib_conn.device; task 547 drivers/infiniband/ulp/iser/iser_memory.c device->reg_ops->unreg_mem(task, dir); task 215 drivers/input/serio/hp_sdc.c tasklet_schedule(&hp_sdc.task); task 591 drivers/input/serio/hp_sdc.c tasklet_schedule(&hp_sdc.task); task 619 drivers/input/serio/hp_sdc.c tasklet_schedule(&hp_sdc.task); task 681 drivers/input/serio/hp_sdc.c tasklet_schedule(&hp_sdc.task); task 702 drivers/input/serio/hp_sdc.c tasklet_schedule(&hp_sdc.task); task 724 drivers/input/serio/hp_sdc.c tasklet_schedule(&hp_sdc.task); task 745 drivers/input/serio/hp_sdc.c tasklet_schedule(&hp_sdc.task); task 766 
drivers/input/serio/hp_sdc.c tasklet_schedule(&hp_sdc.task); task 787 drivers/input/serio/hp_sdc.c tasklet_schedule(&hp_sdc.task); task 796 drivers/input/serio/hp_sdc.c tasklet_schedule(&hp_sdc.task); task 893 drivers/input/serio/hp_sdc.c tasklet_init(&hp_sdc.task, hp_sdc_tasklet, 0); task 985 drivers/input/serio/hp_sdc.c tasklet_kill(&hp_sdc.task); task 602 drivers/iommu/amd_iommu_v2.c struct task_struct *task) task 635 drivers/iommu/amd_iommu_v2.c mm = get_task_mm(task); task 146 drivers/mailbox/mtk-cmdq-mailbox.c static void cmdq_task_insert_into_thread(struct cmdq_task *task) task 148 drivers/mailbox/mtk-cmdq-mailbox.c struct device *dev = task->cmdq->mbox.dev; task 149 drivers/mailbox/mtk-cmdq-mailbox.c struct cmdq_thread *thread = task->thread; task 151 drivers/mailbox/mtk-cmdq-mailbox.c &thread->task_busy_list, typeof(*task), list_entry); task 158 drivers/mailbox/mtk-cmdq-mailbox.c (u64)CMDQ_JUMP_BY_PA << 32 | task->pa_base; task 175 drivers/mailbox/mtk-cmdq-mailbox.c static void cmdq_task_remove_wfe(struct cmdq_task *task) task 177 drivers/mailbox/mtk-cmdq-mailbox.c struct device *dev = task->cmdq->mbox.dev; task 178 drivers/mailbox/mtk-cmdq-mailbox.c u64 *base = task->pkt->va_base; task 181 drivers/mailbox/mtk-cmdq-mailbox.c dma_sync_single_for_cpu(dev, task->pa_base, task->pkt->cmd_buf_size, task 183 drivers/mailbox/mtk-cmdq-mailbox.c for (i = 0; i < CMDQ_NUM_CMD(task->pkt); i++) task 187 drivers/mailbox/mtk-cmdq-mailbox.c dma_sync_single_for_device(dev, task->pa_base, task->pkt->cmd_buf_size, task 207 drivers/mailbox/mtk-cmdq-mailbox.c static void cmdq_task_exec_done(struct cmdq_task *task, enum cmdq_cb_status sta) task 209 drivers/mailbox/mtk-cmdq-mailbox.c struct cmdq_task_cb *cb = &task->pkt->async_cb; task 217 drivers/mailbox/mtk-cmdq-mailbox.c list_del(&task->list_entry); task 220 drivers/mailbox/mtk-cmdq-mailbox.c static void cmdq_task_handle_error(struct cmdq_task *task) task 222 drivers/mailbox/mtk-cmdq-mailbox.c struct cmdq_thread *thread = task->thread; task 225 drivers/mailbox/mtk-cmdq-mailbox.c dev_err(task->cmdq->mbox.dev, "task 0x%p error\n", task); task 226 drivers/mailbox/mtk-cmdq-mailbox.c WARN_ON(cmdq_thread_suspend(task->cmdq, thread) < 0); task 237 drivers/mailbox/mtk-cmdq-mailbox.c struct cmdq_task *task, *tmp, *curr_task = NULL; task 262 drivers/mailbox/mtk-cmdq-mailbox.c list_for_each_entry_safe(task, tmp, &thread->task_busy_list, task 264 drivers/mailbox/mtk-cmdq-mailbox.c task_end_pa = task->pa_base + task->pkt->cmd_buf_size; task 265 drivers/mailbox/mtk-cmdq-mailbox.c if (curr_pa >= task->pa_base && curr_pa < task_end_pa) task 266 drivers/mailbox/mtk-cmdq-mailbox.c curr_task = task; task 269 drivers/mailbox/mtk-cmdq-mailbox.c cmdq_task_exec_done(task, CMDQ_CB_NORMAL); task 270 drivers/mailbox/mtk-cmdq-mailbox.c kfree(task); task 272 drivers/mailbox/mtk-cmdq-mailbox.c cmdq_task_exec_done(task, CMDQ_CB_ERROR); task 274 drivers/mailbox/mtk-cmdq-mailbox.c kfree(task); task 356 drivers/mailbox/mtk-cmdq-mailbox.c struct cmdq_task *task; task 362 drivers/mailbox/mtk-cmdq-mailbox.c task = kzalloc(sizeof(*task), GFP_ATOMIC); task 363 drivers/mailbox/mtk-cmdq-mailbox.c if (!task) task 366 drivers/mailbox/mtk-cmdq-mailbox.c task->cmdq = cmdq; task 367 drivers/mailbox/mtk-cmdq-mailbox.c INIT_LIST_HEAD(&task->list_entry); task 368 drivers/mailbox/mtk-cmdq-mailbox.c task->pa_base = pkt->pa_base; task 369 drivers/mailbox/mtk-cmdq-mailbox.c task->thread = thread; task 370 drivers/mailbox/mtk-cmdq-mailbox.c task->pkt = pkt; task 376 drivers/mailbox/mtk-cmdq-mailbox.c 
writel(task->pa_base, thread->base + CMDQ_THR_CURR_ADDR); task 377 drivers/mailbox/mtk-cmdq-mailbox.c writel(task->pa_base + pkt->cmd_buf_size, task 398 drivers/mailbox/mtk-cmdq-mailbox.c writel(task->pa_base, task 401 drivers/mailbox/mtk-cmdq-mailbox.c cmdq_task_insert_into_thread(task); task 402 drivers/mailbox/mtk-cmdq-mailbox.c cmdq_task_remove_wfe(task); task 410 drivers/mailbox/mtk-cmdq-mailbox.c writel(task->pa_base, task 413 drivers/mailbox/mtk-cmdq-mailbox.c cmdq_task_insert_into_thread(task); task 417 drivers/mailbox/mtk-cmdq-mailbox.c writel(task->pa_base + pkt->cmd_buf_size, task 421 drivers/mailbox/mtk-cmdq-mailbox.c list_move_tail(&task->list_entry, &thread->task_busy_list); task 102 drivers/md/bcache/closure.c struct task_struct *task; task 112 drivers/md/bcache/closure.c p = READ_ONCE(s->task); task 120 drivers/md/bcache/closure.c struct closure_syncer s = { .task = current }; task 380 drivers/md/bcache/request.c struct task_struct *task = current; task 435 drivers/md/bcache/request.c add_sequential(task); task 443 drivers/md/bcache/request.c task->sequential_io = i->sequential; task 451 drivers/md/bcache/request.c sectors = max(task->sequential_io, task 452 drivers/md/bcache/request.c task->sequential_io_avg) >> 9; task 274 drivers/md/dm-integrity.c struct task_struct *task; task 1141 drivers/md/dm-integrity.c last_range_task = last_range->task; task 1144 drivers/md/dm-integrity.c last_range->task = last_range_task; task 1166 drivers/md/dm-integrity.c new_range->task = current; task 56 drivers/md/persistent-data/dm-block-manager.c struct task_struct *task; task 61 drivers/md/persistent-data/dm-block-manager.c struct task_struct *task) task 66 drivers/md/persistent-data/dm-block-manager.c if (lock->holders[i] == task) task 74 drivers/md/persistent-data/dm-block-manager.c static void __add_holder(struct block_lock *lock, struct task_struct *task) task 81 drivers/md/persistent-data/dm-block-manager.c get_task_struct(task); task 82 drivers/md/persistent-data/dm-block-manager.c lock->holders[h] = task; task 91 drivers/md/persistent-data/dm-block-manager.c static void __del_holder(struct block_lock *lock, struct task_struct *task) task 93 drivers/md/persistent-data/dm-block-manager.c unsigned h = __find_holder(lock, task); task 95 drivers/md/persistent-data/dm-block-manager.c put_task_struct(task); task 125 drivers/md/persistent-data/dm-block-manager.c if (!w->task) task 136 drivers/md/persistent-data/dm-block-manager.c struct task_struct *task; task 139 drivers/md/persistent-data/dm-block-manager.c task = w->task; task 141 drivers/md/persistent-data/dm-block-manager.c w->task = NULL; task 142 drivers/md/persistent-data/dm-block-manager.c wake_up_process(task); task 162 drivers/md/persistent-data/dm-block-manager.c __add_holder(lock, w->task); task 168 drivers/md/persistent-data/dm-block-manager.c __add_holder(lock, w->task); task 212 drivers/md/persistent-data/dm-block-manager.c w.task = current; task 274 drivers/md/persistent-data/dm-block-manager.c w.task = current; task 807 drivers/media/i2c/saa717x.c int task, int prescale) task 831 drivers/media/i2c/saa717x.c task_shift = task * 0x40; task 851 drivers/media/i2c/saa717x.c static void set_v_scale(struct v4l2_subdev *sd, int task, int yscale) task 855 drivers/media/i2c/saa717x.c task_shift = task * 0x40; task 380 drivers/media/pci/saa7134/saa7134-core.c u32 split, task=0, ctrl=0, irq=0; task 391 drivers/media/pci/saa7134/saa7134-core.c task |= 0x01; task 406 drivers/media/pci/saa7134/saa7134-core.c task |= 0x10; task 413 
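The radeon_fence, vmwgfx_fence, bcache closure and dm-block-manager entries above all use the same wait idiom: the sleeper records current in a callback structure, and the completion path calls wake_up_process() on exactly that task. A hedged sketch built on the generic dma_fence callback API (my_wait_cb and my_fence_wait are illustrative names, not either driver's real code):

    #include <linux/kernel.h>
    #include <linux/sched.h>
    #include <linux/dma-fence.h>

    struct my_wait_cb {
        struct dma_fence_cb base;
        struct task_struct *task;
    };

    static void my_wait_cb_func(struct dma_fence *fence,
                                struct dma_fence_cb *cb)
    {
        struct my_wait_cb *wait = container_of(cb, struct my_wait_cb, base);

        wake_up_process(wait->task);    /* wake the recorded sleeper */
    }

    static long my_fence_wait(struct dma_fence *fence, long timeout)
    {
        struct my_wait_cb cb = { .task = current };

        /* nonzero return means the fence is already signaled */
        if (dma_fence_add_callback(fence, &cb.base, my_wait_cb_func))
            return timeout;

        while (timeout > 0) {
            set_current_state(TASK_INTERRUPTIBLE);
            if (dma_fence_is_signaled(fence))
                break;
            timeout = schedule_timeout(timeout);
        }
        __set_current_state(TASK_RUNNING);
        dma_fence_remove_callback(fence, &cb.base);
        return timeout;
    }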
drivers/media/pci/saa7134/saa7134-core.c task |= 0x22; task 461 drivers/media/pci/saa7134/saa7134-core.c saa_writeb(SAA7134_REGION_ENABLE, task); task 473 drivers/media/pci/saa7134/saa7134-core.c task, ctrl, irq, split ? "no" : "yes"); task 40 drivers/media/pci/saa7134/saa7134-vbi.c int task) task 45 drivers/media/pci/saa7134/saa7134-vbi.c saa_writeb(SAA7134_VBI_H_START1(task), norm->h_start & 0xff); task 46 drivers/media/pci/saa7134/saa7134-vbi.c saa_writeb(SAA7134_VBI_H_START2(task), norm->h_start >> 8); task 47 drivers/media/pci/saa7134/saa7134-vbi.c saa_writeb(SAA7134_VBI_H_STOP1(task), norm->h_stop & 0xff); task 48 drivers/media/pci/saa7134/saa7134-vbi.c saa_writeb(SAA7134_VBI_H_STOP2(task), norm->h_stop >> 8); task 49 drivers/media/pci/saa7134/saa7134-vbi.c saa_writeb(SAA7134_VBI_V_START1(task), norm->vbi_v_start_0 & 0xff); task 50 drivers/media/pci/saa7134/saa7134-vbi.c saa_writeb(SAA7134_VBI_V_START2(task), norm->vbi_v_start_0 >> 8); task 51 drivers/media/pci/saa7134/saa7134-vbi.c saa_writeb(SAA7134_VBI_V_STOP1(task), norm->vbi_v_stop_0 & 0xff); task 52 drivers/media/pci/saa7134/saa7134-vbi.c saa_writeb(SAA7134_VBI_V_STOP2(task), norm->vbi_v_stop_0 >> 8); task 54 drivers/media/pci/saa7134/saa7134-vbi.c saa_writeb(SAA7134_VBI_H_SCALE_INC1(task), VBI_SCALE & 0xff); task 55 drivers/media/pci/saa7134/saa7134-vbi.c saa_writeb(SAA7134_VBI_H_SCALE_INC2(task), VBI_SCALE >> 8); task 56 drivers/media/pci/saa7134/saa7134-vbi.c saa_writeb(SAA7134_VBI_PHASE_OFFSET_LUMA(task), 0x00); task 57 drivers/media/pci/saa7134/saa7134-vbi.c saa_writeb(SAA7134_VBI_PHASE_OFFSET_CHROMA(task), 0x00); task 59 drivers/media/pci/saa7134/saa7134-vbi.c saa_writeb(SAA7134_VBI_H_LEN1(task), dev->vbi_hlen & 0xff); task 60 drivers/media/pci/saa7134/saa7134-vbi.c saa_writeb(SAA7134_VBI_H_LEN2(task), dev->vbi_hlen >> 8); task 61 drivers/media/pci/saa7134/saa7134-vbi.c saa_writeb(SAA7134_VBI_V_LEN1(task), dev->vbi_vlen & 0xff); task 62 drivers/media/pci/saa7134/saa7134-vbi.c saa_writeb(SAA7134_VBI_V_LEN2(task), dev->vbi_vlen >> 8); task 64 drivers/media/pci/saa7134/saa7134-vbi.c saa_andorb(SAA7134_DATA_PATH(task), 0xc0, 0x00); task 461 drivers/media/pci/saa7134/saa7134-video.c static void set_h_prescale(struct saa7134_dev *dev, int task, int prescale) task 491 drivers/media/pci/saa7134/saa7134-video.c saa_writeb(SAA7134_H_PRESCALE(task), vals[i].xpsc); task 492 drivers/media/pci/saa7134/saa7134-video.c saa_writeb(SAA7134_ACC_LENGTH(task), vals[i].xacl); task 493 drivers/media/pci/saa7134/saa7134-video.c saa_writeb(SAA7134_LEVEL_CTRL(task), task 495 drivers/media/pci/saa7134/saa7134-video.c saa_andorb(SAA7134_FIR_PREFILTER_CTRL(task), 0x0f, task 499 drivers/media/pci/saa7134/saa7134-video.c static void set_v_scale(struct saa7134_dev *dev, int task, int yscale) task 503 drivers/media/pci/saa7134/saa7134-video.c saa_writeb(SAA7134_V_SCALE_RATIO1(task), yscale & 0xff); task 504 drivers/media/pci/saa7134/saa7134-video.c saa_writeb(SAA7134_V_SCALE_RATIO2(task), yscale >> 8); task 510 drivers/media/pci/saa7134/saa7134-video.c saa_writeb(SAA7134_V_FILTER(task), 0x00 | mirror); task 511 drivers/media/pci/saa7134/saa7134-video.c saa_writeb(SAA7134_LUMA_CONTRAST(task), 0x40); task 512 drivers/media/pci/saa7134/saa7134-video.c saa_writeb(SAA7134_CHROMA_SATURATION(task), 0x40); task 517 drivers/media/pci/saa7134/saa7134-video.c saa_writeb(SAA7134_V_FILTER(task), 0x01 | mirror); task 518 drivers/media/pci/saa7134/saa7134-video.c saa_writeb(SAA7134_LUMA_CONTRAST(task), val); task 519 drivers/media/pci/saa7134/saa7134-video.c 
saa_writeb(SAA7134_CHROMA_SATURATION(task), val); task 521 drivers/media/pci/saa7134/saa7134-video.c saa_writeb(SAA7134_LUMA_BRIGHT(task), 0x80); task 524 drivers/media/pci/saa7134/saa7134-video.c static void set_size(struct saa7134_dev *dev, int task, task 537 drivers/media/pci/saa7134/saa7134-video.c saa_writeb(SAA7134_VIDEO_H_START1(task), h_start & 0xff); task 538 drivers/media/pci/saa7134/saa7134-video.c saa_writeb(SAA7134_VIDEO_H_START2(task), h_start >> 8); task 539 drivers/media/pci/saa7134/saa7134-video.c saa_writeb(SAA7134_VIDEO_H_STOP1(task), h_stop & 0xff); task 540 drivers/media/pci/saa7134/saa7134-video.c saa_writeb(SAA7134_VIDEO_H_STOP2(task), h_stop >> 8); task 541 drivers/media/pci/saa7134/saa7134-video.c saa_writeb(SAA7134_VIDEO_V_START1(task), v_start & 0xff); task 542 drivers/media/pci/saa7134/saa7134-video.c saa_writeb(SAA7134_VIDEO_V_START2(task), v_start >> 8); task 543 drivers/media/pci/saa7134/saa7134-video.c saa_writeb(SAA7134_VIDEO_V_STOP1(task), v_stop & 0xff); task 544 drivers/media/pci/saa7134/saa7134-video.c saa_writeb(SAA7134_VIDEO_V_STOP2(task), v_stop >> 8); task 553 drivers/media/pci/saa7134/saa7134-video.c set_h_prescale(dev,task,prescale); task 554 drivers/media/pci/saa7134/saa7134-video.c saa_writeb(SAA7134_H_SCALE_INC1(task), xscale & 0xff); task 555 drivers/media/pci/saa7134/saa7134-video.c saa_writeb(SAA7134_H_SCALE_INC2(task), xscale >> 8); task 556 drivers/media/pci/saa7134/saa7134-video.c set_v_scale(dev,task,yscale); task 558 drivers/media/pci/saa7134/saa7134-video.c saa_writeb(SAA7134_VIDEO_PIXELS1(task), width & 0xff); task 559 drivers/media/pci/saa7134/saa7134-video.c saa_writeb(SAA7134_VIDEO_PIXELS2(task), width >> 8); task 560 drivers/media/pci/saa7134/saa7134-video.c saa_writeb(SAA7134_VIDEO_LINES1(task), height/div & 0xff); task 561 drivers/media/pci/saa7134/saa7134-video.c saa_writeb(SAA7134_VIDEO_LINES2(task), height/div >> 8); task 566 drivers/media/pci/saa7134/saa7134-video.c saa_writeb(SAA7134_V_PHASE_OFFSET0(task), y_odd); task 567 drivers/media/pci/saa7134/saa7134-video.c saa_writeb(SAA7134_V_PHASE_OFFSET1(task), y_even); task 568 drivers/media/pci/saa7134/saa7134-video.c saa_writeb(SAA7134_V_PHASE_OFFSET2(task), y_odd); task 569 drivers/media/pci/saa7134/saa7134-video.c saa_writeb(SAA7134_V_PHASE_OFFSET3(task), y_even); task 412 drivers/media/platform/sti/hva/hva-h264.c struct hva_buffer *task; task 586 drivers/media/platform/sti/hva/hva-h264.c struct hva_h264_task *task, task 596 drivers/media/platform/sti/hva/hva-h264.c struct hva_h264_td *td = &task->td; task 779 drivers/media/platform/sti/hva/hva-h264.c td->addr_param_out = (u32)ctx->task->paddr + task 873 drivers/media/platform/sti/hva/hva-h264.c static unsigned int hva_h264_get_stream_size(struct hva_h264_task *task) task 875 drivers/media/platform/sti/hva/hva-h264.c struct hva_h264_po *po = &task->po; task 880 drivers/media/platform/sti/hva/hva-h264.c static u32 hva_h264_get_stuffing_bytes(struct hva_h264_task *task) task 882 drivers/media/platform/sti/hva/hva-h264.c struct hva_h264_po *po = &task->po; task 958 drivers/media/platform/sti/hva/hva-h264.c &ctx->task); task 997 drivers/media/platform/sti/hva/hva-h264.c if (ctx->task) task 998 drivers/media/platform/sti/hva/hva-h264.c hva_mem_free(pctx, ctx->task); task 1009 drivers/media/platform/sti/hva/hva-h264.c struct hva_h264_task *task = (struct hva_h264_task *)ctx->task->vaddr; task 1013 drivers/media/platform/sti/hva/hva-h264.c ret = hva_h264_prepare_task(pctx, task, frame, stream); task 1017 
drivers/media/platform/sti/hva/hva-h264.c ret = hva_hw_execute_task(pctx, H264_ENC, ctx->task); task 1022 drivers/media/platform/sti/hva/hva-h264.c stream->bytesused += hva_h264_get_stream_size(task); task 1024 drivers/media/platform/sti/hva/hva-h264.c stuffing_bytes = hva_h264_get_stuffing_bytes(task); task 456 drivers/media/platform/sti/hva/hva-hw.c struct hva_buffer *task) task 507 drivers/media/platform/sti/hva/hva-hw.c ctx->name, __func__, cmd + (client_id << 8), &task->paddr); task 509 drivers/media/platform/sti/hva/hva-hw.c writel_relaxed(task->paddr, hva->regs + HVA_HIF_FIFO_CMD); task 40 drivers/media/platform/sti/hva/hva-hw.h struct hva_buffer *task); task 266 drivers/misc/cxl/api.c struct task_struct *task) task 285 drivers/misc/cxl/api.c if (task) { task 286 drivers/misc/cxl/api.c ctx->pid = get_task_pid(task, PIDTYPE_PID); task 317 drivers/misc/cxl/api.c if (task) { task 169 drivers/misc/cxl/cxllib.c int cxllib_get_PE_attributes(struct task_struct *task, task 180 drivers/misc/cxl/cxllib.c task == NULL, task 184 drivers/misc/cxl/cxllib.c if (task) { task 185 drivers/misc/cxl/cxllib.c mm = get_task_mm(task); task 194 drivers/misc/cxl/cxllib.c attr->tid = task->thread.tidr; task 727 drivers/misc/habanalabs/device.c struct task_struct *task = NULL; task 746 drivers/misc/habanalabs/device.c task = get_pid_task(hpriv->taskpid, PIDTYPE_PID); task 747 drivers/misc/habanalabs/device.c if (task) { task 749 drivers/misc/habanalabs/device.c task_pid_nr(task)); task 750 drivers/misc/habanalabs/device.c send_sig(SIGKILL, task, 1); task 753 drivers/misc/habanalabs/device.c put_task_struct(task); task 764 drivers/mmc/core/core.c struct task_struct *task) task 767 drivers/mmc/core/core.c (!ctx && task && host->claimer->task == task); task 772 drivers/mmc/core/core.c struct task_struct *task) task 780 drivers/mmc/core/core.c if (task) task 781 drivers/mmc/core/core.c host->claimer->task = task; task 799 drivers/mmc/core/core.c struct task_struct *task = ctx ? 
NULL : current; task 812 drivers/mmc/core/core.c if (stop || !host->claimed || mmc_ctx_matches(host, ctx, task)) task 821 drivers/mmc/core/core.c mmc_ctx_set_claimer(host, ctx, task); task 856 drivers/mmc/core/core.c host->claimer->task = NULL; task 304 drivers/net/ethernet/hisilicon/hisi_femac.c int work_done = 0, task = budget; task 309 drivers/net/ethernet/hisilicon/hisi_femac.c num = hisi_femac_rx(dev, task); task 311 drivers/net/ethernet/hisilicon/hisi_femac.c task -= num; task 645 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c int work_done = 0, task = budget; task 650 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c num = hix5hd2_rx(dev, task); task 652 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c task -= num; task 491 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c static void sq_prepare_task(struct hinic_sq_task *task) task 493 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c task->pkt_info0 = 0; task 494 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c task->pkt_info1 = 0; task 495 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c task->pkt_info2 = 0; task 497 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c task->ufo_v6_identify = 0; task 499 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c task->pkt_info4 = HINIC_SQ_TASK_INFO4_SET(HINIC_L2TYPE_ETH, L2TYPE); task 501 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c task->zero_pad = 0; task 504 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c void hinic_task_set_l2hdr(struct hinic_sq_task *task, u32 len) task 506 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(len, L2HDR_LEN); task 509 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c void hinic_task_set_outter_l3(struct hinic_sq_task *task, task 513 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c task->pkt_info2 |= HINIC_SQ_TASK_INFO2_SET(l3_type, OUTER_L3TYPE) | task 517 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c void hinic_task_set_inner_l3(struct hinic_sq_task *task, task 521 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l3_type, INNER_L3TYPE); task 522 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(network_len, INNER_L3LEN); task 525 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c void hinic_task_set_tunnel_l4(struct hinic_sq_task *task, task 529 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c task->pkt_info2 |= HINIC_SQ_TASK_INFO2_SET(l4_type, TUNNEL_L4TYPE) | task 533 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c void hinic_set_cs_inner_l4(struct hinic_sq_task *task, u32 *queue_info, task 546 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l4_offload, L4_OFFLOAD); task 547 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN); task 557 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c void hinic_set_tso_inner_l4(struct hinic_sq_task *task, u32 *queue_info, task 568 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c task->ufo_v6_identify = ip_ident; task 570 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l4_offload, L4_OFFLOAD); task 571 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(tso || ufo, TSO_FLAG); task 572 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN); task 600 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c sq_prepare_task(&sq_wqe->task); task 731 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 
*wqe_size = sizeof(*ctrl) + sizeof(sq_wqe->task); task 144 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h void hinic_task_set_l2hdr(struct hinic_sq_task *task, u32 len); task 146 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h void hinic_task_set_outter_l3(struct hinic_sq_task *task, task 150 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h void hinic_task_set_inner_l3(struct hinic_sq_task *task, task 154 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h void hinic_task_set_tunnel_l4(struct hinic_sq_task *task, task 158 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h void hinic_set_cs_inner_l4(struct hinic_sq_task *task, task 163 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h void hinic_set_tso_inner_l4(struct hinic_sq_task *task, task 409 drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h struct hinic_sq_task task; task 274 drivers/net/ethernet/huawei/hinic/hinic_tx.c static int offload_tso(struct hinic_sq_task *task, u32 *queue_info, task 308 drivers/net/ethernet/huawei/hinic/hinic_tx.c hinic_task_set_outter_l3(task, l3_offload, task 320 drivers/net/ethernet/huawei/hinic/hinic_tx.c hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len); task 339 drivers/net/ethernet/huawei/hinic/hinic_tx.c hinic_task_set_inner_l3(task, l3_offload, network_hdr_len); task 348 drivers/net/ethernet/huawei/hinic/hinic_tx.c hinic_set_tso_inner_l4(task, queue_info, l4_offload, l4_len, offset, task 354 drivers/net/ethernet/huawei/hinic/hinic_tx.c static int offload_csum(struct hinic_sq_task *task, u32 *queue_info, task 379 drivers/net/ethernet/huawei/hinic/hinic_tx.c hinic_task_set_outter_l3(task, l3_type, task 385 drivers/net/ethernet/huawei/hinic/hinic_tx.c hinic_task_set_tunnel_l4(task, TUNNEL_UDP_NO_CSUM, task 400 drivers/net/ethernet/huawei/hinic/hinic_tx.c hinic_task_set_inner_l3(task, l3_type, network_hdr_len); task 405 drivers/net/ethernet/huawei/hinic/hinic_tx.c hinic_set_cs_inner_l4(task, queue_info, l4_offload, l4_len, offset); task 410 drivers/net/ethernet/huawei/hinic/hinic_tx.c static void offload_vlan(struct hinic_sq_task *task, u32 *queue_info, task 413 drivers/net/ethernet/huawei/hinic/hinic_tx.c task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(vlan_tag, VLAN_TAG) | task 419 drivers/net/ethernet/huawei/hinic/hinic_tx.c static int hinic_tx_offload(struct sk_buff *skb, struct hinic_sq_task *task, task 426 drivers/net/ethernet/huawei/hinic/hinic_tx.c enabled = offload_tso(task, queue_info, skb); task 430 drivers/net/ethernet/huawei/hinic/hinic_tx.c enabled = offload_csum(task, queue_info, skb); task 439 drivers/net/ethernet/huawei/hinic/hinic_tx.c offload_vlan(task, queue_info, vlan_tag, task 445 drivers/net/ethernet/huawei/hinic/hinic_tx.c hinic_task_set_l2hdr(task, skb_network_offset(skb)); task 529 drivers/net/ethernet/huawei/hinic/hinic_tx.c err = hinic_tx_offload(skb, &sq_wqe->task, &sq_wqe->ctrl.queue_info); task 79 drivers/net/ethernet/mellanox/mlx4/cq.c tasklet_schedule(&ctx->task); task 99 drivers/net/ethernet/mellanox/mlx4/cq.c tasklet_schedule(&tasklet_ctx->task); task 1060 drivers/net/ethernet/mellanox/mlx4/eq.c tasklet_init(&eq->tasklet_ctx.task, mlx4_cq_tasklet_cb, task 1104 drivers/net/ethernet/mellanox/mlx4/eq.c tasklet_disable(&eq->tasklet_ctx.task); task 386 drivers/net/ethernet/mellanox/mlx4/mlx4.h struct tasklet_struct task; task 68 drivers/net/ethernet/mellanox/mlx5/core/cq.c tasklet_schedule(&ctx->task); task 170 drivers/net/ethernet/mellanox/mlx5/core/eq.c tasklet_schedule(&eq_comp->tasklet_ctx.task); task 775 drivers/net/ethernet/mellanox/mlx5/core/eq.c 
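The hinic_sq_task setters listed above each OR a shifted, masked value into one 32-bit word of the hardware send-queue descriptor via the HINIC_SQ_TASK_INFO*_SET() macros. A toy version of that field-packing scheme (the macro and field names below are stand-ins, not the real hinic definitions):

    #include <linux/types.h>

    #define MY_TASK_INFO0_L2HDR_LEN_SHIFT    0
    #define MY_TASK_INFO0_L2HDR_LEN_MASK     0xFFU

    #define MY_TASK_INFO0_SET(val, member)                    \
        (((u32)(val) & MY_TASK_INFO0_##member##_MASK) <<      \
         MY_TASK_INFO0_##member##_SHIFT)

    struct my_sq_task {
        u32 pkt_info0;    /* one word of the hw descriptor */
    };

    static void my_task_set_l2hdr(struct my_sq_task *task, u32 len)
    {
        /* OR into the word; the descriptor is zeroed beforehand */
        task->pkt_info0 |= MY_TASK_INFO0_SET(len, L2HDR_LEN);
    }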
tasklet_disable(&eq->tasklet_ctx.task); task 805 drivers/net/ethernet/mellanox/mlx5/core/eq.c tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb, task 15 drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h struct tasklet_struct task; task 66 drivers/net/wireless/rsi/rsi_common.h thread->task = kthread_run(func_ptr, common, "%s", name); task 67 drivers/net/wireless/rsi/rsi_common.h if (IS_ERR(thread->task)) task 68 drivers/net/wireless/rsi/rsi_common.h return (int)PTR_ERR(thread->task); task 78 drivers/net/wireless/rsi/rsi_common.h return kthread_stop(handle->task); task 204 drivers/net/wireless/rsi/rsi_main.h struct task_struct *task; task 180 drivers/net/xen-netback/common.h struct task_struct *task; task 205 drivers/net/xen-netback/interface.c if (queue->task == NULL || task 635 drivers/net/xen-netback/interface.c struct task_struct *task; task 639 drivers/net/xen-netback/interface.c BUG_ON(queue->task); task 688 drivers/net/xen-netback/interface.c task = kthread_create(xenvif_kthread_guest_rx, task 690 drivers/net/xen-netback/interface.c if (IS_ERR(task)) { task 692 drivers/net/xen-netback/interface.c err = PTR_ERR(task); task 695 drivers/net/xen-netback/interface.c queue->task = task; task 696 drivers/net/xen-netback/interface.c get_task_struct(task); task 698 drivers/net/xen-netback/interface.c task = kthread_create(xenvif_dealloc_kthread, task 700 drivers/net/xen-netback/interface.c if (IS_ERR(task)) { task 702 drivers/net/xen-netback/interface.c err = PTR_ERR(task); task 705 drivers/net/xen-netback/interface.c queue->dealloc_task = task; task 707 drivers/net/xen-netback/interface.c wake_up_process(queue->task); task 751 drivers/net/xen-netback/interface.c if (queue->task) { task 752 drivers/net/xen-netback/interface.c kthread_stop(queue->task); task 753 drivers/net/xen-netback/interface.c put_task_struct(queue->task); task 754 drivers/net/xen-netback/interface.c queue->task = NULL; task 60 drivers/oprofile/buffer_sync.c struct task_struct *task = data; task 62 drivers/oprofile/buffer_sync.c list_add(&task->tasks, &dying_tasks); task 304 drivers/oprofile/buffer_sync.c add_user_ctx_switch(struct task_struct const *task, unsigned long cookie) task 308 drivers/oprofile/buffer_sync.c add_event_entry(task->pid); task 313 drivers/oprofile/buffer_sync.c add_event_entry(task->tgid); task 439 drivers/oprofile/buffer_sync.c struct task_struct *task; task 449 drivers/oprofile/buffer_sync.c list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) { task 450 drivers/oprofile/buffer_sync.c list_del(&task->tasks); task 451 drivers/oprofile/buffer_sync.c free_task(task); task 185 drivers/oprofile/cpu_buffer.c int is_kernel, struct task_struct *task) task 207 drivers/oprofile/cpu_buffer.c if (cpu_buf->last_task != task) { task 208 drivers/oprofile/cpu_buffer.c cpu_buf->last_task = task; task 229 drivers/oprofile/cpu_buffer.c op_cpu_buffer_add_data(&entry, (unsigned long)task); task 264 drivers/oprofile/cpu_buffer.c struct task_struct *task) task 266 drivers/oprofile/cpu_buffer.c struct task_struct *tsk = task ? 
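
Note: the rsi and xen-netback entries show the two common kthread idioms behind a task_struct *task field: kthread_run() guarded by IS_ERR(), and the xen-netback sequence of kthread_create(), pinning the thread with get_task_struct(), wake_up_process(), then kthread_stop()/put_task_struct() on teardown. A condensed sketch; the queue type and thread function are hypothetical.

    #include <linux/kthread.h>
    #include <linux/sched/task.h>   /* get_task_struct()/put_task_struct() */
    #include <linux/err.h>

    struct demo_queue {
        struct task_struct *task;
    };

    static int demo_thread_fn(void *data);  /* assumed: loops until kthread_should_stop() */

    static int demo_queue_start(struct demo_queue *queue, const char *name)
    {
        struct task_struct *task;

        task = kthread_create(demo_thread_fn, queue, "%s", name);
        if (IS_ERR(task))
            return PTR_ERR(task);

        queue->task = task;
        get_task_struct(task);     /* hold a reference while we keep the pointer */
        wake_up_process(task);     /* kthread_create() leaves the thread stopped */
        return 0;
    }

    static void demo_queue_stop(struct demo_queue *queue)
    {
        if (queue->task) {
            kthread_stop(queue->task);      /* waits for the thread to exit */
            put_task_struct(queue->task);
            queue->task = NULL;
        }
    }
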
task : current; task 300 drivers/oprofile/cpu_buffer.c struct task_struct *task) task 309 drivers/oprofile/cpu_buffer.c if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event, task)) task 323 drivers/oprofile/cpu_buffer.c struct task_struct *task) task 325 drivers/oprofile/cpu_buffer.c __oprofile_add_ext_sample(pc, regs, event, is_kernel, task); task 727 drivers/platform/chrome/cros_ec_spi.c err = sched_setscheduler_nocheck(ec_spi->high_pri_worker->task, task 564 drivers/pnp/pnpbios/core.c struct task_struct *task; task 570 drivers/pnp/pnpbios/core.c task = kthread_run(pnp_dock_thread, NULL, "kpnpbiosd"); task 571 drivers/pnp/pnpbios/core.c return PTR_ERR_OR_ZERO(task); task 155 drivers/s390/cio/crw.c struct task_struct *task; task 157 drivers/s390/cio/crw.c task = kthread_run(crw_collect_info, NULL, "kmcheck"); task 158 drivers/s390/cio/crw.c if (IS_ERR(task)) task 159 drivers/s390/cio/crw.c return PTR_ERR(task); task 64 drivers/scsi/aic94xx/aic94xx.h int asd_execute_task(struct sas_task *task, gfp_t gfp_flags); task 443 drivers/scsi/aic94xx/aic94xx_scb.c struct sas_task *task = a->uldd_task; task 448 drivers/scsi/aic94xx/aic94xx_scb.c if (task) { task 449 drivers/scsi/aic94xx/aic94xx_scb.c failed_dev = task->dev; task 450 drivers/scsi/aic94xx/aic94xx_scb.c sas_task_abort(task); task 469 drivers/scsi/aic94xx/aic94xx_scb.c struct sas_task *task = a->uldd_task; task 471 drivers/scsi/aic94xx/aic94xx_scb.c if (task && task 472 drivers/scsi/aic94xx/aic94xx_scb.c task->dev == failed_dev && task 474 drivers/scsi/aic94xx/aic94xx_scb.c sas_task_abort(task); task 495 drivers/scsi/aic94xx/aic94xx_scb.c struct sas_task *task = a->uldd_task; task 497 drivers/scsi/aic94xx/aic94xx_scb.c if (!task) task 499 drivers/scsi/aic94xx/aic94xx_scb.c dev = task->dev; task 503 drivers/scsi/aic94xx/aic94xx_scb.c last_dev_task = task; task 521 drivers/scsi/aic94xx/aic94xx_scb.c struct sas_task *task = a->uldd_task; task 523 drivers/scsi/aic94xx/aic94xx_scb.c if (!task) task 525 drivers/scsi/aic94xx/aic94xx_scb.c dev = task->dev; task 529 drivers/scsi/aic94xx/aic94xx_scb.c sas_task_abort(task); task 36 drivers/scsi/aic94xx/aic94xx_task.c static int asd_map_scatterlist(struct sas_task *task, task 40 drivers/scsi/aic94xx/aic94xx_task.c struct asd_ascb *ascb = task->lldd_task; task 45 drivers/scsi/aic94xx/aic94xx_task.c if (task->data_dir == DMA_NONE) task 48 drivers/scsi/aic94xx/aic94xx_task.c if (task->num_scatter == 0) { task 49 drivers/scsi/aic94xx/aic94xx_task.c void *p = task->scatter; task 51 drivers/scsi/aic94xx/aic94xx_task.c task->total_xfer_len, task 52 drivers/scsi/aic94xx/aic94xx_task.c task->data_dir); task 54 drivers/scsi/aic94xx/aic94xx_task.c sg_arr[0].size = cpu_to_le32(task->total_xfer_len); task 61 drivers/scsi/aic94xx/aic94xx_task.c if (sas_protocol_ata(task->task_proto)) task 62 drivers/scsi/aic94xx/aic94xx_task.c num_sg = task->num_scatter; task 64 drivers/scsi/aic94xx/aic94xx_task.c num_sg = dma_map_sg(&asd_ha->pcidev->dev, task->scatter, task 65 drivers/scsi/aic94xx/aic94xx_task.c task->num_scatter, task->data_dir); task 79 drivers/scsi/aic94xx/aic94xx_task.c for_each_sg(task->scatter, sc, num_sg, i) { task 88 drivers/scsi/aic94xx/aic94xx_task.c for_each_sg(task->scatter, sc, 2, i) { task 100 drivers/scsi/aic94xx/aic94xx_task.c for_each_sg(task->scatter, sc, num_sg, i) { task 110 drivers/scsi/aic94xx/aic94xx_task.c if (sas_protocol_ata(task->task_proto)) task 111 drivers/scsi/aic94xx/aic94xx_task.c dma_unmap_sg(&asd_ha->pcidev->dev, task->scatter, task 112 drivers/scsi/aic94xx/aic94xx_task.c 
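
Note: asd_map_scatterlist() above follows the standard LLDD pattern: map task->scatter with dma_map_sg(), then walk the mapped list with for_each_sg() to fill hardware SG descriptors from sg_dma_address()/sg_dma_len(), unmapping again on any later failure. A stripped-down sketch of just that core; the on-wire element layout is a hypothetical stand-in.

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>
    #include <linux/types.h>

    struct demo_hw_sge {                /* hypothetical hardware SG element */
        __le64 bus_addr;
        __le32 size;
        __le32 flags;
    };

    static int demo_map_sg(struct device *dev, struct scatterlist *scatter,
                           int num_scatter, enum dma_data_direction dir,
                           struct demo_hw_sge *sg_arr)
    {
        struct scatterlist *sc;
        int num_sg, i;

        num_sg = dma_map_sg(dev, scatter, num_scatter, dir);
        if (!num_sg)
            return -ENOMEM;

        /* Mirror the mapped list into the hardware descriptors. */
        for_each_sg(scatter, sc, num_sg, i) {
            sg_arr[i].bus_addr = cpu_to_le64(sg_dma_address(sc));
            sg_arr[i].size = cpu_to_le32(sg_dma_len(sc));
            sg_arr[i].flags = 0;
        }
        return num_sg;
    }
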
task->num_scatter, task->data_dir); task 119 drivers/scsi/aic94xx/aic94xx_task.c struct sas_task *task = ascb->uldd_task; task 121 drivers/scsi/aic94xx/aic94xx_task.c if (task->data_dir == DMA_NONE) task 124 drivers/scsi/aic94xx/aic94xx_task.c if (task->num_scatter == 0) { task 128 drivers/scsi/aic94xx/aic94xx_task.c task->total_xfer_len, task->data_dir); task 133 drivers/scsi/aic94xx/aic94xx_task.c if (task->task_proto != SAS_PROTOCOL_STP) task 134 drivers/scsi/aic94xx/aic94xx_task.c dma_unmap_sg(&asd_ha->pcidev->dev, task->scatter, task 135 drivers/scsi/aic94xx/aic94xx_task.c task->num_scatter, task->data_dir); task 144 drivers/scsi/aic94xx/aic94xx_task.c struct sas_task *task = ascb->uldd_task; task 145 drivers/scsi/aic94xx/aic94xx_task.c struct task_status_struct *ts = &task->task_status; task 172 drivers/scsi/aic94xx/aic94xx_task.c if (task->task_proto == SAS_PROTOCOL_SSP) { task 178 drivers/scsi/aic94xx/aic94xx_task.c sas_ssp_task_response(&asd_ha->pcidev->dev, task, iu); task 197 drivers/scsi/aic94xx/aic94xx_task.c struct sas_task *task = ascb->uldd_task; task 198 drivers/scsi/aic94xx/aic94xx_task.c struct task_status_struct *ts = &task->task_status; task 309 drivers/scsi/aic94xx/aic94xx_task.c switch (task->task_proto) { task 323 drivers/scsi/aic94xx/aic94xx_task.c spin_lock_irqsave(&task->task_state_lock, flags); task 324 drivers/scsi/aic94xx/aic94xx_task.c task->task_state_flags &= ~SAS_TASK_STATE_PENDING; task 325 drivers/scsi/aic94xx/aic94xx_task.c task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; task 326 drivers/scsi/aic94xx/aic94xx_task.c task->task_state_flags |= SAS_TASK_STATE_DONE; task 327 drivers/scsi/aic94xx/aic94xx_task.c if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) { task 329 drivers/scsi/aic94xx/aic94xx_task.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 332 drivers/scsi/aic94xx/aic94xx_task.c task, opcode, ts->resp, ts->stat); task 336 drivers/scsi/aic94xx/aic94xx_task.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 337 drivers/scsi/aic94xx/aic94xx_task.c task->lldd_task = NULL; task 340 drivers/scsi/aic94xx/aic94xx_task.c task->task_done(task); task 346 drivers/scsi/aic94xx/aic94xx_task.c static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task, task 349 drivers/scsi/aic94xx/aic94xx_task.c struct domain_device *dev = task->dev; task 356 drivers/scsi/aic94xx/aic94xx_task.c if (unlikely(task->ata_task.device_control_reg_update)) task 367 drivers/scsi/aic94xx/aic94xx_task.c scb->ata_task.total_xfer_len = cpu_to_le32(task->total_xfer_len); task 368 drivers/scsi/aic94xx/aic94xx_task.c scb->ata_task.fis = task->ata_task.fis; task 369 drivers/scsi/aic94xx/aic94xx_task.c if (likely(!task->ata_task.device_control_reg_update)) task 373 drivers/scsi/aic94xx/aic94xx_task.c memcpy(scb->ata_task.atapi_packet, task->ata_task.atapi_packet, task 379 drivers/scsi/aic94xx/aic94xx_task.c if (likely(!task->ata_task.device_control_reg_update)) { task 381 drivers/scsi/aic94xx/aic94xx_task.c if (task->ata_task.dma_xfer) task 383 drivers/scsi/aic94xx/aic94xx_task.c if (task->ata_task.use_ncq && task 386 drivers/scsi/aic94xx/aic94xx_task.c flags |= data_dir_flags[task->data_dir]; task 389 drivers/scsi/aic94xx/aic94xx_task.c scb->ata_task.retry_count = task->ata_task.retry_count; task 392 drivers/scsi/aic94xx/aic94xx_task.c if (task->ata_task.set_affil_pol) task 394 drivers/scsi/aic94xx/aic94xx_task.c if (task->ata_task.stp_affil_pol) task 400 drivers/scsi/aic94xx/aic94xx_task.c if 
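
Note: the aic94xx_task.c completion records here show the libsas completion contract: update task_state_flags under task_state_lock, back off if the task was already marked aborted (the error handler owns it then), otherwise clear lldd_task and call task->task_done(). Roughly, as a sketch:

    #include <linux/spinlock.h>
    #include <scsi/libsas.h>

    /* Final step of an LLDD completion handler, per the pattern above. */
    static void demo_task_complete(struct sas_task *task)
    {
        unsigned long flags;

        spin_lock_irqsave(&task->task_state_lock, flags);
        task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
        task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
        task->task_state_flags |= SAS_TASK_STATE_DONE;
        if (unlikely(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
            /* Error handling already claimed this task; do not complete it. */
            spin_unlock_irqrestore(&task->task_state_lock, flags);
            return;
        }
        spin_unlock_irqrestore(&task->task_state_lock, flags);

        task->lldd_task = NULL;     /* driver's per-task context is released */
        task->task_done(task);      /* hand the result back to libsas */
    }
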
(likely(!task->ata_task.device_control_reg_update)) task 401 drivers/scsi/aic94xx/aic94xx_task.c res = asd_map_scatterlist(task, scb->ata_task.sg_element, task 414 drivers/scsi/aic94xx/aic94xx_task.c static int asd_build_smp_ascb(struct asd_ascb *ascb, struct sas_task *task, task 418 drivers/scsi/aic94xx/aic94xx_task.c struct domain_device *dev = task->dev; task 421 drivers/scsi/aic94xx/aic94xx_task.c dma_map_sg(&asd_ha->pcidev->dev, &task->smp_task.smp_req, 1, task 423 drivers/scsi/aic94xx/aic94xx_task.c dma_map_sg(&asd_ha->pcidev->dev, &task->smp_task.smp_resp, 1, task 433 drivers/scsi/aic94xx/aic94xx_task.c cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req)); task 435 drivers/scsi/aic94xx/aic94xx_task.c cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4); task 438 drivers/scsi/aic94xx/aic94xx_task.c cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp)); task 440 drivers/scsi/aic94xx/aic94xx_task.c cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4); task 453 drivers/scsi/aic94xx/aic94xx_task.c struct sas_task *task = a->uldd_task; task 455 drivers/scsi/aic94xx/aic94xx_task.c BUG_ON(!task); task 456 drivers/scsi/aic94xx/aic94xx_task.c dma_unmap_sg(&a->ha->pcidev->dev, &task->smp_task.smp_req, 1, task 458 drivers/scsi/aic94xx/aic94xx_task.c dma_unmap_sg(&a->ha->pcidev->dev, &task->smp_task.smp_resp, 1, task 464 drivers/scsi/aic94xx/aic94xx_task.c static int asd_build_ssp_ascb(struct asd_ascb *ascb, struct sas_task *task, task 467 drivers/scsi/aic94xx/aic94xx_task.c struct domain_device *dev = task->dev; task 477 drivers/scsi/aic94xx/aic94xx_task.c scb->ssp_task.total_xfer_len = cpu_to_le32(task->total_xfer_len); task 485 drivers/scsi/aic94xx/aic94xx_task.c memcpy(scb->ssp_task.ssp_cmd.lun, task->ssp_task.LUN, 8); task 486 drivers/scsi/aic94xx/aic94xx_task.c if (task->ssp_task.enable_first_burst) task 488 drivers/scsi/aic94xx/aic94xx_task.c scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_prio << 3); task 489 drivers/scsi/aic94xx/aic94xx_task.c scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_attr & 7); task 490 drivers/scsi/aic94xx/aic94xx_task.c memcpy(scb->ssp_task.ssp_cmd.cdb, task->ssp_task.cmd->cmnd, task 491 drivers/scsi/aic94xx/aic94xx_task.c task->ssp_task.cmd->cmd_len); task 496 drivers/scsi/aic94xx/aic94xx_task.c scb->ssp_task.data_dir = data_dir_flags[task->data_dir]; task 501 drivers/scsi/aic94xx/aic94xx_task.c res = asd_map_scatterlist(task, scb->ssp_task.sg_element, gfp_flags); task 528 drivers/scsi/aic94xx/aic94xx_task.c int asd_execute_task(struct sas_task *task, gfp_t gfp_flags) task 532 drivers/scsi/aic94xx/aic94xx_task.c struct sas_task *t = task; task 534 drivers/scsi/aic94xx/aic94xx_task.c struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; task 214 drivers/scsi/aic94xx/aic94xx_tmf.c static int asd_clear_nexus_tag(struct sas_task *task) task 216 drivers/scsi/aic94xx/aic94xx_tmf.c struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; task 217 drivers/scsi/aic94xx/aic94xx_tmf.c struct asd_ascb *tascb = task->lldd_task; task 221 drivers/scsi/aic94xx/aic94xx_tmf.c memcpy(scb->clear_nexus.ssp_task.lun, task->ssp_task.LUN, 8); task 223 drivers/scsi/aic94xx/aic94xx_tmf.c if (task->dev->tproto) task 225 drivers/scsi/aic94xx/aic94xx_tmf.c task->dev->lldd_dev); task 229 drivers/scsi/aic94xx/aic94xx_tmf.c static int asd_clear_nexus_index(struct sas_task *task) task 231 drivers/scsi/aic94xx/aic94xx_tmf.c struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; task 232 drivers/scsi/aic94xx/aic94xx_tmf.c struct asd_ascb 
*tascb = task->lldd_task; task 236 drivers/scsi/aic94xx/aic94xx_tmf.c if (task->dev->tproto) task 238 drivers/scsi/aic94xx/aic94xx_tmf.c task->dev->lldd_dev); task 324 drivers/scsi/aic94xx/aic94xx_tmf.c static int asd_clear_nexus(struct sas_task *task) task 328 drivers/scsi/aic94xx/aic94xx_tmf.c struct asd_ascb *tascb = task->lldd_task; task 336 drivers/scsi/aic94xx/aic94xx_tmf.c res = asd_clear_nexus_tag(task); task 338 drivers/scsi/aic94xx/aic94xx_tmf.c res = asd_clear_nexus_index(task); task 343 drivers/scsi/aic94xx/aic94xx_tmf.c spin_lock_irqsave(&task->task_state_lock, flags); task 346 drivers/scsi/aic94xx/aic94xx_tmf.c if (task->task_state_flags & SAS_TASK_STATE_DONE) task 348 drivers/scsi/aic94xx/aic94xx_tmf.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 386 drivers/scsi/aic94xx/aic94xx_tmf.c int asd_abort_task(struct sas_task *task) task 388 drivers/scsi/aic94xx/aic94xx_tmf.c struct asd_ascb *tascb = task->lldd_task; task 401 drivers/scsi/aic94xx/aic94xx_tmf.c spin_lock_irqsave(&task->task_state_lock, flags); task 402 drivers/scsi/aic94xx/aic94xx_tmf.c if (task->task_state_flags & SAS_TASK_STATE_DONE) { task 403 drivers/scsi/aic94xx/aic94xx_tmf.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 405 drivers/scsi/aic94xx/aic94xx_tmf.c ASD_DPRINTK("%s: task 0x%p done\n", __func__, task); task 408 drivers/scsi/aic94xx/aic94xx_tmf.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 419 drivers/scsi/aic94xx/aic94xx_tmf.c switch (task->task_proto) { task 426 drivers/scsi/aic94xx/aic94xx_tmf.c scb->abort_task.proto_conn_rate |= task->dev->linkrate; task 434 drivers/scsi/aic94xx/aic94xx_tmf.c if (task->task_proto == SAS_PROTOCOL_SSP) { task 437 drivers/scsi/aic94xx/aic94xx_tmf.c task->dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); task 439 drivers/scsi/aic94xx/aic94xx_tmf.c task->dev->port->ha->hashed_sas_addr, task 443 drivers/scsi/aic94xx/aic94xx_tmf.c memcpy(scb->abort_task.ssp_task.lun, task->ssp_task.LUN, 8); task 450 drivers/scsi/aic94xx/aic94xx_tmf.c (u16)(unsigned long)task->dev->lldd_dev); task 465 drivers/scsi/aic94xx/aic94xx_tmf.c spin_lock_irqsave(&task->task_state_lock, flags); task 466 drivers/scsi/aic94xx/aic94xx_tmf.c if (task->task_state_flags & SAS_TASK_STATE_DONE) { task 467 drivers/scsi/aic94xx/aic94xx_tmf.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 469 drivers/scsi/aic94xx/aic94xx_tmf.c ASD_DPRINTK("%s: task 0x%p done\n", __func__, task); task 472 drivers/scsi/aic94xx/aic94xx_tmf.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 478 drivers/scsi/aic94xx/aic94xx_tmf.c res = asd_clear_nexus(task); task 492 drivers/scsi/aic94xx/aic94xx_tmf.c res = asd_clear_nexus(task); task 508 drivers/scsi/aic94xx/aic94xx_tmf.c spin_lock_irqsave(&task->task_state_lock, flags); task 511 drivers/scsi/aic94xx/aic94xx_tmf.c if (task->task_state_flags & SAS_TASK_STATE_DONE) task 513 drivers/scsi/aic94xx/aic94xx_tmf.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 528 drivers/scsi/aic94xx/aic94xx_tmf.c task->lldd_task = NULL; task 532 drivers/scsi/aic94xx/aic94xx_tmf.c ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res); task 537 drivers/scsi/aic94xx/aic94xx_tmf.c ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res); task 684 drivers/scsi/aic94xx/aic94xx_tmf.c int asd_query_task(struct sas_task *task) task 686 drivers/scsi/aic94xx/aic94xx_tmf.c struct asd_ascb *ascb = task->lldd_task; task 691 drivers/scsi/aic94xx/aic94xx_tmf.c return asd_initiate_ssp_tmf(task->dev, task->ssp_task.LUN, task 1023 
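
Note: asd_abort_task() above checks SAS_TASK_STATE_DONE under task_state_lock both before building the ABORT TASK TMF and again after it returns, because the target can complete the command while the abort is in flight. The shape of that double-check, sketched (the real code also inspects the TMF status and may fall back to clear-nexus):

    #include <scsi/libsas.h>

    static int demo_abort_task(struct sas_task *task)
    {
        unsigned long flags;

        spin_lock_irqsave(&task->task_state_lock, flags);
        if (task->task_state_flags & SAS_TASK_STATE_DONE) {
            spin_unlock_irqrestore(&task->task_state_lock, flags);
            return TMF_RESP_FUNC_COMPLETE;  /* already finished, nothing to abort */
        }
        spin_unlock_irqrestore(&task->task_state_lock, flags);

        /* ... issue the ABORT TASK TMF and wait for its response ... */

        spin_lock_irqsave(&task->task_state_lock, flags);
        if (task->task_state_flags & SAS_TASK_STATE_DONE) {
            /* Raced with normal completion: report success anyway. */
            spin_unlock_irqrestore(&task->task_state_lock, flags);
            return TMF_RESP_FUNC_COMPLETE;
        }
        spin_unlock_irqrestore(&task->task_state_lock, flags);

        return TMF_RESP_FUNC_FAILED;   /* simplified terminal case */
    }
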
drivers/scsi/be2iscsi/be_iscsi.c beiscsi_conn->task); task 272 drivers/scsi/be2iscsi/be_main.c struct iscsi_task *task[BE_INVLDT_CMD_TBL_SZ]; task 280 drivers/scsi/be2iscsi/be_main.c struct iscsi_task *task; task 308 drivers/scsi/be2iscsi/be_main.c task = conn->session->cmds[i]; task 309 drivers/scsi/be2iscsi/be_main.c if (!task->sc) task 312 drivers/scsi/be2iscsi/be_main.c if (sc->device->lun != task->sc->device->lun) task 324 drivers/scsi/be2iscsi/be_main.c __iscsi_get_task(task); task 325 drivers/scsi/be2iscsi/be_main.c io_task = task->dd_data; task 337 drivers/scsi/be2iscsi/be_main.c inv_tbl->task[nents] = task; task 363 drivers/scsi/be2iscsi/be_main.c iscsi_put_task(inv_tbl->task[i]); task 1101 drivers/scsi/be2iscsi/be_main.c struct iscsi_task *task, task 1104 drivers/scsi/be2iscsi/be_main.c struct beiscsi_io_task *io_task = task->dd_data; task 1120 drivers/scsi/be2iscsi/be_main.c if (!task->sc) { task 1128 drivers/scsi/be2iscsi/be_main.c task->sc->result = (DID_OK << 16) | status; task 1130 drivers/scsi/be2iscsi/be_main.c task->sc->result = DID_ERROR << 16; task 1137 drivers/scsi/be2iscsi/be_main.c task->sc->result = DID_ERROR << 16; task 1140 drivers/scsi/be2iscsi/be_main.c scsi_set_resid(task->sc, resid); task 1141 drivers/scsi/be2iscsi/be_main.c if (!status && (scsi_bufflen(task->sc) - resid < task 1142 drivers/scsi/be2iscsi/be_main.c task->sc->underflow)) task 1143 drivers/scsi/be2iscsi/be_main.c task->sc->result = DID_ERROR << 16; task 1153 drivers/scsi/be2iscsi/be_main.c memcpy(task->sc->sense_buffer, sense, task 1164 drivers/scsi/be2iscsi/be_main.c iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn); task 1169 drivers/scsi/be2iscsi/be_main.c struct iscsi_task *task, task 1173 drivers/scsi/be2iscsi/be_main.c struct beiscsi_io_task *io_task = task->dd_data; task 1176 drivers/scsi/be2iscsi/be_main.c hdr = (struct iscsi_logout_rsp *)task->hdr; task 1196 drivers/scsi/be2iscsi/be_main.c struct iscsi_task *task, task 1201 drivers/scsi/be2iscsi/be_main.c struct beiscsi_io_task *io_task = task->dd_data; task 1203 drivers/scsi/be2iscsi/be_main.c hdr = (struct iscsi_tm_rsp *)task->hdr; task 1224 drivers/scsi/be2iscsi/be_main.c struct iscsi_task *task; task 1244 drivers/scsi/be2iscsi/be_main.c task = pwrb_handle->pio_handle; task 1245 drivers/scsi/be2iscsi/be_main.c if (task) task 1246 drivers/scsi/be2iscsi/be_main.c __iscsi_put_task(task); task 1252 drivers/scsi/be2iscsi/be_main.c struct iscsi_task *task, task 1257 drivers/scsi/be2iscsi/be_main.c struct beiscsi_io_task *io_task = task->dd_data; task 1259 drivers/scsi/be2iscsi/be_main.c hdr = (struct iscsi_nopin *)task->hdr; task 1333 drivers/scsi/be2iscsi/be_main.c struct iscsi_task *task; task 1349 drivers/scsi/be2iscsi/be_main.c task = pwrb_handle->pio_handle; task 1350 drivers/scsi/be2iscsi/be_main.c if (!task) { task 1354 drivers/scsi/be2iscsi/be_main.c type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type; task 1359 drivers/scsi/be2iscsi/be_main.c if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == task 1361 drivers/scsi/be2iscsi/be_main.c be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe); task 1363 drivers/scsi/be2iscsi/be_main.c be_complete_io(beiscsi_conn, task, &csol_cqe); task 1367 drivers/scsi/be2iscsi/be_main.c if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) task 1368 drivers/scsi/be2iscsi/be_main.c be_complete_logout(beiscsi_conn, task, &csol_cqe); task 1370 drivers/scsi/be2iscsi/be_main.c be_complete_tmf(beiscsi_conn, task, &csol_cqe); task 1381 drivers/scsi/be2iscsi/be_main.c 
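
Note: be_complete_io() above shows the standard way an iSCSI offload driver finishes a scsi_cmnd: fold the SCSI status into sc->result with a host byte, account the residual via scsi_set_resid(), fail short transfers that violate sc->underflow, and copy any sense data. A hedged sketch of those steps (helper name and parameters are illustrative):

    #include <linux/string.h>
    #include <scsi/scsi.h>
    #include <scsi/scsi_cmnd.h>

    static void demo_complete_scsi(struct scsi_cmnd *sc, u8 status,
                                   u32 resid, const u8 *sense, int sense_len)
    {
        sc->result = (DID_OK << 16) | status;     /* host byte + SCSI status */

        scsi_set_resid(sc, resid);
        /* A short transfer below the underflow limit is reported as an error. */
        if (!status && (scsi_bufflen(sc) - resid < sc->underflow))
            sc->result = DID_ERROR << 16;

        if (sense && sense_len > 0)
            memcpy(sc->sense_buffer, sense,
                   min(sense_len, SCSI_SENSE_BUFFERSIZE));
    }
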
be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe); task 1414 drivers/scsi/be2iscsi/be_main.c struct iscsi_task *task; task 1434 drivers/scsi/be2iscsi/be_main.c task = conn->login_task; task 1435 drivers/scsi/be2iscsi/be_main.c io_task = task->dd_data; task 2261 drivers/scsi/be2iscsi/be_main.c static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) task 2264 drivers/scsi/be2iscsi/be_main.c struct beiscsi_io_task *io_task = task->dd_data; task 2275 drivers/scsi/be2iscsi/be_main.c if (task->data) { task 2278 drivers/scsi/be2iscsi/be_main.c dsp_value = (task->data_count) ? 1 : 0; task 2290 drivers/scsi/be2iscsi/be_main.c task->data, task 2291 drivers/scsi/be2iscsi/be_main.c task->data_count, task 2296 drivers/scsi/be2iscsi/be_main.c io_task->mtask_data_count = task->data_count; task 2305 drivers/scsi/be2iscsi/be_main.c task->data_count); task 2321 drivers/scsi/be2iscsi/be_main.c if (task->data) { task 2331 drivers/scsi/be2iscsi/be_main.c if (task->data) { task 4215 drivers/scsi/be2iscsi/be_main.c struct iscsi_task *task) task 4227 drivers/scsi/be2iscsi/be_main.c io_task = task->dd_data; task 4253 drivers/scsi/be2iscsi/be_main.c static void beiscsi_cleanup_task(struct iscsi_task *task) task 4255 drivers/scsi/be2iscsi/be_main.c struct beiscsi_io_task *io_task = task->dd_data; task 4256 drivers/scsi/be2iscsi/be_main.c struct iscsi_conn *conn = task->conn; task 4272 drivers/scsi/be2iscsi/be_main.c task->hdr = NULL; task 4275 drivers/scsi/be2iscsi/be_main.c if (task->sc) { task 4294 drivers/scsi/be2iscsi/be_main.c beiscsi_free_mgmt_task_handles(beiscsi_conn, task); task 4305 drivers/scsi/be2iscsi/be_main.c struct iscsi_task *task = beiscsi_conn->task; task 4306 drivers/scsi/be2iscsi/be_main.c struct iscsi_session *session = task->conn->session; task 4315 drivers/scsi/be2iscsi/be_main.c beiscsi_cleanup_task(task); task 4373 drivers/scsi/be2iscsi/be_main.c static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) task 4375 drivers/scsi/be2iscsi/be_main.c struct beiscsi_io_task *io_task = task->dd_data; task 4376 drivers/scsi/be2iscsi/be_main.c struct iscsi_conn *conn = task->conn; task 4391 drivers/scsi/be2iscsi/be_main.c io_task->libiscsi_itt = (itt_t)task->itt; task 4394 drivers/scsi/be2iscsi/be_main.c task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr; task 4395 drivers/scsi/be2iscsi/be_main.c task->hdr_max = sizeof(struct be_cmd_bhs); task 4399 drivers/scsi/be2iscsi/be_main.c if (task->sc) { task 4423 drivers/scsi/be2iscsi/be_main.c beiscsi_conn->task = task; task 4494 drivers/scsi/be2iscsi/be_main.c io_task->pwrb_handle->pio_handle = task; task 4518 drivers/scsi/be2iscsi/be_main.c static int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg, task 4523 drivers/scsi/be2iscsi/be_main.c struct beiscsi_io_task *io_task = task->dd_data; task 4524 drivers/scsi/be2iscsi/be_main.c struct iscsi_conn *conn = task->conn; task 4554 drivers/scsi/be2iscsi/be_main.c be32_to_cpu(task->cmdsn)); task 4579 drivers/scsi/be2iscsi/be_main.c static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, task 4584 drivers/scsi/be2iscsi/be_main.c struct beiscsi_io_task *io_task = task->dd_data; task 4585 drivers/scsi/be2iscsi/be_main.c struct iscsi_conn *conn = task->conn; task 4614 drivers/scsi/be2iscsi/be_main.c be32_to_cpu(task->cmdsn)); task 4640 drivers/scsi/be2iscsi/be_main.c static int beiscsi_mtask(struct iscsi_task *task) task 4642 drivers/scsi/be2iscsi/be_main.c struct beiscsi_io_task *io_task = task->dd_data; task 4643 
drivers/scsi/be2iscsi/be_main.c struct iscsi_conn *conn = task->conn; task 4657 drivers/scsi/be2iscsi/be_main.c be32_to_cpu(task->cmdsn)); task 4663 drivers/scsi/be2iscsi/be_main.c task->data_count); task 4675 drivers/scsi/be2iscsi/be_main.c be32_to_cpu(task->cmdsn)); task 4681 drivers/scsi/be2iscsi/be_main.c task->data_count); task 4694 drivers/scsi/be2iscsi/be_main.c switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { task 4698 drivers/scsi/be2iscsi/be_main.c ret = hwi_write_buffer(pwrb, task); task 4701 drivers/scsi/be2iscsi/be_main.c if (task->hdr->ttt != ISCSI_RESERVED_TAG) { task 4718 drivers/scsi/be2iscsi/be_main.c ret = hwi_write_buffer(pwrb, task); task 4722 drivers/scsi/be2iscsi/be_main.c ret = hwi_write_buffer(pwrb, task); task 4726 drivers/scsi/be2iscsi/be_main.c ret = hwi_write_buffer(pwrb, task); task 4730 drivers/scsi/be2iscsi/be_main.c ret = hwi_write_buffer(pwrb, task); task 4736 drivers/scsi/be2iscsi/be_main.c task->hdr->opcode & ISCSI_OPCODE_MASK); task 4758 drivers/scsi/be2iscsi/be_main.c static int beiscsi_task_xmit(struct iscsi_task *task) task 4760 drivers/scsi/be2iscsi/be_main.c struct beiscsi_io_task *io_task = task->dd_data; task 4761 drivers/scsi/be2iscsi/be_main.c struct scsi_cmnd *sc = task->sc; task 4777 drivers/scsi/be2iscsi/be_main.c task->hdr->exp_statsn = 0; task 4780 drivers/scsi/be2iscsi/be_main.c return beiscsi_mtask(task); task 4807 drivers/scsi/be2iscsi/be_main.c return phba->iotask_fn(task, sg, num_sg, xferlen, writedir); task 212 drivers/scsi/be2iscsi/be_main.h struct iscsi_task *task; task 418 drivers/scsi/be2iscsi/be_main.h struct iscsi_task *task; task 796 drivers/scsi/be2iscsi/be_main.h struct iscsi_task *task); task 443 drivers/scsi/bnx2fc/bnx2fc.h struct fcoe_task_ctx_entry *task; task 533 drivers/scsi/bnx2fc/bnx2fc.h struct fcoe_task_ctx_entry *task, task 536 drivers/scsi/bnx2fc/bnx2fc.h struct fcoe_task_ctx_entry *task, task 540 drivers/scsi/bnx2fc/bnx2fc.h struct fcoe_task_ctx_entry *task); task 542 drivers/scsi/bnx2fc/bnx2fc.h struct fcoe_task_ctx_entry *task); task 552 drivers/scsi/bnx2fc/bnx2fc.h struct fcoe_task_ctx_entry *task, task 555 drivers/scsi/bnx2fc/bnx2fc.h struct fcoe_task_ctx_entry *task, task 558 drivers/scsi/bnx2fc/bnx2fc.h struct fcoe_task_ctx_entry *task, task 561 drivers/scsi/bnx2fc/bnx2fc.h struct fcoe_task_ctx_entry *task, task 564 drivers/scsi/bnx2fc/bnx2fc.h struct fcoe_task_ctx_entry *task, task 591 drivers/scsi/bnx2fc/bnx2fc.h struct fcoe_task_ctx_entry *task, task 95 drivers/scsi/bnx2fc/bnx2fc_els.c rrq.rrq_rx_id = htons(aborted_io_req->task->rxwr_txrd.var_ctx.rx_id); task 608 drivers/scsi/bnx2fc/bnx2fc_els.c rec.rec_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id); task 648 drivers/scsi/bnx2fc/bnx2fc_els.c srr.srr_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id); task 682 drivers/scsi/bnx2fc/bnx2fc_els.c struct fcoe_task_ctx_entry *task; task 776 drivers/scsi/bnx2fc/bnx2fc_els.c task = &(task_page[index]); task 777 drivers/scsi/bnx2fc/bnx2fc_els.c bnx2fc_init_mp_task(els_req, task); task 806 drivers/scsi/bnx2fc/bnx2fc_els.c struct fcoe_task_ctx_entry *task, u8 num_rq) task 840 drivers/scsi/bnx2fc/bnx2fc_els.c &task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr; task 846 drivers/scsi/bnx2fc/bnx2fc_els.c task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len; task 868 drivers/scsi/bnx2fc/bnx2fc_hwi.c struct fcoe_task_ctx_entry *task; task 890 drivers/scsi/bnx2fc/bnx2fc_hwi.c task = &(task_page[index]); task 892 drivers/scsi/bnx2fc/bnx2fc_hwi.c num_rq = ((task->rxwr_txrd.var_ctx.rx_flags & task 907 
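
Note: beiscsi_mtask() just above (and bnx2i_iscsi_send_generic_request() further down) both key management-PDU handling off task->hdr->opcode & ISCSI_OPCODE_MASK. The dispatch skeleton, with hypothetical per-opcode senders:

    #include <scsi/iscsi_proto.h>
    #include <scsi/libiscsi.h>

    /* Hypothetical per-opcode senders; each builds and posts one request. */
    static int demo_send_login(struct iscsi_task *task);
    static int demo_send_nopout(struct iscsi_task *task);
    static int demo_send_logout(struct iscsi_task *task);
    static int demo_send_tmf(struct iscsi_task *task);
    static int demo_send_text(struct iscsi_task *task);

    static int demo_send_mgmt_pdu(struct iscsi_task *task)
    {
        switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
        case ISCSI_OP_LOGIN:
            return demo_send_login(task);
        case ISCSI_OP_NOOP_OUT:
            return demo_send_nopout(task);
        case ISCSI_OP_LOGOUT:
            return demo_send_logout(task);
        case ISCSI_OP_SCSI_TMFUNC:
            return demo_send_tmf(task);
        case ISCSI_OP_TEXT:
            return demo_send_text(task);
        default:
            pr_err("unsupported iSCSI opcode 0x%x\n",
                   task->hdr->opcode & ISCSI_OPCODE_MASK);
            return -EINVAL;
        }
    }
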
drivers/scsi/bnx2fc/bnx2fc_hwi.c rx_state = ((task->rxwr_txrd.var_ctx.rx_flags & task 915 drivers/scsi/bnx2fc/bnx2fc_hwi.c bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq); task 921 drivers/scsi/bnx2fc/bnx2fc_hwi.c bnx2fc_process_abts_compl(io_req, task, num_rq); task 924 drivers/scsi/bnx2fc/bnx2fc_hwi.c bnx2fc_process_cleanup_compl(io_req, task, num_rq); task 932 drivers/scsi/bnx2fc/bnx2fc_hwi.c bnx2fc_process_tm_compl(io_req, task, num_rq); task 947 drivers/scsi/bnx2fc/bnx2fc_hwi.c bnx2fc_process_els_compl(io_req, task, num_rq); task 949 drivers/scsi/bnx2fc/bnx2fc_hwi.c bnx2fc_process_abts_compl(io_req, task, num_rq); task 952 drivers/scsi/bnx2fc/bnx2fc_hwi.c bnx2fc_process_cleanup_compl(io_req, task, num_rq); task 966 drivers/scsi/bnx2fc/bnx2fc_hwi.c bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state); task 1451 drivers/scsi/bnx2fc/bnx2fc_hwi.c struct fcoe_task_ctx_entry *task, task 1469 drivers/scsi/bnx2fc/bnx2fc_hwi.c memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); task 1477 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_rxrd.const_ctx.tx_flags = task 1481 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_rxrd.const_ctx.init_flags = task_type << task 1483 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << task 1485 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->rxwr_txrd.const_ctx.init_flags = context_id << task 1487 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->rxwr_txrd.const_ctx.init_flags = context_id << task 1490 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid; task 1492 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0; task 1493 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset; task 1506 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = task 1508 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = task 1510 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = task 1512 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off = task 1514 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i; task 1520 drivers/scsi/bnx2fc/bnx2fc_hwi.c sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; task 1527 drivers/scsi/bnx2fc/bnx2fc_hwi.c memset(&task->rxwr_only.rx_seq_ctx, 0, task 1529 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset; task 1530 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset; task 1534 drivers/scsi/bnx2fc/bnx2fc_hwi.c struct fcoe_task_ctx_entry *task, task 1541 drivers/scsi/bnx2fc/bnx2fc_hwi.c memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); task 1545 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_rxrd.const_ctx.init_flags = task_type << task 1547 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << task 1550 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_rxrd.const_ctx.init_flags |= task 1554 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_rxrd.const_ctx.init_flags |= task 1557 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid; task 1560 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_rxrd.const_ctx.tx_flags = task 1565 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->rxwr_txrd.const_ctx.init_flags = context_id << task 1567 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->rxwr_txrd.var_ctx.rx_flags |= 
1 << task 1572 drivers/scsi/bnx2fc/bnx2fc_hwi.c struct fcoe_task_ctx_entry *task) task 1592 drivers/scsi/bnx2fc/bnx2fc_hwi.c memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); task 1595 drivers/scsi/bnx2fc/bnx2fc_hwi.c io_req->task = task; task 1603 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = task 1605 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = task 1607 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1; task 1612 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_rxrd.const_ctx.init_flags = task_type << task 1615 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_rxrd.const_ctx.init_flags |= task 1619 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_rxrd.const_ctx.init_flags |= task 1622 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << task 1626 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT << task 1630 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len; task 1633 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->rxwr_txrd.var_ctx.rx_flags |= 1 << task 1637 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->rxwr_txrd.const_ctx.init_flags = context_id << task 1644 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->rxwr_txrd.var_ctx.rx_id = 0xffff; task 1650 drivers/scsi/bnx2fc/bnx2fc_hwi.c hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr; task 1658 drivers/scsi/bnx2fc/bnx2fc_hwi.c sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; task 1668 drivers/scsi/bnx2fc/bnx2fc_hwi.c struct fcoe_task_ctx_entry *task) task 1683 drivers/scsi/bnx2fc/bnx2fc_hwi.c memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); task 1686 drivers/scsi/bnx2fc/bnx2fc_hwi.c io_req->task = task; task 1695 drivers/scsi/bnx2fc/bnx2fc_hwi.c cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge; task 1700 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo = task 1703 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi = task 1706 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem = task 1710 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_rxrd.const_ctx.init_flags |= 1 << task 1713 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = task 1715 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = task 1717 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = task 1724 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_rxrd.const_ctx.init_flags |= task_type << task 1727 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_rxrd.const_ctx.init_flags |= task 1733 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_rxrd.const_ctx.init_flags |= task 1736 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << task 1739 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL << task 1743 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1; task 1747 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_rxrd.union_ctx.fcp_cmd.opaque; task 1759 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len; task 1762 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->rxwr_txrd.const_ctx.init_flags = context_id << task 1767 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->rxwr_txrd.var_ctx.rx_flags |= 1 << task 1770 
drivers/scsi/bnx2fc/bnx2fc_hwi.c task->rxwr_txrd.var_ctx.rx_id = 0xffff; task 1776 drivers/scsi/bnx2fc/bnx2fc_hwi.c sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; task 1787 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_rxrd.const_ctx.init_flags |= 1 << task 1802 drivers/scsi/bnx2fc/bnx2fc_hwi.c task->txwr_rxrd.const_ctx.init_flags |= 1 << task 671 drivers/scsi/bnx2fc/bnx2fc_io.c struct fcoe_task_ctx_entry *task; task 765 drivers/scsi/bnx2fc/bnx2fc_io.c task = &(task_page[index]); task 766 drivers/scsi/bnx2fc/bnx2fc_io.c bnx2fc_init_mp_task(io_req, task); task 830 drivers/scsi/bnx2fc/bnx2fc_io.c struct fcoe_task_ctx_entry *task; task 886 drivers/scsi/bnx2fc/bnx2fc_io.c fc_hdr->fh_rx_id = htons(io_req->task->rxwr_txrd.var_ctx.rx_id); task 903 drivers/scsi/bnx2fc/bnx2fc_io.c task = &(task_page[index]); task 904 drivers/scsi/bnx2fc/bnx2fc_io.c bnx2fc_init_mp_task(abts_io_req, task); task 937 drivers/scsi/bnx2fc/bnx2fc_io.c struct fcoe_task_ctx_entry *task; task 979 drivers/scsi/bnx2fc/bnx2fc_io.c task = &(task_page[index]); task 987 drivers/scsi/bnx2fc/bnx2fc_io.c bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset); task 1004 drivers/scsi/bnx2fc/bnx2fc_io.c struct fcoe_task_ctx_entry *task; task 1037 drivers/scsi/bnx2fc/bnx2fc_io.c task = &(task_page[index]); task 1042 drivers/scsi/bnx2fc/bnx2fc_io.c bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid); task 1285 drivers/scsi/bnx2fc/bnx2fc_io.c struct fcoe_task_ctx_entry *task, task 1320 drivers/scsi/bnx2fc/bnx2fc_io.c struct fcoe_task_ctx_entry *task, task 1352 drivers/scsi/bnx2fc/bnx2fc_io.c struct fcoe_task_ctx_entry *task, task 1398 drivers/scsi/bnx2fc/bnx2fc_io.c r_ctl = (u8)task->rxwr_only.union_ctx.comp_info.abts_rsp.r_ctl; task 1521 drivers/scsi/bnx2fc/bnx2fc_io.c struct fcoe_task_ctx_entry *task, u8 num_rq) task 1547 drivers/scsi/bnx2fc/bnx2fc_io.c &task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr; task 1553 drivers/scsi/bnx2fc/bnx2fc_io.c task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len; task 1920 drivers/scsi/bnx2fc/bnx2fc_io.c struct fcoe_task_ctx_entry *task, task 1950 drivers/scsi/bnx2fc/bnx2fc_io.c &(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload); task 2035 drivers/scsi/bnx2fc/bnx2fc_io.c struct fcoe_task_ctx_entry *task; task 2084 drivers/scsi/bnx2fc/bnx2fc_io.c task = &(task_page[index]); task 2085 drivers/scsi/bnx2fc/bnx2fc_io.c bnx2fc_init_task(io_req, task); task 201 drivers/scsi/bnx2fc/bnx2fc_tgt.c bnx2fc_process_cleanup_compl(io_req, io_req->task, 0); task 235 drivers/scsi/bnx2fc/bnx2fc_tgt.c bnx2fc_process_cleanup_compl(io_req, io_req->task, 0); task 330 drivers/scsi/bnx2i/bnx2i_hwi.c struct iscsi_task *task) task 336 drivers/scsi/bnx2i/bnx2i_hwi.c login_hdr = (struct iscsi_login_req *)task->hdr; task 348 drivers/scsi/bnx2i/bnx2i_hwi.c login_wqe->itt = task->itt | task 531 drivers/scsi/bnx2i/bnx2i_hwi.c struct iscsi_task *task, task 538 drivers/scsi/bnx2i/bnx2i_hwi.c nopout_hdr = (struct iscsi_nopout *)task->hdr; task 551 drivers/scsi/bnx2i/bnx2i_hwi.c nopout_wqe->itt = ((u16)task->itt | task 589 drivers/scsi/bnx2i/bnx2i_hwi.c struct iscsi_task *task) task 594 drivers/scsi/bnx2i/bnx2i_hwi.c logout_hdr = (struct iscsi_logout *)task->hdr; task 604 drivers/scsi/bnx2i/bnx2i_hwi.c logout_wqe->itt = ((u16)task->itt | task 1343 drivers/scsi/bnx2i/bnx2i_hwi.c struct iscsi_task *task; task 1349 drivers/scsi/bnx2i/bnx2i_hwi.c task = iscsi_itt_to_task(conn, task 1351 drivers/scsi/bnx2i/bnx2i_hwi.c if (!task) task 1354 drivers/scsi/bnx2i/bnx2i_hwi.c bnx2i_cmd = task->dd_data; task 1381 
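
Note: throughout bnx2fc_io.c and bnx2fc_els.c above, a task context is located as task = &(task_page[index]) after selecting a page from a table keyed by the exchange id (xid). The records elide the index math, so the split below is an illustrative assumption, not the driver's exact formula:

    #include <linux/mm.h>      /* PAGE_SIZE */
    #include <linux/types.h>

    struct demo_task_ctx { u8 bytes[256]; };   /* stand-in for fcoe_task_ctx_entry */

    /* Assumed layout: fixed-size task contexts packed page by page. */
    #define DEMO_TASKS_PER_PAGE  (PAGE_SIZE / sizeof(struct demo_task_ctx))

    static struct demo_task_ctx *demo_xid_to_task(void **task_ctx_pages, u16 xid)
    {
        struct demo_task_ctx *task_page =
            task_ctx_pages[xid / DEMO_TASKS_PER_PAGE];
        int index = xid % DEMO_TASKS_PER_PAGE;

        return &task_page[index];
    }
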
drivers/scsi/bnx2i/bnx2i_hwi.c hdr = (struct iscsi_scsi_rsp *)task->hdr; task 1437 drivers/scsi/bnx2i/bnx2i_hwi.c struct iscsi_task *task; task 1445 drivers/scsi/bnx2i/bnx2i_hwi.c task = iscsi_itt_to_task(conn, task 1447 drivers/scsi/bnx2i/bnx2i_hwi.c if (!task) task 1461 drivers/scsi/bnx2i/bnx2i_hwi.c resp_hdr->itt = task->hdr->itt; task 1505 drivers/scsi/bnx2i/bnx2i_hwi.c struct iscsi_task *task; task 1513 drivers/scsi/bnx2i/bnx2i_hwi.c task = iscsi_itt_to_task(conn, text->itt & ISCSI_LOGIN_RESPONSE_INDEX); task 1514 drivers/scsi/bnx2i/bnx2i_hwi.c if (!task) task 1524 drivers/scsi/bnx2i/bnx2i_hwi.c resp_hdr->itt = task->hdr->itt; task 1526 drivers/scsi/bnx2i/bnx2i_hwi.c resp_hdr->statsn = task->hdr->exp_statsn; task 1566 drivers/scsi/bnx2i/bnx2i_hwi.c struct iscsi_task *task; task 1572 drivers/scsi/bnx2i/bnx2i_hwi.c task = iscsi_itt_to_task(conn, task 1574 drivers/scsi/bnx2i/bnx2i_hwi.c if (!task) task 1582 drivers/scsi/bnx2i/bnx2i_hwi.c resp_hdr->itt = task->hdr->itt; task 1605 drivers/scsi/bnx2i/bnx2i_hwi.c struct iscsi_task *task; task 1611 drivers/scsi/bnx2i/bnx2i_hwi.c task = iscsi_itt_to_task(conn, task 1613 drivers/scsi/bnx2i/bnx2i_hwi.c if (!task) task 1622 drivers/scsi/bnx2i/bnx2i_hwi.c resp_hdr->itt = task->hdr->itt; task 1623 drivers/scsi/bnx2i/bnx2i_hwi.c resp_hdr->statsn = task->hdr->exp_statsn; task 1652 drivers/scsi/bnx2i/bnx2i_hwi.c struct iscsi_task *task; task 1656 drivers/scsi/bnx2i/bnx2i_hwi.c task = iscsi_itt_to_task(conn, task 1658 drivers/scsi/bnx2i/bnx2i_hwi.c if (task) task 1659 drivers/scsi/bnx2i/bnx2i_hwi.c __iscsi_put_task(task); task 1692 drivers/scsi/bnx2i/bnx2i_hwi.c struct iscsi_task *task; task 1715 drivers/scsi/bnx2i/bnx2i_hwi.c task = iscsi_itt_to_task(conn, task 1717 drivers/scsi/bnx2i/bnx2i_hwi.c if (task) { task 1719 drivers/scsi/bnx2i/bnx2i_hwi.c hdr->itt = task->hdr->itt; task 1832 drivers/scsi/bnx2i/bnx2i_hwi.c struct iscsi_task *task; task 1836 drivers/scsi/bnx2i/bnx2i_hwi.c task = iscsi_itt_to_task(conn, task 1838 drivers/scsi/bnx2i/bnx2i_hwi.c if (!task) task 1903 drivers/scsi/bnx2i/bnx2i_hwi.c struct iscsi_task *task; task 1908 drivers/scsi/bnx2i/bnx2i_hwi.c task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data, task 1910 drivers/scsi/bnx2i/bnx2i_hwi.c if (!task || !task->sc) { task 1914 drivers/scsi/bnx2i/bnx2i_hwi.c sc = task->sc; task 84 drivers/scsi/bnx2i/bnx2i_iscsi.c static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task) task 86 drivers/scsi/bnx2i/bnx2i_iscsi.c struct bnx2i_cmd *cmd = task->dd_data; task 95 drivers/scsi/bnx2i/bnx2i_iscsi.c if (!iscsi_task_has_unsol_data(task) && !task->imm_count) task 99 drivers/scsi/bnx2i/bnx2i_iscsi.c buffer_offset += task->imm_count; task 100 drivers/scsi/bnx2i/bnx2i_iscsi.c if (task->imm_count == cmd_len) task 103 drivers/scsi/bnx2i/bnx2i_iscsi.c if (iscsi_task_has_unsol_data(task)) { task 108 drivers/scsi/bnx2i/bnx2i_iscsi.c buffer_offset += task->unsol_r2t.data_length; task 114 drivers/scsi/bnx2i/bnx2i_iscsi.c if ((start_bd_offset > task->conn->session->first_burst) || task 118 drivers/scsi/bnx2i/bnx2i_iscsi.c iscsi_conn_printk(KERN_ALERT, task->conn, task 124 drivers/scsi/bnx2i/bnx2i_iscsi.c iscsi_conn_printk(KERN_ALERT, task->conn, task 474 drivers/scsi/bnx2i/bnx2i_iscsi.c struct iscsi_task *task = session->cmds[i]; task 475 drivers/scsi/bnx2i/bnx2i_iscsi.c struct bnx2i_cmd *cmd = task->dd_data; task 499 drivers/scsi/bnx2i/bnx2i_iscsi.c struct iscsi_task *task = session->cmds[i]; task 500 drivers/scsi/bnx2i/bnx2i_iscsi.c struct bnx2i_cmd *cmd = task->dd_data; task 502 
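
Note: every bnx2i completion handler above starts the same way: recover the iscsi_task from the hardware ITT with iscsi_itt_to_task() and bail out if it is gone, since the command may already have been failed or aborted. The guard, sketched (locking elided; bnx2i runs these under the session lock):

    #include <scsi/libiscsi.h>

    /* Caller holds the session lock, as bnx2i's CQ processing does. */
    static void demo_process_cmpl(struct iscsi_conn *conn, itt_t hw_itt)
    {
        struct iscsi_task *task;

        task = iscsi_itt_to_task(conn, hw_itt);
        if (!task)
            return;   /* already completed or aborted; drop the stale event */

        /* ... translate the firmware CQE into task->hdr and complete it ... */
    }
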
drivers/scsi/bnx2i/bnx2i_iscsi.c task->hdr = &cmd->hdr; task 503 drivers/scsi/bnx2i/bnx2i_iscsi.c task->hdr_max = sizeof(struct iscsi_hdr); task 1081 drivers/scsi/bnx2i/bnx2i_iscsi.c static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task) task 1083 drivers/scsi/bnx2i/bnx2i_iscsi.c struct bnx2i_cmd *cmd = task->dd_data; task 1090 drivers/scsi/bnx2i/bnx2i_iscsi.c switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { task 1092 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_send_iscsi_login(bnx2i_conn, task); task 1098 drivers/scsi/bnx2i/bnx2i_iscsi.c rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task, task 1101 drivers/scsi/bnx2i/bnx2i_iscsi.c rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task, task 1105 drivers/scsi/bnx2i/bnx2i_iscsi.c rc = bnx2i_send_iscsi_logout(bnx2i_conn, task); task 1108 drivers/scsi/bnx2i/bnx2i_iscsi.c rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task); task 1111 drivers/scsi/bnx2i/bnx2i_iscsi.c rc = bnx2i_send_iscsi_text(bnx2i_conn, task); task 1116 drivers/scsi/bnx2i/bnx2i_iscsi.c task->hdr->opcode); task 1158 drivers/scsi/bnx2i/bnx2i_iscsi.c static void bnx2i_cleanup_task(struct iscsi_task *task) task 1160 drivers/scsi/bnx2i/bnx2i_iscsi.c struct iscsi_conn *conn = task->conn; task 1167 drivers/scsi/bnx2i/bnx2i_iscsi.c if (!task->sc || task->state == ISCSI_TASK_PENDING) task 1172 drivers/scsi/bnx2i/bnx2i_iscsi.c if (task->state == ISCSI_TASK_ABRT_TMF) { task 1173 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_send_cmd_cleanup_req(hba, task->dd_data); task 1182 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_iscsi_unmap_sg_list(task->dd_data); task 1191 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task) task 1195 drivers/scsi/bnx2i/bnx2i_iscsi.c struct bnx2i_cmd *cmd = task->dd_data; task 1200 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_conn->gen_pdu.req_buf_size = task->data_count; task 1204 drivers/scsi/bnx2i/bnx2i_iscsi.c ADD_STATS_64(hba, tx_bytes, task->data_count); task 1206 drivers/scsi/bnx2i/bnx2i_iscsi.c if (task->data_count) { task 1207 drivers/scsi/bnx2i/bnx2i_iscsi.c memcpy(bnx2i_conn->gen_pdu.req_buf, task->data, task 1208 drivers/scsi/bnx2i/bnx2i_iscsi.c task->data_count); task 1210 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_conn->gen_pdu.req_buf + task->data_count; task 1214 drivers/scsi/bnx2i/bnx2i_iscsi.c return bnx2i_iscsi_send_generic_request(task); task 1223 drivers/scsi/bnx2i/bnx2i_iscsi.c static int bnx2i_task_xmit(struct iscsi_task *task) task 1225 drivers/scsi/bnx2i/bnx2i_iscsi.c struct iscsi_conn *conn = task->conn; task 1230 drivers/scsi/bnx2i/bnx2i_iscsi.c struct scsi_cmnd *sc = task->sc; task 1231 drivers/scsi/bnx2i/bnx2i_iscsi.c struct bnx2i_cmd *cmd = task->dd_data; task 1232 drivers/scsi/bnx2i/bnx2i_iscsi.c struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr; task 1242 drivers/scsi/bnx2i/bnx2i_iscsi.c return bnx2i_mtask_xmit(conn, task); task 1257 drivers/scsi/bnx2i/bnx2i_iscsi.c cmd->req.itt = task->itt | task 1259 drivers/scsi/bnx2i/bnx2i_iscsi.c bnx2i_setup_write_cmd_bd_info(task); task 1263 drivers/scsi/bnx2i/bnx2i_iscsi.c cmd->req.itt = task->itt | task 1427 drivers/scsi/cxgbi/libcxgbi.c static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt) task 1429 drivers/scsi/cxgbi/libcxgbi.c struct scsi_cmnd *sc = task->sc; task 1430 drivers/scsi/cxgbi/libcxgbi.c struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; task 1438 drivers/scsi/cxgbi/libcxgbi.c cdev, task, tag); task 1441 drivers/scsi/cxgbi/libcxgbi.c struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); task 1458 drivers/scsi/cxgbi/libcxgbi.c 
static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt) task 1460 drivers/scsi/cxgbi/libcxgbi.c struct scsi_cmnd *sc = task->sc; task 1461 drivers/scsi/cxgbi/libcxgbi.c struct iscsi_conn *conn = task->conn; task 1467 drivers/scsi/cxgbi/libcxgbi.c u32 sw_tag = cxgbi_build_sw_tag(task->itt, sess->age); task 1472 drivers/scsi/cxgbi/libcxgbi.c struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); task 1483 drivers/scsi/cxgbi/libcxgbi.c cconn->cep->csk, task, tdata->dlen, task 1497 drivers/scsi/cxgbi/libcxgbi.c cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt); task 1616 drivers/scsi/cxgbi/libcxgbi.c struct iscsi_task *task = iscsi_itt_to_ctask(conn, itt); task 1619 drivers/scsi/cxgbi/libcxgbi.c if (task && task->sc) { task 1620 drivers/scsi/cxgbi/libcxgbi.c struct iscsi_tcp_task *tcp_task = task->dd_data; task 1886 drivers/scsi/cxgbi/libcxgbi.c int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode) task 1888 drivers/scsi/cxgbi/libcxgbi.c struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; task 1891 drivers/scsi/cxgbi/libcxgbi.c struct iscsi_conn *conn = task->conn; task 1892 drivers/scsi/cxgbi/libcxgbi.c struct iscsi_tcp_task *tcp_task = task->dd_data; task 1893 drivers/scsi/cxgbi/libcxgbi.c struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); task 1894 drivers/scsi/cxgbi/libcxgbi.c struct scsi_cmnd *sc = task->sc; task 1900 drivers/scsi/cxgbi/libcxgbi.c task->hdr = NULL; task 1919 drivers/scsi/cxgbi/libcxgbi.c if (task->sc) { task 1920 drivers/scsi/cxgbi/libcxgbi.c task->hdr = (struct iscsi_hdr *)tdata->skb->data; task 1922 drivers/scsi/cxgbi/libcxgbi.c task->hdr = kzalloc(SKB_TX_ISCSI_PDU_HEADER_MAX, GFP_ATOMIC); task 1923 drivers/scsi/cxgbi/libcxgbi.c if (!task->hdr) { task 1930 drivers/scsi/cxgbi/libcxgbi.c task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */ task 1934 drivers/scsi/cxgbi/libcxgbi.c task_reserve_itt(task, &task->hdr->itt); task 1938 drivers/scsi/cxgbi/libcxgbi.c task, opcode, tdata->skb, cdev->skb_tx_rsvd, headroom, task 1939 drivers/scsi/cxgbi/libcxgbi.c conn->max_xmit_dlength, ntohl(task->hdr->itt)); task 1959 drivers/scsi/cxgbi/libcxgbi.c int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset, task 1962 drivers/scsi/cxgbi/libcxgbi.c struct iscsi_conn *conn = task->conn; task 1963 drivers/scsi/cxgbi/libcxgbi.c struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); task 1971 drivers/scsi/cxgbi/libcxgbi.c task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK, task 1972 drivers/scsi/cxgbi/libcxgbi.c ntohl(task->cmdsn), ntohl(task->hdr->itt), offset, count); task 1974 drivers/scsi/cxgbi/libcxgbi.c skb_put(skb, task->hdr_len); task 1979 drivers/scsi/cxgbi/libcxgbi.c if (task->sc) { task 1980 drivers/scsi/cxgbi/libcxgbi.c struct scsi_data_buffer *sdb = &task->sc->sdb; task 2005 drivers/scsi/cxgbi/libcxgbi.c char *dst = skb->data + task->hdr_len; task 2037 drivers/scsi/cxgbi/libcxgbi.c pg = virt_to_page(task->data); task 2040 drivers/scsi/cxgbi/libcxgbi.c skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data), task 2062 drivers/scsi/cxgbi/libcxgbi.c int cxgbi_conn_xmit_pdu(struct iscsi_task *task) task 2064 drivers/scsi/cxgbi/libcxgbi.c struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; task 2066 drivers/scsi/cxgbi/libcxgbi.c struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); task 2075 drivers/scsi/cxgbi/libcxgbi.c "task 0x%p\n", task); task 2083 drivers/scsi/cxgbi/libcxgbi.c "task 0x%p, csk gone.\n", task); task 2097 drivers/scsi/cxgbi/libcxgbi.c task); task 2101 
drivers/scsi/cxgbi/libcxgbi.c if (!task->sc) task 2102 drivers/scsi/cxgbi/libcxgbi.c memcpy(skb->data, task->hdr, SKB_TX_ISCSI_PDU_HEADER_MAX); task 2110 drivers/scsi/cxgbi/libcxgbi.c task, task->sc, skb, skb->len, skb->data_len, err); task 2112 drivers/scsi/cxgbi/libcxgbi.c if (task->conn->hdrdgst_en) task 2115 drivers/scsi/cxgbi/libcxgbi.c if (datalen && task->conn->datadgst_en) task 2118 drivers/scsi/cxgbi/libcxgbi.c task->conn->txdata_octets += pdulen; task 2125 drivers/scsi/cxgbi/libcxgbi.c task, skb, skb->len, skb->data_len, err); task 2133 drivers/scsi/cxgbi/libcxgbi.c task->itt, skb, skb->len, skb->data_len, err); task 2137 drivers/scsi/cxgbi/libcxgbi.c iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err); task 2138 drivers/scsi/cxgbi/libcxgbi.c iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED); task 2143 drivers/scsi/cxgbi/libcxgbi.c void cxgbi_cleanup_task(struct iscsi_task *task) task 2145 drivers/scsi/cxgbi/libcxgbi.c struct iscsi_tcp_task *tcp_task = task->dd_data; task 2146 drivers/scsi/cxgbi/libcxgbi.c struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); task 2150 drivers/scsi/cxgbi/libcxgbi.c task, task->sc, tcp_task, task 2157 drivers/scsi/cxgbi/libcxgbi.c task, tdata->skb, task->hdr_itt); task 2161 drivers/scsi/cxgbi/libcxgbi.c if (!task->sc) task 2162 drivers/scsi/cxgbi/libcxgbi.c kfree(task->hdr); task 2163 drivers/scsi/cxgbi/libcxgbi.c task->hdr = NULL; task 2171 drivers/scsi/cxgbi/libcxgbi.c task_release_itt(task, task->hdr_itt); task 2174 drivers/scsi/cxgbi/libcxgbi.c iscsi_tcp_cleanup_task(task); task 537 drivers/scsi/cxgbi/libcxgbi.h #define iscsi_task_cxgbi_data(task) \ task 538 drivers/scsi/cxgbi/libcxgbi.h ((task)->dd_data + sizeof(struct iscsi_tcp_task)) task 587 drivers/scsi/cxgbi/libcxgbi.h void cxgbi_cleanup_task(struct iscsi_task *task); task 696 drivers/scsi/esas2r/esas2r.h u8 task; task 381 drivers/scsi/esas2r/esas2r_flash.c switch (fc->task) { task 388 drivers/scsi/esas2r/esas2r_flash.c fc->task = FMTSK_WRTBIOS; task 402 drivers/scsi/esas2r/esas2r_flash.c fc->task = FMTSK_READBIOS; task 425 drivers/scsi/esas2r/esas2r_flash.c fc->task = FMTSK_WRTMAC; task 437 drivers/scsi/esas2r/esas2r_flash.c fc->task = FMTSK_READMAC; task 460 drivers/scsi/esas2r/esas2r_flash.c fc->task = FMTSK_WRTEFI; task 473 drivers/scsi/esas2r/esas2r_flash.c fc->task = FMTSK_READEFI; task 496 drivers/scsi/esas2r/esas2r_flash.c fc->task = FMTSK_WRTCFG; task 507 drivers/scsi/esas2r/esas2r_flash.c fc->task = FMTSK_READCFG; task 1453 drivers/scsi/esas2r/esas2r_flash.c fc->task = FMTSK_ERASE_BOOT; task 217 drivers/scsi/hisi_sas/hisi_sas.h struct sas_task *task; task 525 drivers/scsi/hisi_sas/hisi_sas.h struct ssp_command_iu task; task 567 drivers/scsi/hisi_sas/hisi_sas.h extern void hisi_sas_sata_done(struct sas_task *task, task 582 drivers/scsi/hisi_sas/hisi_sas.h struct sas_task *task, task 104 drivers/scsi/hisi_sas/hisi_sas_main.c void hisi_sas_sata_done(struct sas_task *task, task 107 drivers/scsi/hisi_sas/hisi_sas_main.c struct task_status_struct *ts = &task->task_status; task 220 drivers/scsi/hisi_sas/hisi_sas_main.c void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, task 227 drivers/scsi/hisi_sas/hisi_sas_main.c if (task) { task 230 drivers/scsi/hisi_sas/hisi_sas_main.c if (!task->lldd_task) task 233 drivers/scsi/hisi_sas/hisi_sas_main.c task->lldd_task = NULL; task 235 drivers/scsi/hisi_sas/hisi_sas_main.c if (!sas_protocol_ata(task->task_proto)) { task 236 drivers/scsi/hisi_sas/hisi_sas_main.c struct sas_ssp_task *ssp_task = 
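
Note: in the esas2r_flash.c entries here, fc->task is a step counter for the firmware-flash sequence: each request completion advances it through the FMTSK_* stages (FMTSK_WRTBIOS, FMTSK_READBIOS, FMTSK_WRTMAC, and so on), typically writing a region and then reading it back to verify. A compressed sketch of such staged sequencing, with demo stage ids mirroring that progression (not the driver's actual enum):

    /* Illustrative stage ids echoing the FMTSK_* names in the records above. */
    enum demo_flash_task {
        DEMO_FMTSK_WRTBIOS,
        DEMO_FMTSK_READBIOS,
        DEMO_FMTSK_WRTMAC,
        DEMO_FMTSK_READMAC,
        DEMO_FMTSK_DONE,
    };

    struct demo_flash_ctx {
        enum demo_flash_task task;   /* current stage, advanced on completion */
    };

    /* Called on each flash-request completion; selects the next stage. */
    static void demo_flash_advance(struct demo_flash_ctx *fc)
    {
        switch (fc->task) {
        case DEMO_FMTSK_WRTBIOS:
            fc->task = DEMO_FMTSK_READBIOS;   /* verify what was just written */
            break;
        case DEMO_FMTSK_READBIOS:
            fc->task = DEMO_FMTSK_WRTMAC;
            break;
        case DEMO_FMTSK_WRTMAC:
            fc->task = DEMO_FMTSK_READMAC;
            break;
        default:
            fc->task = DEMO_FMTSK_DONE;
            break;
        }
    }
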
&task->ssp_task; task 240 drivers/scsi/hisi_sas/hisi_sas_main.c dma_unmap_sg(dev, task->scatter, task 241 drivers/scsi/hisi_sas/hisi_sas_main.c task->num_scatter, task 242 drivers/scsi/hisi_sas/hisi_sas_main.c task->data_dir); task 246 drivers/scsi/hisi_sas/hisi_sas_main.c task->data_dir); task 287 drivers/scsi/hisi_sas/hisi_sas_main.c struct sas_task *task, int n_elem, task 292 drivers/scsi/hisi_sas/hisi_sas_main.c if (!sas_protocol_ata(task->task_proto)) { task 293 drivers/scsi/hisi_sas/hisi_sas_main.c if (task->num_scatter) { task 295 drivers/scsi/hisi_sas/hisi_sas_main.c dma_unmap_sg(dev, task->scatter, task 296 drivers/scsi/hisi_sas/hisi_sas_main.c task->num_scatter, task 297 drivers/scsi/hisi_sas/hisi_sas_main.c task->data_dir); task 298 drivers/scsi/hisi_sas/hisi_sas_main.c } else if (task->task_proto & SAS_PROTOCOL_SMP) { task 300 drivers/scsi/hisi_sas/hisi_sas_main.c dma_unmap_sg(dev, &task->smp_task.smp_req, task 307 drivers/scsi/hisi_sas/hisi_sas_main.c struct sas_task *task, int *n_elem, task 313 drivers/scsi/hisi_sas/hisi_sas_main.c if (sas_protocol_ata(task->task_proto)) { task 314 drivers/scsi/hisi_sas/hisi_sas_main.c *n_elem = task->num_scatter; task 318 drivers/scsi/hisi_sas/hisi_sas_main.c if (task->num_scatter) { task 319 drivers/scsi/hisi_sas/hisi_sas_main.c *n_elem = dma_map_sg(dev, task->scatter, task 320 drivers/scsi/hisi_sas/hisi_sas_main.c task->num_scatter, task->data_dir); task 325 drivers/scsi/hisi_sas/hisi_sas_main.c } else if (task->task_proto & SAS_PROTOCOL_SMP) { task 326 drivers/scsi/hisi_sas/hisi_sas_main.c *n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req, task 332 drivers/scsi/hisi_sas/hisi_sas_main.c req_len = sg_dma_len(&task->smp_task.smp_req); task 350 drivers/scsi/hisi_sas/hisi_sas_main.c hisi_sas_dma_unmap(hisi_hba, task, *n_elem, task 357 drivers/scsi/hisi_sas/hisi_sas_main.c struct sas_task *task, int n_elem_dif) task 362 drivers/scsi/hisi_sas/hisi_sas_main.c struct sas_ssp_task *ssp_task = &task->ssp_task; task 367 drivers/scsi/hisi_sas/hisi_sas_main.c task->data_dir); task 372 drivers/scsi/hisi_sas/hisi_sas_main.c int *n_elem_dif, struct sas_task *task) task 379 drivers/scsi/hisi_sas/hisi_sas_main.c if (task->num_scatter) { task 380 drivers/scsi/hisi_sas/hisi_sas_main.c ssp_task = &task->ssp_task; task 387 drivers/scsi/hisi_sas/hisi_sas_main.c task->data_dir); task 405 drivers/scsi/hisi_sas/hisi_sas_main.c scsi_prot_sg_count(scsi_cmnd), task->data_dir); task 409 drivers/scsi/hisi_sas/hisi_sas_main.c static int hisi_sas_task_prep(struct sas_task *task, task 414 drivers/scsi/hisi_sas/hisi_sas_main.c struct domain_device *device = task->dev; task 458 drivers/scsi/hisi_sas/hisi_sas_main.c rc = hisi_sas_dma_map(hisi_hba, task, &n_elem, task 463 drivers/scsi/hisi_sas/hisi_sas_main.c if (!sas_protocol_ata(task->task_proto)) { task 464 drivers/scsi/hisi_sas/hisi_sas_main.c rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task); task 474 drivers/scsi/hisi_sas/hisi_sas_main.c if (task->uldd_task) { task 478 drivers/scsi/hisi_sas/hisi_sas_main.c qc = task->uldd_task; task 481 drivers/scsi/hisi_sas/hisi_sas_main.c scsi_cmnd = task->uldd_task; task 511 drivers/scsi/hisi_sas/hisi_sas_main.c slot->task = task; task 515 drivers/scsi/hisi_sas/hisi_sas_main.c task->lldd_task = slot; task 522 drivers/scsi/hisi_sas/hisi_sas_main.c switch (task->task_proto) { task 536 drivers/scsi/hisi_sas/hisi_sas_main.c task->task_proto); task 540 drivers/scsi/hisi_sas/hisi_sas_main.c spin_lock_irqsave(&task->task_state_lock, flags); task 541 
drivers/scsi/hisi_sas/hisi_sas_main.c task->task_state_flags |= SAS_TASK_AT_INITIATOR; task 542 drivers/scsi/hisi_sas/hisi_sas_main.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 550 drivers/scsi/hisi_sas/hisi_sas_main.c if (!sas_protocol_ata(task->task_proto)) task 551 drivers/scsi/hisi_sas/hisi_sas_main.c hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif); task 553 drivers/scsi/hisi_sas/hisi_sas_main.c hisi_sas_dma_unmap(hisi_hba, task, n_elem, task 560 drivers/scsi/hisi_sas/hisi_sas_main.c static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags, task 568 drivers/scsi/hisi_sas/hisi_sas_main.c struct domain_device *device = task->dev; task 573 drivers/scsi/hisi_sas/hisi_sas_main.c struct task_status_struct *ts = &task->task_status; task 582 drivers/scsi/hisi_sas/hisi_sas_main.c task->task_done(task); task 604 drivers/scsi/hisi_sas/hisi_sas_main.c rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass); task 992 drivers/scsi/hisi_sas/hisi_sas_main.c static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task, task 995 drivers/scsi/hisi_sas/hisi_sas_main.c if (task) { task 999 drivers/scsi/hisi_sas/hisi_sas_main.c ts = &task->task_status; task 1003 drivers/scsi/hisi_sas/hisi_sas_main.c spin_lock_irqsave(&task->task_state_lock, flags); task 1004 drivers/scsi/hisi_sas/hisi_sas_main.c task->task_state_flags &= task 1006 drivers/scsi/hisi_sas/hisi_sas_main.c if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP) task 1007 drivers/scsi/hisi_sas/hisi_sas_main.c task->task_state_flags |= SAS_TASK_STATE_DONE; task 1008 drivers/scsi/hisi_sas/hisi_sas_main.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 1011 drivers/scsi/hisi_sas/hisi_sas_main.c hisi_sas_slot_task_free(hisi_hba, task, slot); task 1021 drivers/scsi/hisi_sas/hisi_sas_main.c hisi_sas_do_release_task(hisi_hba, slot->task, slot); task 1077 drivers/scsi/hisi_sas/hisi_sas_main.c static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags) task 1079 drivers/scsi/hisi_sas/hisi_sas_main.c return hisi_sas_task_exec(task, gfp_flags, 0, NULL); task 1154 drivers/scsi/hisi_sas/hisi_sas_main.c static void hisi_sas_task_done(struct sas_task *task) task 1156 drivers/scsi/hisi_sas/hisi_sas_main.c del_timer(&task->slow_task->timer); task 1157 drivers/scsi/hisi_sas/hisi_sas_main.c complete(&task->slow_task->completion); task 1163 drivers/scsi/hisi_sas/hisi_sas_main.c struct sas_task *task = slow->task; task 1167 drivers/scsi/hisi_sas/hisi_sas_main.c spin_lock_irqsave(&task->task_state_lock, flags); task 1168 drivers/scsi/hisi_sas/hisi_sas_main.c if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { task 1169 drivers/scsi/hisi_sas/hisi_sas_main.c task->task_state_flags |= SAS_TASK_STATE_ABORTED; task 1172 drivers/scsi/hisi_sas/hisi_sas_main.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 1175 drivers/scsi/hisi_sas/hisi_sas_main.c complete(&task->slow_task->completion); task 1188 drivers/scsi/hisi_sas/hisi_sas_main.c struct sas_task *task; task 1192 drivers/scsi/hisi_sas/hisi_sas_main.c task = sas_alloc_slow_task(GFP_KERNEL); task 1193 drivers/scsi/hisi_sas/hisi_sas_main.c if (!task) task 1196 drivers/scsi/hisi_sas/hisi_sas_main.c task->dev = device; task 1197 drivers/scsi/hisi_sas/hisi_sas_main.c task->task_proto = device->tproto; task 1200 drivers/scsi/hisi_sas/hisi_sas_main.c task->ata_task.device_control_reg_update = 1; task 1201 drivers/scsi/hisi_sas/hisi_sas_main.c memcpy(&task->ata_task.fis, parameter, para_len); task 1203 
drivers/scsi/hisi_sas/hisi_sas_main.c memcpy(&task->ssp_task, parameter, para_len); task 1205 drivers/scsi/hisi_sas/hisi_sas_main.c task->task_done = hisi_sas_task_done; task 1207 drivers/scsi/hisi_sas/hisi_sas_main.c task->slow_task->timer.function = hisi_sas_tmf_timedout; task 1208 drivers/scsi/hisi_sas/hisi_sas_main.c task->slow_task->timer.expires = jiffies + TASK_TIMEOUT * HZ; task 1209 drivers/scsi/hisi_sas/hisi_sas_main.c add_timer(&task->slow_task->timer); task 1211 drivers/scsi/hisi_sas/hisi_sas_main.c res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf); task 1214 drivers/scsi/hisi_sas/hisi_sas_main.c del_timer(&task->slow_task->timer); task 1220 drivers/scsi/hisi_sas/hisi_sas_main.c wait_for_completion(&task->slow_task->completion); task 1223 drivers/scsi/hisi_sas/hisi_sas_main.c if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { task 1224 drivers/scsi/hisi_sas/hisi_sas_main.c if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { task 1225 drivers/scsi/hisi_sas/hisi_sas_main.c struct hisi_sas_slot *slot = task->lldd_task; task 1236 drivers/scsi/hisi_sas/hisi_sas_main.c slot->task = NULL; task 1244 drivers/scsi/hisi_sas/hisi_sas_main.c if (task->task_status.resp == SAS_TASK_COMPLETE && task 1245 drivers/scsi/hisi_sas/hisi_sas_main.c task->task_status.stat == TMF_RESP_FUNC_COMPLETE) { task 1250 drivers/scsi/hisi_sas/hisi_sas_main.c if (task->task_status.resp == SAS_TASK_COMPLETE && task 1251 drivers/scsi/hisi_sas/hisi_sas_main.c task->task_status.stat == TMF_RESP_FUNC_SUCC) { task 1256 drivers/scsi/hisi_sas/hisi_sas_main.c if (task->task_status.resp == SAS_TASK_COMPLETE && task 1257 drivers/scsi/hisi_sas/hisi_sas_main.c task->task_status.stat == SAS_DATA_UNDERRUN) { task 1263 drivers/scsi/hisi_sas/hisi_sas_main.c task->task_status.resp, task 1264 drivers/scsi/hisi_sas/hisi_sas_main.c task->task_status.stat); task 1265 drivers/scsi/hisi_sas/hisi_sas_main.c res = task->task_status.residual; task 1269 drivers/scsi/hisi_sas/hisi_sas_main.c if (task->task_status.resp == SAS_TASK_COMPLETE && task 1270 drivers/scsi/hisi_sas/hisi_sas_main.c task->task_status.stat == SAS_DATA_OVERRUN) { task 1276 drivers/scsi/hisi_sas/hisi_sas_main.c if (task->task_status.resp == SAS_TASK_COMPLETE && task 1277 drivers/scsi/hisi_sas/hisi_sas_main.c task->task_status.stat == SAS_OPEN_REJECT) { task 1283 drivers/scsi/hisi_sas/hisi_sas_main.c task->task_status.resp, task 1284 drivers/scsi/hisi_sas/hisi_sas_main.c task->task_status.stat); task 1286 drivers/scsi/hisi_sas/hisi_sas_main.c sas_free_task(task); task 1287 drivers/scsi/hisi_sas/hisi_sas_main.c task = NULL; task 1292 drivers/scsi/hisi_sas/hisi_sas_main.c sas_free_task(task); task 1601 drivers/scsi/hisi_sas/hisi_sas_main.c static int hisi_sas_abort_task(struct sas_task *task) task 1605 drivers/scsi/hisi_sas/hisi_sas_main.c struct domain_device *device = task->dev; task 1615 drivers/scsi/hisi_sas/hisi_sas_main.c hisi_hba = dev_to_hisi_hba(task->dev); task 1618 drivers/scsi/hisi_sas/hisi_sas_main.c spin_lock_irqsave(&task->task_state_lock, flags); task 1619 drivers/scsi/hisi_sas/hisi_sas_main.c if (task->task_state_flags & SAS_TASK_STATE_DONE) { task 1620 drivers/scsi/hisi_sas/hisi_sas_main.c struct hisi_sas_slot *slot = task->lldd_task; task 1631 drivers/scsi/hisi_sas/hisi_sas_main.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 1635 drivers/scsi/hisi_sas/hisi_sas_main.c task->task_state_flags |= SAS_TASK_STATE_ABORTED; task 1636 drivers/scsi/hisi_sas/hisi_sas_main.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 1638 
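The response-decoding ladder in the entries above reduces to one (resp, stat) pair per outcome; a condensed, non-exhaustive sketch (underrun handling follows the residual field as the index shows):

struct task_status_struct *ts = &task->task_status;

if (ts->resp == SAS_TASK_COMPLETE && ts->stat == TMF_RESP_FUNC_COMPLETE)
        res = TMF_RESP_FUNC_COMPLETE;           /* TMF accepted */
else if (ts->resp == SAS_TASK_COMPLETE && ts->stat == SAS_DATA_UNDERRUN)
        res = ts->residual;                     /* partial transfer */
else
        dev_warn(dev, "tmf: resp %d stat %d\n", ts->resp, ts->stat);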
drivers/scsi/hisi_sas/hisi_sas_main.c if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { task 1639 drivers/scsi/hisi_sas/hisi_sas_main.c struct scsi_cmnd *cmnd = task->uldd_task; task 1640 drivers/scsi/hisi_sas/hisi_sas_main.c struct hisi_sas_slot *slot = task->lldd_task; task 1648 drivers/scsi/hisi_sas/hisi_sas_main.c rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, task 1666 drivers/scsi/hisi_sas/hisi_sas_main.c if (task->lldd_task) task 1667 drivers/scsi/hisi_sas/hisi_sas_main.c hisi_sas_do_release_task(hisi_hba, task, slot); task 1669 drivers/scsi/hisi_sas/hisi_sas_main.c } else if (task->task_proto & SAS_PROTOCOL_SATA || task 1670 drivers/scsi/hisi_sas/hisi_sas_main.c task->task_proto & SAS_PROTOCOL_STP) { task 1671 drivers/scsi/hisi_sas/hisi_sas_main.c if (task->dev->dev_type == SAS_SATA_DEV) { task 1682 drivers/scsi/hisi_sas/hisi_sas_main.c } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) { task 1684 drivers/scsi/hisi_sas/hisi_sas_main.c struct hisi_sas_slot *slot = task->lldd_task; task 1691 drivers/scsi/hisi_sas/hisi_sas_main.c task->lldd_task) { task 1697 drivers/scsi/hisi_sas/hisi_sas_main.c slot->task = NULL; task 1898 drivers/scsi/hisi_sas/hisi_sas_main.c static int hisi_sas_query_task(struct sas_task *task) task 1904 drivers/scsi/hisi_sas/hisi_sas_main.c if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { task 1905 drivers/scsi/hisi_sas/hisi_sas_main.c struct scsi_cmnd *cmnd = task->uldd_task; task 1906 drivers/scsi/hisi_sas/hisi_sas_main.c struct domain_device *device = task->dev; task 1907 drivers/scsi/hisi_sas/hisi_sas_main.c struct hisi_sas_slot *slot = task->lldd_task; task 1934 drivers/scsi/hisi_sas/hisi_sas_main.c struct sas_task *task, int abort_flag, task 1937 drivers/scsi/hisi_sas/hisi_sas_main.c struct domain_device *device = task->dev; task 1982 drivers/scsi/hisi_sas/hisi_sas_main.c slot->task = task; task 1985 drivers/scsi/hisi_sas/hisi_sas_main.c task->lldd_task = slot; task 1995 drivers/scsi/hisi_sas/hisi_sas_main.c spin_lock_irqsave(&task->task_state_lock, flags); task 1996 drivers/scsi/hisi_sas/hisi_sas_main.c task->task_state_flags |= SAS_TASK_AT_INITIATOR; task 1997 drivers/scsi/hisi_sas/hisi_sas_main.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 2027 drivers/scsi/hisi_sas/hisi_sas_main.c struct sas_task *task; task 2041 drivers/scsi/hisi_sas/hisi_sas_main.c task = sas_alloc_slow_task(GFP_KERNEL); task 2042 drivers/scsi/hisi_sas/hisi_sas_main.c if (!task) task 2045 drivers/scsi/hisi_sas/hisi_sas_main.c task->dev = device; task 2046 drivers/scsi/hisi_sas/hisi_sas_main.c task->task_proto = device->tproto; task 2047 drivers/scsi/hisi_sas/hisi_sas_main.c task->task_done = hisi_sas_task_done; task 2048 drivers/scsi/hisi_sas/hisi_sas_main.c task->slow_task->timer.function = hisi_sas_tmf_timedout; task 2049 drivers/scsi/hisi_sas/hisi_sas_main.c task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT * HZ; task 2050 drivers/scsi/hisi_sas/hisi_sas_main.c add_timer(&task->slow_task->timer); task 2053 drivers/scsi/hisi_sas/hisi_sas_main.c task, abort_flag, tag, dq); task 2055 drivers/scsi/hisi_sas/hisi_sas_main.c del_timer(&task->slow_task->timer); task 2060 drivers/scsi/hisi_sas/hisi_sas_main.c wait_for_completion(&task->slow_task->completion); task 2064 drivers/scsi/hisi_sas/hisi_sas_main.c if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { task 2068 drivers/scsi/hisi_sas/hisi_sas_main.c if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { task 2069 
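The hisi_sas_abort_task() entries above open with the standard abort gate: a task already marked DONE cannot be aborted, so both the test and the ABORTED marking happen under task_state_lock. Sketch:

unsigned long flags;

spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
        spin_unlock_irqrestore(&task->task_state_lock, flags);
        return TMF_RESP_FUNC_COMPLETE;  /* already completed: no-op abort */
}
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
spin_unlock_irqrestore(&task->task_state_lock, flags);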
drivers/scsi/hisi_sas/hisi_sas_main.c struct hisi_sas_slot *slot = task->lldd_task; task 2079 drivers/scsi/hisi_sas/hisi_sas_main.c slot->task = NULL; task 2089 drivers/scsi/hisi_sas/hisi_sas_main.c if (task->task_status.resp == SAS_TASK_COMPLETE && task 2090 drivers/scsi/hisi_sas/hisi_sas_main.c task->task_status.stat == TMF_RESP_FUNC_COMPLETE) { task 2095 drivers/scsi/hisi_sas/hisi_sas_main.c if (task->task_status.resp == SAS_TASK_COMPLETE && task 2096 drivers/scsi/hisi_sas/hisi_sas_main.c task->task_status.stat == TMF_RESP_FUNC_SUCC) { task 2103 drivers/scsi/hisi_sas/hisi_sas_main.c SAS_ADDR(device->sas_addr), task, task 2104 drivers/scsi/hisi_sas/hisi_sas_main.c task->task_status.resp, /* 0 is complete, -1 is undelivered */ task 2105 drivers/scsi/hisi_sas/hisi_sas_main.c task->task_status.stat); task 2106 drivers/scsi/hisi_sas/hisi_sas_main.c sas_free_task(task); task 914 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c struct sas_task *task = slot->task; task 916 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c struct domain_device *device = task->dev; task 924 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c sg_req = &task->smp_task.smp_req; task 952 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c struct sas_task *task = slot->task; task 954 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c struct domain_device *device = task->dev; task 957 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c struct sas_ssp_task *ssp_task = &task->ssp_task; task 1010 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c prep_prd_sge_v1_hw(hisi_hba, slot, hdr, task->scatter, task 1013 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); task 1019 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c if (task->ssp_task.enable_first_burst) { task 1025 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c memcpy(buf_cmd, &task->ssp_task.LUN, 8); task 1027 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c buf_cmd[9] = fburst | task->ssp_task.task_attr | task 1028 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c (task->ssp_task.task_prio << 3); task 1029 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd, task 1030 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c task->ssp_task.cmd->cmd_len); task 1049 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c struct sas_task *task, task 1052 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c struct task_status_struct *ts = &task->task_status; task 1057 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c switch (task->task_proto) { task 1179 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c struct sas_task *task = slot->task; task 1194 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c if (unlikely(!task || !task->lldd_task || !task->dev)) task 1197 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c ts = &task->task_status; task 1198 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c device = task->dev; task 1201 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c spin_lock_irqsave(&task->task_state_lock, flags); task 1202 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c task->task_state_flags &= task 1204 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c task->task_state_flags |= SAS_TASK_STATE_DONE; task 1205 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 1259 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c slot_err_v1_hw(hisi_hba, task, slot); task 1265 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c switch (task->task_proto) { task 1273 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c sas_ssp_task_response(dev, task, iu); task 1278 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c struct scatterlist *sg_resp = &task->smp_task.smp_resp; task 1283 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c dma_unmap_sg(dev, 
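All three hisi_sas hardware revisions share the completion idiom visible in the slot-complete entries: flip the state flags under the lock, free the slot, then call task_done() outside the lock. Sketch; the cleared-flag mask is abridged because the index truncates it:

spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;      /* mask abridged */
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);

hisi_sas_slot_task_free(hisi_hba, task, slot);
if (task->task_done)
        task->task_done(task);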
&task->smp_task.smp_req, 1, task 1309 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c hisi_sas_slot_task_free(hisi_hba, task, slot); task 1312 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c if (task->task_done) task 1313 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c task->task_done(task); task 1694 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c struct sas_task *task = slot->task; task 1696 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c struct domain_device *device = task->dev; task 1704 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c sg_req = &task->smp_task.smp_req; task 1706 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c req_len = sg_dma_len(&task->smp_task.smp_req); task 1733 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c struct sas_task *task = slot->task; task 1735 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c struct domain_device *device = task->dev; task 1738 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c struct sas_ssp_task *ssp_task = &task->ssp_task; task 1784 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter, task 1787 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); task 1794 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c memcpy(buf_cmd, &task->ssp_task.LUN, 8); task 1796 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c buf_cmd[9] = task->ssp_task.task_attr | task 1797 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c (task->ssp_task.task_prio << 3); task 1798 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd, task 1799 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c task->ssp_task.cmd->cmd_len); task 2012 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c struct sas_task *task, task 2016 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c struct task_status_struct *ts = &task->task_status; task 2044 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c switch (task->task_proto) { task 2308 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c hisi_sas_sata_done(task, slot); task 2319 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c struct sas_task *task = slot->task; task 2334 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c if (unlikely(!task || !task->lldd_task || !task->dev)) task 2337 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c ts = &task->task_status; task 2338 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c device = task->dev; task 2342 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c spin_lock_irqsave(&task->task_state_lock, flags); task 2343 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c task->task_state_flags &= task 2345 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 2391 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c slot_err_v2_hw(hisi_hba, task, slot, 1); task 2393 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c slot_err_v2_hw(hisi_hba, task, slot, 2); task 2397 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c slot->idx, task, sas_dev->device_id, task 2408 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c switch (task->task_proto) { task 2416 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c sas_ssp_task_response(dev, task, iu); task 2421 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c struct scatterlist *sg_resp = &task->smp_task.smp_resp; task 2426 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c dma_unmap_sg(dev, &task->smp_task.smp_req, 1, task 2439 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c hisi_sas_sata_done(task, slot); task 2455 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c spin_lock_irqsave(&task->task_state_lock, flags); task 2456 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { task 2457 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 2458 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c 
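The v1/v2 prep_ssp entries above both build the SSP command frame the same way: LUN at the front, one packed attribute/priority byte, then the CDB at a fixed offset. Condensed:

memcpy(buf_cmd, &task->ssp_task.LUN, 8);        /* bytes 0-7: LUN */
buf_cmd[9] = task->ssp_task.task_attr |
             (task->ssp_task.task_prio << 3);   /* attr/priority byte */
memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,  /* CDB at offset 12 */
       task->ssp_task.cmd->cmd_len);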
dev_info(dev, "slot complete: task(%pK) aborted\n", task); task 2461 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c task->task_state_flags |= SAS_TASK_STATE_DONE; task 2462 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 2463 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c hisi_sas_slot_task_free(hisi_hba, task, slot); task 2465 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) { task 2470 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c task); task 2476 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c if (task->task_done) task 2477 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c task->task_done(task); task 2485 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c struct sas_task *task = slot->task; task 2486 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c struct domain_device *device = task->dev; task 2513 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c switch (task->data_dir) { task 2526 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c if ((task->ata_task.fis.command == ATA_CMD_DEV_RESET) && task 2527 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c (task->ata_task.fis.control & ATA_SRST)) task 2531 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c &task->ata_task.fis, task->data_dir)) task 2537 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c if (task->ata_task.use_ncq) { task 2538 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c struct ata_queued_cmd *qc = task->uldd_task; task 2541 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); task 2553 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter, task 2556 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); task 2562 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c if (likely(!task->ata_task.device_control_reg_update)) task 2563 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ task 2565 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); task 2606 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c struct sas_task *task = slot->task; task 2607 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c struct domain_device *dev = task->dev; task 1170 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c struct sas_task *task = slot->task; task 1172 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c struct domain_device *device = task->dev; task 1175 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c struct sas_ssp_task *ssp_task = &task->ssp_task; task 1221 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter, task 1236 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c memcpy(buf_cmd, &task->ssp_task.LUN, 8); task 1280 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c len = (task->total_xfer_len >> ilog2_interval) * 8; task 1286 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len + len); task 1292 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c struct sas_task *task = slot->task; task 1294 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c struct domain_device *device = task->dev; task 1302 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c sg_req = &task->smp_task.smp_req; task 1332 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c struct sas_task *task = slot->task; task 1333 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c struct domain_device *device = task->dev; task 1349 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c switch (task->data_dir) { task 1362 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c if ((task->ata_task.fis.command == ATA_CMD_DEV_RESET) && task 1363 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c 
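The ATA prep entries show two FIS fix-ups before the frame is copied out: for NCQ the hardware queue tag is folded into sector_count, and unless the task wants the device control register updated, the C bit is set so the command register is loaded. Sketch; the hdr_tag derivation is an assumption since the index truncates that line:

if (task->ata_task.use_ncq) {
        struct ata_queued_cmd *qc = task->uldd_task;
        u32 hdr_tag = qc->tag;  /* assumption: tag taken from the qc */

        task->ata_task.fis.sector_count |= (u8)(hdr_tag << 3);
}
if (likely(!task->ata_task.device_control_reg_update))
        task->ata_task.fis.flags |= 0x80;       /* C=1: update ATA cmd reg */
memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));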
(task->ata_task.fis.control & ATA_SRST)) task 1367 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c &task->ata_task.fis, task->data_dir)) task 1371 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c if (FIS_CMD_IS_UNCONSTRAINED(task->ata_task.fis)) task 1377 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c if (task->ata_task.use_ncq) { task 1378 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c struct ata_queued_cmd *qc = task->uldd_task; task 1381 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); task 1393 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter, task 1396 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); task 1402 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c if (likely(!task->ata_task.device_control_reg_update)) task 1403 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ task 1405 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); task 1412 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c struct sas_task *task = slot->task; task 1413 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c struct domain_device *dev = task->dev; task 2089 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task, task 2092 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c struct task_status_struct *ts = &task->task_status; task 2103 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c switch (task->task_proto) { task 2129 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c hisi_sas_sata_done(task, slot); task 2142 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c struct sas_task *task = slot->task; task 2157 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c if (unlikely(!task || !task->lldd_task || !task->dev)) task 2160 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c ts = &task->task_status; task 2161 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c device = task->dev; task 2165 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c spin_lock_irqsave(&task->task_state_lock, flags); task 2166 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c task->task_state_flags &= task 2168 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 2212 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c slot_err_v3_hw(hisi_hba, task, slot); task 2215 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c slot->idx, task, sas_dev->device_id, task 2224 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c switch (task->task_proto) { task 2230 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c sas_ssp_task_response(dev, task, iu); task 2234 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c struct scatterlist *sg_resp = &task->smp_task.smp_resp; task 2239 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c dma_unmap_sg(dev, &task->smp_task.smp_req, 1, task 2251 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c hisi_sas_sata_done(task, slot); task 2266 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c spin_lock_irqsave(&task->task_state_lock, flags); task 2267 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { task 2268 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 2269 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c dev_info(dev, "slot complete: task(%pK) aborted\n", task); task 2272 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c task->task_state_flags |= SAS_TASK_STATE_DONE; task 2273 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 2274 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c hisi_sas_slot_task_free(hisi_hba, task, slot); task 2276 
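The isci ireq_done() entries just below show the completion fork: a task that was never aborted completes normally via task_done(), while an aborted one is handed to libsas error handling instead. Sketch of that dichotomy:

if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
        task->lldd_task = NULL;
        task->task_done(task);          /* normal completion */
} else {
        if (sas_protocol_ata(task->task_proto))
                task->lldd_task = NULL;
        sas_task_abort(task);           /* escalate to libsas EH */
}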
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) { task 2281 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c task); task 2287 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c if (task->task_done) task 2288 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c task->task_done(task); task 1080 drivers/scsi/isci/host.c void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task) task 1083 drivers/scsi/isci/host.c !(task->task_state_flags & SAS_TASK_STATE_ABORTED)) { task 1088 drivers/scsi/isci/host.c __func__, ireq, task); task 1089 drivers/scsi/isci/host.c task->lldd_task = NULL; task 1090 drivers/scsi/isci/host.c task->task_done(task); task 1094 drivers/scsi/isci/host.c __func__, ireq, task); task 1095 drivers/scsi/isci/host.c if (sas_protocol_ata(task->task_proto)) task 1096 drivers/scsi/isci/host.c task->lldd_task = NULL; task 1097 drivers/scsi/isci/host.c sas_task_abort(task); task 1100 drivers/scsi/isci/host.c task->lldd_task = NULL; task 478 drivers/scsi/isci/host.h void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task); task 701 drivers/scsi/isci/remote_device.c struct sas_task *task = isci_request_access_task(ireq); task 715 drivers/scsi/isci/remote_device.c if (task->ata_task.use_ncq) task 725 drivers/scsi/isci/remote_device.c struct sas_task *task = isci_request_access_task(ireq); task 727 drivers/scsi/isci/remote_device.c if (task->ata_task.use_ncq) { task 116 drivers/scsi/isci/request.c struct sas_task *task = isci_request_access_task(ireq); task 123 drivers/scsi/isci/request.c if (task->num_scatter > 0) { task 124 drivers/scsi/isci/request.c sg = task->scatter; task 154 drivers/scsi/isci/request.c task->scatter, task 155 drivers/scsi/isci/request.c task->total_xfer_len, task 156 drivers/scsi/isci/request.c task->data_dir); task 160 drivers/scsi/isci/request.c scu_sg->A.length = task->total_xfer_len; task 174 drivers/scsi/isci/request.c struct sas_task *task = isci_request_access_task(ireq); task 178 drivers/scsi/isci/request.c memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8); task 183 drivers/scsi/isci/request.c cmd_iu->task_prio = task->ssp_task.task_prio; task 184 drivers/scsi/isci/request.c cmd_iu->task_attr = task->ssp_task.task_attr; task 187 drivers/scsi/isci/request.c sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cmd->cmnd, task 188 drivers/scsi/isci/request.c (task->ssp_task.cmd->cmd_len+3) / sizeof(u32)); task 194 drivers/scsi/isci/request.c struct sas_task *task = isci_request_access_task(ireq); task 201 drivers/scsi/isci/request.c memcpy(task_iu->LUN, task->ssp_task.LUN, 8); task 644 drivers/scsi/isci/request.c struct sas_task *task; task 655 drivers/scsi/isci/request.c task = isci_request_access_task(ireq); task 656 drivers/scsi/isci/request.c if (task->data_dir == DMA_NONE) task 657 drivers/scsi/isci/request.c task->total_xfer_len = 0; task 672 drivers/scsi/isci/request.c struct sas_task *task = isci_request_access_task(ireq); task 687 drivers/scsi/isci/request.c if (!sas_protocol_ata(task->task_proto)) { task 691 drivers/scsi/isci/request.c task->task_proto); task 698 drivers/scsi/isci/request.c task->ata_task.fis.command == ATA_CMD_PACKET) { task 704 drivers/scsi/isci/request.c if (task->data_dir == DMA_NONE) { task 710 drivers/scsi/isci/request.c if (task->ata_task.use_ncq) { task 718 drivers/scsi/isci/request.c if (task->ata_task.dma_xfer) { task 731 drivers/scsi/isci/request.c struct sas_task *task = isci_request_access_task(ireq); task 736 drivers/scsi/isci/request.c task->data_dir, task 
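isci builds the same SSP command IU seen in the hisi_sas entries, but its hardware wants the CDB dword-swapped, hence sci_swab32_cpy() with a length rounded up to whole u32s. Condensed from the entries above:

memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
cmd_iu->task_prio = task->ssp_task.task_prio;
cmd_iu->task_attr = task->ssp_task.task_attr;
/* swap the CDB into the dword order the controller expects */
sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cmd->cmnd,
               (task->ssp_task.cmd->cmd_len + 3) / sizeof(u32));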
737 drivers/scsi/isci/request.c task->total_xfer_len); task 764 drivers/scsi/isci/request.c struct sas_task *task = isci_request_access_task(ireq); task 768 drivers/scsi/isci/request.c copy = (task->data_dir == DMA_NONE) ? false : true; task 771 drivers/scsi/isci/request.c task->total_xfer_len, task 772 drivers/scsi/isci/request.c task->data_dir, task 1414 drivers/scsi/isci/request.c struct sas_task *task; task 1420 drivers/scsi/isci/request.c task = isci_request_access_task(ireq); task 1423 drivers/scsi/isci/request.c if (task->num_scatter > 0) { task 1424 drivers/scsi/isci/request.c sg = task->scatter; task 1438 drivers/scsi/isci/request.c BUG_ON(task->total_xfer_len < total_len); task 1439 drivers/scsi/isci/request.c memcpy(task->scatter, src_addr, total_len); task 1622 drivers/scsi/isci/request.c struct sas_task *task = isci_request_access_task(ireq); task 1643 drivers/scsi/isci/request.c if (task->data_dir == DMA_NONE) task 1669 drivers/scsi/isci/request.c struct sas_task *task = isci_request_access_task(ireq); task 1676 drivers/scsi/isci/request.c if (task->data_dir == DMA_TO_DEVICE) { task 1689 drivers/scsi/isci/request.c memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len); task 1699 drivers/scsi/isci/request.c task_context->transfer_length_bytes = task->total_xfer_len; task 1772 drivers/scsi/isci/request.c struct sas_task *task = isci_request_access_task(ireq); task 1773 drivers/scsi/isci/request.c struct scatterlist *sg = &task->smp_task.smp_resp; task 1895 drivers/scsi/isci/request.c struct sas_task *task = isci_request_access_task(ireq); task 1939 drivers/scsi/isci/request.c if (task->data_dir == DMA_FROM_DEVICE) { task 1941 drivers/scsi/isci/request.c } else if (task->data_dir == DMA_TO_DEVICE) { task 2065 drivers/scsi/isci/request.c struct sas_task *task = isci_request_access_task(ireq); task 2069 drivers/scsi/isci/request.c if (task->data_dir == DMA_NONE) { task 2468 drivers/scsi/isci/request.c struct sas_task *task, task 2484 drivers/scsi/isci/request.c task->task_status.stat = resp_iu->status; task 2487 drivers/scsi/isci/request.c sas_ssp_task_response(dev, task, resp_iu); task 2503 drivers/scsi/isci/request.c struct sas_task *task, task 2512 drivers/scsi/isci/request.c task->task_status.open_rej_reason = open_rej_reason; task 2527 drivers/scsi/isci/request.c struct sas_task *task, task 2556 drivers/scsi/isci/request.c if (task->task_proto == SAS_PROTOCOL_SMP) { task 2617 drivers/scsi/isci/request.c request, task, response_ptr, status_ptr, task 2627 drivers/scsi/isci/request.c request, task, response_ptr, status_ptr, task 2634 drivers/scsi/isci/request.c request, task, response_ptr, status_ptr, task 2641 drivers/scsi/isci/request.c request, task, response_ptr, status_ptr, task 2648 drivers/scsi/isci/request.c request, task, response_ptr, status_ptr, task 2655 drivers/scsi/isci/request.c request, task, response_ptr, status_ptr, task 2662 drivers/scsi/isci/request.c request, task, response_ptr, status_ptr, task 2669 drivers/scsi/isci/request.c request, task, response_ptr, status_ptr, task 2676 drivers/scsi/isci/request.c request, task, response_ptr, status_ptr, task 2709 drivers/scsi/isci/request.c if (task->task_proto == SAS_PROTOCOL_SMP) task 2717 drivers/scsi/isci/request.c static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis) task 2719 drivers/scsi/isci/request.c struct task_status_struct *ts = &task->task_status; task 2739 drivers/scsi/isci/request.c struct sas_task *task = isci_request_access_task(request); task 2749 
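The PIO data-in entries above copy received bytes back into the task's buffer; reading the fragments, the non-scatterlist leg appears to be a direct memcpy guarded by a size assertion (the sg leg walks task->scatter instead — branch structure assumed from the fragments):

BUG_ON(task->total_xfer_len < total_len);       /* buffer must fit */
memcpy(task->scatter, src_addr, total_len);     /* non-sg leg only */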
drivers/scsi/isci/request.c __func__, request, task, task->data_dir, completion_status); task 2759 drivers/scsi/isci/request.c __func__, request, task); task 2761 drivers/scsi/isci/request.c if (sas_protocol_ata(task->task_proto)) { task 2762 drivers/scsi/isci/request.c isci_process_stp_response(task, &request->stp.rsp); task 2763 drivers/scsi/isci/request.c } else if (SAS_PROTOCOL_SSP == task->task_proto) { task 2767 drivers/scsi/isci/request.c isci_request_process_response_iu(task, resp_iu, task 2770 drivers/scsi/isci/request.c } else if (SAS_PROTOCOL_SMP == task->task_proto) { task 2785 drivers/scsi/isci/request.c response = task->task_status.resp; task 2786 drivers/scsi/isci/request.c status = task->task_status.stat; task 2804 drivers/scsi/isci/request.c task->task_status.residual task 2805 drivers/scsi/isci/request.c = task->total_xfer_len - transferred_length; task 2810 drivers/scsi/isci/request.c if (task->task_status.residual != 0) task 2826 drivers/scsi/isci/request.c __func__, request, task); task 2845 drivers/scsi/isci/request.c task, &response, task 2857 drivers/scsi/isci/request.c spin_lock_irqsave(&task->task_state_lock, task_flags); task 2858 drivers/scsi/isci/request.c task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; task 2859 drivers/scsi/isci/request.c spin_unlock_irqrestore(&task->task_state_lock, task_flags); task 2899 drivers/scsi/isci/request.c if (SAS_PROTOCOL_SMP == task->task_proto) task 2906 drivers/scsi/isci/request.c switch (task->task_proto) { task 2908 drivers/scsi/isci/request.c if (task->data_dir == DMA_NONE) task 2910 drivers/scsi/isci/request.c if (task->num_scatter == 0) task 2914 drivers/scsi/isci/request.c task->total_xfer_len, task->data_dir); task 2916 drivers/scsi/isci/request.c dma_unmap_sg(&ihost->pdev->dev, task->scatter, task 2917 drivers/scsi/isci/request.c request->num_sg_entries, task->data_dir); task 2920 drivers/scsi/isci/request.c struct scatterlist *sg = &task->smp_task.smp_req; task 2937 drivers/scsi/isci/request.c spin_lock_irqsave(&task->task_state_lock, task_flags); task 2939 drivers/scsi/isci/request.c task->task_status.resp = response; task 2940 drivers/scsi/isci/request.c task->task_status.stat = status; task 2944 drivers/scsi/isci/request.c task->task_state_flags |= SAS_TASK_STATE_DONE; task 2945 drivers/scsi/isci/request.c task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | task 2948 drivers/scsi/isci/request.c spin_unlock_irqrestore(&task->task_state_lock, task_flags); task 2959 drivers/scsi/isci/request.c ireq_done(ihost, request, task); task 2967 drivers/scsi/isci/request.c struct sas_task *task; task 2972 drivers/scsi/isci/request.c task = (test_bit(IREQ_TMF, &ireq->flags)) ? 
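Residual accounting in the completion entries above: whatever the controller reports as transferred is subtracted from the task's total, and a nonzero residual matters to the upper layer. Sketch; the underrun follow-up is an assumption, as the index truncates that branch:

task->task_status.residual =
        task->total_xfer_len - transferred_length;

if (task->task_status.residual != 0)
        status = SAS_DATA_UNDERRUN;     /* assumed follow-up */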
NULL : isci_request_access_task(ireq); task 2977 drivers/scsi/isci/request.c if (!task && dev->dev_type == SAS_END_DEVICE) { task 2979 drivers/scsi/isci/request.c } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { task 2981 drivers/scsi/isci/request.c } else if (task && sas_protocol_ata(task->task_proto) && task 2982 drivers/scsi/isci/request.c !task->ata_task.use_ncq) { task 2984 drivers/scsi/isci/request.c task->ata_task.fis.command == ATA_CMD_PACKET) { task 2986 drivers/scsi/isci/request.c } else if (task->data_dir == DMA_NONE) { task 2988 drivers/scsi/isci/request.c } else if (task->ata_task.dma_xfer) { task 3154 drivers/scsi/isci/request.c struct sas_task *task = isci_request_access_task(ireq); task 3156 drivers/scsi/isci/request.c struct ata_queued_cmd *qc = task->uldd_task; task 3164 drivers/scsi/isci/request.c memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); task 3165 drivers/scsi/isci/request.c if (!task->ata_task.device_control_reg_update) task 3186 drivers/scsi/isci/request.c struct sas_task *task) task 3188 drivers/scsi/isci/request.c struct scatterlist *sg = &task->smp_task.smp_req; task 3316 drivers/scsi/isci/request.c struct sas_task *task = isci_request_access_task(ireq); task 3320 drivers/scsi/isci/request.c status = sci_io_request_construct_smp(dev, ireq, task); task 3345 drivers/scsi/isci/request.c struct sas_task *task = isci_request_access_task(request); task 3353 drivers/scsi/isci/request.c task->num_scatter); task 3359 drivers/scsi/isci/request.c if (task->num_scatter && task 3360 drivers/scsi/isci/request.c !sas_protocol_ata(task->task_proto) && task 3361 drivers/scsi/isci/request.c !(SAS_PROTOCOL_SMP & task->task_proto)) { task 3365 drivers/scsi/isci/request.c task->scatter, task 3366 drivers/scsi/isci/request.c task->num_scatter, task 3367 drivers/scsi/isci/request.c task->data_dir task 3383 drivers/scsi/isci/request.c switch (task->task_proto) { task 3418 drivers/scsi/isci/request.c struct sas_task *task, task 3424 drivers/scsi/isci/request.c ireq->ttype_ptr.io_task_ptr = task; task 3426 drivers/scsi/isci/request.c task->lldd_task = ireq; task 3445 drivers/scsi/isci/request.c struct sas_task *task, u16 tag) task 3453 drivers/scsi/isci/request.c ireq = isci_io_request_from_tag(ihost, task, tag); task 3468 drivers/scsi/isci/request.c if (isci_task_is_ncq_recovery(task)) { task 3514 drivers/scsi/isci/request.c spin_lock_irqsave(&task->task_state_lock, flags); task 3515 drivers/scsi/isci/request.c task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; task 3516 drivers/scsi/isci/request.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 3521 drivers/scsi/isci/request.c sas_task_abort(task); task 294 drivers/scsi/isci/request.h struct sas_task *task, u16 tag); task 303 drivers/scsi/isci/request.h static inline int isci_task_is_ncq_recovery(struct sas_task *task) task 305 drivers/scsi/isci/request.h return (sas_protocol_ata(task->task_proto) && task 306 drivers/scsi/isci/request.h task->ata_task.fis.command == ATA_CMD_READ_LOG_EXT && task 307 drivers/scsi/isci/request.h task->ata_task.fis.lbal == ATA_LOG_SATA_NCQ); task 76 drivers/scsi/isci/task.c static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task, task 85 drivers/scsi/isci/task.c __func__, task, response, status); task 87 drivers/scsi/isci/task.c spin_lock_irqsave(&task->task_state_lock, flags); task 89 drivers/scsi/isci/task.c task->task_status.resp = response; task 90 drivers/scsi/isci/task.c task->task_status.stat = status; task 93 drivers/scsi/isci/task.c 
task->task_state_flags |= SAS_TASK_STATE_DONE; task 94 drivers/scsi/isci/task.c task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | task 96 drivers/scsi/isci/task.c task->lldd_task = NULL; task 97 drivers/scsi/isci/task.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 99 drivers/scsi/isci/task.c task->task_done(task); task 102 drivers/scsi/isci/task.c #define for_each_sas_task(num, task) \ task 104 drivers/scsi/isci/task.c task = list_entry(task->list.next, struct sas_task, list)) task 108 drivers/scsi/isci/task.c struct sas_task *task) task 112 drivers/scsi/isci/task.c isci_task_is_ncq_recovery(task)) task 124 drivers/scsi/isci/task.c int isci_task_execute_task(struct sas_task *task, gfp_t gfp_flags) task 126 drivers/scsi/isci/task.c struct isci_host *ihost = dev_to_ihost(task->dev); task 134 drivers/scsi/isci/task.c idev = isci_lookup_device(task->dev); task 135 drivers/scsi/isci/task.c io_ready = isci_device_io_ready(idev, task); task 141 drivers/scsi/isci/task.c task, task->dev, idev, idev ? idev->flags : 0, task 142 drivers/scsi/isci/task.c task->uldd_task); task 145 drivers/scsi/isci/task.c isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED, task 151 drivers/scsi/isci/task.c isci_task_refuse(ihost, task, SAS_TASK_COMPLETE, task 155 drivers/scsi/isci/task.c spin_lock_irqsave(&task->task_state_lock, flags); task 157 drivers/scsi/isci/task.c if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { task 159 drivers/scsi/isci/task.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 161 drivers/scsi/isci/task.c isci_task_refuse(ihost, task, task 165 drivers/scsi/isci/task.c task->task_state_flags |= SAS_TASK_AT_INITIATOR; task 166 drivers/scsi/isci/task.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 169 drivers/scsi/isci/task.c status = isci_request_execute(ihost, idev, task, tag); task 172 drivers/scsi/isci/task.c spin_lock_irqsave(&task->task_state_lock, flags); task 174 drivers/scsi/isci/task.c task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; task 175 drivers/scsi/isci/task.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 181 drivers/scsi/isci/task.c isci_task_refuse(ihost, task, task 193 drivers/scsi/isci/task.c isci_task_refuse(ihost, task, task 478 drivers/scsi/isci/task.c int isci_task_abort_task(struct sas_task *task) task 480 drivers/scsi/isci/task.c struct isci_host *ihost = dev_to_ihost(task->dev); task 495 drivers/scsi/isci/task.c spin_lock(&task->task_state_lock); task 497 drivers/scsi/isci/task.c old_request = task->lldd_task; task 500 drivers/scsi/isci/task.c if (!(task->task_state_flags & SAS_TASK_STATE_DONE) && task 501 drivers/scsi/isci/task.c (task->task_state_flags & SAS_TASK_AT_INITIATOR) && task 503 drivers/scsi/isci/task.c idev = isci_get_device(task->dev->lldd_dev); task 507 drivers/scsi/isci/task.c spin_unlock(&task->task_state_lock); task 513 drivers/scsi/isci/task.c (dev_is_sata(task->dev) ? 
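isci_task_refuse(), completed just above, is the canonical "complete without issuing" path: record the verdict, mark DONE, detach lldd_task, then hand the task back via task_done() outside the lock. Sketch; the cleared-flag mask is abridged where the index truncates it:

spin_lock_irqsave(&task->task_state_lock, flags);
task->task_status.resp = response;
task->task_status.stat = status;
task->task_state_flags |= SAS_TASK_STATE_DONE;
task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;       /* mask abridged */
task->lldd_task = NULL;
spin_unlock_irqrestore(&task->task_state_lock, flags);

task->task_done(task);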
"STP/SATA" task 514 drivers/scsi/isci/task.c : ((dev_is_expander(task->dev->dev_type)) task 521 drivers/scsi/isci/task.c task, old_request); task 533 drivers/scsi/isci/task.c spin_lock_irqsave(&task->task_state_lock, flags); task 534 drivers/scsi/isci/task.c task->task_state_flags |= SAS_TASK_STATE_DONE; task 535 drivers/scsi/isci/task.c task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | task 537 drivers/scsi/isci/task.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 543 drivers/scsi/isci/task.c __func__, task); task 552 drivers/scsi/isci/task.c __func__, idev, old_request, task); task 558 drivers/scsi/isci/task.c if (task->task_proto == SAS_PROTOCOL_SMP || task 559 drivers/scsi/isci/task.c sas_protocol_ata(task->task_proto) || task 573 drivers/scsi/isci/task.c ((task->task_proto == SAS_PROTOCOL_SMP) task 575 drivers/scsi/isci/task.c : (sas_protocol_ata(task->task_proto) task 583 drivers/scsi/isci/task.c spin_lock_irqsave(&task->task_state_lock, flags); task 584 drivers/scsi/isci/task.c task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | task 586 drivers/scsi/isci/task.c task->task_state_flags |= SAS_TASK_STATE_DONE; task 587 drivers/scsi/isci/task.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 605 drivers/scsi/isci/task.c __func__, idev, task, old_request); task 676 drivers/scsi/isci/task.c struct sas_task *task) task 679 drivers/scsi/isci/task.c if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) task 133 drivers/scsi/isci/task.h struct sas_task *task, task 137 drivers/scsi/isci/task.h struct sas_task *task); task 152 drivers/scsi/isci/task.h struct sas_task *task); task 368 drivers/scsi/iscsi_tcp.c static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task) task 370 drivers/scsi/iscsi_tcp.c struct iscsi_conn *conn = task->conn; task 506 drivers/scsi/iscsi_tcp.c static int iscsi_sw_tcp_pdu_init(struct iscsi_task *task, task 509 drivers/scsi/iscsi_tcp.c struct iscsi_conn *conn = task->conn; task 512 drivers/scsi/iscsi_tcp.c iscsi_sw_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len); task 517 drivers/scsi/iscsi_tcp.c if (!task->sc) task 518 drivers/scsi/iscsi_tcp.c iscsi_sw_tcp_send_linear_data_prep(conn, task->data, count); task 520 drivers/scsi/iscsi_tcp.c struct scsi_data_buffer *sdb = &task->sc->sdb; task 534 drivers/scsi/iscsi_tcp.c static int iscsi_sw_tcp_pdu_alloc(struct iscsi_task *task, uint8_t opcode) task 536 drivers/scsi/iscsi_tcp.c struct iscsi_tcp_task *tcp_task = task->dd_data; task 538 drivers/scsi/iscsi_tcp.c task->hdr = task->dd_data + sizeof(*tcp_task); task 539 drivers/scsi/iscsi_tcp.c task->hdr_max = sizeof(struct iscsi_sw_tcp_hdrbuf) - ISCSI_DIGEST_SIZE; task 134 drivers/scsi/libiscsi.c void iscsi_prep_data_out_pdu(struct iscsi_task *task, struct iscsi_r2t_info *r2t, task 137 drivers/scsi/libiscsi.c struct iscsi_conn *conn = task->conn; task 140 drivers/scsi/libiscsi.c task->hdr_len = sizeof(struct iscsi_data); task 147 drivers/scsi/libiscsi.c hdr->lun = task->lun; task 148 drivers/scsi/libiscsi.c hdr->itt = task->hdr_itt; task 164 drivers/scsi/libiscsi.c static int iscsi_add_hdr(struct iscsi_task *task, unsigned len) task 166 drivers/scsi/libiscsi.c unsigned exp_len = task->hdr_len + len; task 168 drivers/scsi/libiscsi.c if (exp_len > task->hdr_max) { task 174 drivers/scsi/libiscsi.c task->hdr_len = exp_len; task 181 drivers/scsi/libiscsi.c static int iscsi_prep_ecdb_ahs(struct iscsi_task *task) task 183 drivers/scsi/libiscsi.c struct scsi_cmnd *cmd = task->sc; task 189 drivers/scsi/libiscsi.c ecdb_ahdr = iscsi_next_hdr(task); task 197 
drivers/scsi/libiscsi.c rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) + task 210 drivers/scsi/libiscsi.c ISCSI_DBG_SESSION(task->conn->session, task 214 drivers/scsi/libiscsi.c task->hdr_len); task 231 drivers/scsi/libiscsi.c static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode) task 233 drivers/scsi/libiscsi.c struct iscsi_conn *conn = task->conn; task 249 drivers/scsi/libiscsi.c if (hdr_lun != task->sc->device->lun) task 261 drivers/scsi/libiscsi.c opcode, task->itt, task 262 drivers/scsi/libiscsi.c task->hdr_itt); task 273 drivers/scsi/libiscsi.c opcode, task->itt, task 274 drivers/scsi/libiscsi.c task->hdr_itt); task 286 drivers/scsi/libiscsi.c task->hdr_itt == tmf->rtt) { task 290 drivers/scsi/libiscsi.c "progress\n", task->itt, task 291 drivers/scsi/libiscsi.c task->hdr_itt); task 307 drivers/scsi/libiscsi.c static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) task 309 drivers/scsi/libiscsi.c struct iscsi_conn *conn = task->conn; task 311 drivers/scsi/libiscsi.c struct scsi_cmnd *sc = task->sc; task 317 drivers/scsi/libiscsi.c rc = iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_CMD); task 322 drivers/scsi/libiscsi.c rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD); task 326 drivers/scsi/libiscsi.c hdr = (struct iscsi_scsi_req *)task->hdr; task 331 drivers/scsi/libiscsi.c hdr->itt = task->hdr_itt = itt; task 333 drivers/scsi/libiscsi.c hdr->itt = task->hdr_itt = build_itt(task->itt, task 334 drivers/scsi/libiscsi.c task->conn->session->age); task 335 drivers/scsi/libiscsi.c task->hdr_len = 0; task 336 drivers/scsi/libiscsi.c rc = iscsi_add_hdr(task, sizeof(*hdr)); task 342 drivers/scsi/libiscsi.c task->lun = hdr->lun; task 348 drivers/scsi/libiscsi.c rc = iscsi_prep_ecdb_ahs(task); task 355 drivers/scsi/libiscsi.c task->imm_count = 0; task 357 drivers/scsi/libiscsi.c task->protected = true; task 362 drivers/scsi/libiscsi.c struct iscsi_r2t_info *r2t = &task->unsol_r2t; task 383 drivers/scsi/libiscsi.c task->imm_count = min(session->first_burst, task 386 drivers/scsi/libiscsi.c task->imm_count = min(transfer_length, task 388 drivers/scsi/libiscsi.c hton24(hdr->dlength, task->imm_count); task 395 drivers/scsi/libiscsi.c task->imm_count; task 396 drivers/scsi/libiscsi.c r2t->data_offset = task->imm_count; task 401 drivers/scsi/libiscsi.c if (!task->unsol_r2t.data_length) task 413 drivers/scsi/libiscsi.c hdrlength = task->hdr_len - sizeof(*hdr); task 420 drivers/scsi/libiscsi.c hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn); task 422 drivers/scsi/libiscsi.c if (session->tt->init_task && session->tt->init_task(task)) task 425 drivers/scsi/libiscsi.c task->state = ISCSI_TASK_RUNNING; task 433 drivers/scsi/libiscsi.c task->itt, transfer_length, task 447 drivers/scsi/libiscsi.c static void iscsi_free_task(struct iscsi_task *task) task 449 drivers/scsi/libiscsi.c struct iscsi_conn *conn = task->conn; task 451 drivers/scsi/libiscsi.c struct scsi_cmnd *sc = task->sc; task 452 drivers/scsi/libiscsi.c int oldstate = task->state; task 455 drivers/scsi/libiscsi.c task->itt, task->state, task->sc); task 457 drivers/scsi/libiscsi.c session->tt->cleanup_task(task); task 458 drivers/scsi/libiscsi.c task->state = ISCSI_TASK_FREE; task 459 drivers/scsi/libiscsi.c task->sc = NULL; task 463 drivers/scsi/libiscsi.c if (conn->login_task == task) task 466 drivers/scsi/libiscsi.c kfifo_in(&session->cmdpool.queue, (void*)&task, sizeof(void*)); task 480 drivers/scsi/libiscsi.c void __iscsi_get_task(struct iscsi_task *task) task 482 drivers/scsi/libiscsi.c 
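The immediate-data sizing in iscsi_prep_scsi_cmd_pdu() above caps the first PDU's payload at the negotiated limits, then starts the unsolicited R2T where immediate data ended. Sketch; the branch condition is paraphrased since the index omits it:

if (session->imm_data_en) {
        /* condition paraphrased: large transfers cap at first_burst */
        if (transfer_length >= session->first_burst)
                task->imm_count = min(session->first_burst,
                                      conn->max_xmit_dlength);
        else
                task->imm_count = min(transfer_length,
                                      conn->max_xmit_dlength);
        hton24(hdr->dlength, task->imm_count);
}
/* unsolicited data-out resumes where immediate data ended */
r2t->data_offset = task->imm_count;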
refcount_inc(&task->refcount); task 486 drivers/scsi/libiscsi.c void __iscsi_put_task(struct iscsi_task *task) task 488 drivers/scsi/libiscsi.c if (refcount_dec_and_test(&task->refcount)) task 489 drivers/scsi/libiscsi.c iscsi_free_task(task); task 493 drivers/scsi/libiscsi.c void iscsi_put_task(struct iscsi_task *task) task 495 drivers/scsi/libiscsi.c struct iscsi_session *session = task->conn->session; task 499 drivers/scsi/libiscsi.c __iscsi_put_task(task); task 511 drivers/scsi/libiscsi.c static void iscsi_complete_task(struct iscsi_task *task, int state) task 513 drivers/scsi/libiscsi.c struct iscsi_conn *conn = task->conn; task 517 drivers/scsi/libiscsi.c task->itt, task->state, task->sc); task 518 drivers/scsi/libiscsi.c if (task->state == ISCSI_TASK_COMPLETED || task 519 drivers/scsi/libiscsi.c task->state == ISCSI_TASK_ABRT_TMF || task 520 drivers/scsi/libiscsi.c task->state == ISCSI_TASK_ABRT_SESS_RECOV || task 521 drivers/scsi/libiscsi.c task->state == ISCSI_TASK_REQUEUE_SCSIQ) task 523 drivers/scsi/libiscsi.c WARN_ON_ONCE(task->state == ISCSI_TASK_FREE); task 524 drivers/scsi/libiscsi.c task->state = state; task 527 drivers/scsi/libiscsi.c if (!list_empty(&task->running)) { task 529 drivers/scsi/libiscsi.c list_del_init(&task->running); task 533 drivers/scsi/libiscsi.c if (conn->task == task) task 534 drivers/scsi/libiscsi.c conn->task = NULL; task 536 drivers/scsi/libiscsi.c if (conn->ping_task == task) task 540 drivers/scsi/libiscsi.c __iscsi_put_task(task); task 554 drivers/scsi/libiscsi.c void iscsi_complete_scsi_task(struct iscsi_task *task, task 557 drivers/scsi/libiscsi.c struct iscsi_conn *conn = task->conn; task 559 drivers/scsi/libiscsi.c ISCSI_DBG_SESSION(conn->session, "[itt 0x%x]\n", task->itt); task 563 drivers/scsi/libiscsi.c iscsi_complete_task(task, ISCSI_TASK_COMPLETED); task 573 drivers/scsi/libiscsi.c static void fail_scsi_task(struct iscsi_task *task, int err) task 575 drivers/scsi/libiscsi.c struct iscsi_conn *conn = task->conn; task 584 drivers/scsi/libiscsi.c sc = task->sc; task 588 drivers/scsi/libiscsi.c if (task->state == ISCSI_TASK_PENDING) { task 606 drivers/scsi/libiscsi.c iscsi_complete_task(task, state); task 611 drivers/scsi/libiscsi.c struct iscsi_task *task) task 614 drivers/scsi/libiscsi.c struct iscsi_hdr *hdr = task->hdr; task 643 drivers/scsi/libiscsi.c if (session->tt->init_task && session->tt->init_task(task)) task 649 drivers/scsi/libiscsi.c task->state = ISCSI_TASK_RUNNING; task 652 drivers/scsi/libiscsi.c hdr->itt, task->data_count); task 663 drivers/scsi/libiscsi.c struct iscsi_task *task; task 687 drivers/scsi/libiscsi.c task = conn->login_task; task 701 drivers/scsi/libiscsi.c (void*)&task, sizeof(void*))) task 709 drivers/scsi/libiscsi.c refcount_set(&task->refcount, 1); task 710 drivers/scsi/libiscsi.c task->conn = conn; task 711 drivers/scsi/libiscsi.c task->sc = NULL; task 712 drivers/scsi/libiscsi.c INIT_LIST_HEAD(&task->running); task 713 drivers/scsi/libiscsi.c task->state = ISCSI_TASK_PENDING; task 716 drivers/scsi/libiscsi.c memcpy(task->data, data, data_size); task 717 drivers/scsi/libiscsi.c task->data_count = data_size; task 719 drivers/scsi/libiscsi.c task->data_count = 0; task 722 drivers/scsi/libiscsi.c if (conn->session->tt->alloc_pdu(task, hdr->opcode)) { task 729 drivers/scsi/libiscsi.c itt = task->hdr->itt; task 730 drivers/scsi/libiscsi.c task->hdr_len = sizeof(struct iscsi_hdr); task 731 drivers/scsi/libiscsi.c memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr)); task 735 drivers/scsi/libiscsi.c task->hdr->itt = 
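Task lifetime in libiscsi, as the entries above show, is plain refcounting: the queuing path holds a reference, completion drops one, and iscsi_free_task() returns the task to the session's kfifo pool only on the final put. The pair in full:

void __iscsi_get_task(struct iscsi_task *task)
{
        refcount_inc(&task->refcount);
}

void __iscsi_put_task(struct iscsi_task *task)
{
        if (refcount_dec_and_test(&task->refcount))
                iscsi_free_task(task);  /* back into session->cmdpool */
}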
itt; task 737 drivers/scsi/libiscsi.c task->hdr->itt = build_itt(task->itt, task 738 drivers/scsi/libiscsi.c task->conn->session->age); task 742 drivers/scsi/libiscsi.c if (iscsi_prep_mgmt_task(conn, task)) task 745 drivers/scsi/libiscsi.c if (session->tt->xmit_task(task)) task 749 drivers/scsi/libiscsi.c list_add_tail(&task->running, &conn->mgmtqueue); task 754 drivers/scsi/libiscsi.c return task; task 759 drivers/scsi/libiscsi.c __iscsi_put_task(task); task 791 drivers/scsi/libiscsi.c struct iscsi_task *task, char *data, task 796 drivers/scsi/libiscsi.c struct scsi_cmnd *sc = task->sc; task 803 drivers/scsi/libiscsi.c if (task->protected) { task 814 drivers/scsi/libiscsi.c ascq = session->tt->check_protection(task, §or); task 874 drivers/scsi/libiscsi.c sc, sc->result, task->itt); task 876 drivers/scsi/libiscsi.c iscsi_complete_task(task, ISCSI_TASK_COMPLETED); task 890 drivers/scsi/libiscsi.c struct iscsi_task *task) task 893 drivers/scsi/libiscsi.c struct scsi_cmnd *sc = task->sc; task 915 drivers/scsi/libiscsi.c sc, sc->result, task->itt); task 917 drivers/scsi/libiscsi.c iscsi_complete_task(task, ISCSI_TASK_COMPLETED); task 942 drivers/scsi/libiscsi.c struct iscsi_task *task; task 958 drivers/scsi/libiscsi.c task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0); task 959 drivers/scsi/libiscsi.c if (!task) { task 964 drivers/scsi/libiscsi.c conn->ping_task = task; task 981 drivers/scsi/libiscsi.c static int iscsi_nop_out_rsp(struct iscsi_task *task, task 984 drivers/scsi/libiscsi.c struct iscsi_conn *conn = task->conn; task 987 drivers/scsi/libiscsi.c if (conn->ping_task != task) { task 997 drivers/scsi/libiscsi.c iscsi_complete_task(task, ISCSI_TASK_COMPLETED); task 1057 drivers/scsi/libiscsi.c struct iscsi_task *task; task 1062 drivers/scsi/libiscsi.c task = iscsi_itt_to_task(conn, rejected_pdu.itt); task 1063 drivers/scsi/libiscsi.c if (!task) { task 1069 drivers/scsi/libiscsi.c rc = iscsi_nop_out_rsp(task, task 1129 drivers/scsi/libiscsi.c struct iscsi_task *task; task 1183 drivers/scsi/libiscsi.c task = iscsi_itt_to_ctask(conn, hdr->itt); task 1184 drivers/scsi/libiscsi.c if (!task) task 1186 drivers/scsi/libiscsi.c task->last_xfer = jiffies; task 1198 drivers/scsi/libiscsi.c task = iscsi_itt_to_task(conn, hdr->itt); task 1199 drivers/scsi/libiscsi.c if (!task) task 1208 drivers/scsi/libiscsi.c iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen); task 1211 drivers/scsi/libiscsi.c iscsi_data_in_rsp(conn, hdr, task); task 1237 drivers/scsi/libiscsi.c iscsi_complete_task(task, ISCSI_TASK_COMPLETED); task 1247 drivers/scsi/libiscsi.c rc = iscsi_nop_out_rsp(task, (struct iscsi_nopin*)hdr, task 1260 drivers/scsi/libiscsi.c iscsi_complete_task(task, ISCSI_TASK_COMPLETED); task 1320 drivers/scsi/libiscsi.c struct iscsi_task *task; task 1325 drivers/scsi/libiscsi.c task = iscsi_itt_to_task(conn, itt); task 1326 drivers/scsi/libiscsi.c if (!task || !task->sc) task 1329 drivers/scsi/libiscsi.c if (task->sc->SCp.phase != conn->session->age) { task 1332 drivers/scsi/libiscsi.c task->sc->SCp.phase, conn->session->age); task 1336 drivers/scsi/libiscsi.c return task; task 1409 drivers/scsi/libiscsi.c struct iscsi_task *task = conn->task; task 1416 drivers/scsi/libiscsi.c if (conn->task == NULL) { task 1420 drivers/scsi/libiscsi.c __iscsi_get_task(task); task 1423 drivers/scsi/libiscsi.c rc = conn->session->tt->xmit_task(task); task 1427 drivers/scsi/libiscsi.c task->last_xfer = jiffies; task 1428 drivers/scsi/libiscsi.c conn->task = NULL; task 1432 drivers/scsi/libiscsi.c 
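The iscsi_xmit_task() entries above show why that refcount matters: an extra reference pins conn->task while the transport transmits without the session lock held. Condensed sketch, lock juggling omitted:

__iscsi_get_task(task);         /* pin across the unlocked xmit */
rc = conn->session->tt->xmit_task(task);
if (!rc) {
        task->last_xfer = jiffies;
        conn->task = NULL;      /* fully transmitted */
}
__iscsi_put_task(task);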
__iscsi_put_task(task); task 1445 drivers/scsi/libiscsi.c void iscsi_requeue_task(struct iscsi_task *task) task 1447 drivers/scsi/libiscsi.c struct iscsi_conn *conn = task->conn; task 1454 drivers/scsi/libiscsi.c if (list_empty(&task->running)) task 1455 drivers/scsi/libiscsi.c list_add_tail(&task->running, &conn->requeue); task 1472 drivers/scsi/libiscsi.c struct iscsi_task *task; task 1482 drivers/scsi/libiscsi.c if (conn->task) { task 1496 drivers/scsi/libiscsi.c conn->task = list_entry(conn->mgmtqueue.next, task 1498 drivers/scsi/libiscsi.c list_del_init(&conn->task->running); task 1500 drivers/scsi/libiscsi.c if (iscsi_prep_mgmt_task(conn, conn->task)) { task 1503 drivers/scsi/libiscsi.c __iscsi_put_task(conn->task); task 1505 drivers/scsi/libiscsi.c conn->task = NULL; task 1517 drivers/scsi/libiscsi.c conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task, task 1519 drivers/scsi/libiscsi.c list_del_init(&conn->task->running); task 1522 drivers/scsi/libiscsi.c fail_scsi_task(conn->task, DID_IMM_RETRY); task 1526 drivers/scsi/libiscsi.c rc = iscsi_prep_scsi_cmd_pdu(conn->task); task 1530 drivers/scsi/libiscsi.c list_add_tail(&conn->task->running, task 1532 drivers/scsi/libiscsi.c conn->task = NULL; task 1536 drivers/scsi/libiscsi.c fail_scsi_task(conn->task, DID_ABORT); task 1560 drivers/scsi/libiscsi.c task = list_entry(conn->requeue.next, struct iscsi_task, task 1562 drivers/scsi/libiscsi.c if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT)) task 1565 drivers/scsi/libiscsi.c conn->task = task; task 1566 drivers/scsi/libiscsi.c list_del_init(&conn->task->running); task 1567 drivers/scsi/libiscsi.c conn->task->state = ISCSI_TASK_RUNNING; task 1601 drivers/scsi/libiscsi.c struct iscsi_task *task; task 1604 drivers/scsi/libiscsi.c (void *) &task, sizeof(void *))) task 1608 drivers/scsi/libiscsi.c sc->SCp.ptr = (char *) task; task 1610 drivers/scsi/libiscsi.c refcount_set(&task->refcount, 1); task 1611 drivers/scsi/libiscsi.c task->state = ISCSI_TASK_PENDING; task 1612 drivers/scsi/libiscsi.c task->conn = conn; task 1613 drivers/scsi/libiscsi.c task->sc = sc; task 1614 drivers/scsi/libiscsi.c task->have_checked_conn = false; task 1615 drivers/scsi/libiscsi.c task->last_timeout = jiffies; task 1616 drivers/scsi/libiscsi.c task->last_xfer = jiffies; task 1617 drivers/scsi/libiscsi.c task->protected = false; task 1618 drivers/scsi/libiscsi.c INIT_LIST_HEAD(&task->running); task 1619 drivers/scsi/libiscsi.c return task; task 1642 drivers/scsi/libiscsi.c struct iscsi_task *task = NULL; task 1719 drivers/scsi/libiscsi.c task = iscsi_alloc_task(conn, sc); task 1720 drivers/scsi/libiscsi.c if (!task) { task 1726 drivers/scsi/libiscsi.c reason = iscsi_prep_scsi_cmd_pdu(task); task 1736 drivers/scsi/libiscsi.c if (session->tt->xmit_task(task)) { task 1743 drivers/scsi/libiscsi.c list_add_tail(&task->running, &conn->cmdqueue); task 1754 drivers/scsi/libiscsi.c iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ); task 1764 drivers/scsi/libiscsi.c iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ); task 1807 drivers/scsi/libiscsi.c struct iscsi_task *task; task 1809 drivers/scsi/libiscsi.c task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr, task 1811 drivers/scsi/libiscsi.c if (!task) { task 1856 drivers/scsi/libiscsi.c struct iscsi_task *task; task 1860 drivers/scsi/libiscsi.c task = conn->session->cmds[i]; task 1861 drivers/scsi/libiscsi.c if (!task->sc || task->state == ISCSI_TASK_FREE) task 1864 drivers/scsi/libiscsi.c if (lun != -1 && lun != task->sc->device->lun) 
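iscsi_data_xmit(), traced above, drains three lists in strict priority order. A condensed sketch of one round, locking and error paths omitted:

while (!list_empty(&conn->mgmtqueue)) {         /* 1: management PDUs */
        conn->task = list_entry(conn->mgmtqueue.next,
                                struct iscsi_task, running);
        list_del_init(&conn->task->running);
        /* prep, transmit, then conn->task = NULL */
}
/* 2: conn->cmdqueue (new SCSI commands), rechecking mgmtqueue after
 * each send; 3: conn->requeue (R2T data-outs), gated by
 * iscsi_check_tmf_restrictions() as the entries above show */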
task 1869 drivers/scsi/libiscsi.c task->sc, task->itt, task->state); task 1870 drivers/scsi/libiscsi.c fail_scsi_task(task, error); task 1937 drivers/scsi/libiscsi.c struct iscsi_task *task = NULL, *running_task; task 1949 drivers/scsi/libiscsi.c task = (struct iscsi_task *)sc->SCp.ptr; task 1950 drivers/scsi/libiscsi.c if (!task) { task 1995 drivers/scsi/libiscsi.c if (time_after(task->last_xfer, task->last_timeout)) { task 1999 drivers/scsi/libiscsi.c "%lu\n.", task->last_xfer, task->last_timeout); task 2000 drivers/scsi/libiscsi.c task->have_checked_conn = false; task 2018 drivers/scsi/libiscsi.c if (!running_task->sc || running_task == task || task 2027 drivers/scsi/libiscsi.c task->sc->jiffies_at_alloc)) task 2030 drivers/scsi/libiscsi.c if (time_after(running_task->last_xfer, task->last_timeout)) { task 2046 drivers/scsi/libiscsi.c task->last_xfer, running_task->last_xfer, task 2047 drivers/scsi/libiscsi.c task->last_timeout); task 2054 drivers/scsi/libiscsi.c if (task->have_checked_conn) task 2062 drivers/scsi/libiscsi.c task->have_checked_conn = true; task 2069 drivers/scsi/libiscsi.c task->have_checked_conn = true; task 2073 drivers/scsi/libiscsi.c if (task) task 2074 drivers/scsi/libiscsi.c task->last_timeout = jiffies; task 2126 drivers/scsi/libiscsi.c static void iscsi_prep_abort_task_pdu(struct iscsi_task *task, task 2133 drivers/scsi/libiscsi.c hdr->lun = task->lun; task 2134 drivers/scsi/libiscsi.c hdr->rtt = task->hdr_itt; task 2135 drivers/scsi/libiscsi.c hdr->refcmdsn = task->cmdsn; task 2143 drivers/scsi/libiscsi.c struct iscsi_task *task; task 2183 drivers/scsi/libiscsi.c task = (struct iscsi_task *)sc->SCp.ptr; task 2185 drivers/scsi/libiscsi.c sc, task->itt); task 2188 drivers/scsi/libiscsi.c if (!task->sc) { task 2193 drivers/scsi/libiscsi.c if (task->state == ISCSI_TASK_PENDING) { task 2194 drivers/scsi/libiscsi.c fail_scsi_task(task, DID_ABORT); task 2204 drivers/scsi/libiscsi.c iscsi_prep_abort_task_pdu(task, hdr); task 2223 drivers/scsi/libiscsi.c fail_scsi_task(task, DID_ABORT); task 2252 drivers/scsi/libiscsi.c sc, task->itt); task 2260 drivers/scsi/libiscsi.c task ? 
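The abort TMF above names its victim purely through protocol fields: same LUN, the referenced task tag, and the CmdSN of the original command. Reassembled sketch; opcode/function setup is omitted where the index truncates it:

static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
                                      struct iscsi_tm *hdr)
{
        hdr->lun = task->lun;
        hdr->rtt = task->hdr_itt;       /* referenced task tag */
        hdr->refcmdsn = task->cmdsn;    /* CmdSN of the original command */
}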
task->itt : 0); task 2814 drivers/scsi/libiscsi.c struct iscsi_task *task = session->cmds[cmd_i]; task 2817 drivers/scsi/libiscsi.c task->dd_data = &task[1]; task 2818 drivers/scsi/libiscsi.c task->itt = cmd_i; task 2819 drivers/scsi/libiscsi.c task->state = ISCSI_TASK_FREE; task 2820 drivers/scsi/libiscsi.c INIT_LIST_HEAD(&task->running); task 3065 drivers/scsi/libiscsi.c struct iscsi_task *task; task 3069 drivers/scsi/libiscsi.c task = conn->session->cmds[i]; task 3070 drivers/scsi/libiscsi.c if (task->sc) task 3073 drivers/scsi/libiscsi.c if (task->state == ISCSI_TASK_FREE) task 3078 drivers/scsi/libiscsi.c task->itt, task->state); task 3080 drivers/scsi/libiscsi.c if (task->state == ISCSI_TASK_PENDING) task 3083 drivers/scsi/libiscsi.c iscsi_complete_task(task, state); task 454 drivers/scsi/libiscsi_tcp.c void iscsi_tcp_cleanup_task(struct iscsi_task *task) task 456 drivers/scsi/libiscsi_tcp.c struct iscsi_tcp_task *tcp_task = task->dd_data; task 460 drivers/scsi/libiscsi_tcp.c if (!task->sc) task 468 drivers/scsi/libiscsi_tcp.c ISCSI_DBG_TCP(task->conn, "pending r2t dropped\n"); task 486 drivers/scsi/libiscsi_tcp.c static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task) task 489 drivers/scsi/libiscsi_tcp.c struct iscsi_tcp_task *tcp_task = task->dd_data; task 492 drivers/scsi/libiscsi_tcp.c unsigned total_in_length = task->sc->sdb.length; task 529 drivers/scsi/libiscsi_tcp.c static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task) task 532 drivers/scsi/libiscsi_tcp.c struct iscsi_tcp_task *tcp_task = task->dd_data; task 557 drivers/scsi/libiscsi_tcp.c if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) { task 560 drivers/scsi/libiscsi_tcp.c task->itt); task 577 drivers/scsi/libiscsi_tcp.c if (data_offset + data_length > task->sc->sdb.length) { task 581 drivers/scsi/libiscsi_tcp.c data_offset, task->sc->sdb.length); task 608 drivers/scsi/libiscsi_tcp.c iscsi_requeue_task(task); task 652 drivers/scsi/libiscsi_tcp.c struct iscsi_task *task; task 680 drivers/scsi/libiscsi_tcp.c task = iscsi_itt_to_ctask(conn, hdr->itt); task 681 drivers/scsi/libiscsi_tcp.c if (!task) task 684 drivers/scsi/libiscsi_tcp.c rc = iscsi_tcp_data_in(conn, task); task 691 drivers/scsi/libiscsi_tcp.c struct iscsi_tcp_task *tcp_task = task->dd_data; task 693 drivers/scsi/libiscsi_tcp.c struct scsi_data_buffer *sdb = &task->sc->sdb; task 711 drivers/scsi/libiscsi_tcp.c task->last_xfer = jiffies; task 734 drivers/scsi/libiscsi_tcp.c task = iscsi_itt_to_ctask(conn, hdr->itt); task 736 drivers/scsi/libiscsi_tcp.c if (!task) task 740 drivers/scsi/libiscsi_tcp.c else if (task->sc->sc_data_direction == DMA_TO_DEVICE) { task 741 drivers/scsi/libiscsi_tcp.c task->last_xfer = jiffies; task 743 drivers/scsi/libiscsi_tcp.c rc = iscsi_tcp_r2t_rsp(conn, task); task 961 drivers/scsi/libiscsi_tcp.c int iscsi_tcp_task_init(struct iscsi_task *task) task 963 drivers/scsi/libiscsi_tcp.c struct iscsi_tcp_task *tcp_task = task->dd_data; task 964 drivers/scsi/libiscsi_tcp.c struct iscsi_conn *conn = task->conn; task 965 drivers/scsi/libiscsi_tcp.c struct scsi_cmnd *sc = task->sc; task 973 drivers/scsi/libiscsi_tcp.c ISCSI_DBG_TCP(conn, "mtask deq [itt 0x%x]\n", task->itt); task 975 drivers/scsi/libiscsi_tcp.c return conn->session->tt->init_pdu(task, 0, task->data_count); task 983 drivers/scsi/libiscsi_tcp.c task->itt, task->imm_count, task->unsol_r2t.data_length); task 985 drivers/scsi/libiscsi_tcp.c err = conn->session->tt->init_pdu(task, 0, task->imm_count); task 988 
drivers/scsi/libiscsi_tcp.c task->imm_count = 0; task 993 drivers/scsi/libiscsi_tcp.c static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task) task 995 drivers/scsi/libiscsi_tcp.c struct iscsi_tcp_task *tcp_task = task->dd_data; task 998 drivers/scsi/libiscsi_tcp.c if (iscsi_task_has_unsol_data(task)) task 999 drivers/scsi/libiscsi_tcp.c r2t = &task->unsol_r2t; task 1006 drivers/scsi/libiscsi_tcp.c ISCSI_DBG_TCP(task->conn, task 1037 drivers/scsi/libiscsi_tcp.c int iscsi_tcp_task_xmit(struct iscsi_task *task) task 1039 drivers/scsi/libiscsi_tcp.c struct iscsi_conn *conn = task->conn; task 1046 drivers/scsi/libiscsi_tcp.c rc = session->tt->xmit_pdu(task); task 1051 drivers/scsi/libiscsi_tcp.c if (!task->sc) { task 1052 drivers/scsi/libiscsi_tcp.c if (task->hdr->itt == RESERVED_ITT) task 1053 drivers/scsi/libiscsi_tcp.c iscsi_put_task(task); task 1058 drivers/scsi/libiscsi_tcp.c if (task->sc->sc_data_direction != DMA_TO_DEVICE) task 1061 drivers/scsi/libiscsi_tcp.c r2t = iscsi_tcp_get_curr_r2t(task); task 1068 drivers/scsi/libiscsi_tcp.c rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_DATA_OUT); task 1071 drivers/scsi/libiscsi_tcp.c iscsi_prep_data_out_pdu(task, r2t, (struct iscsi_data *) task->hdr); task 1074 drivers/scsi/libiscsi_tcp.c r2t, r2t->datasn - 1, task->hdr->itt, task 1077 drivers/scsi/libiscsi_tcp.c rc = conn->session->tt->init_pdu(task, r2t->data_offset + r2t->sent, task 1131 drivers/scsi/libiscsi_tcp.c struct iscsi_task *task = session->cmds[cmd_i]; task 1132 drivers/scsi/libiscsi_tcp.c struct iscsi_tcp_task *tcp_task = task->dd_data; task 1161 drivers/scsi/libiscsi_tcp.c struct iscsi_task *task = session->cmds[i]; task 1162 drivers/scsi/libiscsi_tcp.c struct iscsi_tcp_task *tcp_task = task->dd_data; task 1176 drivers/scsi/libiscsi_tcp.c struct iscsi_task *task = session->cmds[i]; task 1177 drivers/scsi/libiscsi_tcp.c struct iscsi_tcp_task *tcp_task = task->dd_data; task 81 drivers/scsi/libsas/sas_ata.c static void sas_ata_task_done(struct sas_task *task) task 83 drivers/scsi/libsas/sas_ata.c struct ata_queued_cmd *qc = task->uldd_task; task 84 drivers/scsi/libsas/sas_ata.c struct domain_device *dev = task->dev; task 85 drivers/scsi/libsas/sas_ata.c struct task_status_struct *stat = &task->task_status; task 95 drivers/scsi/libsas/sas_ata.c task = NULL; task 101 drivers/scsi/libsas/sas_ata.c if (unlikely(!task)) task 159 drivers/scsi/libsas/sas_ata.c sas_free_task(task); task 164 drivers/scsi/libsas/sas_ata.c struct sas_task *task; task 181 drivers/scsi/libsas/sas_ata.c task = sas_alloc_task(GFP_ATOMIC); task 182 drivers/scsi/libsas/sas_ata.c if (!task) task 184 drivers/scsi/libsas/sas_ata.c task->dev = dev; task 185 drivers/scsi/libsas/sas_ata.c task->task_proto = SAS_PROTOCOL_STP; task 186 drivers/scsi/libsas/sas_ata.c task->task_done = sas_ata_task_done; task 197 drivers/scsi/libsas/sas_ata.c ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *)&task->ata_task.fis); task 198 drivers/scsi/libsas/sas_ata.c task->uldd_task = qc; task 200 drivers/scsi/libsas/sas_ata.c memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len); task 201 drivers/scsi/libsas/sas_ata.c task->total_xfer_len = qc->nbytes; task 202 drivers/scsi/libsas/sas_ata.c task->num_scatter = qc->n_elem; task 207 drivers/scsi/libsas/sas_ata.c task->total_xfer_len = xfer; task 208 drivers/scsi/libsas/sas_ata.c task->num_scatter = si; task 211 drivers/scsi/libsas/sas_ata.c task->data_dir = qc->dma_dir; task 212 drivers/scsi/libsas/sas_ata.c task->scatter = qc->sg; task 213 
drivers/scsi/libsas/sas_ata.c task->ata_task.retry_count = 1; task 214 drivers/scsi/libsas/sas_ata.c task->task_state_flags = SAS_TASK_STATE_PENDING; task 215 drivers/scsi/libsas/sas_ata.c qc->lldd_task = task; task 217 drivers/scsi/libsas/sas_ata.c task->ata_task.use_ncq = ata_is_ncq(qc->tf.protocol); task 218 drivers/scsi/libsas/sas_ata.c task->ata_task.dma_xfer = ata_is_dma(qc->tf.protocol); task 221 drivers/scsi/libsas/sas_ata.c ASSIGN_SAS_TASK(qc->scsicmd, task); task 223 drivers/scsi/libsas/sas_ata.c ret = i->dft->lldd_execute_task(task, GFP_ATOMIC); task 229 drivers/scsi/libsas/sas_ata.c sas_free_task(task); task 409 drivers/scsi/libsas/sas_ata.c static void sas_ata_internal_abort(struct sas_task *task) task 411 drivers/scsi/libsas/sas_ata.c struct sas_internal *si = dev_to_sas_internal(task->dev); task 415 drivers/scsi/libsas/sas_ata.c spin_lock_irqsave(&task->task_state_lock, flags); task 416 drivers/scsi/libsas/sas_ata.c if (task->task_state_flags & SAS_TASK_STATE_ABORTED || task 417 drivers/scsi/libsas/sas_ata.c task->task_state_flags & SAS_TASK_STATE_DONE) { task 418 drivers/scsi/libsas/sas_ata.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 419 drivers/scsi/libsas/sas_ata.c pr_debug("%s: Task %p already finished.\n", __func__, task); task 422 drivers/scsi/libsas/sas_ata.c task->task_state_flags |= SAS_TASK_STATE_ABORTED; task 423 drivers/scsi/libsas/sas_ata.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 425 drivers/scsi/libsas/sas_ata.c res = si->dft->lldd_abort_task(task); task 427 drivers/scsi/libsas/sas_ata.c spin_lock_irqsave(&task->task_state_lock, flags); task 428 drivers/scsi/libsas/sas_ata.c if (task->task_state_flags & SAS_TASK_STATE_DONE || task 430 drivers/scsi/libsas/sas_ata.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 439 drivers/scsi/libsas/sas_ata.c pr_warn("%s: Task %p leaked.\n", __func__, task); task 441 drivers/scsi/libsas/sas_ata.c if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) task 442 drivers/scsi/libsas/sas_ata.c task->task_state_flags &= ~SAS_TASK_STATE_ABORTED; task 443 drivers/scsi/libsas/sas_ata.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 447 drivers/scsi/libsas/sas_ata.c sas_free_task(task); task 464 drivers/scsi/libsas/sas_ata.c struct sas_task *task = qc->lldd_task; task 467 drivers/scsi/libsas/sas_ata.c if (!task) task 469 drivers/scsi/libsas/sas_ata.c task->uldd_task = NULL; task 470 drivers/scsi/libsas/sas_ata.c sas_ata_internal_abort(task); task 581 drivers/scsi/libsas/sas_ata.c void sas_ata_task_abort(struct sas_task *task) task 583 drivers/scsi/libsas/sas_ata.c struct ata_queued_cmd *qc = task->uldd_task; task 34 drivers/scsi/libsas/sas_expander.c struct sas_task *task = slow->task; task 37 drivers/scsi/libsas/sas_expander.c spin_lock_irqsave(&task->task_state_lock, flags); task 38 drivers/scsi/libsas/sas_expander.c if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { task 39 drivers/scsi/libsas/sas_expander.c task->task_state_flags |= SAS_TASK_STATE_ABORTED; task 40 drivers/scsi/libsas/sas_expander.c complete(&task->slow_task->completion); task 42 drivers/scsi/libsas/sas_expander.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 45 drivers/scsi/libsas/sas_expander.c static void smp_task_done(struct sas_task *task) task 47 drivers/scsi/libsas/sas_expander.c del_timer(&task->slow_task->timer); task 48 drivers/scsi/libsas/sas_expander.c complete(&task->slow_task->completion); task 58 drivers/scsi/libsas/sas_expander.c struct sas_task *task = NULL; task 69 
drivers/scsi/libsas/sas_expander.c task = sas_alloc_slow_task(GFP_KERNEL); task 70 drivers/scsi/libsas/sas_expander.c if (!task) { task 74 drivers/scsi/libsas/sas_expander.c task->dev = dev; task 75 drivers/scsi/libsas/sas_expander.c task->task_proto = dev->tproto; task 76 drivers/scsi/libsas/sas_expander.c task->smp_task.smp_req = *req; task 77 drivers/scsi/libsas/sas_expander.c task->smp_task.smp_resp = *resp; task 79 drivers/scsi/libsas/sas_expander.c task->task_done = smp_task_done; task 81 drivers/scsi/libsas/sas_expander.c task->slow_task->timer.function = smp_task_timedout; task 82 drivers/scsi/libsas/sas_expander.c task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ; task 83 drivers/scsi/libsas/sas_expander.c add_timer(&task->slow_task->timer); task 85 drivers/scsi/libsas/sas_expander.c res = i->dft->lldd_execute_task(task, GFP_KERNEL); task 88 drivers/scsi/libsas/sas_expander.c del_timer(&task->slow_task->timer); task 93 drivers/scsi/libsas/sas_expander.c wait_for_completion(&task->slow_task->completion); task 95 drivers/scsi/libsas/sas_expander.c if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { task 97 drivers/scsi/libsas/sas_expander.c i->dft->lldd_abort_task(task); task 98 drivers/scsi/libsas/sas_expander.c if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { task 103 drivers/scsi/libsas/sas_expander.c if (task->task_status.resp == SAS_TASK_COMPLETE && task 104 drivers/scsi/libsas/sas_expander.c task->task_status.stat == SAM_STAT_GOOD) { task 108 drivers/scsi/libsas/sas_expander.c if (task->task_status.resp == SAS_TASK_COMPLETE && task 109 drivers/scsi/libsas/sas_expander.c task->task_status.stat == SAS_DATA_UNDERRUN) { task 112 drivers/scsi/libsas/sas_expander.c res = task->task_status.residual; task 115 drivers/scsi/libsas/sas_expander.c if (task->task_status.resp == SAS_TASK_COMPLETE && task 116 drivers/scsi/libsas/sas_expander.c task->task_status.stat == SAS_DATA_OVERRUN) { task 120 drivers/scsi/libsas/sas_expander.c if (task->task_status.resp == SAS_TASK_UNDELIVERED && task 121 drivers/scsi/libsas/sas_expander.c task->task_status.stat == SAS_DEVICE_UNKNOWN) task 127 drivers/scsi/libsas/sas_expander.c task->task_status.resp, task 128 drivers/scsi/libsas/sas_expander.c task->task_status.stat); task 129 drivers/scsi/libsas/sas_expander.c sas_free_task(task); task 130 drivers/scsi/libsas/sas_expander.c task = NULL; task 135 drivers/scsi/libsas/sas_expander.c BUG_ON(retry == 3 && task != NULL); task 136 drivers/scsi/libsas/sas_expander.c sas_free_task(task); task 29 drivers/scsi/libsas/sas_init.c struct sas_task *task = kmem_cache_zalloc(sas_task_cache, flags); task 31 drivers/scsi/libsas/sas_init.c if (task) { task 32 drivers/scsi/libsas/sas_init.c spin_lock_init(&task->task_state_lock); task 33 drivers/scsi/libsas/sas_init.c task->task_state_flags = SAS_TASK_STATE_PENDING; task 36 drivers/scsi/libsas/sas_init.c return task; task 42 drivers/scsi/libsas/sas_init.c struct sas_task *task = sas_alloc_task(flags); task 45 drivers/scsi/libsas/sas_init.c if (!task || !slow) { task 46 drivers/scsi/libsas/sas_init.c if (task) task 47 drivers/scsi/libsas/sas_init.c kmem_cache_free(sas_task_cache, task); task 52 drivers/scsi/libsas/sas_init.c task->slow_task = slow; task 53 drivers/scsi/libsas/sas_init.c slow->task = task; task 57 drivers/scsi/libsas/sas_init.c return task; task 61 drivers/scsi/libsas/sas_init.c void sas_free_task(struct sas_task *task) task 63 drivers/scsi/libsas/sas_init.c if (task) { task 64 drivers/scsi/libsas/sas_init.c kfree(task->slow_task); task 
65 drivers/scsi/libsas/sas_init.c kmem_cache_free(sas_task_cache, task); task 37 drivers/scsi/libsas/sas_scsi_host.c static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task) task 39 drivers/scsi/libsas/sas_scsi_host.c struct task_status_struct *ts = &task->task_status; task 80 drivers/scsi/libsas/sas_scsi_host.c task->dev->port->ha->sas_ha_name); task 98 drivers/scsi/libsas/sas_scsi_host.c sas_free_task(task); task 101 drivers/scsi/libsas/sas_scsi_host.c static void sas_scsi_task_done(struct sas_task *task) task 103 drivers/scsi/libsas/sas_scsi_host.c struct scsi_cmnd *sc = task->uldd_task; task 104 drivers/scsi/libsas/sas_scsi_host.c struct domain_device *dev = task->dev; task 110 drivers/scsi/libsas/sas_scsi_host.c task = NULL; task 115 drivers/scsi/libsas/sas_scsi_host.c if (unlikely(!task)) { task 123 drivers/scsi/libsas/sas_scsi_host.c sas_free_task(task); task 127 drivers/scsi/libsas/sas_scsi_host.c sas_end_task(sc, task); task 135 drivers/scsi/libsas/sas_scsi_host.c struct sas_task *task = sas_alloc_task(gfp_flags); task 138 drivers/scsi/libsas/sas_scsi_host.c if (!task) task 141 drivers/scsi/libsas/sas_scsi_host.c task->uldd_task = cmd; task 142 drivers/scsi/libsas/sas_scsi_host.c ASSIGN_SAS_TASK(cmd, task); task 144 drivers/scsi/libsas/sas_scsi_host.c task->dev = dev; task 145 drivers/scsi/libsas/sas_scsi_host.c task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */ task 147 drivers/scsi/libsas/sas_scsi_host.c task->ssp_task.retry_count = 1; task 149 drivers/scsi/libsas/sas_scsi_host.c memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8); task 150 drivers/scsi/libsas/sas_scsi_host.c task->ssp_task.task_attr = TASK_ATTR_SIMPLE; task 151 drivers/scsi/libsas/sas_scsi_host.c task->ssp_task.cmd = cmd; task 153 drivers/scsi/libsas/sas_scsi_host.c task->scatter = scsi_sglist(cmd); task 154 drivers/scsi/libsas/sas_scsi_host.c task->num_scatter = scsi_sg_count(cmd); task 155 drivers/scsi/libsas/sas_scsi_host.c task->total_xfer_len = scsi_bufflen(cmd); task 156 drivers/scsi/libsas/sas_scsi_host.c task->data_dir = cmd->sc_data_direction; task 158 drivers/scsi/libsas/sas_scsi_host.c task->task_done = sas_scsi_task_done; task 160 drivers/scsi/libsas/sas_scsi_host.c return task; task 167 drivers/scsi/libsas/sas_scsi_host.c struct sas_task *task; task 183 drivers/scsi/libsas/sas_scsi_host.c task = sas_create_task(cmd, dev, GFP_ATOMIC); task 184 drivers/scsi/libsas/sas_scsi_host.c if (!task) task 187 drivers/scsi/libsas/sas_scsi_host.c res = i->dft->lldd_execute_task(task, GFP_ATOMIC); task 195 drivers/scsi/libsas/sas_scsi_host.c sas_free_task(task); task 209 drivers/scsi/libsas/sas_scsi_host.c struct sas_task *task = TO_SAS_TASK(cmd); task 215 drivers/scsi/libsas/sas_scsi_host.c sas_end_task(cmd, task); task 278 drivers/scsi/libsas/sas_scsi_host.c static enum task_disposition sas_scsi_find_task(struct sas_task *task) task 283 drivers/scsi/libsas/sas_scsi_host.c to_sas_internal(task->dev->port->ha->core.shost->transportt); task 286 drivers/scsi/libsas/sas_scsi_host.c pr_notice("%s: aborting task 0x%p\n", __func__, task); task 287 drivers/scsi/libsas/sas_scsi_host.c res = si->dft->lldd_abort_task(task); task 289 drivers/scsi/libsas/sas_scsi_host.c spin_lock_irqsave(&task->task_state_lock, flags); task 290 drivers/scsi/libsas/sas_scsi_host.c if (task->task_state_flags & SAS_TASK_STATE_DONE) { task 291 drivers/scsi/libsas/sas_scsi_host.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 292 drivers/scsi/libsas/sas_scsi_host.c pr_debug("%s: task 0x%p is done\n", __func__, task); task 
295 drivers/scsi/libsas/sas_scsi_host.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 299 drivers/scsi/libsas/sas_scsi_host.c __func__, task); task 302 drivers/scsi/libsas/sas_scsi_host.c pr_notice("%s: querying task 0x%p\n", __func__, task); task 303 drivers/scsi/libsas/sas_scsi_host.c res = si->dft->lldd_query_task(task); task 307 drivers/scsi/libsas/sas_scsi_host.c task); task 311 drivers/scsi/libsas/sas_scsi_host.c __func__, task); task 315 drivers/scsi/libsas/sas_scsi_host.c __func__, task); task 463 drivers/scsi/libsas/sas_scsi_host.c struct sas_task *task = TO_SAS_TASK(cmd); task 480 drivers/scsi/libsas/sas_scsi_host.c if (task) task 481 drivers/scsi/libsas/sas_scsi_host.c res = i->dft->lldd_abort_task(task); task 569 drivers/scsi/libsas/sas_scsi_host.c struct sas_task *task; task 576 drivers/scsi/libsas/sas_scsi_host.c task = TO_SAS_TASK(cmd); task 579 drivers/scsi/libsas/sas_scsi_host.c if (!task) task 585 drivers/scsi/libsas/sas_scsi_host.c struct sas_task *task = TO_SAS_TASK(cmd); task 589 drivers/scsi/libsas/sas_scsi_host.c spin_lock_irqsave(&task->task_state_lock, flags); task 590 drivers/scsi/libsas/sas_scsi_host.c need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET; task 591 drivers/scsi/libsas/sas_scsi_host.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 595 drivers/scsi/libsas/sas_scsi_host.c __func__, task); task 599 drivers/scsi/libsas/sas_scsi_host.c pr_debug("trying to find task 0x%p\n", task); task 600 drivers/scsi/libsas/sas_scsi_host.c res = sas_scsi_find_task(task); task 605 drivers/scsi/libsas/sas_scsi_host.c task); task 610 drivers/scsi/libsas/sas_scsi_host.c __func__, task); task 614 drivers/scsi/libsas/sas_scsi_host.c pr_info("task 0x%p is at LU: lu recover\n", task); task 616 drivers/scsi/libsas/sas_scsi_host.c tmf_resp = sas_recover_lu(task->dev, cmd); task 619 drivers/scsi/libsas/sas_scsi_host.c SAS_ADDR(task->dev), task 629 drivers/scsi/libsas/sas_scsi_host.c task); task 630 drivers/scsi/libsas/sas_scsi_host.c tmf_resp = sas_recover_I_T(task->dev); task 633 drivers/scsi/libsas/sas_scsi_host.c struct domain_device *dev = task->dev; task 635 drivers/scsi/libsas/sas_scsi_host.c SAS_ADDR(task->dev->sas_addr)); task 643 drivers/scsi/libsas/sas_scsi_host.c struct asd_sas_port *port = task->dev->port; task 670 drivers/scsi/libsas/sas_scsi_host.c SAS_ADDR(task->dev->sas_addr), task 892 drivers/scsi/libsas/sas_scsi_host.c void sas_task_abort(struct sas_task *task) task 894 drivers/scsi/libsas/sas_scsi_host.c struct scsi_cmnd *sc = task->uldd_task; task 898 drivers/scsi/libsas/sas_scsi_host.c struct sas_task_slow *slow = task->slow_task; task 908 drivers/scsi/libsas/sas_scsi_host.c if (dev_is_sata(task->dev)) task 909 drivers/scsi/libsas/sas_scsi_host.c sas_ata_task_abort(task); task 11 drivers/scsi/libsas/sas_task.c void sas_ssp_task_response(struct device *dev, struct sas_task *task, task 14 drivers/scsi/libsas/sas_task.c struct task_status_struct *tstat = &task->task_status; task 31 drivers/scsi/libsas/sas_task.c SAS_ADDR(task->dev->sas_addr), iu->status); task 12 drivers/scsi/mvsas/mv_sas.c static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag) task 14 drivers/scsi/mvsas/mv_sas.c if (task->lldd_task) { task 16 drivers/scsi/mvsas/mv_sas.c slot = task->lldd_task; task 304 drivers/scsi/mvsas/mv_sas.c struct sas_task *task = tei->task; task 306 drivers/scsi/mvsas/mv_sas.c struct domain_device *dev = task->dev; task 322 drivers/scsi/mvsas/mv_sas.c sg_req = &task->smp_task.smp_req; task 328 
drivers/scsi/mvsas/mv_sas.c sg_resp = &task->smp_task.smp_resp; task 397 drivers/scsi/mvsas/mv_sas.c MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); task 402 drivers/scsi/mvsas/mv_sas.c dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1, task 405 drivers/scsi/mvsas/mv_sas.c dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1, task 410 drivers/scsi/mvsas/mv_sas.c static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag) task 412 drivers/scsi/mvsas/mv_sas.c struct ata_queued_cmd *qc = task->uldd_task; task 431 drivers/scsi/mvsas/mv_sas.c struct sas_task *task = tei->task; task 432 drivers/scsi/mvsas/mv_sas.c struct domain_device *dev = task->dev; task 459 drivers/scsi/mvsas/mv_sas.c if (task->data_dir == DMA_FROM_DEVICE) task 464 drivers/scsi/mvsas/mv_sas.c if (task->ata_task.use_ncq) task 467 drivers/scsi/mvsas/mv_sas.c if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI) task 473 drivers/scsi/mvsas/mv_sas.c if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag)) task 474 drivers/scsi/mvsas/mv_sas.c task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); task 480 drivers/scsi/mvsas/mv_sas.c hdr->data_len = cpu_to_le32(task->total_xfer_len); task 529 drivers/scsi/mvsas/mv_sas.c if (likely(!task->ata_task.device_control_reg_update)) task 530 drivers/scsi/mvsas/mv_sas.c task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ task 532 drivers/scsi/mvsas/mv_sas.c memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); task 535 drivers/scsi/mvsas/mv_sas.c task->ata_task.atapi_packet, 16); task 545 drivers/scsi/mvsas/mv_sas.c MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); task 547 drivers/scsi/mvsas/mv_sas.c if (task->data_dir == DMA_FROM_DEVICE) task 558 drivers/scsi/mvsas/mv_sas.c struct sas_task *task = tei->task; task 561 drivers/scsi/mvsas/mv_sas.c struct domain_device *dev = task->dev; task 586 drivers/scsi/mvsas/mv_sas.c if (task->ssp_task.enable_first_burst) { task 597 drivers/scsi/mvsas/mv_sas.c hdr->data_len = cpu_to_le32(task->total_xfer_len); task 668 drivers/scsi/mvsas/mv_sas.c memcpy(buf_cmd, &task->ssp_task.LUN, 8); task 671 drivers/scsi/mvsas/mv_sas.c buf_cmd[9] = fburst | task->ssp_task.task_attr | task 672 drivers/scsi/mvsas/mv_sas.c (task->ssp_task.task_prio << 3); task 673 drivers/scsi/mvsas/mv_sas.c memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd, task 674 drivers/scsi/mvsas/mv_sas.c task->ssp_task.cmd->cmd_len); task 690 drivers/scsi/mvsas/mv_sas.c MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); task 695 drivers/scsi/mvsas/mv_sas.c static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf, task 698 drivers/scsi/mvsas/mv_sas.c struct domain_device *dev = task->dev; task 706 drivers/scsi/mvsas/mv_sas.c struct task_status_struct *tsm = &task->task_status; task 715 drivers/scsi/mvsas/mv_sas.c task->task_done(task); task 732 drivers/scsi/mvsas/mv_sas.c if (sas_protocol_ata(task->task_proto)) { task 733 drivers/scsi/mvsas/mv_sas.c struct task_status_struct *ts = &task->task_status; task 739 drivers/scsi/mvsas/mv_sas.c task->task_done(task); task 742 drivers/scsi/mvsas/mv_sas.c struct task_status_struct *ts = &task->task_status; task 747 drivers/scsi/mvsas/mv_sas.c task->task_done(task); task 752 drivers/scsi/mvsas/mv_sas.c if (!sas_protocol_ata(task->task_proto)) { task 753 drivers/scsi/mvsas/mv_sas.c if (task->num_scatter) { task 755 drivers/scsi/mvsas/mv_sas.c task->scatter, task 756 drivers/scsi/mvsas/mv_sas.c task->num_scatter, task 757 drivers/scsi/mvsas/mv_sas.c 
task->data_dir); task 764 drivers/scsi/mvsas/mv_sas.c n_elem = task->num_scatter; task 773 drivers/scsi/mvsas/mv_sas.c task->lldd_task = NULL; task 783 drivers/scsi/mvsas/mv_sas.c tei.task = task; task 787 drivers/scsi/mvsas/mv_sas.c switch (task->task_proto) { task 802 drivers/scsi/mvsas/mv_sas.c task->task_proto); task 811 drivers/scsi/mvsas/mv_sas.c slot->task = task; task 813 drivers/scsi/mvsas/mv_sas.c task->lldd_task = slot; task 815 drivers/scsi/mvsas/mv_sas.c spin_lock(&task->task_state_lock); task 816 drivers/scsi/mvsas/mv_sas.c task->task_state_flags |= SAS_TASK_AT_INITIATOR; task 817 drivers/scsi/mvsas/mv_sas.c spin_unlock(&task->task_state_lock); task 832 drivers/scsi/mvsas/mv_sas.c if (!sas_protocol_ata(task->task_proto)) task 834 drivers/scsi/mvsas/mv_sas.c dma_unmap_sg(mvi->dev, task->scatter, n_elem, task 835 drivers/scsi/mvsas/mv_sas.c task->data_dir); task 840 drivers/scsi/mvsas/mv_sas.c static int mvs_task_exec(struct sas_task *task, gfp_t gfp_flags, task 849 drivers/scsi/mvsas/mv_sas.c mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info; task 852 drivers/scsi/mvsas/mv_sas.c rc = mvs_task_prep(task, mvi, is_tmf, tmf, &pass); task 864 drivers/scsi/mvsas/mv_sas.c int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags) task 866 drivers/scsi/mvsas/mv_sas.c return mvs_task_exec(task, gfp_flags, NULL, 0, NULL); task 875 drivers/scsi/mvsas/mv_sas.c static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, task 880 drivers/scsi/mvsas/mv_sas.c if (!slot->task) task 882 drivers/scsi/mvsas/mv_sas.c if (!sas_protocol_ata(task->task_proto)) task 884 drivers/scsi/mvsas/mv_sas.c dma_unmap_sg(mvi->dev, task->scatter, task 885 drivers/scsi/mvsas/mv_sas.c slot->n_elem, task->data_dir); task 887 drivers/scsi/mvsas/mv_sas.c switch (task->task_proto) { task 889 drivers/scsi/mvsas/mv_sas.c dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1, task 891 drivers/scsi/mvsas/mv_sas.c dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1, task 908 drivers/scsi/mvsas/mv_sas.c task->lldd_task = NULL; task 909 drivers/scsi/mvsas/mv_sas.c slot->task = NULL; task 1262 drivers/scsi/mvsas/mv_sas.c static void mvs_task_done(struct sas_task *task) task 1264 drivers/scsi/mvsas/mv_sas.c if (!del_timer(&task->slow_task->timer)) task 1266 drivers/scsi/mvsas/mv_sas.c complete(&task->slow_task->completion); task 1272 drivers/scsi/mvsas/mv_sas.c struct sas_task *task = slow->task; task 1274 drivers/scsi/mvsas/mv_sas.c task->task_state_flags |= SAS_TASK_STATE_ABORTED; task 1275 drivers/scsi/mvsas/mv_sas.c complete(&task->slow_task->completion); task 1283 drivers/scsi/mvsas/mv_sas.c struct sas_task *task = NULL; task 1286 drivers/scsi/mvsas/mv_sas.c task = sas_alloc_slow_task(GFP_KERNEL); task 1287 drivers/scsi/mvsas/mv_sas.c if (!task) task 1290 drivers/scsi/mvsas/mv_sas.c task->dev = dev; task 1291 drivers/scsi/mvsas/mv_sas.c task->task_proto = dev->tproto; task 1293 drivers/scsi/mvsas/mv_sas.c memcpy(&task->ssp_task, parameter, para_len); task 1294 drivers/scsi/mvsas/mv_sas.c task->task_done = mvs_task_done; task 1296 drivers/scsi/mvsas/mv_sas.c task->slow_task->timer.function = mvs_tmf_timedout; task 1297 drivers/scsi/mvsas/mv_sas.c task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ; task 1298 drivers/scsi/mvsas/mv_sas.c add_timer(&task->slow_task->timer); task 1300 drivers/scsi/mvsas/mv_sas.c res = mvs_task_exec(task, GFP_KERNEL, NULL, 1, tmf); task 1303 drivers/scsi/mvsas/mv_sas.c del_timer(&task->slow_task->timer); task 1308 drivers/scsi/mvsas/mv_sas.c 
wait_for_completion(&task->slow_task->completion); task 1311 drivers/scsi/mvsas/mv_sas.c if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { task 1312 drivers/scsi/mvsas/mv_sas.c if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { task 1318 drivers/scsi/mvsas/mv_sas.c if (task->task_status.resp == SAS_TASK_COMPLETE && task 1319 drivers/scsi/mvsas/mv_sas.c task->task_status.stat == SAM_STAT_GOOD) { task 1324 drivers/scsi/mvsas/mv_sas.c if (task->task_status.resp == SAS_TASK_COMPLETE && task 1325 drivers/scsi/mvsas/mv_sas.c task->task_status.stat == SAS_DATA_UNDERRUN) { task 1328 drivers/scsi/mvsas/mv_sas.c res = task->task_status.residual; task 1332 drivers/scsi/mvsas/mv_sas.c if (task->task_status.resp == SAS_TASK_COMPLETE && task 1333 drivers/scsi/mvsas/mv_sas.c task->task_status.stat == SAS_DATA_OVERRUN) { task 1341 drivers/scsi/mvsas/mv_sas.c task->task_status.resp, task 1342 drivers/scsi/mvsas/mv_sas.c task->task_status.stat); task 1343 drivers/scsi/mvsas/mv_sas.c sas_free_task(task); task 1344 drivers/scsi/mvsas/mv_sas.c task = NULL; task 1349 drivers/scsi/mvsas/mv_sas.c BUG_ON(retry == 3 && task != NULL); task 1350 drivers/scsi/mvsas/mv_sas.c sas_free_task(task); task 1427 drivers/scsi/mvsas/mv_sas.c int mvs_query_task(struct sas_task *task) task 1434 drivers/scsi/mvsas/mv_sas.c if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { task 1435 drivers/scsi/mvsas/mv_sas.c struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task; task 1436 drivers/scsi/mvsas/mv_sas.c struct domain_device *dev = task->dev; task 1441 drivers/scsi/mvsas/mv_sas.c rc = mvs_find_tag(mvi, task, &tag); task 1465 drivers/scsi/mvsas/mv_sas.c int mvs_abort_task(struct sas_task *task) task 1469 drivers/scsi/mvsas/mv_sas.c struct domain_device *dev = task->dev; task 1483 drivers/scsi/mvsas/mv_sas.c spin_lock_irqsave(&task->task_state_lock, flags); task 1484 drivers/scsi/mvsas/mv_sas.c if (task->task_state_flags & SAS_TASK_STATE_DONE) { task 1485 drivers/scsi/mvsas/mv_sas.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 1489 drivers/scsi/mvsas/mv_sas.c spin_unlock_irqrestore(&task->task_state_lock, flags); task 1491 drivers/scsi/mvsas/mv_sas.c if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { task 1492 drivers/scsi/mvsas/mv_sas.c struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task; task 1495 drivers/scsi/mvsas/mv_sas.c rc = mvs_find_tag(mvi, task, &tag); task 1512 drivers/scsi/mvsas/mv_sas.c if (task->lldd_task) { task 1513 drivers/scsi/mvsas/mv_sas.c slot = task->lldd_task; task 1521 drivers/scsi/mvsas/mv_sas.c } else if (task->task_proto & SAS_PROTOCOL_SATA || task 1522 drivers/scsi/mvsas/mv_sas.c task->task_proto & SAS_PROTOCOL_STP) { task 1524 drivers/scsi/mvsas/mv_sas.c struct mvs_slot_info *slot = task->lldd_task; task 1528 drivers/scsi/mvsas/mv_sas.c mvi, task, slot, slot_idx); task 1529 drivers/scsi/mvsas/mv_sas.c task->task_state_flags |= SAS_TASK_STATE_ABORTED; task 1530 drivers/scsi/mvsas/mv_sas.c mvs_slot_task_free(mvi, task, slot, slot_idx); task 1575 drivers/scsi/mvsas/mv_sas.c static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task, task 1578 drivers/scsi/mvsas/mv_sas.c struct mvs_device *mvi_dev = task->dev->lldd_dev; task 1579 drivers/scsi/mvsas/mv_sas.c struct task_status_struct *tstat = &task->task_status; task 1649 drivers/scsi/mvsas/mv_sas.c static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task, task 1665 drivers/scsi/mvsas/mv_sas.c switch (task->task_proto) { task 1673 drivers/scsi/mvsas/mv_sas.c 
sas_ssp_task_response(mvi->dev, task, iu); task 1688 drivers/scsi/mvsas/mv_sas.c task->ata_task.use_ncq = 0; task 1690 drivers/scsi/mvsas/mv_sas.c mvs_sata_done(mvi, task, slot_idx, err_dw0); task 1704 drivers/scsi/mvsas/mv_sas.c struct sas_task *task = slot->task; task 1713 drivers/scsi/mvsas/mv_sas.c if (unlikely(!task || !task->lldd_task || !task->dev)) task 1716 drivers/scsi/mvsas/mv_sas.c tstat = &task->task_status; task 1717 drivers/scsi/mvsas/mv_sas.c dev = task->dev; task 1720 drivers/scsi/mvsas/mv_sas.c spin_lock(&task->task_state_lock); task 1721 drivers/scsi/mvsas/mv_sas.c task->task_state_flags &= task 1723 drivers/scsi/mvsas/mv_sas.c task->task_state_flags |= SAS_TASK_STATE_DONE; task 1725 drivers/scsi/mvsas/mv_sas.c aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; task 1726 drivers/scsi/mvsas/mv_sas.c spin_unlock(&task->task_state_lock); task 1735 drivers/scsi/mvsas/mv_sas.c if (sas_protocol_ata(task->task_proto)) task 1738 drivers/scsi/mvsas/mv_sas.c mvs_slot_task_free(mvi, task, slot, slot_idx); task 1760 drivers/scsi/mvsas/mv_sas.c tstat->stat = mvs_slot_err(mvi, task, slot_idx); task 1765 drivers/scsi/mvsas/mv_sas.c switch (task->task_proto) { task 1776 drivers/scsi/mvsas/mv_sas.c sas_ssp_task_response(mvi->dev, task, iu); task 1782 drivers/scsi/mvsas/mv_sas.c struct scatterlist *sg_resp = &task->smp_task.smp_resp; task 1795 drivers/scsi/mvsas/mv_sas.c tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0); task 1812 drivers/scsi/mvsas/mv_sas.c if (sas_protocol_ata(task->task_proto) && !mvi_dev->running_req) task 1815 drivers/scsi/mvsas/mv_sas.c mvs_slot_task_free(mvi, task, slot, slot_idx); task 1819 drivers/scsi/mvsas/mv_sas.c if (task->task_done) task 1820 drivers/scsi/mvsas/mv_sas.c task->task_done(task); task 1845 drivers/scsi/mvsas/mv_sas.c struct sas_task *task; task 1847 drivers/scsi/mvsas/mv_sas.c task = slot->task; task 1849 drivers/scsi/mvsas/mv_sas.c if (dev && task->dev != dev) task 1853 drivers/scsi/mvsas/mv_sas.c slot_idx, slot->slot_tag, task); task 309 drivers/scsi/mvsas/mv_sas.h struct sas_task *task; task 418 drivers/scsi/mvsas/mv_sas.h struct sas_task *task; task 441 drivers/scsi/mvsas/mv_sas.h int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags); task 442 drivers/scsi/mvsas/mv_sas.h int mvs_abort_task(struct sas_task *task); task 453 drivers/scsi/mvsas/mv_sas.h int mvs_query_task(struct sas_task *task); task 1548 drivers/scsi/pm8001/pm8001_hwi.c if ((tag != 0xFFFFFFFF) && (ccb->task == t)) task 1629 drivers/scsi/pm8001/pm8001_hwi.c if ((tag != 0xFFFFFFFF) && (ccb->task == t)) task 1724 drivers/scsi/pm8001/pm8001_hwi.c struct sas_task *task = NULL; task 1735 drivers/scsi/pm8001/pm8001_hwi.c task = sas_alloc_slow_task(GFP_ATOMIC); task 1737 drivers/scsi/pm8001/pm8001_hwi.c if (!task) { task 1743 drivers/scsi/pm8001/pm8001_hwi.c task->task_done = pm8001_task_done; task 1752 drivers/scsi/pm8001/pm8001_hwi.c ccb->task = task; task 1774 drivers/scsi/pm8001/pm8001_hwi.c struct sas_task *task = NULL; task 1780 drivers/scsi/pm8001/pm8001_hwi.c task = sas_alloc_slow_task(GFP_ATOMIC); task 1782 drivers/scsi/pm8001/pm8001_hwi.c if (!task) { task 1787 drivers/scsi/pm8001/pm8001_hwi.c task->task_done = pm8001_task_done; task 1791 drivers/scsi/pm8001/pm8001_hwi.c sas_free_task(task); task 1802 drivers/scsi/pm8001/pm8001_hwi.c sas_free_task(task); task 1808 drivers/scsi/pm8001/pm8001_hwi.c task->dev = dev; task 1809 drivers/scsi/pm8001/pm8001_hwi.c task->dev->lldd_dev = pm8001_ha_dev; task 1814 drivers/scsi/pm8001/pm8001_hwi.c ccb->task = task; task 
1836 drivers/scsi/pm8001/pm8001_hwi.c sas_free_task(task); task 1878 drivers/scsi/pm8001/pm8001_hwi.c t = ccb->task; task 2120 drivers/scsi/pm8001/pm8001_hwi.c t = ccb->task; task 2325 drivers/scsi/pm8001/pm8001_hwi.c t = ccb->task; task 2697 drivers/scsi/pm8001/pm8001_hwi.c t = ccb->task; task 2718 drivers/scsi/pm8001/pm8001_hwi.c t = ccb->task; task 2917 drivers/scsi/pm8001/pm8001_hwi.c t = ccb->task; task 3115 drivers/scsi/pm8001/pm8001_hwi.c ccb->task = NULL; task 3134 drivers/scsi/pm8001/pm8001_hwi.c ccb->task = NULL; task 3199 drivers/scsi/pm8001/pm8001_hwi.c ccb->task = NULL; task 3618 drivers/scsi/pm8001/pm8001_hwi.c ccb->task = NULL; task 3697 drivers/scsi/pm8001/pm8001_hwi.c ccb->task = NULL; task 3743 drivers/scsi/pm8001/pm8001_hwi.c t = ccb->task; task 4242 drivers/scsi/pm8001/pm8001_hwi.c struct sas_task *task = ccb->task; task 4243 drivers/scsi/pm8001/pm8001_hwi.c struct domain_device *dev = task->dev; task 4255 drivers/scsi/pm8001/pm8001_hwi.c sg_req = &task->smp_task.smp_req; task 4261 drivers/scsi/pm8001/pm8001_hwi.c sg_resp = &task->smp_task.smp_resp; task 4278 drivers/scsi/pm8001/pm8001_hwi.c cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req)); task 4280 drivers/scsi/pm8001/pm8001_hwi.c cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4); task 4282 drivers/scsi/pm8001/pm8001_hwi.c cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp)); task 4284 drivers/scsi/pm8001/pm8001_hwi.c cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4); task 4294 drivers/scsi/pm8001/pm8001_hwi.c dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1, task 4297 drivers/scsi/pm8001/pm8001_hwi.c dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1, task 4310 drivers/scsi/pm8001/pm8001_hwi.c struct sas_task *task = ccb->task; task 4311 drivers/scsi/pm8001/pm8001_hwi.c struct domain_device *dev = task->dev; task 4320 drivers/scsi/pm8001/pm8001_hwi.c memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8); task 4322 drivers/scsi/pm8001/pm8001_hwi.c cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0);/*0 for task 4324 drivers/scsi/pm8001/pm8001_hwi.c ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len); task 4327 drivers/scsi/pm8001/pm8001_hwi.c if (task->ssp_task.enable_first_burst) task 4329 drivers/scsi/pm8001/pm8001_hwi.c ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3); task 4330 drivers/scsi/pm8001/pm8001_hwi.c ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7); task 4331 drivers/scsi/pm8001/pm8001_hwi.c memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd, task 4332 drivers/scsi/pm8001/pm8001_hwi.c task->ssp_task.cmd->cmd_len); task 4336 drivers/scsi/pm8001/pm8001_hwi.c if (task->num_scatter > 1) { task 4337 drivers/scsi/pm8001/pm8001_hwi.c pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd); task 4343 drivers/scsi/pm8001/pm8001_hwi.c } else if (task->num_scatter == 1) { task 4344 drivers/scsi/pm8001/pm8001_hwi.c u64 dma_addr = sg_dma_address(task->scatter); task 4347 drivers/scsi/pm8001/pm8001_hwi.c ssp_cmd.len = cpu_to_le32(task->total_xfer_len); task 4349 drivers/scsi/pm8001/pm8001_hwi.c } else if (task->num_scatter == 0) { task 4352 drivers/scsi/pm8001/pm8001_hwi.c ssp_cmd.len = cpu_to_le32(task->total_xfer_len); task 4362 drivers/scsi/pm8001/pm8001_hwi.c struct sas_task *task = ccb->task; task 4363 drivers/scsi/pm8001/pm8001_hwi.c struct domain_device *dev = task->dev; task 4377 drivers/scsi/pm8001/pm8001_hwi.c if (task->data_dir == DMA_NONE) { task 4380 drivers/scsi/pm8001/pm8001_hwi.c } else if 
(likely(!task->ata_task.device_control_reg_update)) { task 4381 drivers/scsi/pm8001/pm8001_hwi.c if (task->ata_task.dma_xfer) { task 4388 drivers/scsi/pm8001/pm8001_hwi.c if (task->ata_task.use_ncq && task 4394 drivers/scsi/pm8001/pm8001_hwi.c if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) { task 4395 drivers/scsi/pm8001/pm8001_hwi.c task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); task 4398 drivers/scsi/pm8001/pm8001_hwi.c dir = data_dir_flags[task->data_dir] << 8; task 4401 drivers/scsi/pm8001/pm8001_hwi.c sata_cmd.data_len = cpu_to_le32(task->total_xfer_len); task 4404 drivers/scsi/pm8001/pm8001_hwi.c sata_cmd.sata_fis = task->ata_task.fis; task 4405 drivers/scsi/pm8001/pm8001_hwi.c if (likely(!task->ata_task.device_control_reg_update)) task 4409 drivers/scsi/pm8001/pm8001_hwi.c if (task->num_scatter > 1) { task 4410 drivers/scsi/pm8001/pm8001_hwi.c pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd); task 4416 drivers/scsi/pm8001/pm8001_hwi.c } else if (task->num_scatter == 1) { task 4417 drivers/scsi/pm8001/pm8001_hwi.c u64 dma_addr = sg_dma_address(task->scatter); task 4420 drivers/scsi/pm8001/pm8001_hwi.c sata_cmd.len = cpu_to_le32(task->total_xfer_len); task 4422 drivers/scsi/pm8001/pm8001_hwi.c } else if (task->num_scatter == 0) { task 4425 drivers/scsi/pm8001/pm8001_hwi.c sata_cmd.len = cpu_to_le32(task->total_xfer_len); task 4437 drivers/scsi/pm8001/pm8001_hwi.c ts = &task->task_status; task 4439 drivers/scsi/pm8001/pm8001_hwi.c spin_lock_irqsave(&task->task_state_lock, flags); task 4442 drivers/scsi/pm8001/pm8001_hwi.c task->task_state_flags &= ~SAS_TASK_STATE_PENDING; task 4443 drivers/scsi/pm8001/pm8001_hwi.c task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; task 4444 drivers/scsi/pm8001/pm8001_hwi.c task->task_state_flags |= SAS_TASK_STATE_DONE; task 4445 drivers/scsi/pm8001/pm8001_hwi.c if (unlikely((task->task_state_flags & task 4447 drivers/scsi/pm8001/pm8001_hwi.c spin_unlock_irqrestore(&task->task_state_lock, task 4452 drivers/scsi/pm8001/pm8001_hwi.c "\n", task, ts->resp, ts->stat)); task 4453 drivers/scsi/pm8001/pm8001_hwi.c pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); task 4455 drivers/scsi/pm8001/pm8001_hwi.c spin_unlock_irqrestore(&task->task_state_lock, task 4457 drivers/scsi/pm8001/pm8001_hwi.c pm8001_ccb_task_free_done(pm8001_ha, task, task 4715 drivers/scsi/pm8001/pm8001_hwi.c struct sas_task *task = ccb->task; task 4716 drivers/scsi/pm8001/pm8001_hwi.c struct domain_device *dev = task->dev; task 4727 drivers/scsi/pm8001/pm8001_hwi.c memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8); task 378 drivers/scsi/pm8001/pm8001_init.c pm8001_ha->ccb_info[i].task = NULL; task 49 drivers/scsi/pm8001/pm8001_sas.c static int pm8001_find_tag(struct sas_task *task, u32 *tag) task 51 drivers/scsi/pm8001/pm8001_sas.c if (task->lldd_task) { task 53 drivers/scsi/pm8001/pm8001_sas.c ccb = task->lldd_task; task 300 drivers/scsi/pm8001/pm8001_sas.c u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag) task 302 drivers/scsi/pm8001/pm8001_sas.c struct ata_queued_cmd *qc = task->uldd_task; task 378 drivers/scsi/pm8001/pm8001_sas.c static int pm8001_task_exec(struct sas_task *task, task 381 drivers/scsi/pm8001/pm8001_sas.c struct domain_device *dev = task->dev; task 385 drivers/scsi/pm8001/pm8001_sas.c struct sas_task *t = task; task 398 drivers/scsi/pm8001/pm8001_sas.c pm8001_ha = pm8001_find_ha_by_dev(task->dev); task 453 drivers/scsi/pm8001/pm8001_sas.c ccb->task = t; task 511 drivers/scsi/pm8001/pm8001_sas.c int pm8001_queue_command(struct 
sas_task *task, gfp_t gfp_flags) task 513 drivers/scsi/pm8001/pm8001_sas.c return pm8001_task_exec(task, gfp_flags, 0, NULL); task 524 drivers/scsi/pm8001/pm8001_sas.c struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx) task 526 drivers/scsi/pm8001/pm8001_sas.c if (!ccb->task) task 528 drivers/scsi/pm8001/pm8001_sas.c if (!sas_protocol_ata(task->task_proto)) task 530 drivers/scsi/pm8001/pm8001_sas.c dma_unmap_sg(pm8001_ha->dev, task->scatter, task 531 drivers/scsi/pm8001/pm8001_sas.c task->num_scatter, task->data_dir); task 533 drivers/scsi/pm8001/pm8001_sas.c switch (task->task_proto) { task 535 drivers/scsi/pm8001/pm8001_sas.c dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1, task 537 drivers/scsi/pm8001/pm8001_sas.c dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1, task 548 drivers/scsi/pm8001/pm8001_sas.c task->lldd_task = NULL; task 549 drivers/scsi/pm8001/pm8001_sas.c ccb->task = NULL; task 681 drivers/scsi/pm8001/pm8001_sas.c void pm8001_task_done(struct sas_task *task) task 683 drivers/scsi/pm8001/pm8001_sas.c if (!del_timer(&task->slow_task->timer)) task 685 drivers/scsi/pm8001/pm8001_sas.c complete(&task->slow_task->completion); task 691 drivers/scsi/pm8001/pm8001_sas.c struct sas_task *task = slow->task; task 693 drivers/scsi/pm8001/pm8001_sas.c task->task_state_flags |= SAS_TASK_STATE_ABORTED; task 694 drivers/scsi/pm8001/pm8001_sas.c complete(&task->slow_task->completion); task 713 drivers/scsi/pm8001/pm8001_sas.c struct sas_task *task = NULL; task 719 drivers/scsi/pm8001/pm8001_sas.c task = sas_alloc_slow_task(GFP_KERNEL); task 720 drivers/scsi/pm8001/pm8001_sas.c if (!task) task 723 drivers/scsi/pm8001/pm8001_sas.c task->dev = dev; task 724 drivers/scsi/pm8001/pm8001_sas.c task->task_proto = dev->tproto; task 725 drivers/scsi/pm8001/pm8001_sas.c memcpy(&task->ssp_task, parameter, para_len); task 726 drivers/scsi/pm8001/pm8001_sas.c task->task_done = pm8001_task_done; task 727 drivers/scsi/pm8001/pm8001_sas.c task->slow_task->timer.function = pm8001_tmf_timedout; task 728 drivers/scsi/pm8001/pm8001_sas.c task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ; task 729 drivers/scsi/pm8001/pm8001_sas.c add_timer(&task->slow_task->timer); task 731 drivers/scsi/pm8001/pm8001_sas.c res = pm8001_task_exec(task, GFP_KERNEL, 1, tmf); task 734 drivers/scsi/pm8001/pm8001_sas.c del_timer(&task->slow_task->timer); task 740 drivers/scsi/pm8001/pm8001_sas.c wait_for_completion(&task->slow_task->completion); task 749 drivers/scsi/pm8001/pm8001_sas.c if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { task 750 drivers/scsi/pm8001/pm8001_sas.c if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { task 758 drivers/scsi/pm8001/pm8001_sas.c if (task->task_status.resp == SAS_TASK_COMPLETE && task 759 drivers/scsi/pm8001/pm8001_sas.c task->task_status.stat == SAM_STAT_GOOD) { task 764 drivers/scsi/pm8001/pm8001_sas.c if (task->task_status.resp == SAS_TASK_COMPLETE && task 765 drivers/scsi/pm8001/pm8001_sas.c task->task_status.stat == SAS_DATA_UNDERRUN) { task 768 drivers/scsi/pm8001/pm8001_sas.c res = task->task_status.residual; task 772 drivers/scsi/pm8001/pm8001_sas.c if (task->task_status.resp == SAS_TASK_COMPLETE && task 773 drivers/scsi/pm8001/pm8001_sas.c task->task_status.stat == SAS_DATA_OVERRUN) { task 783 drivers/scsi/pm8001/pm8001_sas.c task->task_status.resp, task 784 drivers/scsi/pm8001/pm8001_sas.c task->task_status.stat)); task 785 drivers/scsi/pm8001/pm8001_sas.c sas_free_task(task); task 786 drivers/scsi/pm8001/pm8001_sas.c task = NULL; 
task 790 drivers/scsi/pm8001/pm8001_sas.c BUG_ON(retry == 3 && task != NULL);
task 791 drivers/scsi/pm8001/pm8001_sas.c sas_free_task(task);
task 803 drivers/scsi/pm8001/pm8001_sas.c struct sas_task *task = NULL;
task 806 drivers/scsi/pm8001/pm8001_sas.c task = sas_alloc_slow_task(GFP_KERNEL);
task 807 drivers/scsi/pm8001/pm8001_sas.c if (!task)
task 810 drivers/scsi/pm8001/pm8001_sas.c task->dev = dev;
task 811 drivers/scsi/pm8001/pm8001_sas.c task->task_proto = dev->tproto;
task 812 drivers/scsi/pm8001/pm8001_sas.c task->task_done = pm8001_task_done;
task 813 drivers/scsi/pm8001/pm8001_sas.c task->slow_task->timer.function = pm8001_tmf_timedout;
task 814 drivers/scsi/pm8001/pm8001_sas.c task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
task 815 drivers/scsi/pm8001/pm8001_sas.c add_timer(&task->slow_task->timer);
task 823 drivers/scsi/pm8001/pm8001_sas.c ccb->task = task;
task 830 drivers/scsi/pm8001/pm8001_sas.c del_timer(&task->slow_task->timer);
task 836 drivers/scsi/pm8001/pm8001_sas.c wait_for_completion(&task->slow_task->completion);
task 839 drivers/scsi/pm8001/pm8001_sas.c if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
task 840 drivers/scsi/pm8001/pm8001_sas.c if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
task 847 drivers/scsi/pm8001/pm8001_sas.c if (task->task_status.resp == SAS_TASK_COMPLETE &&
task 848 drivers/scsi/pm8001/pm8001_sas.c task->task_status.stat == SAM_STAT_GOOD) {
task 857 drivers/scsi/pm8001/pm8001_sas.c task->task_status.resp,
task 858 drivers/scsi/pm8001/pm8001_sas.c task->task_status.stat));
task 859 drivers/scsi/pm8001/pm8001_sas.c sas_free_task(task);
task 860 drivers/scsi/pm8001/pm8001_sas.c task = NULL;
task 864 drivers/scsi/pm8001/pm8001_sas.c BUG_ON(retry == 3 && task != NULL);
task 865 drivers/scsi/pm8001/pm8001_sas.c sas_free_task(task);
task 937 drivers/scsi/pm8001/pm8001_sas.c struct sas_task *task;
task 958 drivers/scsi/pm8001/pm8001_sas.c task = ccb->task;
task 959 drivers/scsi/pm8001/pm8001_sas.c if (!task || !task->task_done)
task 961 drivers/scsi/pm8001/pm8001_sas.c if (task_to_close && (task != task_to_close))
task 963 drivers/scsi/pm8001/pm8001_sas.c ts = &task->task_status;
task 970 drivers/scsi/pm8001/pm8001_sas.c spin_lock_irqsave(&task->task_state_lock, flags1);
task 971 drivers/scsi/pm8001/pm8001_sas.c task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
task 972 drivers/scsi/pm8001/pm8001_sas.c task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
task 973 drivers/scsi/pm8001/pm8001_sas.c task->task_state_flags |= SAS_TASK_STATE_DONE;
task 974 drivers/scsi/pm8001/pm8001_sas.c if (unlikely((task->task_state_flags
task 976 drivers/scsi/pm8001/pm8001_sas.c spin_unlock_irqrestore(&task->task_state_lock,
task 978 drivers/scsi/pm8001/pm8001_sas.c pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
task 980 drivers/scsi/pm8001/pm8001_sas.c spin_unlock_irqrestore(&task->task_state_lock,
task 982 drivers/scsi/pm8001/pm8001_sas.c pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
task 985 drivers/scsi/pm8001/pm8001_sas.c task->task_done(task);
task 1137 drivers/scsi/pm8001/pm8001_sas.c int pm8001_query_task(struct sas_task *task)
task 1144 drivers/scsi/pm8001/pm8001_sas.c if (unlikely(!task || !task->lldd_task || !task->dev))
task 1147 drivers/scsi/pm8001/pm8001_sas.c if (task->task_proto & SAS_PROTOCOL_SSP) {
task 1148 drivers/scsi/pm8001/pm8001_sas.c struct scsi_cmnd *cmnd = task->uldd_task;
task 1149 drivers/scsi/pm8001/pm8001_sas.c struct domain_device *dev = task->dev;
task 1154 drivers/scsi/pm8001/pm8001_sas.c rc = pm8001_find_tag(task, &tag);
task 1187 drivers/scsi/pm8001/pm8001_sas.c int pm8001_abort_task(struct sas_task *task)
task 1199 drivers/scsi/pm8001/pm8001_sas.c if (unlikely(!task || !task->lldd_task || !task->dev))
task 1201 drivers/scsi/pm8001/pm8001_sas.c dev = task->dev;
task 1205 drivers/scsi/pm8001/pm8001_sas.c rc = pm8001_find_tag(task, &tag);
task 1207 drivers/scsi/pm8001/pm8001_sas.c pm8001_printk("no tag for task:%p\n", task);
task 1210 drivers/scsi/pm8001/pm8001_sas.c spin_lock_irqsave(&task->task_state_lock, flags);
task 1211 drivers/scsi/pm8001/pm8001_sas.c if (task->task_state_flags & SAS_TASK_STATE_DONE) {
task 1212 drivers/scsi/pm8001/pm8001_sas.c spin_unlock_irqrestore(&task->task_state_lock, flags);
task 1215 drivers/scsi/pm8001/pm8001_sas.c task->task_state_flags |= SAS_TASK_STATE_ABORTED;
task 1216 drivers/scsi/pm8001/pm8001_sas.c if (task->slow_task == NULL) {
task 1218 drivers/scsi/pm8001/pm8001_sas.c task->slow_task = &slow_task;
task 1220 drivers/scsi/pm8001/pm8001_sas.c spin_unlock_irqrestore(&task->task_state_lock, flags);
task 1221 drivers/scsi/pm8001/pm8001_sas.c if (task->task_proto & SAS_PROTOCOL_SSP) {
task 1222 drivers/scsi/pm8001/pm8001_sas.c struct scsi_cmnd *cmnd = task->uldd_task;
task 1229 drivers/scsi/pm8001/pm8001_sas.c } else if (task->task_proto & SAS_PROTOCOL_SATA ||
task 1230 drivers/scsi/pm8001/pm8001_sas.c task->task_proto & SAS_PROTOCOL_STP) {
task 1277 drivers/scsi/pm8001/pm8001_sas.c &task->slow_task->completion,
task 1293 drivers/scsi/pm8001/pm8001_sas.c } else if (task->task_proto & SAS_PROTOCOL_SMP) {
task 1300 drivers/scsi/pm8001/pm8001_sas.c spin_lock_irqsave(&task->task_state_lock, flags);
task 1301 drivers/scsi/pm8001/pm8001_sas.c if (task->slow_task == &slow_task)
task 1302 drivers/scsi/pm8001/pm8001_sas.c task->slow_task = NULL;
task 1303 drivers/scsi/pm8001/pm8001_sas.c spin_unlock_irqrestore(&task->task_state_lock, flags);
task 299 drivers/scsi/pm8001/pm8001_sas.h struct sas_task *task;
task 637 drivers/scsi/pm8001/pm8001_sas.h u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag);
task 639 drivers/scsi/pm8001/pm8001_sas.h struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx);
task 644 drivers/scsi/pm8001/pm8001_sas.h int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags);
task 645 drivers/scsi/pm8001/pm8001_sas.h int pm8001_abort_task(struct sas_task *task);
task 654 drivers/scsi/pm8001/pm8001_sas.h int pm8001_query_task(struct sas_task *task);
task 711 drivers/scsi/pm8001/pm8001_sas.h void pm8001_task_done(struct sas_task *task);
task 712 drivers/scsi/pm8001/pm8001_sas.h void pm8001_free_task(struct sas_task *task);
task 732 drivers/scsi/pm8001/pm8001_sas.h struct sas_task *task, struct pm8001_ccb_info *ccb,
task 735 drivers/scsi/pm8001/pm8001_sas.h pm8001_ccb_task_free(pm8001_ha, task, ccb, ccb_idx);
task 738 drivers/scsi/pm8001/pm8001_sas.h task->task_done(task);
task 1407 drivers/scsi/pm8001/pm80xx_hwi.c struct sas_task *task = NULL;
task 1418 drivers/scsi/pm8001/pm80xx_hwi.c task = sas_alloc_slow_task(GFP_ATOMIC);
task 1420 drivers/scsi/pm8001/pm80xx_hwi.c if (!task) {
task 1426 drivers/scsi/pm8001/pm80xx_hwi.c task->task_done = pm8001_task_done;
task 1430 drivers/scsi/pm8001/pm80xx_hwi.c sas_free_task(task);
task 1437 drivers/scsi/pm8001/pm80xx_hwi.c ccb->task = task;
task 1448 drivers/scsi/pm8001/pm80xx_hwi.c sas_free_task(task);
task 1460 drivers/scsi/pm8001/pm80xx_hwi.c struct sas_task *task = NULL;
task 1466 drivers/scsi/pm8001/pm80xx_hwi.c task = sas_alloc_slow_task(GFP_ATOMIC);
task 1468 drivers/scsi/pm8001/pm80xx_hwi.c if (!task) {
task 1473 drivers/scsi/pm8001/pm80xx_hwi.c task->task_done = pm8001_task_done;
task 1477 drivers/scsi/pm8001/pm80xx_hwi.c sas_free_task(task);
task 1488 drivers/scsi/pm8001/pm80xx_hwi.c sas_free_task(task);
task 1495 drivers/scsi/pm8001/pm80xx_hwi.c task->dev = dev;
task 1496 drivers/scsi/pm8001/pm80xx_hwi.c task->dev->lldd_dev = pm8001_ha_dev;
task 1501 drivers/scsi/pm8001/pm80xx_hwi.c ccb->task = task;
task 1524 drivers/scsi/pm8001/pm80xx_hwi.c sas_free_task(task);
task 1565 drivers/scsi/pm8001/pm80xx_hwi.c t = ccb->task;
task 1821 drivers/scsi/pm8001/pm80xx_hwi.c t = ccb->task;
task 2035 drivers/scsi/pm8001/pm80xx_hwi.c t = ccb->task;
task 2412 drivers/scsi/pm8001/pm80xx_hwi.c t = ccb->task;
task 2651 drivers/scsi/pm8001/pm80xx_hwi.c t = ccb->task;
task 3890 drivers/scsi/pm8001/pm80xx_hwi.c struct sas_task *task = ccb->task;
task 3891 drivers/scsi/pm8001/pm80xx_hwi.c struct domain_device *dev = task->dev;
task 3906 drivers/scsi/pm8001/pm80xx_hwi.c sg_req = &task->smp_task.smp_req;
task 3912 drivers/scsi/pm8001/pm80xx_hwi.c sg_resp = &task->smp_task.smp_resp;
task 3938 drivers/scsi/pm8001/pm80xx_hwi.c tmp_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
task 3952 drivers/scsi/pm8001/pm80xx_hwi.c (&task->smp_task.smp_req) + 4);
task 3955 drivers/scsi/pm8001/pm80xx_hwi.c cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-8);
task 3958 drivers/scsi/pm8001/pm80xx_hwi.c (&task->smp_task.smp_resp));
task 3961 drivers/scsi/pm8001/pm80xx_hwi.c (&task->smp_task.smp_resp)-4);
task 3965 drivers/scsi/pm8001/pm80xx_hwi.c (&task->smp_task.smp_req));
task 3967 drivers/scsi/pm8001/pm80xx_hwi.c cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4);
task 3970 drivers/scsi/pm8001/pm80xx_hwi.c (&task->smp_task.smp_resp));
task 3973 drivers/scsi/pm8001/pm80xx_hwi.c ((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
task 4003 drivers/scsi/pm8001/pm80xx_hwi.c dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
task 4006 drivers/scsi/pm8001/pm80xx_hwi.c dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
task 4011 drivers/scsi/pm8001/pm80xx_hwi.c static int check_enc_sas_cmd(struct sas_task *task)
task 4013 drivers/scsi/pm8001/pm80xx_hwi.c u8 cmd = task->ssp_task.cmd->cmnd[0];
task 4021 drivers/scsi/pm8001/pm80xx_hwi.c static int check_enc_sat_cmd(struct sas_task *task)
task 4024 drivers/scsi/pm8001/pm80xx_hwi.c switch (task->ata_task.fis.command) {
task 4052 drivers/scsi/pm8001/pm80xx_hwi.c struct sas_task *task = ccb->task;
task 4053 drivers/scsi/pm8001/pm80xx_hwi.c struct domain_device *dev = task->dev;
task 4064 drivers/scsi/pm8001/pm80xx_hwi.c memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
task 4070 drivers/scsi/pm8001/pm80xx_hwi.c cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0);
task 4071 drivers/scsi/pm8001/pm80xx_hwi.c ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
task 4074 drivers/scsi/pm8001/pm80xx_hwi.c if (task->ssp_task.enable_first_burst)
task 4076 drivers/scsi/pm8001/pm80xx_hwi.c ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3);
task 4077 drivers/scsi/pm8001/pm80xx_hwi.c ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
task 4078 drivers/scsi/pm8001/pm80xx_hwi.c memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
task 4079 drivers/scsi/pm8001/pm80xx_hwi.c task->ssp_task.cmd->cmd_len);
task 4085 drivers/scsi/pm8001/pm80xx_hwi.c !(pm8001_ha->encrypt_info.status) && check_enc_sas_cmd(task)) {
task 4088 drivers/scsi/pm8001/pm80xx_hwi.c task->ssp_task.cmd->cmnd[0]));
task 4092 drivers/scsi/pm8001/pm80xx_hwi.c ((data_dir_flags[task->data_dir] << 8) | 0x20 | 0x0);
task 4095 drivers/scsi/pm8001/pm80xx_hwi.c if (task->num_scatter > 1) {
task 4096 drivers/scsi/pm8001/pm80xx_hwi.c pm8001_chip_make_sg(task->scatter,
task 4105 drivers/scsi/pm8001/pm80xx_hwi.c } else if (task->num_scatter == 1) {
task 4106 drivers/scsi/pm8001/pm80xx_hwi.c u64 dma_addr = sg_dma_address(task->scatter);
task 4111 drivers/scsi/pm8001/pm80xx_hwi.c ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
task 4126 drivers/scsi/pm8001/pm80xx_hwi.c pm8001_chip_make_sg(task->scatter, 1,
task 4137 drivers/scsi/pm8001/pm80xx_hwi.c } else if (task->num_scatter == 0) {
task 4140 drivers/scsi/pm8001/pm80xx_hwi.c ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
task 4146 drivers/scsi/pm8001/pm80xx_hwi.c ssp_cmd.twk_val0 = cpu_to_le32((task->ssp_task.cmd->cmnd[2] << 24) |
task 4147 drivers/scsi/pm8001/pm80xx_hwi.c (task->ssp_task.cmd->cmnd[3] << 16) |
task 4148 drivers/scsi/pm8001/pm80xx_hwi.c (task->ssp_task.cmd->cmnd[4] << 8) |
task 4149 drivers/scsi/pm8001/pm80xx_hwi.c (task->ssp_task.cmd->cmnd[5]));
task 4153 drivers/scsi/pm8001/pm80xx_hwi.c task->ssp_task.cmd->cmnd[0], q_index));
task 4155 drivers/scsi/pm8001/pm80xx_hwi.c if (task->num_scatter > 1) {
task 4156 drivers/scsi/pm8001/pm80xx_hwi.c pm8001_chip_make_sg(task->scatter, ccb->n_elem,
task 4165 drivers/scsi/pm8001/pm80xx_hwi.c } else if (task->num_scatter == 1) {
task 4166 drivers/scsi/pm8001/pm80xx_hwi.c u64 dma_addr = sg_dma_address(task->scatter);
task 4170 drivers/scsi/pm8001/pm80xx_hwi.c ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
task 4185 drivers/scsi/pm8001/pm80xx_hwi.c pm8001_chip_make_sg(task->scatter, 1,
task 4196 drivers/scsi/pm8001/pm80xx_hwi.c } else if (task->num_scatter == 0) {
task 4199 drivers/scsi/pm8001/pm80xx_hwi.c ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
task 4212 drivers/scsi/pm8001/pm80xx_hwi.c struct sas_task *task = ccb->task;
task 4213 drivers/scsi/pm8001/pm80xx_hwi.c struct domain_device *dev = task->dev;
task 4231 drivers/scsi/pm8001/pm80xx_hwi.c if (task->data_dir == DMA_NONE) {
task 4234 drivers/scsi/pm8001/pm80xx_hwi.c } else if (likely(!task->ata_task.device_control_reg_update)) {
task 4235 drivers/scsi/pm8001/pm80xx_hwi.c if (task->ata_task.dma_xfer) {
task 4242 drivers/scsi/pm8001/pm80xx_hwi.c if (task->ata_task.use_ncq &&
task 4248 drivers/scsi/pm8001/pm80xx_hwi.c if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
task 4249 drivers/scsi/pm8001/pm80xx_hwi.c task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
task 4252 drivers/scsi/pm8001/pm80xx_hwi.c dir = data_dir_flags[task->data_dir] << 8;
task 4255 drivers/scsi/pm8001/pm80xx_hwi.c sata_cmd.data_len = cpu_to_le32(task->total_xfer_len);
task 4257 drivers/scsi/pm8001/pm80xx_hwi.c sata_cmd.sata_fis = task->ata_task.fis;
task 4258 drivers/scsi/pm8001/pm80xx_hwi.c if (likely(!task->ata_task.device_control_reg_update))
task 4264 drivers/scsi/pm8001/pm80xx_hwi.c !(pm8001_ha->encrypt_info.status) && check_enc_sat_cmd(task)) {
task 4276 drivers/scsi/pm8001/pm80xx_hwi.c if (task->num_scatter > 1) {
task 4277 drivers/scsi/pm8001/pm80xx_hwi.c pm8001_chip_make_sg(task->scatter,
task 4284 drivers/scsi/pm8001/pm80xx_hwi.c } else if (task->num_scatter == 1) {
task 4285 drivers/scsi/pm8001/pm80xx_hwi.c u64 dma_addr = sg_dma_address(task->scatter);
task 4288 drivers/scsi/pm8001/pm80xx_hwi.c sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
task 4303 drivers/scsi/pm8001/pm80xx_hwi.c pm8001_chip_make_sg(task->scatter, 1,
task 4315 drivers/scsi/pm8001/pm80xx_hwi.c } else if (task->num_scatter == 0) {
task 4318 drivers/scsi/pm8001/pm80xx_hwi.c sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
task 4342 drivers/scsi/pm8001/pm80xx_hwi.c if (task->num_scatter > 1) {
task 4343 drivers/scsi/pm8001/pm80xx_hwi.c pm8001_chip_make_sg(task->scatter,
task 4350 drivers/scsi/pm8001/pm80xx_hwi.c } else if (task->num_scatter == 1) {
task 4351 drivers/scsi/pm8001/pm80xx_hwi.c u64 dma_addr = sg_dma_address(task->scatter);
task 4354 drivers/scsi/pm8001/pm80xx_hwi.c sata_cmd.len = cpu_to_le32(task->total_xfer_len);
task 4369 drivers/scsi/pm8001/pm80xx_hwi.c pm8001_chip_make_sg(task->scatter, 1,
task 4380 drivers/scsi/pm8001/pm80xx_hwi.c } else if (task->num_scatter == 0) {
task 4383 drivers/scsi/pm8001/pm80xx_hwi.c sata_cmd.len = cpu_to_le32(task->total_xfer_len);
task 4388 drivers/scsi/pm8001/pm80xx_hwi.c cpu_to_le32(((task->ata_task.atapi_packet[0]) |
task 4389 drivers/scsi/pm8001/pm80xx_hwi.c (task->ata_task.atapi_packet[1] << 8) |
task 4390 drivers/scsi/pm8001/pm80xx_hwi.c (task->ata_task.atapi_packet[2] << 16) |
task 4391 drivers/scsi/pm8001/pm80xx_hwi.c (task->ata_task.atapi_packet[3] << 24)));
task 4393 drivers/scsi/pm8001/pm80xx_hwi.c cpu_to_le32(((task->ata_task.atapi_packet[4]) |
task 4394 drivers/scsi/pm8001/pm80xx_hwi.c (task->ata_task.atapi_packet[5] << 8) |
task 4395 drivers/scsi/pm8001/pm80xx_hwi.c (task->ata_task.atapi_packet[6] << 16) |
task 4396 drivers/scsi/pm8001/pm80xx_hwi.c (task->ata_task.atapi_packet[7] << 24)));
task 4398 drivers/scsi/pm8001/pm80xx_hwi.c cpu_to_le32(((task->ata_task.atapi_packet[8]) |
task 4399 drivers/scsi/pm8001/pm80xx_hwi.c (task->ata_task.atapi_packet[9] << 8) |
task 4400 drivers/scsi/pm8001/pm80xx_hwi.c (task->ata_task.atapi_packet[10] << 16) |
task 4401 drivers/scsi/pm8001/pm80xx_hwi.c (task->ata_task.atapi_packet[11] << 24)));
task 4403 drivers/scsi/pm8001/pm80xx_hwi.c cpu_to_le32(((task->ata_task.atapi_packet[12]) |
task 4404 drivers/scsi/pm8001/pm80xx_hwi.c (task->ata_task.atapi_packet[13] << 8) |
task 4405 drivers/scsi/pm8001/pm80xx_hwi.c (task->ata_task.atapi_packet[14] << 16) |
task 4406 drivers/scsi/pm8001/pm80xx_hwi.c (task->ata_task.atapi_packet[15] << 24)));
task 4417 drivers/scsi/pm8001/pm80xx_hwi.c ts = &task->task_status;
task 4419 drivers/scsi/pm8001/pm80xx_hwi.c spin_lock_irqsave(&task->task_state_lock, flags);
task 4422 drivers/scsi/pm8001/pm80xx_hwi.c task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
task 4423 drivers/scsi/pm8001/pm80xx_hwi.c task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
task 4424 drivers/scsi/pm8001/pm80xx_hwi.c task->task_state_flags |= SAS_TASK_STATE_DONE;
task 4425 drivers/scsi/pm8001/pm80xx_hwi.c if (unlikely((task->task_state_flags &
task 4427 drivers/scsi/pm8001/pm80xx_hwi.c spin_unlock_irqrestore(&task->task_state_lock,
task 4432 drivers/scsi/pm8001/pm80xx_hwi.c "\n", task, ts->resp, ts->stat));
task 4433 drivers/scsi/pm8001/pm80xx_hwi.c pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
task 4436 drivers/scsi/pm8001/pm80xx_hwi.c spin_unlock_irqrestore(&task->task_state_lock,
task 4438 drivers/scsi/pm8001/pm80xx_hwi.c pm8001_ccb_task_free_done(pm8001_ha, task,
task 147 drivers/scsi/qedf/qedf.h struct e4_fcoe_task_context *task;
task 19 drivers/scsi/qedf/qedf_els.c struct e4_fcoe_task_context *task;
task 124 drivers/scsi/qedf/qedf_els.c task = qedf_get_task_mem(&qedf->tasks, xid);
task 125 drivers/scsi/qedf/qedf_els.c qedf_init_mp_task(els_req, task, sqe);
task 295 drivers/scsi/qedf/qedf_els.c htons(aborted_io_req->task->tstorm_st_context.read_write.rx_id);
task 1030 drivers/scsi/qedf/qedf_els.c htons(orig_io_req->task->tstorm_st_context.read_write.rx_id);
task 612 drivers/scsi/qedf/qedf_io.c io_req->task = task_ctx;
task 707 drivers/scsi/qedf/qedf_io.c io_req->task = task_ctx;
task 2135 drivers/scsi/qedf/qedf_io.c struct e4_fcoe_task_context *task;
task 2199 drivers/scsi/qedf/qedf_io.c task = qedf_get_task_mem(&qedf->tasks, xid);
task 2272 drivers/scsi/qedf/qedf_io.c struct e4_fcoe_task_context *task;
task 2330 drivers/scsi/qedf/qedf_io.c task = qedf_get_task_mem(&qedf->tasks, xid);
task 2340 drivers/scsi/qedf/qedf_io.c qedf_init_task(fcport, lport, io_req, task, sqe);
task 32 drivers/scsi/qedi/qedi_fw.c struct iscsi_task *task,
task 41 drivers/scsi/qedi/qedi_fw.c cmd = (struct qedi_cmd *)task->dd_data;
task 82 drivers/scsi/qedi/qedi_fw.c struct iscsi_task *task,
task 93 drivers/scsi/qedi/qedi_fw.c cmd = (struct qedi_cmd *)task->dd_data;
task 161 drivers/scsi/qedi/qedi_fw.c rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true);
task 182 drivers/scsi/qedi/qedi_fw.c struct iscsi_task *task,
task 195 drivers/scsi/qedi/qedi_fw.c qedi_cmd = task->dd_data;
task 223 drivers/scsi/qedi/qedi_fw.c tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr;
task 253 drivers/scsi/qedi/qedi_fw.c struct iscsi_task *task,
task 264 drivers/scsi/qedi/qedi_fw.c cmd = (struct qedi_cmd *)task->dd_data;
task 404 drivers/scsi/qedi/qedi_fw.c struct iscsi_task *task,
task 444 drivers/scsi/qedi/qedi_fw.c if (task) {
task 445 drivers/scsi/qedi/qedi_fw.c cmd = task->dd_data;
task 476 drivers/scsi/qedi/qedi_fw.c struct iscsi_task *task,
task 530 drivers/scsi/qedi/qedi_fw.c struct iscsi_task *task,
task 572 drivers/scsi/qedi/qedi_fw.c struct iscsi_task *task,
task 576 drivers/scsi/qedi/qedi_fw.c struct qedi_cmd *cmd = task->dd_data;
task 623 drivers/scsi/qedi/qedi_fw.c hdr = (struct iscsi_scsi_rsp *)task->hdr;
task 667 drivers/scsi/qedi/qedi_fw.c qedi_trace_io(qedi, task, cmd->task_id, QEDI_IO_TRACE_RSP);
task 678 drivers/scsi/qedi/qedi_fw.c struct iscsi_task *task,
task 690 drivers/scsi/qedi/qedi_fw.c qedi_scsi_completion(qedi, cqe, task, iscsi_conn);
task 693 drivers/scsi/qedi/qedi_fw.c qedi_process_login_resp(qedi, cqe, task, conn);
task 696 drivers/scsi/qedi/qedi_fw.c qedi_process_tmf_resp(qedi, cqe, task, conn);
task 699 drivers/scsi/qedi/qedi_fw.c qedi_process_text_resp(qedi, cqe, task, conn);
task 702 drivers/scsi/qedi/qedi_fw.c qedi_process_logout_resp(qedi, cqe, task, conn);
task 705 drivers/scsi/qedi/qedi_fw.c qedi_process_nopin_mesg(qedi, cqe, task, conn, que_idx);
task 714 drivers/scsi/qedi/qedi_fw.c struct iscsi_task *task,
task 719 drivers/scsi/qedi/qedi_fw.c struct qedi_cmd *cmd = task->dd_data;
task 729 drivers/scsi/qedi/qedi_fw.c __iscsi_put_task(task);
task 735 drivers/scsi/qedi/qedi_fw.c struct iscsi_task *task,
task 773 drivers/scsi/qedi/qedi_fw.c mtask = qedi_cmd->task;
task 795 drivers/scsi/qedi/qedi_fw.c task = iscsi_itt_to_task(conn, protoitt);
task 799 drivers/scsi/qedi/qedi_fw.c if (!task) {
task 807 drivers/scsi/qedi/qedi_fw.c dbg_cmd = task->dd_data;
task 811 drivers/scsi/qedi/qedi_fw.c get_itt(tmf_hdr->rtt), get_itt(task->itt),
task 830 drivers/scsi/qedi/qedi_fw.c task = iscsi_itt_to_task(conn, protoitt);
task 837 drivers/scsi/qedi/qedi_fw.c if (!task) {
task 854 drivers/scsi/qedi/qedi_fw.c task = iscsi_itt_to_task(conn, protoitt);
task 857 drivers/scsi/qedi/qedi_fw.c protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task);
task 865 drivers/scsi/qedi/qedi_fw.c struct iscsi_task
*task = NULL; task 912 drivers/scsi/qedi/qedi_fw.c task = qedi_cmd->task; task 913 drivers/scsi/qedi/qedi_fw.c if (!task) { task 919 drivers/scsi/qedi/qedi_fw.c nopout_hdr = (struct iscsi_nopout *)task->hdr; task 923 drivers/scsi/qedi/qedi_fw.c task, q_conn); task 928 drivers/scsi/qedi/qedi_fw.c qedi_mtask_completion(qedi, cqe, task, q_conn, que_idx); task 934 drivers/scsi/qedi/qedi_fw.c qedi_process_nopin_mesg(qedi, cqe, task, q_conn, task 938 drivers/scsi/qedi/qedi_fw.c qedi_process_async_mesg(qedi, cqe, task, q_conn, task 942 drivers/scsi/qedi/qedi_fw.c qedi_process_reject_mesg(qedi, cqe, task, q_conn, task 952 drivers/scsi/qedi/qedi_fw.c qedi_process_cmd_cleanup_resp(qedi, &cqe->cqe_solicited, task, task 1007 drivers/scsi/qedi/qedi_fw.c struct iscsi_task *task) task 1024 drivers/scsi/qedi/qedi_fw.c qedi_cmd = (struct qedi_cmd *)task->dd_data; task 1026 drivers/scsi/qedi/qedi_fw.c login_hdr = (struct iscsi_login_req *)task->hdr; task 1054 drivers/scsi/qedi/qedi_fw.c qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd); task 1055 drivers/scsi/qedi/qedi_fw.c login_req_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt)); task 1110 drivers/scsi/qedi/qedi_fw.c struct iscsi_task *task) task 1125 drivers/scsi/qedi/qedi_fw.c qedi_cmd = (struct qedi_cmd *)task->dd_data; task 1126 drivers/scsi/qedi/qedi_fw.c logout_hdr = (struct iscsi_logout *)task->hdr; task 1148 drivers/scsi/qedi/qedi_fw.c qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd); task 1149 drivers/scsi/qedi/qedi_fw.c logout_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt)); task 1183 drivers/scsi/qedi/qedi_fw.c struct iscsi_task *task, bool in_recovery) task 1195 drivers/scsi/qedi/qedi_fw.c if (task) { task 1196 drivers/scsi/qedi/qedi_fw.c tmf_hdr = (struct iscsi_tm *)task->hdr; task 1220 drivers/scsi/qedi/qedi_fw.c ctask = cmd->task; task 1221 drivers/scsi/qedi/qedi_fw.c if (ctask == task) task 1298 drivers/scsi/qedi/qedi_fw.c struct iscsi_task *task) task 1320 drivers/scsi/qedi/qedi_fw.c rval = qedi_cleanup_all_io(qedi, qedi_conn, task, true); task 1331 drivers/scsi/qedi/qedi_fw.c struct iscsi_task *task, task 1335 drivers/scsi/qedi/qedi_fw.c struct qedi_cmd *cmd = (struct qedi_cmd *)task->dd_data; task 1372 drivers/scsi/qedi/qedi_fw.c mtask = qedi_cmd->task; task 1434 drivers/scsi/qedi/qedi_fw.c qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task); task 1561 drivers/scsi/qedi/qedi_fw.c qedi_cmd->task = mtask; task 1584 drivers/scsi/qedi/qedi_fw.c qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task); task 1596 drivers/scsi/qedi/qedi_fw.c struct iscsi_task *task) task 1615 drivers/scsi/qedi/qedi_fw.c qedi_cmd = (struct qedi_cmd *)task->dd_data; task 1616 drivers/scsi/qedi/qedi_fw.c text_hdr = (struct iscsi_text *)task->hdr; task 1639 drivers/scsi/qedi/qedi_fw.c qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd); task 1640 drivers/scsi/qedi/qedi_fw.c text_request_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt)); task 1695 drivers/scsi/qedi/qedi_fw.c struct iscsi_task *task, task 1714 drivers/scsi/qedi/qedi_fw.c qedi_cmd = (struct qedi_cmd *)task->dd_data; task 1715 drivers/scsi/qedi/qedi_fw.c nopout_hdr = (struct iscsi_nopout *)task->hdr; task 1745 drivers/scsi/qedi/qedi_fw.c qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd); task 1751 drivers/scsi/qedi/qedi_fw.c nop_out_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt)); task 1973 drivers/scsi/qedi/qedi_fw.c void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task, task 1977 drivers/scsi/qedi/qedi_fw.c struct iscsi_conn *conn = task->conn; task 1979 
drivers/scsi/qedi/qedi_fw.c struct scsi_cmnd *sc_cmd = task->sc; task 2028 drivers/scsi/qedi/qedi_fw.c int qedi_iscsi_send_ioreq(struct iscsi_task *task) task 2030 drivers/scsi/qedi/qedi_fw.c struct iscsi_conn *conn = task->conn; task 2035 drivers/scsi/qedi/qedi_fw.c struct qedi_cmd *cmd = task->dd_data; task 2036 drivers/scsi/qedi/qedi_fw.c struct scsi_cmnd *sc = task->sc; task 2047 drivers/scsi/qedi/qedi_fw.c struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr; task 2100 drivers/scsi/qedi/qedi_fw.c qedi_update_itt_map(qedi, tid, task->itt, cmd); task 2101 drivers/scsi/qedi/qedi_fw.c cmd_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt)); task 2188 drivers/scsi/qedi/qedi_fw.c int qedi_iscsi_cleanup_task(struct iscsi_task *task, bool mark_cmd_node_deleted) task 2192 drivers/scsi/qedi/qedi_fw.c struct iscsi_conn *conn = task->conn; task 2194 drivers/scsi/qedi/qedi_fw.c struct qedi_cmd *cmd = task->dd_data; task 2200 drivers/scsi/qedi/qedi_fw.c cmd->task_id, get_itt(task->itt), task->state, task 31 drivers/scsi/qedi/qedi_gbl.h struct iscsi_task *task); task 33 drivers/scsi/qedi/qedi_gbl.h struct iscsi_task *task); task 37 drivers/scsi/qedi/qedi_gbl.h struct iscsi_task *task); task 39 drivers/scsi/qedi/qedi_gbl.h struct iscsi_task *task, task 41 drivers/scsi/qedi/qedi_gbl.h int qedi_iscsi_send_ioreq(struct iscsi_task *task); task 44 drivers/scsi/qedi/qedi_gbl.h int qedi_iscsi_cleanup_task(struct iscsi_task *task, task 65 drivers/scsi/qedi/qedi_gbl.h struct iscsi_task *task, bool in_recovery); task 66 drivers/scsi/qedi/qedi_gbl.h void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task, task 75 drivers/scsi/qedi/qedi_gbl.h struct iscsi_task *task); task 169 drivers/scsi/qedi/qedi_iscsi.c struct iscsi_task *task = session->cmds[i]; task 170 drivers/scsi/qedi/qedi_iscsi.c struct qedi_cmd *cmd = task->dd_data; task 213 drivers/scsi/qedi/qedi_iscsi.c struct iscsi_task *task = session->cmds[i]; task 214 drivers/scsi/qedi/qedi_iscsi.c struct qedi_cmd *cmd = task->dd_data; task 216 drivers/scsi/qedi/qedi_iscsi.c task->hdr = &cmd->hdr; task 217 drivers/scsi/qedi/qedi_iscsi.c task->hdr_max = sizeof(struct iscsi_hdr); task 717 drivers/scsi/qedi/qedi_iscsi.c static int qedi_iscsi_send_generic_request(struct iscsi_task *task) task 719 drivers/scsi/qedi/qedi_iscsi.c struct qedi_cmd *cmd = task->dd_data; task 726 drivers/scsi/qedi/qedi_iscsi.c switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { task 728 drivers/scsi/qedi/qedi_iscsi.c qedi_send_iscsi_login(qedi_conn, task); task 734 drivers/scsi/qedi/qedi_iscsi.c rc = qedi_send_iscsi_nopout(qedi_conn, task, task 737 drivers/scsi/qedi/qedi_iscsi.c rc = qedi_send_iscsi_nopout(qedi_conn, task, task 741 drivers/scsi/qedi/qedi_iscsi.c rc = qedi_send_iscsi_logout(qedi_conn, task); task 744 drivers/scsi/qedi/qedi_iscsi.c rc = qedi_iscsi_abort_work(qedi_conn, task); task 747 drivers/scsi/qedi/qedi_iscsi.c rc = qedi_send_iscsi_text(qedi_conn, task); task 751 drivers/scsi/qedi/qedi_iscsi.c "unsupported op 0x%x\n", task->hdr->opcode); task 757 drivers/scsi/qedi/qedi_iscsi.c static int qedi_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task) task 760 drivers/scsi/qedi/qedi_iscsi.c struct qedi_cmd *cmd = task->dd_data; task 764 drivers/scsi/qedi/qedi_iscsi.c qedi_conn->gen_pdu.req_buf_size = task->data_count; task 766 drivers/scsi/qedi/qedi_iscsi.c if (task->data_count) { task 767 drivers/scsi/qedi/qedi_iscsi.c memcpy(qedi_conn->gen_pdu.req_buf, task->data, task 768 drivers/scsi/qedi/qedi_iscsi.c task->data_count); task 770 
drivers/scsi/qedi/qedi_iscsi.c qedi_conn->gen_pdu.req_buf + task->data_count; task 775 drivers/scsi/qedi/qedi_iscsi.c return qedi_iscsi_send_generic_request(task); task 778 drivers/scsi/qedi/qedi_iscsi.c static int qedi_task_xmit(struct iscsi_task *task) task 780 drivers/scsi/qedi/qedi_iscsi.c struct iscsi_conn *conn = task->conn; task 782 drivers/scsi/qedi/qedi_iscsi.c struct qedi_cmd *cmd = task->dd_data; task 783 drivers/scsi/qedi/qedi_iscsi.c struct scsi_cmnd *sc = task->sc; task 786 drivers/scsi/qedi/qedi_iscsi.c cmd->task = NULL; task 789 drivers/scsi/qedi/qedi_iscsi.c cmd->task = task; task 794 drivers/scsi/qedi/qedi_iscsi.c return qedi_mtask_xmit(conn, task); task 797 drivers/scsi/qedi/qedi_iscsi.c return qedi_iscsi_send_ioreq(task); task 1366 drivers/scsi/qedi/qedi_iscsi.c static void qedi_cleanup_task(struct iscsi_task *task) task 1368 drivers/scsi/qedi/qedi_iscsi.c if (!task->sc || task->state == ISCSI_TASK_PENDING) { task 1370 drivers/scsi/qedi/qedi_iscsi.c refcount_read(&task->refcount)); task 1374 drivers/scsi/qedi/qedi_iscsi.c qedi_iscsi_unmap_sg_list(task->dd_data); task 189 drivers/scsi/qedi/qedi_iscsi.h struct iscsi_task *task; task 537 drivers/scsi/qla2xxx/qla_fw.h uint8_t task; task 1676 drivers/scsi/qla2xxx/qla_iocb.c cmd_pkt->task = TSK_SIMPLE; task 1996 drivers/scsi/qla2xxx/qla_iocb.c cmd_pkt->task = TSK_SIMPLE; task 3325 drivers/scsi/qla2xxx/qla_iocb.c cmd_pkt->task |= sp->fcport->fcp_prio << 3; task 44 drivers/scsi/qla2xxx/qla_mr.h uint8_t task; task 834 drivers/scsi/qla4xxx/ql4_def.h struct iscsi_task *task; task 158 drivers/scsi/qla4xxx/ql4_glbl.h int qla4xxx_send_passthru0(struct iscsi_task *task); task 384 drivers/scsi/qla4xxx/ql4_iocb.c int qla4xxx_send_passthru0(struct iscsi_task *task) task 387 drivers/scsi/qla4xxx/ql4_iocb.c struct iscsi_session *sess = task->conn->session; task 390 drivers/scsi/qla4xxx/ql4_iocb.c struct ql4_task_data *task_data = task->dd_data; task 407 drivers/scsi/qla4xxx/ql4_iocb.c passthru_iocb->handle = task->itt; task 414 drivers/scsi/qla4xxx/ql4_iocb.c sizeof(struct iscsi_hdr), task->data, task->data_count); task 421 drivers/scsi/qla4xxx/ql4_iocb.c cpu_to_le32(task->data_count + task 367 drivers/scsi/qla4xxx/ql4_isr.c struct iscsi_task *task; task 389 drivers/scsi/qla4xxx/ql4_isr.c task = iscsi_itt_to_task(conn, itt); task 392 drivers/scsi/qla4xxx/ql4_isr.c if (task == NULL) { task 397 drivers/scsi/qla4xxx/ql4_isr.c task_data = task->dd_data; task 3320 drivers/scsi/qla4xxx/ql4_os.c struct iscsi_task *task; task 3330 drivers/scsi/qla4xxx/ql4_os.c task = task_data->task; task 3339 drivers/scsi/qla4xxx/ql4_os.c conn = task->conn; task 3359 drivers/scsi/qla4xxx/ql4_os.c static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode) task 3367 drivers/scsi/qla4xxx/ql4_os.c sess = task->conn->session; task 3370 drivers/scsi/qla4xxx/ql4_os.c task_data = task->dd_data; task 3373 drivers/scsi/qla4xxx/ql4_os.c if (task->sc) { task 3381 drivers/scsi/qla4xxx/ql4_os.c task_data->task = task; task 3383 drivers/scsi/qla4xxx/ql4_os.c if (task->data_count) { task 3384 drivers/scsi/qla4xxx/ql4_os.c task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data, task 3385 drivers/scsi/qla4xxx/ql4_os.c task->data_count, task 3390 drivers/scsi/qla4xxx/ql4_os.c __func__, task->conn->max_recv_dlength, hdr_len)); task 3392 drivers/scsi/qla4xxx/ql4_os.c task_data->resp_len = task->conn->max_recv_dlength + hdr_len; task 3400 drivers/scsi/qla4xxx/ql4_os.c task_data->req_len = task->data_count + hdr_len; task 3408 drivers/scsi/qla4xxx/ql4_os.c task->hdr 
= task_data->req_buffer; task 3425 drivers/scsi/qla4xxx/ql4_os.c static void qla4xxx_task_cleanup(struct iscsi_task *task) task 3434 drivers/scsi/qla4xxx/ql4_os.c sess = task->conn->session; task 3437 drivers/scsi/qla4xxx/ql4_os.c task_data = task->dd_data; task 3439 drivers/scsi/qla4xxx/ql4_os.c if (task->data_count) { task 3441 drivers/scsi/qla4xxx/ql4_os.c task->data_count, DMA_TO_DEVICE); task 3445 drivers/scsi/qla4xxx/ql4_os.c __func__, task->conn->max_recv_dlength, hdr_len)); task 3454 drivers/scsi/qla4xxx/ql4_os.c static int qla4xxx_task_xmit(struct iscsi_task *task) task 3456 drivers/scsi/qla4xxx/ql4_os.c struct scsi_cmnd *sc = task->sc; task 3457 drivers/scsi/qla4xxx/ql4_os.c struct iscsi_session *sess = task->conn->session; task 3462 drivers/scsi/qla4xxx/ql4_os.c return qla4xxx_send_passthru0(task); task 664 drivers/scsi/smartpqi/smartpqi.h struct task_struct *task; task 2965 drivers/scsi/sym53c8xx_2/sym_hipd.c sym_dequeue_from_squeue(struct sym_hcb *np, int i, int target, int lun, int task) task 2989 drivers/scsi/sym53c8xx_2/sym_hipd.c (task == -1 || cp->tag == task)) { task 3186 drivers/scsi/sym53c8xx_2/sym_hipd.c int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int task) task 3211 drivers/scsi/sym53c8xx_2/sym_hipd.c (task != -1 && task 3212 drivers/scsi/sym53c8xx_2/sym_hipd.c (cp->tag != NO_TAG && cp->scsi_smsg[2] != task))) { task 3275 drivers/scsi/sym53c8xx_2/sym_hipd.c int target=-1, lun=-1, task; task 3540 drivers/scsi/sym53c8xx_2/sym_hipd.c task = -1; task 3564 drivers/scsi/sym53c8xx_2/sym_hipd.c task = np->abrt_msg[2]; task 3573 drivers/scsi/sym53c8xx_2/sym_hipd.c sym_clear_tasks(np, DID_ABORT, target, lun, task); task 1052 drivers/scsi/sym53c8xx_2/sym_hipd.h int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int task); task 435 drivers/soc/ti/wkup_m3_ipc.c struct task_struct *task; task 501 drivers/soc/ti/wkup_m3_ipc.c task = kthread_run((void *)wkup_m3_rproc_boot_thread, m3_ipc, task 504 drivers/soc/ti/wkup_m3_ipc.c if (IS_ERR(task)) { task 506 drivers/soc/ti/wkup_m3_ipc.c ret = PTR_ERR(task); task 370 drivers/ssb/driver_chipcommon_pmu.c u8 task; /* SET | ADD | REMOVE */ task 401 drivers/ssb/driver_chipcommon_pmu.c .task = PMU_RES_DEP_SET, task 415 drivers/ssb/driver_chipcommon_pmu.c .task = PMU_RES_DEP_ADD, task 490 drivers/ssb/driver_chipcommon_pmu.c switch (depend_tab[i].task) { task 154 drivers/staging/android/ion/ion.h struct task_struct *task; task 251 drivers/staging/android/ion/ion_heap.c heap->task = kthread_run(ion_heap_deferred_free, heap, task 253 drivers/staging/android/ion/ion_heap.c if (IS_ERR(heap->task)) { task 256 drivers/staging/android/ion/ion_heap.c return PTR_ERR_OR_ZERO(heap->task); task 258 drivers/staging/android/ion/ion_heap.c sched_setscheduler(heap->task, SCHED_IDLE, &param); task 462 drivers/staging/android/vsoc.c if (likely(to->task)) task 465 drivers/staging/android/vsoc.c if (!to->task) { task 65 drivers/staging/greybus/loopback.c struct task_struct *task; task 1060 drivers/staging/greybus/loopback.c gb->task = kthread_run(gb_loopback_fn, gb, "gb_loopback"); task 1061 drivers/staging/greybus/loopback.c if (IS_ERR(gb->task)) { task 1062 drivers/staging/greybus/loopback.c retval = PTR_ERR(gb->task); task 1106 drivers/staging/greybus/loopback.c if (!IS_ERR_OR_NULL(gb->task)) task 1107 drivers/staging/greybus/loopback.c kthread_stop(gb->task); task 155 drivers/staging/media/imx/imx-ic-prpencvf.c int ret, task = ic_priv->task_id; task 157 drivers/staging/media/imx/imx-ic-prpencvf.c ic = 
ipu_ic_get(ic_priv->ipu, task); task 165 drivers/staging/media/imx/imx-ic-prpencvf.c out_ch = ipu_idmac_get(ic_priv->ipu, prp_channel[task].out_ch); task 168 drivers/staging/media/imx/imx-ic-prpencvf.c prp_channel[task].out_ch); task 174 drivers/staging/media/imx/imx-ic-prpencvf.c rot_in_ch = ipu_idmac_get(ic_priv->ipu, prp_channel[task].rot_in_ch); task 177 drivers/staging/media/imx/imx-ic-prpencvf.c prp_channel[task].rot_in_ch); task 183 drivers/staging/media/imx/imx-ic-prpencvf.c rot_out_ch = ipu_idmac_get(ic_priv->ipu, prp_channel[task].rot_out_ch); task 186 drivers/staging/media/imx/imx-ic-prpencvf.c prp_channel[task].rot_out_ch); task 896 drivers/staging/most/core.c struct task_struct *task = task 900 drivers/staging/most/core.c if (IS_ERR(task)) task 901 drivers/staging/most/core.c return PTR_ERR(task); task 903 drivers/staging/most/core.c c->hdm_enqueue_task = task; task 159 drivers/staging/mt7621-dma/mtk-hsdma.c struct tasklet_struct task; task 435 drivers/staging/mt7621-dma/mtk-hsdma.c tasklet_schedule(&hsdma->task); task 453 drivers/staging/mt7621-dma/mtk-hsdma.c tasklet_schedule(&hsdma->task); task 675 drivers/staging/mt7621-dma/mtk-hsdma.c tasklet_init(&hsdma->task, mtk_hsdma_tasklet, (unsigned long)hsdma); task 124 drivers/staging/ralink-gdma/ralink-gdma.c struct tasklet_struct task; task 489 drivers/staging/ralink-gdma/ralink-gdma.c tasklet_schedule(&dma_dev->task); task 504 drivers/staging/ralink-gdma/ralink-gdma.c tasklet_schedule(&dma_dev->task); task 826 drivers/staging/ralink-gdma/ralink-gdma.c tasklet_init(&dma_dev->task, gdma_dma_tasklet, (unsigned long)dma_dev); task 899 drivers/staging/ralink-gdma/ralink-gdma.c tasklet_kill(&dma_dev->task); task 850 drivers/staging/rtl8723bs/core/rtw_pwrctrl.c s32 rtw_register_task_alive(struct adapter *padapter, u32 task) task 862 drivers/staging/rtl8723bs/core/rtw_pwrctrl.c register_task_alive(pwrctrl, task); task 867 drivers/staging/rtl8723bs/core/rtw_pwrctrl.c __func__, task, pwrctrl->cpwm, pwrctrl->alives)); task 896 drivers/staging/rtl8723bs/core/rtw_pwrctrl.c void rtw_unregister_task_alive(struct adapter *padapter, u32 task) task 915 drivers/staging/rtl8723bs/core/rtw_pwrctrl.c unregister_task_alive(pwrctrl, task); task 327 drivers/staging/rtl8723bs/include/rtw_pwrctrl.h s32 rtw_register_task_alive(struct adapter *, u32 task); task 328 drivers/staging/rtl8723bs/include/rtw_pwrctrl.h void rtw_unregister_task_alive(struct adapter *, u32 task); task 243 drivers/staging/rtl8723bs/include/rtw_recv.h struct task rx_indicate_tasklet; task 113 drivers/staging/unisys/visorhba/visorhba_main.c struct task_struct *task; task 115 drivers/staging/unisys/visorhba/visorhba_main.c task = kthread_run(threadfn, thrcontext, "%s", name); task 116 drivers/staging/unisys/visorhba/visorhba_main.c if (IS_ERR(task)) { task 120 drivers/staging/unisys/visorhba/visorhba_main.c return task; task 127 drivers/staging/unisys/visorhba/visorhba_main.c static void visor_thread_stop(struct task_struct *task) task 129 drivers/staging/unisys/visorhba/visorhba_main.c kthread_stop(task); task 88 drivers/staging/uwb/uwb.h struct task_struct *task; task 291 drivers/staging/uwb/uwbd.c struct task_struct *task = kthread_run(uwbd, rc, "uwbd"); task 292 drivers/staging/uwb/uwbd.c if (IS_ERR(task)) { task 293 drivers/staging/uwb/uwbd.c rc->uwbd.task = NULL; task 297 drivers/staging/uwb/uwbd.c rc->uwbd.task = task; task 298 drivers/staging/uwb/uwbd.c rc->uwbd.pid = rc->uwbd.task->pid; task 305 drivers/staging/uwb/uwbd.c if (rc->uwbd.task) task 306 drivers/staging/uwb/uwbd.c 
kthread_stop(rc->uwbd.task); task 195 drivers/target/loopback/tcm_loop.c u64 lun, int task, enum tcm_tmreq_table tmr) task 222 drivers/target/loopback/tcm_loop.c NULL, tmr, GFP_KERNEL, task, task 491 drivers/thermal/intel/intel_powerclamp.c sched_setscheduler(worker->task, SCHED_FIFO, &sparam); task 201 drivers/tty/synclink.c struct work_struct task; /* task structure for scheduling bh */ task 1057 drivers/tty/synclink.c container_of(work, struct mgsl_struct, task); task 1727 drivers/tty/synclink.c schedule_work(&info->task); task 4246 drivers/tty/synclink.c INIT_WORK(&info->task, mgsl_bh_handler); task 275 drivers/tty/synclink_gt.c struct work_struct task; task 1923 drivers/tty/synclink_gt.c struct slgt_info *info = container_of(work, struct slgt_info, task); task 2386 drivers/tty/synclink_gt.c schedule_work(&port->task); task 3564 drivers/tty/synclink_gt.c INIT_WORK(&info->task, bh_handler); task 5126 drivers/tty/synclink_gt.c bh_handler(&info->task); task 179 drivers/tty/synclinkmp.c struct work_struct task; /* task structure for scheduling bh */ task 1982 drivers/tty/synclinkmp.c SLMP_INFO *info = container_of(work, SLMP_INFO, task); task 2604 drivers/tty/synclinkmp.c schedule_work(&port->task); task 3736 drivers/tty/synclinkmp.c INIT_WORK(&info->task, bh_handler); task 51 drivers/tty/tty_ldsem.c struct task_struct *task; task 95 drivers/tty/tty_ldsem.c tsk = waiter->task; task 96 drivers/tty/tty_ldsem.c smp_store_release(&waiter->task, NULL); task 124 drivers/tty/tty_ldsem.c wake_up_process(waiter->task); task 183 drivers/tty/tty_ldsem.c waiter.task = current; task 196 drivers/tty/tty_ldsem.c if (!smp_load_acquire(&waiter.task)) task 212 drivers/tty/tty_ldsem.c if (waiter.task) { task 217 drivers/tty/tty_ldsem.c put_task_struct(waiter.task); task 255 drivers/tty/tty_ldsem.c waiter.task = current; task 126 drivers/usb/atm/ueagle-atm.c struct work_struct task; task 900 drivers/usb/atm/ueagle-atm.c struct uea_softc *sc = container_of(work, struct uea_softc, task); task 1025 drivers/usb/atm/ueagle-atm.c struct uea_softc *sc = container_of(work, struct uea_softc, task); task 1861 drivers/usb/atm/ueagle-atm.c schedule_work(&sc->task); task 2073 drivers/usb/atm/ueagle-atm.c schedule_work(&sc->task); task 2080 drivers/usb/atm/ueagle-atm.c schedule_work(&sc->task); task 2141 drivers/usb/atm/ueagle-atm.c INIT_WORK(&sc->task, uea_load_page_e4); task 2148 drivers/usb/atm/ueagle-atm.c INIT_WORK(&sc->task, uea_load_page_e1); task 2222 drivers/usb/atm/ueagle-atm.c flush_work(&sc->task); task 324 drivers/usb/usbip/usbip_common.h int usbip_in_eh(struct task_struct *task); task 188 drivers/usb/usbip/usbip_event.c int usbip_in_eh(struct task_struct *task) task 190 drivers/usb/usbip/usbip_event.c if (task == worker_context) task 91 drivers/vfio/vfio_iommu_type1.c struct task_struct *task; task 276 drivers/vfio/vfio_iommu_type1.c mm = async ? 
get_task_mm(dma->task) : dma->task->mm; task 282 drivers/vfio/vfio_iommu_type1.c ret = __account_locked_vm(mm, abs(npage), npage > 0, dma->task, task 503 drivers/vfio/vfio_iommu_type1.c mm = get_task_mm(dma->task); task 515 drivers/vfio/vfio_iommu_type1.c dma->task->comm, task_pid_nr(dma->task), task 516 drivers/vfio/vfio_iommu_type1.c task_rlimit(dma->task, RLIMIT_MEMLOCK)); task 840 drivers/vfio/vfio_iommu_type1.c put_task_struct(dma->task); task 944 drivers/vfio/vfio_iommu_type1.c if (dma->task->mm != current->mm) task 1156 drivers/vfio/vfio_iommu_type1.c dma->task = current->group_leader; task 44 drivers/video/fbdev/atmel_lcdfb.c struct work_struct task; task 849 drivers/video/fbdev/atmel_lcdfb.c schedule_work(&sinfo->task); task 861 drivers/video/fbdev/atmel_lcdfb.c container_of(work, struct atmel_lcdfb_info, task); task 1183 drivers/video/fbdev/atmel_lcdfb.c INIT_WORK(&sinfo->task, atmel_lcdfb_task); task 1220 drivers/video/fbdev/atmel_lcdfb.c cancel_work_sync(&sinfo->task); task 1258 drivers/video/fbdev/atmel_lcdfb.c cancel_work_sync(&sinfo->task); task 118 drivers/video/fbdev/ps3fb.c struct task_struct *task; task 925 drivers/video/fbdev/ps3fb.c if (ps3fb.task && !ps3fb.is_blanked && task 928 drivers/video/fbdev/ps3fb.c wake_up_process(ps3fb.task); task 976 drivers/video/fbdev/ps3fb.c struct task_struct *task; task 1173 drivers/video/fbdev/ps3fb.c task = kthread_run(ps3fbd, info, DEVICE_NAME); task 1174 drivers/video/fbdev/ps3fb.c if (IS_ERR(task)) { task 1175 drivers/video/fbdev/ps3fb.c retval = PTR_ERR(task); task 1179 drivers/video/fbdev/ps3fb.c ps3fb.task = task; task 1220 drivers/video/fbdev/ps3fb.c if (ps3fb.task) { task 1221 drivers/video/fbdev/ps3fb.c struct task_struct *task = ps3fb.task; task 1222 drivers/video/fbdev/ps3fb.c ps3fb.task = NULL; task 1223 drivers/video/fbdev/ps3fb.c kthread_stop(task); task 129 drivers/video/fbdev/pxafb.c schedule_work(&fbi->task); task 1642 drivers/video/fbdev/pxafb.c container_of(work, struct pxafb_info, task); task 1848 drivers/video/fbdev/pxafb.c INIT_WORK(&fbi->task, pxafb_task); task 147 drivers/video/fbdev/pxafb.h struct work_struct task; task 242 drivers/video/fbdev/sa1100fb.c schedule_work(&fbi->task); task 963 drivers/video/fbdev/sa1100fb.c struct sa1100fb_info *fbi = container_of(w, struct sa1100fb_info, task); task 1150 drivers/video/fbdev/sa1100fb.c INIT_WORK(&fbi->task, sa1100fb_task); task 63 drivers/video/fbdev/sa1100fb.h struct work_struct task; task 75 drivers/video/fbdev/uvesafb.c struct uvesafb_ktask *task; task 84 drivers/video/fbdev/uvesafb.c task = uvfb_tasks[msg->seq]; task 86 drivers/video/fbdev/uvesafb.c if (!task || msg->ack != task->ack) { task 94 drivers/video/fbdev/uvesafb.c if (task->t.buf_len < utask->buf_len || task 103 drivers/video/fbdev/uvesafb.c memcpy(&task->t, utask, sizeof(*utask)); task 105 drivers/video/fbdev/uvesafb.c if (task->t.buf_len && task->buf) task 106 drivers/video/fbdev/uvesafb.c memcpy(task->buf, utask + 1, task->t.buf_len); task 108 drivers/video/fbdev/uvesafb.c complete(task->done); task 144 drivers/video/fbdev/uvesafb.c static int uvesafb_exec(struct uvesafb_ktask *task) task 149 drivers/video/fbdev/uvesafb.c int len = sizeof(task->t) + task->t.buf_len; task 165 drivers/video/fbdev/uvesafb.c init_completion(task->done); task 173 drivers/video/fbdev/uvesafb.c memcpy(m + 1, &task->t, sizeof(task->t)); task 176 drivers/video/fbdev/uvesafb.c memcpy((u8 *)(m + 1) + sizeof(task->t), task->buf, task->t.buf_len); task 182 drivers/video/fbdev/uvesafb.c task->ack = m->ack; task 194 
drivers/video/fbdev/uvesafb.c uvfb_tasks[seq] = task; task 216 drivers/video/fbdev/uvesafb.c if (!err && !(task->t.flags & TF_EXIT)) task 217 drivers/video/fbdev/uvesafb.c err = !wait_for_completion_timeout(task->done, task 235 drivers/video/fbdev/uvesafb.c static void uvesafb_free(struct uvesafb_ktask *task) task 237 drivers/video/fbdev/uvesafb.c if (task) { task 238 drivers/video/fbdev/uvesafb.c kfree(task->done); task 239 drivers/video/fbdev/uvesafb.c kfree(task); task 246 drivers/video/fbdev/uvesafb.c static void uvesafb_reset(struct uvesafb_ktask *task) task 248 drivers/video/fbdev/uvesafb.c struct completion *cpl = task->done; task 250 drivers/video/fbdev/uvesafb.c memset(task, 0, sizeof(*task)); task 251 drivers/video/fbdev/uvesafb.c task->done = cpl; task 259 drivers/video/fbdev/uvesafb.c struct uvesafb_ktask *task; task 261 drivers/video/fbdev/uvesafb.c task = kzalloc(sizeof(*task), GFP_KERNEL); task 262 drivers/video/fbdev/uvesafb.c if (task) { task 263 drivers/video/fbdev/uvesafb.c task->done = kzalloc(sizeof(*task->done), GFP_KERNEL); task 264 drivers/video/fbdev/uvesafb.c if (!task->done) { task 265 drivers/video/fbdev/uvesafb.c kfree(task); task 266 drivers/video/fbdev/uvesafb.c task = NULL; task 269 drivers/video/fbdev/uvesafb.c return task; task 354 drivers/video/fbdev/uvesafb.c struct uvesafb_ktask *task; task 365 drivers/video/fbdev/uvesafb.c task = uvesafb_prep(); task 366 drivers/video/fbdev/uvesafb.c if (!task) { task 371 drivers/video/fbdev/uvesafb.c task->t.regs.eax = 0x4f04; task 372 drivers/video/fbdev/uvesafb.c task->t.regs.ecx = 0x000f; task 373 drivers/video/fbdev/uvesafb.c task->t.regs.edx = 0x0001; task 374 drivers/video/fbdev/uvesafb.c task->t.flags = TF_BUF_RET | TF_BUF_ESBX; task 375 drivers/video/fbdev/uvesafb.c task->t.buf_len = par->vbe_state_size; task 376 drivers/video/fbdev/uvesafb.c task->buf = state; task 377 drivers/video/fbdev/uvesafb.c err = uvesafb_exec(task); task 379 drivers/video/fbdev/uvesafb.c if (err || (task->t.regs.eax & 0xffff) != 0x004f) { task 381 drivers/video/fbdev/uvesafb.c task->t.regs.eax, err); task 386 drivers/video/fbdev/uvesafb.c uvesafb_free(task); task 392 drivers/video/fbdev/uvesafb.c struct uvesafb_ktask *task; task 398 drivers/video/fbdev/uvesafb.c task = uvesafb_prep(); task 399 drivers/video/fbdev/uvesafb.c if (!task) task 402 drivers/video/fbdev/uvesafb.c task->t.regs.eax = 0x4f04; task 403 drivers/video/fbdev/uvesafb.c task->t.regs.ecx = 0x000f; task 404 drivers/video/fbdev/uvesafb.c task->t.regs.edx = 0x0002; task 405 drivers/video/fbdev/uvesafb.c task->t.buf_len = par->vbe_state_size; task 406 drivers/video/fbdev/uvesafb.c task->t.flags = TF_BUF_ESBX; task 407 drivers/video/fbdev/uvesafb.c task->buf = state_buf; task 409 drivers/video/fbdev/uvesafb.c err = uvesafb_exec(task); task 410 drivers/video/fbdev/uvesafb.c if (err || (task->t.regs.eax & 0xffff) != 0x004f) task 412 drivers/video/fbdev/uvesafb.c task->t.regs.eax, err); task 414 drivers/video/fbdev/uvesafb.c uvesafb_free(task); task 417 drivers/video/fbdev/uvesafb.c static int uvesafb_vbe_getinfo(struct uvesafb_ktask *task, task 422 drivers/video/fbdev/uvesafb.c task->t.regs.eax = 0x4f00; task 423 drivers/video/fbdev/uvesafb.c task->t.flags = TF_VBEIB; task 424 drivers/video/fbdev/uvesafb.c task->t.buf_len = sizeof(struct vbe_ib); task 425 drivers/video/fbdev/uvesafb.c task->buf = &par->vbe_ib; task 428 drivers/video/fbdev/uvesafb.c err = uvesafb_exec(task); task 429 drivers/video/fbdev/uvesafb.c if (err || (task->t.regs.eax & 0xffff) != 0x004f) { task 431 
drivers/video/fbdev/uvesafb.c (u32)task->t.regs.eax, err); task 454 drivers/video/fbdev/uvesafb.c ((char *)task->buf) + par->vbe_ib.oem_vendor_name_ptr); task 458 drivers/video/fbdev/uvesafb.c ((char *)task->buf) + par->vbe_ib.oem_product_name_ptr); task 462 drivers/video/fbdev/uvesafb.c ((char *)task->buf) + par->vbe_ib.oem_product_rev_ptr); task 466 drivers/video/fbdev/uvesafb.c ((char *)task->buf) + par->vbe_ib.oem_string_ptr); task 475 drivers/video/fbdev/uvesafb.c static int uvesafb_vbe_getmodes(struct uvesafb_ktask *task, task 501 drivers/video/fbdev/uvesafb.c uvesafb_reset(task); task 502 drivers/video/fbdev/uvesafb.c task->t.regs.eax = 0x4f01; task 503 drivers/video/fbdev/uvesafb.c task->t.regs.ecx = (u32) *mode; task 504 drivers/video/fbdev/uvesafb.c task->t.flags = TF_BUF_RET | TF_BUF_ESDI; task 505 drivers/video/fbdev/uvesafb.c task->t.buf_len = sizeof(struct vbe_mode_ib); task 506 drivers/video/fbdev/uvesafb.c task->buf = par->vbe_modes + off; task 508 drivers/video/fbdev/uvesafb.c err = uvesafb_exec(task); task 509 drivers/video/fbdev/uvesafb.c if (err || (task->t.regs.eax & 0xffff) != 0x004f) { task 511 drivers/video/fbdev/uvesafb.c *mode, (u32)task->t.regs.eax, err); task 517 drivers/video/fbdev/uvesafb.c mib = task->buf; task 554 drivers/video/fbdev/uvesafb.c static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task, task 559 drivers/video/fbdev/uvesafb.c uvesafb_reset(task); task 560 drivers/video/fbdev/uvesafb.c task->t.regs.eax = 0x4f0a; task 561 drivers/video/fbdev/uvesafb.c task->t.regs.ebx = 0x0; task 562 drivers/video/fbdev/uvesafb.c err = uvesafb_exec(task); task 564 drivers/video/fbdev/uvesafb.c if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) { task 567 drivers/video/fbdev/uvesafb.c par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4) task 568 drivers/video/fbdev/uvesafb.c + task->t.regs.edi); task 572 drivers/video/fbdev/uvesafb.c (u16)task->t.regs.es, (u16)task->t.regs.edi); task 613 drivers/video/fbdev/uvesafb.c static int uvesafb_vbe_getedid(struct uvesafb_ktask *task, struct fb_info *info) task 621 drivers/video/fbdev/uvesafb.c task->t.regs.eax = 0x4f15; task 622 drivers/video/fbdev/uvesafb.c task->t.regs.ebx = 0; task 623 drivers/video/fbdev/uvesafb.c task->t.regs.ecx = 0; task 624 drivers/video/fbdev/uvesafb.c task->t.buf_len = 0; task 625 drivers/video/fbdev/uvesafb.c task->t.flags = 0; task 627 drivers/video/fbdev/uvesafb.c err = uvesafb_exec(task); task 629 drivers/video/fbdev/uvesafb.c if ((task->t.regs.eax & 0xffff) != 0x004f || err) task 632 drivers/video/fbdev/uvesafb.c if ((task->t.regs.ebx & 0x3) == 3) { task 634 drivers/video/fbdev/uvesafb.c } else if ((task->t.regs.ebx & 0x3) == 2) { task 636 drivers/video/fbdev/uvesafb.c } else if ((task->t.regs.ebx & 0x3) == 1) { task 643 drivers/video/fbdev/uvesafb.c task->t.regs.eax = 0x4f15; task 644 drivers/video/fbdev/uvesafb.c task->t.regs.ebx = 1; task 645 drivers/video/fbdev/uvesafb.c task->t.regs.ecx = task->t.regs.edx = 0; task 646 drivers/video/fbdev/uvesafb.c task->t.flags = TF_BUF_RET | TF_BUF_ESDI; task 647 drivers/video/fbdev/uvesafb.c task->t.buf_len = EDID_LENGTH; task 648 drivers/video/fbdev/uvesafb.c task->buf = kzalloc(EDID_LENGTH, GFP_KERNEL); task 649 drivers/video/fbdev/uvesafb.c if (!task->buf) task 652 drivers/video/fbdev/uvesafb.c err = uvesafb_exec(task); task 654 drivers/video/fbdev/uvesafb.c if ((task->t.regs.eax & 0xffff) == 0x004f && !err) { task 655 drivers/video/fbdev/uvesafb.c fb_edid_to_monspecs(task->buf, &info->monspecs); task 670 
drivers/video/fbdev/uvesafb.c kfree(task->buf); task 674 drivers/video/fbdev/uvesafb.c static void uvesafb_vbe_getmonspecs(struct uvesafb_ktask *task, task 687 drivers/video/fbdev/uvesafb.c if (uvesafb_vbe_getedid(task, info)) { task 753 drivers/video/fbdev/uvesafb.c static void uvesafb_vbe_getstatesize(struct uvesafb_ktask *task, task 758 drivers/video/fbdev/uvesafb.c uvesafb_reset(task); task 764 drivers/video/fbdev/uvesafb.c task->t.regs.eax = 0x4f04; task 765 drivers/video/fbdev/uvesafb.c task->t.regs.ecx = 0x000f; task 766 drivers/video/fbdev/uvesafb.c task->t.regs.edx = 0x0000; task 767 drivers/video/fbdev/uvesafb.c task->t.flags = 0; task 769 drivers/video/fbdev/uvesafb.c err = uvesafb_exec(task); task 771 drivers/video/fbdev/uvesafb.c if (err || (task->t.regs.eax & 0xffff) != 0x004f) { task 773 drivers/video/fbdev/uvesafb.c task->t.regs.eax, err); task 778 drivers/video/fbdev/uvesafb.c par->vbe_state_size = 64 * (task->t.regs.ebx & 0xffff); task 783 drivers/video/fbdev/uvesafb.c struct uvesafb_ktask *task = NULL; task 787 drivers/video/fbdev/uvesafb.c task = uvesafb_prep(); task 788 drivers/video/fbdev/uvesafb.c if (!task) task 791 drivers/video/fbdev/uvesafb.c err = uvesafb_vbe_getinfo(task, par); task 795 drivers/video/fbdev/uvesafb.c err = uvesafb_vbe_getmodes(task, par); task 809 drivers/video/fbdev/uvesafb.c uvesafb_vbe_getpmi(task, par); task 818 drivers/video/fbdev/uvesafb.c uvesafb_vbe_getmonspecs(task, info); task 819 drivers/video/fbdev/uvesafb.c uvesafb_vbe_getstatesize(task, par); task 821 drivers/video/fbdev/uvesafb.c out: uvesafb_free(task); task 925 drivers/video/fbdev/uvesafb.c struct uvesafb_ktask *task; task 966 drivers/video/fbdev/uvesafb.c task = uvesafb_prep(); task 967 drivers/video/fbdev/uvesafb.c if (!task) task 970 drivers/video/fbdev/uvesafb.c task->t.regs.eax = 0x4f09; task 971 drivers/video/fbdev/uvesafb.c task->t.regs.ebx = 0x0; task 972 drivers/video/fbdev/uvesafb.c task->t.regs.ecx = count; task 973 drivers/video/fbdev/uvesafb.c task->t.regs.edx = start; task 974 drivers/video/fbdev/uvesafb.c task->t.flags = TF_BUF_ESDI; task 975 drivers/video/fbdev/uvesafb.c task->t.buf_len = sizeof(struct uvesafb_pal_entry) * count; task 976 drivers/video/fbdev/uvesafb.c task->buf = entries; task 978 drivers/video/fbdev/uvesafb.c err = uvesafb_exec(task); task 979 drivers/video/fbdev/uvesafb.c if ((task->t.regs.eax & 0xffff) != 0x004f) task 982 drivers/video/fbdev/uvesafb.c uvesafb_free(task); task 1106 drivers/video/fbdev/uvesafb.c struct uvesafb_ktask *task; task 1136 drivers/video/fbdev/uvesafb.c task = uvesafb_prep(); task 1137 drivers/video/fbdev/uvesafb.c if (!task) task 1140 drivers/video/fbdev/uvesafb.c task->t.regs.eax = 0x4f10; task 1143 drivers/video/fbdev/uvesafb.c task->t.regs.ebx = 0x0001; task 1146 drivers/video/fbdev/uvesafb.c task->t.regs.ebx = 0x0101; /* standby */ task 1149 drivers/video/fbdev/uvesafb.c task->t.regs.ebx = 0x0401; /* powerdown */ task 1155 drivers/video/fbdev/uvesafb.c err = uvesafb_exec(task); task 1156 drivers/video/fbdev/uvesafb.c if (err || (task->t.regs.eax & 0xffff) != 0x004f) task 1158 drivers/video/fbdev/uvesafb.c out: uvesafb_free(task); task 1185 drivers/video/fbdev/uvesafb.c struct uvesafb_ktask *task = NULL; task 1195 drivers/video/fbdev/uvesafb.c task = uvesafb_prep(); task 1196 drivers/video/fbdev/uvesafb.c if (!task) task 1200 drivers/video/fbdev/uvesafb.c task->t.regs.eax = 0x0003; task 1201 drivers/video/fbdev/uvesafb.c uvesafb_exec(task); task 1210 drivers/video/fbdev/uvesafb.c uvesafb_free(task); task 1217 
drivers/video/fbdev/uvesafb.c struct uvesafb_ktask *task = NULL; task 1233 drivers/video/fbdev/uvesafb.c task = uvesafb_prep(); task 1234 drivers/video/fbdev/uvesafb.c if (!task) task 1237 drivers/video/fbdev/uvesafb.c task->t.regs.eax = 0x4f02; task 1238 drivers/video/fbdev/uvesafb.c task->t.regs.ebx = mode->mode_id | 0x4000; /* use LFB */ task 1242 drivers/video/fbdev/uvesafb.c task->t.regs.ebx |= 0x0800; /* use CRTC data */ task 1243 drivers/video/fbdev/uvesafb.c task->t.flags = TF_BUF_ESDI; task 1274 drivers/video/fbdev/uvesafb.c task->t.buf_len = sizeof(struct vbe_crtc_ib); task 1275 drivers/video/fbdev/uvesafb.c task->buf = &par->crtc; task 1277 drivers/video/fbdev/uvesafb.c err = uvesafb_exec(task); task 1278 drivers/video/fbdev/uvesafb.c if (err || (task->t.regs.eax & 0xffff) != 0x004f) { task 1285 drivers/video/fbdev/uvesafb.c task->t.regs.eax, err); task 1286 drivers/video/fbdev/uvesafb.c uvesafb_reset(task); task 1293 drivers/video/fbdev/uvesafb.c task->t.regs.eax, err); task 1303 drivers/video/fbdev/uvesafb.c uvesafb_reset(task); task 1304 drivers/video/fbdev/uvesafb.c task->t.regs.eax = 0x4f08; task 1305 drivers/video/fbdev/uvesafb.c task->t.regs.ebx = 0x0800; task 1307 drivers/video/fbdev/uvesafb.c err = uvesafb_exec(task); task 1308 drivers/video/fbdev/uvesafb.c if (err || (task->t.regs.eax & 0xffff) != 0x004f || task 1309 drivers/video/fbdev/uvesafb.c ((task->t.regs.ebx & 0xff00) >> 8) != 8) { task 1322 drivers/video/fbdev/uvesafb.c uvesafb_free(task); task 1924 drivers/video/fbdev/uvesafb.c struct uvesafb_ktask *task; task 1927 drivers/video/fbdev/uvesafb.c task = uvesafb_prep(); task 1928 drivers/video/fbdev/uvesafb.c if (task) { task 1929 drivers/video/fbdev/uvesafb.c task->t.flags = TF_EXIT; task 1930 drivers/video/fbdev/uvesafb.c uvesafb_exec(task); task 1931 drivers/video/fbdev/uvesafb.c uvesafb_free(task); task 1134 drivers/watchdog/watchdog_dev.c sched_setscheduler(watchdog_kworker->task, SCHED_FIFO, &param); task 929 drivers/xen/xenbus/xenbus_xs.c struct task_struct *task; task 938 drivers/xen/xenbus/xenbus_xs.c task = kthread_run(xenwatch_thread, NULL, "xenwatch"); task 939 drivers/xen/xenbus/xenbus_xs.c if (IS_ERR(task)) task 940 drivers/xen/xenbus/xenbus_xs.c return PTR_ERR(task); task 1663 fs/binfmt_elf.c struct task_struct *task; task 1684 fs/binfmt_elf.c static void do_thread_regset_writeback(struct task_struct *task, task 1688 fs/binfmt_elf.c regset->writeback(task, regset, 1); task 1704 fs/binfmt_elf.c unsigned int regset0_size = regset_size(t->task, &view->regsets[0]); task 1712 fs/binfmt_elf.c fill_prstatus(&t->prstatus, t->task, signr); task 1713 fs/binfmt_elf.c (void) view->regsets[0].get(t->task, &view->regsets[0], 0, regset0_size, task 1720 fs/binfmt_elf.c do_thread_regset_writeback(t->task, &view->regsets[0]); task 1729 fs/binfmt_elf.c do_thread_regset_writeback(t->task, regset); task 1731 fs/binfmt_elf.c (!regset->active || regset->active(t->task, regset) > 0)) { task 1733 fs/binfmt_elf.c size_t size = regset_size(t->task, regset); task 1737 fs/binfmt_elf.c ret = regset->get(t->task, regset, task 1816 fs/binfmt_elf.c t->task = ct->task; task 1817 fs/binfmt_elf.c if (ct->task == dump_task || !info->thread) { task 2025 fs/binfmt_elf.c ets->thread = ct->task; task 1618 fs/binfmt_elf_fdpic.c tmp->thread = ct->task; task 913 fs/btrfs/dev-replace.c struct task_struct *task; task 958 fs/btrfs/dev-replace.c task = kthread_run(btrfs_dev_replace_kthread, fs_info, "btrfs-devrepl"); task 959 fs/btrfs/dev-replace.c return PTR_ERR_OR_ZERO(task); task 4657 
fs/btrfs/volumes.c struct task_struct *task; task 4683 fs/btrfs/volumes.c task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid"); task 4684 fs/btrfs/volumes.c if (IS_ERR(task)) { task 4688 fs/btrfs/volumes.c return PTR_ERR(task); task 4696 fs/btrfs/volumes.c struct task_struct *task; task 4699 fs/btrfs/volumes.c task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid"); task 4700 fs/btrfs/volumes.c if (IS_ERR(task)) { task 4704 fs/btrfs/volumes.c return PTR_ERR(task); task 2709 fs/cifs/connect.c struct task_struct *task; task 2746 fs/cifs/connect.c task = xchg(&server->tsk, NULL); task 2747 fs/cifs/connect.c if (task) task 2748 fs/cifs/connect.c send_sig(SIGKILL, task, 1); task 445 fs/coredump.c core_state->dumper.task = tsk; task 468 fs/coredump.c wait_task_inactive(ptr->task, 0); task 479 fs/coredump.c struct task_struct *task; task 491 fs/coredump.c task = curr->task; task 497 fs/coredump.c curr->task = NULL; task 498 fs/coredump.c wake_up_process(task); task 399 fs/ecryptfs/ecryptfs_kernel.h struct task_struct *task; task 55 fs/ecryptfs/messaging.c (*msg_ctx)->task = current; task 241 fs/ecryptfs/messaging.c wake_up_process(msg_ctx->task); task 397 fs/ecryptfs/messaging.c ecryptfs_msg_ctx_arr[i].task = NULL; task 342 fs/f2fs/dir.c F2FS_I(dir)->task = current; task 746 fs/f2fs/dir.c if (current != F2FS_I(dir)->task) { task 748 fs/f2fs/dir.c F2FS_I(dir)->task = NULL; task 687 fs/f2fs/f2fs.h struct task_struct *task; /* lookup and create consistency */ task 400 fs/file.c struct files_struct *get_files_struct(struct task_struct *task) task 404 fs/file.c task_lock(task); task 405 fs/file.c files = task->files; task 408 fs/file.c task_unlock(task); task 337 fs/file_table.c struct task_struct *task = current; task 339 fs/file_table.c if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) { task 341 fs/file_table.c if (!task_work_add(task, &file->f_u.fu_rcuhead, true)) task 371 fs/file_table.c struct task_struct *task = current; task 372 fs/file_table.c BUG_ON(!(task->flags & PF_KTHREAD)); task 216 fs/lockd/clntlock.c struct task_struct *task; task 220 fs/lockd/clntlock.c task = kthread_run(reclaimer, host, "%s-reclaim", host->h_name); task 221 fs/lockd/clntlock.c if (IS_ERR(task)) task 224 fs/lockd/clntlock.c "(%ld)\n", host->h_name, PTR_ERR(task)); task 369 fs/lockd/clntproc.c struct rpc_task *task; task 371 fs/lockd/clntproc.c task = __nlm_async_call(req, proc, msg, tk_ops); task 372 fs/lockd/clntproc.c if (IS_ERR(task)) task 373 fs/lockd/clntproc.c return PTR_ERR(task); task 374 fs/lockd/clntproc.c rpc_put_task(task); task 413 fs/lockd/clntproc.c struct rpc_task *task; task 416 fs/lockd/clntproc.c task = __nlm_async_call(req, proc, &msg, tk_ops); task 417 fs/lockd/clntproc.c if (IS_ERR(task)) task 418 fs/lockd/clntproc.c return PTR_ERR(task); task 419 fs/lockd/clntproc.c err = rpc_wait_for_completion_task(task); task 420 fs/lockd/clntproc.c rpc_put_task(task); task 702 fs/lockd/clntproc.c static void nlmclnt_unlock_prepare(struct rpc_task *task, void *data) task 709 fs/lockd/clntproc.c defer_call = nlmclnt_ops->nlmclnt_unlock_prepare(task, req->a_callback_data); task 712 fs/lockd/clntproc.c rpc_call_start(task); task 715 fs/lockd/clntproc.c static void nlmclnt_unlock_callback(struct rpc_task *task, void *data) task 720 fs/lockd/clntproc.c if (RPC_SIGNALLED(task)) task 723 fs/lockd/clntproc.c if (task->tk_status < 0) { task 724 fs/lockd/clntproc.c dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status); task 725 fs/lockd/clntproc.c switch (task->tk_status) 
{ task 734 fs/lockd/clntproc.c rpc_delay(task, NLMCLNT_GRACE_WAIT); task 744 fs/lockd/clntproc.c rpc_restart_call(task); task 783 fs/lockd/clntproc.c static void nlmclnt_cancel_callback(struct rpc_task *task, void *data) task 788 fs/lockd/clntproc.c if (RPC_SIGNALLED(task)) task 791 fs/lockd/clntproc.c if (task->tk_status < 0) { task 793 fs/lockd/clntproc.c task->tk_status); task 798 fs/lockd/clntproc.c status, task->tk_pid); task 822 fs/lockd/clntproc.c rpc_restart_call(task); task 823 fs/lockd/clntproc.c rpc_delay(task, 30 * HZ); task 267 fs/lockd/svc4proc.c static void nlm4svc_callback_exit(struct rpc_task *task, void *data) task 269 fs/lockd/svc4proc.c dprintk("lockd: %5u callback returned %d\n", task->tk_pid, task 270 fs/lockd/svc4proc.c -task->tk_status); task 887 fs/lockd/svclock.c static void nlmsvc_grant_callback(struct rpc_task *task, void *data) task 910 fs/lockd/svclock.c if (task->tk_status < 0) { task 300 fs/lockd/svcproc.c static void nlmsvc_callback_exit(struct rpc_task *task, void *data) task 302 fs/lockd/svcproc.c dprintk("lockd: %5u callback returned %d\n", task->tk_pid, task 303 fs/lockd/svcproc.c -task->tk_status); task 1175 fs/namespace.c struct task_struct *task = current; task 1176 fs/namespace.c if (likely(!(task->flags & PF_KTHREAD))) { task 1178 fs/namespace.c if (!task_work_add(task, &mnt->mnt_rcu, true)) task 3943 fs/namespace.c static struct ns_common *mntns_get(struct task_struct *task) task 3948 fs/namespace.c task_lock(task); task 3949 fs/namespace.c nsproxy = task->nsproxy; task 3954 fs/namespace.c task_unlock(task); task 234 fs/nfs/blocklayout/blocklayout.c struct rpc_task *task; task 237 fs/nfs/blocklayout/blocklayout.c task = container_of(work, struct rpc_task, u.tk_work); task 238 fs/nfs/blocklayout/blocklayout.c hdr = container_of(task, struct nfs_pgio_header, task); task 247 fs/nfs/blocklayout/blocklayout.c hdr->task.tk_status = hdr->pnfs_error; task 248 fs/nfs/blocklayout/blocklayout.c INIT_WORK(&hdr->task.u.tk_work, bl_read_cleanup); task 249 fs/nfs/blocklayout/blocklayout.c schedule_work(&hdr->task.u.tk_work); task 366 fs/nfs/blocklayout/blocklayout.c struct rpc_task *task = container_of(work, struct rpc_task, u.tk_work); task 368 fs/nfs/blocklayout/blocklayout.c container_of(task, struct nfs_pgio_header, task); task 391 fs/nfs/blocklayout/blocklayout.c hdr->task.tk_status = hdr->pnfs_error; task 393 fs/nfs/blocklayout/blocklayout.c INIT_WORK(&hdr->task.u.tk_work, bl_write_cleanup); task 394 fs/nfs/blocklayout/blocklayout.c schedule_work(&hdr->task.u.tk_work); task 2066 fs/nfs/dir.c struct rpc_task *task; task 2113 fs/nfs/dir.c task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, NULL); task 2114 fs/nfs/dir.c if (IS_ERR(task)) { task 2115 fs/nfs/dir.c error = PTR_ERR(task); task 2119 fs/nfs/dir.c error = rpc_wait_for_completion_task(task); task 2121 fs/nfs/dir.c ((struct nfs_renamedata *)task->tk_calldata)->cancelled = 1; task 2125 fs/nfs/dir.c error = task->tk_status; task 2126 fs/nfs/dir.c rpc_put_task(task); task 677 fs/nfs/direct.c int status = data->task.tk_status; task 90 fs/nfs/filelayout/filelayout.c struct rpc_task *task = &hdr->task; task 95 fs/nfs/filelayout/filelayout.c hdr->task.tk_pid, task 101 fs/nfs/filelayout/filelayout.c task->tk_status = pnfs_write_done_resend_to_mds(hdr); task 107 fs/nfs/filelayout/filelayout.c struct rpc_task *task = &hdr->task; task 112 fs/nfs/filelayout/filelayout.c hdr->task.tk_pid, task 118 fs/nfs/filelayout/filelayout.c task->tk_status = pnfs_read_done_resend_to_mds(hdr); task 122 
fs/nfs/filelayout/filelayout.c static int filelayout_async_handle_error(struct rpc_task *task, task 132 fs/nfs/filelayout/filelayout.c if (task->tk_status >= 0) task 135 fs/nfs/filelayout/filelayout.c switch (task->tk_status) { task 145 fs/nfs/filelayout/filelayout.c "flags 0x%x\n", __func__, task->tk_status, task 147 fs/nfs/filelayout/filelayout.c nfs4_schedule_session_recovery(clp->cl_session, task->tk_status); task 151 fs/nfs/filelayout/filelayout.c rpc_delay(task, FILELAYOUT_POLL_RETRY_MAX); task 164 fs/nfs/filelayout/filelayout.c task->tk_status); task 184 fs/nfs/filelayout/filelayout.c task->tk_status); task 193 fs/nfs/filelayout/filelayout.c task->tk_status); task 196 fs/nfs/filelayout/filelayout.c task->tk_status = 0; task 202 fs/nfs/filelayout/filelayout.c static int filelayout_read_done_cb(struct rpc_task *task, task 207 fs/nfs/filelayout/filelayout.c trace_nfs4_pnfs_read(hdr, task->tk_status); task 208 fs/nfs/filelayout/filelayout.c err = filelayout_async_handle_error(task, hdr->args.context->state, task 214 fs/nfs/filelayout/filelayout.c return task->tk_status; task 216 fs/nfs/filelayout/filelayout.c rpc_restart_call_prepare(task); task 265 fs/nfs/filelayout/filelayout.c static void filelayout_read_prepare(struct rpc_task *task, void *data) task 270 fs/nfs/filelayout/filelayout.c rpc_exit(task, -EIO); task 274 fs/nfs/filelayout/filelayout.c dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid); task 276 fs/nfs/filelayout/filelayout.c rpc_exit(task, 0); task 284 fs/nfs/filelayout/filelayout.c task)) task 288 fs/nfs/filelayout/filelayout.c rpc_exit(task, -EIO); /* lost lock, terminate I/O */ task 291 fs/nfs/filelayout/filelayout.c static void filelayout_read_call_done(struct rpc_task *task, void *data) task 295 fs/nfs/filelayout/filelayout.c dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status); task 298 fs/nfs/filelayout/filelayout.c task->tk_status == 0) { task 299 fs/nfs/filelayout/filelayout.c nfs41_sequence_done(task, &hdr->res.seq_res); task 304 fs/nfs/filelayout/filelayout.c hdr->mds_ops->rpc_call_done(task, data); task 307 fs/nfs/filelayout/filelayout.c static void filelayout_read_count_stats(struct rpc_task *task, void *data) task 311 fs/nfs/filelayout/filelayout.c rpc_count_iostats(task, NFS_SERVER(hdr->inode)->client->cl_metrics); task 314 fs/nfs/filelayout/filelayout.c static int filelayout_write_done_cb(struct rpc_task *task, task 319 fs/nfs/filelayout/filelayout.c trace_nfs4_pnfs_write(hdr, task->tk_status); task 320 fs/nfs/filelayout/filelayout.c err = filelayout_async_handle_error(task, hdr->args.context->state, task 326 fs/nfs/filelayout/filelayout.c return task->tk_status; task 328 fs/nfs/filelayout/filelayout.c rpc_restart_call_prepare(task); task 336 fs/nfs/filelayout/filelayout.c if (task->tk_status >= 0) task 342 fs/nfs/filelayout/filelayout.c static int filelayout_commit_done_cb(struct rpc_task *task, task 347 fs/nfs/filelayout/filelayout.c trace_nfs4_pnfs_commit_ds(data, task->tk_status); task 348 fs/nfs/filelayout/filelayout.c err = filelayout_async_handle_error(task, NULL, data->ds_clp, task 356 fs/nfs/filelayout/filelayout.c rpc_restart_call_prepare(task); task 365 fs/nfs/filelayout/filelayout.c static void filelayout_write_prepare(struct rpc_task *task, void *data) task 370 fs/nfs/filelayout/filelayout.c rpc_exit(task, -EIO); task 374 fs/nfs/filelayout/filelayout.c dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid); task 376 fs/nfs/filelayout/filelayout.c rpc_exit(task, 0); task 382 fs/nfs/filelayout/filelayout.c 
task)) task 386 fs/nfs/filelayout/filelayout.c rpc_exit(task, -EIO); /* lost lock, terminate I/O */ task 389 fs/nfs/filelayout/filelayout.c static void filelayout_write_call_done(struct rpc_task *task, void *data) task 394 fs/nfs/filelayout/filelayout.c task->tk_status == 0) { task 395 fs/nfs/filelayout/filelayout.c nfs41_sequence_done(task, &hdr->res.seq_res); task 400 fs/nfs/filelayout/filelayout.c hdr->mds_ops->rpc_call_done(task, data); task 403 fs/nfs/filelayout/filelayout.c static void filelayout_write_count_stats(struct rpc_task *task, void *data) task 407 fs/nfs/filelayout/filelayout.c rpc_count_iostats(task, NFS_SERVER(hdr->inode)->client->cl_metrics); task 410 fs/nfs/filelayout/filelayout.c static void filelayout_commit_prepare(struct rpc_task *task, void *data) task 417 fs/nfs/filelayout/filelayout.c task); task 420 fs/nfs/filelayout/filelayout.c static void filelayout_commit_count_stats(struct rpc_task *task, void *data) task 424 fs/nfs/filelayout/filelayout.c rpc_count_iostats(task, NFS_SERVER(cdata->inode)->client->cl_metrics); task 35 fs/nfs/flexfilelayout/flexfilelayout.c static void ff_layout_read_record_layoutstats_done(struct rpc_task *task, task 702 fs/nfs/flexfilelayout/flexfilelayout.c nfs4_ff_layout_stat_io_end_read(struct rpc_task *task, task 710 fs/nfs/flexfilelayout/flexfilelayout.c ktime_get(), task->tk_start); task 733 fs/nfs/flexfilelayout/flexfilelayout.c nfs4_ff_layout_stat_io_end_write(struct rpc_task *task, task 744 fs/nfs/flexfilelayout/flexfilelayout.c requested, completed, ktime_get(), task->tk_start); task 1071 fs/nfs/flexfilelayout/flexfilelayout.c struct rpc_task *task = &hdr->task; task 1078 fs/nfs/flexfilelayout/flexfilelayout.c hdr->task.tk_pid, task 1091 fs/nfs/flexfilelayout/flexfilelayout.c hdr->task.tk_pid, task 1101 fs/nfs/flexfilelayout/flexfilelayout.c task->tk_status = pnfs_write_done_resend_to_mds(hdr); task 1107 fs/nfs/flexfilelayout/flexfilelayout.c struct rpc_task *task = &hdr->task; task 1114 fs/nfs/flexfilelayout/flexfilelayout.c hdr->task.tk_pid, task 1124 fs/nfs/flexfilelayout/flexfilelayout.c task->tk_status = pnfs_read_done_resend_to_mds(hdr); task 1128 fs/nfs/flexfilelayout/flexfilelayout.c static int ff_layout_async_handle_error_v4(struct rpc_task *task, task 1139 fs/nfs/flexfilelayout/flexfilelayout.c switch (task->tk_status) { task 1148 fs/nfs/flexfilelayout/flexfilelayout.c "flags 0x%x\n", __func__, task->tk_status, task 1150 fs/nfs/flexfilelayout/flexfilelayout.c nfs4_schedule_session_recovery(clp->cl_session, task->tk_status); task 1154 fs/nfs/flexfilelayout/flexfilelayout.c rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX); task 1166 fs/nfs/flexfilelayout/flexfilelayout.c task->tk_status); task 1186 fs/nfs/flexfilelayout/flexfilelayout.c task->tk_status); task 1196 fs/nfs/flexfilelayout/flexfilelayout.c task->tk_status); task 1199 fs/nfs/flexfilelayout/flexfilelayout.c task->tk_status = 0; task 1204 fs/nfs/flexfilelayout/flexfilelayout.c static int ff_layout_async_handle_error_v3(struct rpc_task *task, task 1210 fs/nfs/flexfilelayout/flexfilelayout.c switch (task->tk_status) { task 1224 fs/nfs/flexfilelayout/flexfilelayout.c task->tk_status); task 1231 fs/nfs/flexfilelayout/flexfilelayout.c task->tk_status = 0; task 1232 fs/nfs/flexfilelayout/flexfilelayout.c rpc_restart_call_prepare(task); task 1233 fs/nfs/flexfilelayout/flexfilelayout.c rpc_delay(task, NFS_JUKEBOX_RETRY_TIME); task 1237 fs/nfs/flexfilelayout/flexfilelayout.c static int ff_layout_async_handle_error(struct rpc_task *task, task 1245 
fs/nfs/flexfilelayout/flexfilelayout.c if (task->tk_status >= 0) { task 1256 fs/nfs/flexfilelayout/flexfilelayout.c return ff_layout_async_handle_error_v3(task, lseg, idx); task 1258 fs/nfs/flexfilelayout/flexfilelayout.c return ff_layout_async_handle_error_v4(task, state, clp, task 1318 fs/nfs/flexfilelayout/flexfilelayout.c static int ff_layout_read_done_cb(struct rpc_task *task, task 1324 fs/nfs/flexfilelayout/flexfilelayout.c trace_nfs4_pnfs_read(hdr, task->tk_status); task 1325 fs/nfs/flexfilelayout/flexfilelayout.c if (task->tk_status < 0) task 1329 fs/nfs/flexfilelayout/flexfilelayout.c task->tk_status); task 1330 fs/nfs/flexfilelayout/flexfilelayout.c err = ff_layout_async_handle_error(task, hdr->args.context->state, task 1343 fs/nfs/flexfilelayout/flexfilelayout.c return task->tk_status; task 1346 fs/nfs/flexfilelayout/flexfilelayout.c return task->tk_status; task 1353 fs/nfs/flexfilelayout/flexfilelayout.c ff_layout_read_record_layoutstats_done(task, hdr); task 1357 fs/nfs/flexfilelayout/flexfilelayout.c rpc_restart_call_prepare(task); task 1389 fs/nfs/flexfilelayout/flexfilelayout.c static void ff_layout_read_record_layoutstats_start(struct rpc_task *task, task 1397 fs/nfs/flexfilelayout/flexfilelayout.c task->tk_start); task 1400 fs/nfs/flexfilelayout/flexfilelayout.c static void ff_layout_read_record_layoutstats_done(struct rpc_task *task, task 1405 fs/nfs/flexfilelayout/flexfilelayout.c nfs4_ff_layout_stat_io_end_read(task, task 1412 fs/nfs/flexfilelayout/flexfilelayout.c static int ff_layout_read_prepare_common(struct rpc_task *task, task 1416 fs/nfs/flexfilelayout/flexfilelayout.c rpc_exit(task, -EIO); task 1420 fs/nfs/flexfilelayout/flexfilelayout.c ff_layout_read_record_layoutstats_start(task, hdr); task 1429 fs/nfs/flexfilelayout/flexfilelayout.c static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data) task 1433 fs/nfs/flexfilelayout/flexfilelayout.c if (ff_layout_read_prepare_common(task, hdr)) task 1436 fs/nfs/flexfilelayout/flexfilelayout.c rpc_call_start(task); task 1439 fs/nfs/flexfilelayout/flexfilelayout.c static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data) task 1446 fs/nfs/flexfilelayout/flexfilelayout.c task)) task 1449 fs/nfs/flexfilelayout/flexfilelayout.c ff_layout_read_prepare_common(task, hdr); task 1452 fs/nfs/flexfilelayout/flexfilelayout.c static void ff_layout_read_call_done(struct rpc_task *task, void *data) task 1456 fs/nfs/flexfilelayout/flexfilelayout.c dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status); task 1459 fs/nfs/flexfilelayout/flexfilelayout.c task->tk_status == 0) { task 1460 fs/nfs/flexfilelayout/flexfilelayout.c nfs4_sequence_done(task, &hdr->res.seq_res); task 1465 fs/nfs/flexfilelayout/flexfilelayout.c hdr->mds_ops->rpc_call_done(task, hdr); task 1468 fs/nfs/flexfilelayout/flexfilelayout.c static void ff_layout_read_count_stats(struct rpc_task *task, void *data) task 1472 fs/nfs/flexfilelayout/flexfilelayout.c ff_layout_read_record_layoutstats_done(task, hdr); task 1473 fs/nfs/flexfilelayout/flexfilelayout.c rpc_count_iostats_metrics(task, task 1481 fs/nfs/flexfilelayout/flexfilelayout.c ff_layout_read_record_layoutstats_done(&hdr->task, hdr); task 1491 fs/nfs/flexfilelayout/flexfilelayout.c static int ff_layout_write_done_cb(struct rpc_task *task, task 1497 fs/nfs/flexfilelayout/flexfilelayout.c trace_nfs4_pnfs_write(hdr, task->tk_status); task 1498 fs/nfs/flexfilelayout/flexfilelayout.c if (task->tk_status < 0) task 1502 fs/nfs/flexfilelayout/flexfilelayout.c task->tk_status); task 
1503 fs/nfs/flexfilelayout/flexfilelayout.c err = ff_layout_async_handle_error(task, hdr->args.context->state, task 1512 fs/nfs/flexfilelayout/flexfilelayout.c return task->tk_status; task 1515 fs/nfs/flexfilelayout/flexfilelayout.c return task->tk_status; task 1529 fs/nfs/flexfilelayout/flexfilelayout.c if (task->tk_status >= 0) task 1535 fs/nfs/flexfilelayout/flexfilelayout.c static int ff_layout_commit_done_cb(struct rpc_task *task, task 1540 fs/nfs/flexfilelayout/flexfilelayout.c trace_nfs4_pnfs_commit_ds(data, task->tk_status); task 1541 fs/nfs/flexfilelayout/flexfilelayout.c if (task->tk_status < 0) task 1545 fs/nfs/flexfilelayout/flexfilelayout.c task->tk_status); task 1546 fs/nfs/flexfilelayout/flexfilelayout.c err = ff_layout_async_handle_error(task, NULL, data->ds_clp, task 1557 fs/nfs/flexfilelayout/flexfilelayout.c rpc_restart_call_prepare(task); task 1566 fs/nfs/flexfilelayout/flexfilelayout.c static void ff_layout_write_record_layoutstats_start(struct rpc_task *task, task 1574 fs/nfs/flexfilelayout/flexfilelayout.c task->tk_start); task 1577 fs/nfs/flexfilelayout/flexfilelayout.c static void ff_layout_write_record_layoutstats_done(struct rpc_task *task, task 1582 fs/nfs/flexfilelayout/flexfilelayout.c nfs4_ff_layout_stat_io_end_write(task, task 1589 fs/nfs/flexfilelayout/flexfilelayout.c static int ff_layout_write_prepare_common(struct rpc_task *task, task 1593 fs/nfs/flexfilelayout/flexfilelayout.c rpc_exit(task, -EIO); task 1597 fs/nfs/flexfilelayout/flexfilelayout.c ff_layout_write_record_layoutstats_start(task, hdr); task 1601 fs/nfs/flexfilelayout/flexfilelayout.c static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data) task 1605 fs/nfs/flexfilelayout/flexfilelayout.c if (ff_layout_write_prepare_common(task, hdr)) task 1608 fs/nfs/flexfilelayout/flexfilelayout.c rpc_call_start(task); task 1611 fs/nfs/flexfilelayout/flexfilelayout.c static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data) task 1618 fs/nfs/flexfilelayout/flexfilelayout.c task)) task 1621 fs/nfs/flexfilelayout/flexfilelayout.c ff_layout_write_prepare_common(task, hdr); task 1624 fs/nfs/flexfilelayout/flexfilelayout.c static void ff_layout_write_call_done(struct rpc_task *task, void *data) task 1629 fs/nfs/flexfilelayout/flexfilelayout.c task->tk_status == 0) { task 1630 fs/nfs/flexfilelayout/flexfilelayout.c nfs4_sequence_done(task, &hdr->res.seq_res); task 1635 fs/nfs/flexfilelayout/flexfilelayout.c hdr->mds_ops->rpc_call_done(task, hdr); task 1638 fs/nfs/flexfilelayout/flexfilelayout.c static void ff_layout_write_count_stats(struct rpc_task *task, void *data) task 1642 fs/nfs/flexfilelayout/flexfilelayout.c ff_layout_write_record_layoutstats_done(task, hdr); task 1643 fs/nfs/flexfilelayout/flexfilelayout.c rpc_count_iostats_metrics(task, task 1651 fs/nfs/flexfilelayout/flexfilelayout.c ff_layout_write_record_layoutstats_done(&hdr->task, hdr); task 1660 fs/nfs/flexfilelayout/flexfilelayout.c static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task, task 1667 fs/nfs/flexfilelayout/flexfilelayout.c 0, task->tk_start); task 1670 fs/nfs/flexfilelayout/flexfilelayout.c static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task, task 1679 fs/nfs/flexfilelayout/flexfilelayout.c if (task->tk_status == 0) { task 1683 fs/nfs/flexfilelayout/flexfilelayout.c nfs4_ff_layout_stat_io_end_write(task, task 1689 fs/nfs/flexfilelayout/flexfilelayout.c static void ff_layout_commit_prepare_common(struct rpc_task *task, task 1692 
fs/nfs/flexfilelayout/flexfilelayout.c ff_layout_commit_record_layoutstats_start(task, cdata); task 1695 fs/nfs/flexfilelayout/flexfilelayout.c static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data) task 1697 fs/nfs/flexfilelayout/flexfilelayout.c ff_layout_commit_prepare_common(task, data); task 1698 fs/nfs/flexfilelayout/flexfilelayout.c rpc_call_start(task); task 1701 fs/nfs/flexfilelayout/flexfilelayout.c static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data) task 1708 fs/nfs/flexfilelayout/flexfilelayout.c task)) task 1710 fs/nfs/flexfilelayout/flexfilelayout.c ff_layout_commit_prepare_common(task, data); task 1713 fs/nfs/flexfilelayout/flexfilelayout.c static void ff_layout_commit_done(struct rpc_task *task, void *data) task 1715 fs/nfs/flexfilelayout/flexfilelayout.c pnfs_generic_write_commit_done(task, data); task 1718 fs/nfs/flexfilelayout/flexfilelayout.c static void ff_layout_commit_count_stats(struct rpc_task *task, void *data) task 1722 fs/nfs/flexfilelayout/flexfilelayout.c ff_layout_commit_record_layoutstats_done(task, cdata); task 1723 fs/nfs/flexfilelayout/flexfilelayout.c rpc_count_iostats_metrics(task, task 1731 fs/nfs/flexfilelayout/flexfilelayout.c ff_layout_commit_record_layoutstats_done(&cdata->task, cdata); task 453 fs/nfs/internal.h extern void nfs_read_prepare(struct rpc_task *task, void *calldata); task 471 fs/nfs/internal.h extern void nfs_write_prepare(struct rpc_task *task, void *calldata); task 472 fs/nfs/internal.h extern void nfs_commit_prepare(struct rpc_task *task, void *calldata); task 48 fs/nfs/nfs3proc.c nfs3_async_handle_jukebox(struct rpc_task *task, struct inode *inode) task 50 fs/nfs/nfs3proc.c if (task->tk_status != -EJUKEBOX) task 52 fs/nfs/nfs3proc.c if (task->tk_status == -EJUKEBOX) task 54 fs/nfs/nfs3proc.c task->tk_status = 0; task 55 fs/nfs/nfs3proc.c rpc_restart_call(task); task 56 fs/nfs/nfs3proc.c rpc_delay(task, NFS_JUKEBOX_RETRY_TIME); task 434 fs/nfs/nfs3proc.c static void nfs3_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) task 436 fs/nfs/nfs3proc.c rpc_call_start(task); task 440 fs/nfs/nfs3proc.c nfs3_proc_unlink_done(struct rpc_task *task, struct inode *dir) task 443 fs/nfs/nfs3proc.c if (nfs3_async_handle_jukebox(task, dir)) task 445 fs/nfs/nfs3proc.c res = task->tk_msg.rpc_resp; task 458 fs/nfs/nfs3proc.c static void nfs3_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) task 460 fs/nfs/nfs3proc.c rpc_call_start(task); task 464 fs/nfs/nfs3proc.c nfs3_proc_rename_done(struct rpc_task *task, struct inode *old_dir, task 469 fs/nfs/nfs3proc.c if (nfs3_async_handle_jukebox(task, old_dir)) task 471 fs/nfs/nfs3proc.c res = task->tk_msg.rpc_resp; task 813 fs/nfs/nfs3proc.c static int nfs3_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) task 819 fs/nfs/nfs3proc.c return hdr->pgio_done_cb(task, hdr); task 821 fs/nfs/nfs3proc.c if (nfs3_async_handle_jukebox(task, inode)) task 824 fs/nfs/nfs3proc.c if (task->tk_status >= 0 && !server->read_hdrsize) task 839 fs/nfs/nfs3proc.c static int nfs3_proc_pgio_rpc_prepare(struct rpc_task *task, task 842 fs/nfs/nfs3proc.c rpc_call_start(task); task 846 fs/nfs/nfs3proc.c static int nfs3_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) task 851 fs/nfs/nfs3proc.c return hdr->pgio_done_cb(task, hdr); task 853 fs/nfs/nfs3proc.c if (nfs3_async_handle_jukebox(task, inode)) task 855 fs/nfs/nfs3proc.c if (task->tk_status >= 0) task 867 fs/nfs/nfs3proc.c static void 
nfs3_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) task 869 fs/nfs/nfs3proc.c rpc_call_start(task); task 872 fs/nfs/nfs3proc.c static int nfs3_commit_done(struct rpc_task *task, struct nfs_commit_data *data) task 875 fs/nfs/nfs3proc.c return data->commit_done_cb(task, data); task 877 fs/nfs/nfs3proc.c if (nfs3_async_handle_jukebox(task, data->inode)) task 898 fs/nfs/nfs3proc.c static bool nfs3_nlm_unlock_prepare(struct rpc_task *task, void *data) task 902 fs/nfs/nfs3proc.c return nfs_async_iocounter_wait(task, l_ctx); task 385 fs/nfs/nfs42proc.c static void nfs42_offload_cancel_prepare(struct rpc_task *task, void *calldata) task 391 fs/nfs/nfs42proc.c &data->res.osr_seq_res, task); task 394 fs/nfs/nfs42proc.c static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata) task 398 fs/nfs/nfs42proc.c nfs41_sequence_done(task, &data->res.osr_seq_res); task 399 fs/nfs/nfs42proc.c if (task->tk_status && task 400 fs/nfs/nfs42proc.c nfs4_async_handle_error(task, data->seq_server, NULL, task 402 fs/nfs/nfs42proc.c rpc_restart_call_prepare(task); task 422 fs/nfs/nfs42proc.c struct rpc_task *task; task 452 fs/nfs/nfs42proc.c task = rpc_run_task(&task_setup_data); task 453 fs/nfs/nfs42proc.c if (IS_ERR(task)) task 454 fs/nfs/nfs42proc.c return PTR_ERR(task); task 455 fs/nfs/nfs42proc.c status = rpc_wait_for_completion_task(task); task 458 fs/nfs/nfs42proc.c rpc_put_task(task); task 535 fs/nfs/nfs42proc.c nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata) task 546 fs/nfs/nfs42proc.c rpc_exit(task, 0); task 552 fs/nfs/nfs42proc.c &data->res.seq_res, task); task 556 fs/nfs/nfs42proc.c nfs42_layoutstat_done(struct rpc_task *task, void *calldata) task 562 fs/nfs/nfs42proc.c if (!nfs4_sequence_done(task, &data->res.seq_res)) task 565 fs/nfs/nfs42proc.c switch (task->tk_status) { task 604 fs/nfs/nfs42proc.c rpc_delay(task, HZ); task 605 fs/nfs/nfs42proc.c rpc_restart_call_prepare(task); task 657 fs/nfs/nfs42proc.c struct rpc_task *task; task 665 fs/nfs/nfs42proc.c task = rpc_run_task(&task_setup); task 666 fs/nfs/nfs42proc.c if (IS_ERR(task)) task 667 fs/nfs/nfs42proc.c return PTR_ERR(task); task 668 fs/nfs/nfs42proc.c rpc_put_task(task); task 701 fs/nfs/nfs42proc.c nfs42_layouterror_prepare(struct rpc_task *task, void *calldata) task 712 fs/nfs/nfs42proc.c rpc_exit(task, 0); task 720 fs/nfs/nfs42proc.c &data->res.seq_res, task); task 724 fs/nfs/nfs42proc.c nfs42_layouterror_done(struct rpc_task *task, void *calldata) task 730 fs/nfs/nfs42proc.c if (!nfs4_sequence_done(task, &data->res.seq_res)) task 733 fs/nfs/nfs42proc.c switch (task->tk_status) { task 770 fs/nfs/nfs42proc.c rpc_delay(task, HZ); task 771 fs/nfs/nfs42proc.c rpc_restart_call_prepare(task); task 800 fs/nfs/nfs42proc.c struct rpc_task *task; task 828 fs/nfs/nfs42proc.c task = rpc_run_task(&task_setup); task 829 fs/nfs/nfs42proc.c if (IS_ERR(task)) task 830 fs/nfs/nfs42proc.c return PTR_ERR(task); task 831 fs/nfs/nfs42proc.c rpc_put_task(task); task 91 fs/nfs/nfs4_fs.h struct rpc_task *task; task 284 fs/nfs/nfs4_fs.h extern int nfs4_async_handle_error(struct rpc_task *task, task 496 fs/nfs/nfs4_fs.h extern int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task); task 504 fs/nfs/nfs4_fs.h struct rpc_task *task); task 505 fs/nfs/nfs4_fs.h extern int nfs4_sequence_done(struct rpc_task *task, task 597 fs/nfs/nfs4proc.c nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server, task 605 fs/nfs/nfs4proc.c rpc_delay(task, 
nfs4_update_delay(&exception->timeout)); task 609 fs/nfs/nfs4proc.c rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); task 611 fs/nfs/nfs4proc.c rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); task 625 fs/nfs/nfs4proc.c rpc_task_release_transport(task); task 631 fs/nfs/nfs4proc.c nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server, task 638 fs/nfs/nfs4proc.c if (task->tk_status >= 0) task 642 fs/nfs/nfs4proc.c task->tk_status = nfs4_async_handle_exception(task, server, task 643 fs/nfs/nfs4proc.c task->tk_status, task 709 fs/nfs/nfs4proc.c static int nfs40_sequence_done(struct rpc_task *task, task 777 fs/nfs/nfs4proc.c static int nfs41_sequence_process(struct rpc_task *task, task 789 fs/nfs/nfs4proc.c if (!RPC_WAS_SENT(task) || slot->seq_done) task 797 fs/nfs/nfs4proc.c if (task->tk_status == -NFS4ERR_DEADSESSION) task 894 fs/nfs/nfs4proc.c if (rpc_restart_call_prepare(task)) { task 896 fs/nfs/nfs4proc.c task->tk_status = 0; task 901 fs/nfs/nfs4proc.c if (!rpc_restart_call(task)) task 903 fs/nfs/nfs4proc.c rpc_delay(task, NFS4_POLL_RETRY_MAX); task 907 fs/nfs/nfs4proc.c int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) task 909 fs/nfs/nfs4proc.c if (!nfs41_sequence_process(task, res)) task 918 fs/nfs/nfs4proc.c static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res) task 923 fs/nfs/nfs4proc.c return nfs41_sequence_process(task, res); task 924 fs/nfs/nfs4proc.c return nfs40_sequence_done(task, res); task 937 fs/nfs/nfs4proc.c int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) task 942 fs/nfs/nfs4proc.c return nfs40_sequence_done(task, res); task 943 fs/nfs/nfs4proc.c return nfs41_sequence_done(task, res); task 947 fs/nfs/nfs4proc.c static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata) task 954 fs/nfs/nfs4proc.c data->seq_args, data->seq_res, task); task 957 fs/nfs/nfs4proc.c static void nfs41_call_sync_done(struct rpc_task *task, void *calldata) task 961 fs/nfs/nfs4proc.c nfs41_sequence_done(task, data->seq_res); task 971 fs/nfs/nfs4proc.c static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res) task 973 fs/nfs/nfs4proc.c return nfs40_sequence_done(task, res); task 982 fs/nfs/nfs4proc.c int nfs4_sequence_done(struct rpc_task *task, task 985 fs/nfs/nfs4proc.c return nfs40_sequence_done(task, res); task 1014 fs/nfs/nfs4proc.c struct rpc_task *task) task 1045 fs/nfs/nfs4proc.c rpc_call_start(task); task 1050 fs/nfs/nfs4proc.c rpc_sleep_on_priority_timeout(&tbl->slot_tbl_waitq, task, task 1053 fs/nfs/nfs4proc.c rpc_sleep_on_timeout(&tbl->slot_tbl_waitq, task, task 1059 fs/nfs/nfs4proc.c rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task, task 1062 fs/nfs/nfs4proc.c rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); task 1068 fs/nfs/nfs4proc.c static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata) task 1072 fs/nfs/nfs4proc.c data->seq_args, data->seq_res, task); task 1075 fs/nfs/nfs4proc.c static void nfs40_call_sync_done(struct rpc_task *task, void *calldata) task 1078 fs/nfs/nfs4proc.c nfs4_sequence_done(task, data->seq_res); task 1089 fs/nfs/nfs4proc.c struct rpc_task *task; task 1091 fs/nfs/nfs4proc.c task = rpc_run_task(task_setup); task 1092 fs/nfs/nfs4proc.c if (IS_ERR(task)) task 1093 fs/nfs/nfs4proc.c return PTR_ERR(task); task 1095 fs/nfs/nfs4proc.c ret = task->tk_status; task 1096 fs/nfs/nfs4proc.c rpc_put_task(task); task 2271 fs/nfs/nfs4proc.c static void nfs4_open_confirm_prepare(struct rpc_task *task, void 
*calldata) task 2276 fs/nfs/nfs4proc.c &data->c_arg.seq_args, &data->c_res.seq_res, task); task 2279 fs/nfs/nfs4proc.c static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata) task 2283 fs/nfs/nfs4proc.c nfs40_sequence_done(task, &data->c_res.seq_res); task 2285 fs/nfs/nfs4proc.c data->rpc_status = task->tk_status; task 2324 fs/nfs/nfs4proc.c struct rpc_task *task; task 2347 fs/nfs/nfs4proc.c task = rpc_run_task(&task_setup_data); task 2348 fs/nfs/nfs4proc.c if (IS_ERR(task)) task 2349 fs/nfs/nfs4proc.c return PTR_ERR(task); task 2350 fs/nfs/nfs4proc.c status = rpc_wait_for_completion_task(task); task 2356 fs/nfs/nfs4proc.c rpc_put_task(task); task 2360 fs/nfs/nfs4proc.c static void nfs4_open_prepare(struct rpc_task *task, void *calldata) task 2367 fs/nfs/nfs4proc.c if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) task 2396 fs/nfs/nfs4proc.c task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; task 2402 fs/nfs/nfs4proc.c task) != 0) task 2419 fs/nfs/nfs4proc.c task->tk_action = NULL; task 2421 fs/nfs/nfs4proc.c nfs4_sequence_done(task, &data->o_res.seq_res); task 2424 fs/nfs/nfs4proc.c static void nfs4_open_done(struct rpc_task *task, void *calldata) task 2428 fs/nfs/nfs4proc.c data->rpc_status = task->tk_status; task 2430 fs/nfs/nfs4proc.c if (!nfs4_sequence_process(task, &data->o_res.seq_res)) task 2433 fs/nfs/nfs4proc.c if (task->tk_status == 0) { task 2489 fs/nfs/nfs4proc.c struct rpc_task *task; task 2519 fs/nfs/nfs4proc.c task = rpc_run_task(&task_setup_data); task 2520 fs/nfs/nfs4proc.c if (IS_ERR(task)) task 2521 fs/nfs/nfs4proc.c return PTR_ERR(task); task 2522 fs/nfs/nfs4proc.c status = rpc_wait_for_completion_task(task); task 2528 fs/nfs/nfs4proc.c rpc_put_task(task); task 3333 fs/nfs/nfs4proc.c nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task) task 3338 fs/nfs/nfs4proc.c return pnfs_wait_on_layoutreturn(inode, task); task 3442 fs/nfs/nfs4proc.c static void nfs4_close_done(struct rpc_task *task, void *data) task 3455 fs/nfs/nfs4proc.c if (!nfs4_sequence_done(task, &calldata->res.seq_res)) task 3457 fs/nfs/nfs4proc.c trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status); task 3460 fs/nfs/nfs4proc.c if (pnfs_roc_done(task, calldata->inode, task 3469 fs/nfs/nfs4proc.c switch (task->tk_status) { task 3493 fs/nfs/nfs4proc.c task->tk_msg.rpc_cred); task 3500 fs/nfs/nfs4proc.c task->tk_status = nfs4_async_handle_exception(task, task 3501 fs/nfs/nfs4proc.c server, task->tk_status, &exception); task 3508 fs/nfs/nfs4proc.c task->tk_status = 0; task 3511 fs/nfs/nfs4proc.c dprintk("%s: done, ret = %d!\n", __func__, task->tk_status); task 3514 fs/nfs/nfs4proc.c task->tk_status = 0; task 3515 fs/nfs/nfs4proc.c rpc_restart_call_prepare(task); task 3519 fs/nfs/nfs4proc.c static void nfs4_close_prepare(struct rpc_task *task, void *data) task 3529 fs/nfs/nfs4proc.c if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) task 3532 fs/nfs/nfs4proc.c task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; task 3563 fs/nfs/nfs4proc.c if (!calldata->lr.roc && nfs4_wait_on_layoutreturn(inode, task)) { task 3575 fs/nfs/nfs4proc.c task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; task 3597 fs/nfs/nfs4proc.c task) != 0) task 3602 fs/nfs/nfs4proc.c task->tk_action = NULL; task 3604 fs/nfs/nfs4proc.c nfs4_sequence_done(task, &calldata->res.seq_res); task 3630 fs/nfs/nfs4proc.c struct rpc_task *task; task 3679 fs/nfs/nfs4proc.c task = rpc_run_task(&task_setup_data); task 3680 fs/nfs/nfs4proc.c if 
(IS_ERR(task)) task 3681 fs/nfs/nfs4proc.c return PTR_ERR(task); task 3684 fs/nfs/nfs4proc.c status = rpc_wait_for_completion_task(task); task 3685 fs/nfs/nfs4proc.c rpc_put_task(task); task 4566 fs/nfs/nfs4proc.c static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) task 4571 fs/nfs/nfs4proc.c task); task 4574 fs/nfs/nfs4proc.c static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) task 4576 fs/nfs/nfs4proc.c struct nfs_unlinkdata *data = task->tk_calldata; task 4579 fs/nfs/nfs4proc.c if (!nfs4_sequence_done(task, &res->seq_res)) task 4581 fs/nfs/nfs4proc.c if (nfs4_async_handle_error(task, res->server, NULL, task 4584 fs/nfs/nfs4proc.c if (task->tk_status == 0) task 4608 fs/nfs/nfs4proc.c static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) task 4613 fs/nfs/nfs4proc.c task); task 4616 fs/nfs/nfs4proc.c static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, task 4619 fs/nfs/nfs4proc.c struct nfs_renamedata *data = task->tk_calldata; task 4622 fs/nfs/nfs4proc.c if (!nfs4_sequence_done(task, &res->seq_res)) task 4624 fs/nfs/nfs4proc.c if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN) task 4627 fs/nfs/nfs4proc.c if (task->tk_status == 0) { task 5149 fs/nfs/nfs4proc.c static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr) task 5153 fs/nfs/nfs4proc.c trace_nfs4_read(hdr, task->tk_status); task 5154 fs/nfs/nfs4proc.c if (task->tk_status < 0) { task 5160 fs/nfs/nfs4proc.c task->tk_status = nfs4_async_handle_exception(task, task 5161 fs/nfs/nfs4proc.c server, task->tk_status, &exception); task 5163 fs/nfs/nfs4proc.c rpc_restart_call_prepare(task); task 5168 fs/nfs/nfs4proc.c if (task->tk_status > 0) task 5173 fs/nfs/nfs4proc.c static bool nfs4_read_stateid_changed(struct rpc_task *task, task 5177 fs/nfs/nfs4proc.c if (!nfs4_error_stateid_expired(task->tk_status) || task 5183 fs/nfs/nfs4proc.c rpc_restart_call_prepare(task); task 5187 fs/nfs/nfs4proc.c static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) task 5192 fs/nfs/nfs4proc.c if (!nfs4_sequence_done(task, &hdr->res.seq_res)) task 5194 fs/nfs/nfs4proc.c if (nfs4_read_stateid_changed(task, &hdr->args)) task 5196 fs/nfs/nfs4proc.c if (task->tk_status > 0) task 5198 fs/nfs/nfs4proc.c return hdr->pgio_done_cb ? 
hdr->pgio_done_cb(task, hdr) : task 5199 fs/nfs/nfs4proc.c nfs4_read_done_cb(task, hdr); task 5212 fs/nfs/nfs4proc.c static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task, task 5218 fs/nfs/nfs4proc.c task)) task 5229 fs/nfs/nfs4proc.c static int nfs4_write_done_cb(struct rpc_task *task, task 5234 fs/nfs/nfs4proc.c trace_nfs4_write(hdr, task->tk_status); task 5235 fs/nfs/nfs4proc.c if (task->tk_status < 0) { task 5241 fs/nfs/nfs4proc.c task->tk_status = nfs4_async_handle_exception(task, task 5242 fs/nfs/nfs4proc.c NFS_SERVER(inode), task->tk_status, task 5245 fs/nfs/nfs4proc.c rpc_restart_call_prepare(task); task 5249 fs/nfs/nfs4proc.c if (task->tk_status >= 0) { task 5256 fs/nfs/nfs4proc.c static bool nfs4_write_stateid_changed(struct rpc_task *task, task 5260 fs/nfs/nfs4proc.c if (!nfs4_error_stateid_expired(task->tk_status) || task 5266 fs/nfs/nfs4proc.c rpc_restart_call_prepare(task); task 5270 fs/nfs/nfs4proc.c static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) task 5272 fs/nfs/nfs4proc.c if (!nfs4_sequence_done(task, &hdr->res.seq_res)) task 5274 fs/nfs/nfs4proc.c if (nfs4_write_stateid_changed(task, &hdr->args)) task 5276 fs/nfs/nfs4proc.c return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) : task 5277 fs/nfs/nfs4proc.c nfs4_write_done_cb(task, hdr); task 5314 fs/nfs/nfs4proc.c static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) task 5319 fs/nfs/nfs4proc.c task); task 5322 fs/nfs/nfs4proc.c static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data) task 5326 fs/nfs/nfs4proc.c trace_nfs4_commit(data, task->tk_status); task 5327 fs/nfs/nfs4proc.c if (nfs4_async_handle_error(task, NFS_SERVER(inode), task 5329 fs/nfs/nfs4proc.c rpc_restart_call_prepare(task); task 5335 fs/nfs/nfs4proc.c static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data) task 5337 fs/nfs/nfs4proc.c if (!nfs4_sequence_done(task, &data->res.seq_res)) task 5339 fs/nfs/nfs4proc.c return data->commit_done_cb(task, data); task 5409 fs/nfs/nfs4proc.c static void nfs4_renew_done(struct rpc_task *task, void *calldata) task 5415 fs/nfs/nfs4proc.c trace_nfs4_renew_async(clp, task->tk_status); task 5416 fs/nfs/nfs4proc.c switch (task->tk_status) { task 5426 fs/nfs/nfs4proc.c if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { task 6058 fs/nfs/nfs4proc.c static void nfs4_setclientid_done(struct rpc_task *task, void *calldata) task 6062 fs/nfs/nfs4proc.c if (task->tk_status == 0) task 6063 fs/nfs/nfs4proc.c sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred); task 6192 fs/nfs/nfs4proc.c static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) task 6200 fs/nfs/nfs4proc.c if (!nfs4_sequence_done(task, &data->res.seq_res)) task 6203 fs/nfs/nfs4proc.c trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status); task 6206 fs/nfs/nfs4proc.c if (pnfs_roc_done(task, data->inode, task 6212 fs/nfs/nfs4proc.c switch (task->tk_status) { task 6221 fs/nfs/nfs4proc.c task->tk_msg.rpc_cred); task 6225 fs/nfs/nfs4proc.c task->tk_status = 0; task 6230 fs/nfs/nfs4proc.c task->tk_status = 0; task 6240 fs/nfs/nfs4proc.c task->tk_status = nfs4_async_handle_exception(task, task 6241 fs/nfs/nfs4proc.c data->res.server, task->tk_status, task 6246 fs/nfs/nfs4proc.c data->rpc_status = task->tk_status; task 6249 fs/nfs/nfs4proc.c task->tk_status = 0; task 6250 fs/nfs/nfs4proc.c rpc_restart_call_prepare(task); task 6268 fs/nfs/nfs4proc.c static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) 
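The fs/nfs/nfs4proc.c entries above (nfs4_read_done_cb, nfs4_write_done_cb, nfs4_commit_done_cb, nfs4_delegreturn_done) all quote one recurring done-callback idiom: inspect task->tk_status, hand retryable errors to nfs4_async_handle_exception()/nfs4_async_handle_error(), then clear the status and redrive the call. A minimal sketch of that idiom, assuming a hypothetical example_done_cb and collapsing the retryable cases into a single -EAGAIN check (not code from the tree):

#include <linux/jiffies.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>

/* Hypothetical done callback, of the kind invoked from ->rpc_call_done. */
static int example_done_cb(struct rpc_task *task)
{
        if (task->tk_status >= 0)
                return 0;                       /* reply was good */
        if (task->tk_status == -EAGAIN) {       /* stand-in for the retryable cases */
                rpc_delay(task, HZ);            /* back off before retrying */
                task->tk_status = 0;            /* forget the transient error */
                rpc_restart_call_prepare(task); /* requeue at ->rpc_call_prepare */
                return -EAGAIN;
        }
        return task->tk_status;                 /* permanent failure: propagate */
}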
task 6275 fs/nfs/nfs4proc.c if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) { task 6276 fs/nfs/nfs4proc.c nfs4_sequence_done(task, &d_data->res.seq_res); task 6289 fs/nfs/nfs4proc.c task); task 6302 fs/nfs/nfs4proc.c struct rpc_task *task; task 6351 fs/nfs/nfs4proc.c task = rpc_run_task(&task_setup_data); task 6352 fs/nfs/nfs4proc.c if (IS_ERR(task)) task 6353 fs/nfs/nfs4proc.c return PTR_ERR(task); task 6356 fs/nfs/nfs4proc.c status = rpc_wait_for_completion_task(task); task 6361 fs/nfs/nfs4proc.c rpc_put_task(task); task 6528 fs/nfs/nfs4proc.c static void nfs4_locku_done(struct rpc_task *task, void *data) task 6536 fs/nfs/nfs4proc.c if (!nfs4_sequence_done(task, &calldata->res.seq_res)) task 6538 fs/nfs/nfs4proc.c switch (task->tk_status) { task 6550 fs/nfs/nfs4proc.c task->tk_msg.rpc_cred); task 6556 fs/nfs/nfs4proc.c rpc_restart_call_prepare(task); task 6561 fs/nfs/nfs4proc.c rpc_restart_call_prepare(task); task 6564 fs/nfs/nfs4proc.c task->tk_status = nfs4_async_handle_exception(task, task 6565 fs/nfs/nfs4proc.c calldata->server, task->tk_status, task 6568 fs/nfs/nfs4proc.c rpc_restart_call_prepare(task); task 6573 fs/nfs/nfs4proc.c static void nfs4_locku_prepare(struct rpc_task *task, void *data) task 6578 fs/nfs/nfs4proc.c nfs_async_iocounter_wait(task, calldata->l_ctx)) task 6581 fs/nfs/nfs4proc.c if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) task 6591 fs/nfs/nfs4proc.c task) != 0) task 6595 fs/nfs/nfs4proc.c task->tk_action = NULL; task 6597 fs/nfs/nfs4proc.c nfs4_sequence_done(task, &calldata->res.seq_res); task 6654 fs/nfs/nfs4proc.c struct rpc_task *task; task 6684 fs/nfs/nfs4proc.c task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid); task 6685 fs/nfs/nfs4proc.c status = PTR_ERR(task); task 6686 fs/nfs/nfs4proc.c if (IS_ERR(task)) task 6688 fs/nfs/nfs4proc.c status = rpc_wait_for_completion_task(task); task 6689 fs/nfs/nfs4proc.c rpc_put_task(task); task 6747 fs/nfs/nfs4proc.c static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) task 6753 fs/nfs/nfs4proc.c if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0) task 6757 fs/nfs/nfs4proc.c if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) { task 6771 fs/nfs/nfs4proc.c task->tk_action = NULL; task 6778 fs/nfs/nfs4proc.c task) == 0) task 6785 fs/nfs/nfs4proc.c nfs4_sequence_done(task, &data->res.seq_res); task 6789 fs/nfs/nfs4proc.c static void nfs4_lock_done(struct rpc_task *task, void *calldata) task 6796 fs/nfs/nfs4proc.c if (!nfs4_sequence_done(task, &data->res.seq_res)) task 6799 fs/nfs/nfs4proc.c data->rpc_status = task->tk_status; task 6800 fs/nfs/nfs4proc.c switch (task->tk_status) { task 6833 fs/nfs/nfs4proc.c rpc_restart_call_prepare(task); task 6844 fs/nfs/nfs4proc.c struct rpc_task *task; task 6845 fs/nfs/nfs4proc.c task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, task 6847 fs/nfs/nfs4proc.c if (!IS_ERR(task)) task 6848 fs/nfs/nfs4proc.c rpc_put_task_async(task); task 6884 fs/nfs/nfs4proc.c struct rpc_task *task; task 6916 fs/nfs/nfs4proc.c task = rpc_run_task(&task_setup_data); task 6917 fs/nfs/nfs4proc.c if (IS_ERR(task)) task 6918 fs/nfs/nfs4proc.c return PTR_ERR(task); task 6919 fs/nfs/nfs4proc.c ret = rpc_wait_for_completion_task(task); task 6927 fs/nfs/nfs4proc.c rpc_put_task(task); task 7075 fs/nfs/nfs4proc.c struct task_struct *task; task 7102 fs/nfs/nfs4proc.c wait->private = waiter->task; task 7121 fs/nfs/nfs4proc.c struct nfs4_lock_waiter waiter = { .task = current, task 7233 fs/nfs/nfs4proc.c static void 
nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata) task 7238 fs/nfs/nfs4proc.c &data->res.seq_res, task); task 7243 fs/nfs/nfs4proc.c static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata) task 7248 fs/nfs/nfs4proc.c nfs40_sequence_done(task, &data->res.seq_res); task 7250 fs/nfs/nfs4proc.c switch (task->tk_status) { task 7260 fs/nfs/nfs4proc.c if (nfs4_async_handle_error(task, server, task 7262 fs/nfs/nfs4proc.c rpc_restart_call_prepare(task); task 7852 fs/nfs/nfs4proc.c nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) task 7854 fs/nfs/nfs4proc.c struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp; task 7855 fs/nfs/nfs4proc.c struct nfs41_bind_conn_to_session_res *res = task->tk_msg.rpc_resp; task 7858 fs/nfs/nfs4proc.c switch (task->tk_status) { task 7862 fs/nfs/nfs4proc.c task->tk_status); task 7866 fs/nfs/nfs4proc.c rpc_task_close_connection(task); task 7868 fs/nfs/nfs4proc.c rpc_restart_call(task); task 7909 fs/nfs/nfs4proc.c struct rpc_task *task; task 7919 fs/nfs/nfs4proc.c task = rpc_run_task(&task_setup_data); task 7920 fs/nfs/nfs4proc.c if (!IS_ERR(task)) { task 7921 fs/nfs/nfs4proc.c status = task->tk_status; task 7922 fs/nfs/nfs4proc.c rpc_put_task(task); task 7924 fs/nfs/nfs4proc.c status = PTR_ERR(task); task 8227 fs/nfs/nfs4proc.c struct rpc_task *task; task 8233 fs/nfs/nfs4proc.c task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL); task 8234 fs/nfs/nfs4proc.c if (IS_ERR(task)) task 8235 fs/nfs/nfs4proc.c return PTR_ERR(task); task 8237 fs/nfs/nfs4proc.c argp = task->tk_msg.rpc_argp; task 8238 fs/nfs/nfs4proc.c resp = task->tk_msg.rpc_resp; task 8239 fs/nfs/nfs4proc.c status = task->tk_status; task 8278 fs/nfs/nfs4proc.c rpc_put_task(task); task 8330 fs/nfs/nfs4proc.c struct rpc_task *task; task 8341 fs/nfs/nfs4proc.c task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt); task 8342 fs/nfs/nfs4proc.c if (IS_ERR(task)) task 8345 fs/nfs/nfs4proc.c status = task->tk_status; task 8348 fs/nfs/nfs4proc.c task->tk_msg.rpc_resp, xprt); task 8353 fs/nfs/nfs4proc.c rpc_put_task(task); task 8427 fs/nfs/nfs4proc.c static void nfs4_get_lease_time_prepare(struct rpc_task *task, task 8439 fs/nfs/nfs4proc.c task); task 8447 fs/nfs/nfs4proc.c static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) task 8453 fs/nfs/nfs4proc.c if (!nfs4_sequence_done(task, &data->res->lr_seq_res)) task 8455 fs/nfs/nfs4proc.c switch (task->tk_status) { task 8458 fs/nfs/nfs4proc.c dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); task 8459 fs/nfs/nfs4proc.c rpc_delay(task, NFS4_POLL_RETRY_MIN); task 8460 fs/nfs/nfs4proc.c task->tk_status = 0; task 8463 fs/nfs/nfs4proc.c rpc_restart_call_prepare(task); task 8754 fs/nfs/nfs4proc.c static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp) task 8756 fs/nfs/nfs4proc.c switch(task->tk_status) { task 8758 fs/nfs/nfs4proc.c rpc_delay(task, NFS4_POLL_RETRY_MAX); task 8766 fs/nfs/nfs4proc.c static void nfs41_sequence_call_done(struct rpc_task *task, void *data) task 8771 fs/nfs/nfs4proc.c if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp)) task 8774 fs/nfs/nfs4proc.c trace_nfs4_sequence(clp, task->tk_status); task 8775 fs/nfs/nfs4proc.c if (task->tk_status < 0) { task 8776 fs/nfs/nfs4proc.c dprintk("%s ERROR %d\n", __func__, task->tk_status); task 8780 fs/nfs/nfs4proc.c if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) { task 8781 fs/nfs/nfs4proc.c rpc_restart_call_prepare(task); task 8785 
fs/nfs/nfs4proc.c dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); task 8790 fs/nfs/nfs4proc.c static void nfs41_sequence_prepare(struct rpc_task *task, void *data) task 8797 fs/nfs/nfs4proc.c args = task->tk_msg.rpc_argp; task 8798 fs/nfs/nfs4proc.c res = task->tk_msg.rpc_resp; task 8800 fs/nfs/nfs4proc.c nfs4_setup_sequence(clp, args, res, task); task 8855 fs/nfs/nfs4proc.c struct rpc_task *task; task 8860 fs/nfs/nfs4proc.c task = _nfs41_proc_sequence(clp, cred, NULL, false); task 8861 fs/nfs/nfs4proc.c if (IS_ERR(task)) task 8862 fs/nfs/nfs4proc.c ret = PTR_ERR(task); task 8864 fs/nfs/nfs4proc.c rpc_put_task_async(task); task 8871 fs/nfs/nfs4proc.c struct rpc_task *task; task 8874 fs/nfs/nfs4proc.c task = _nfs41_proc_sequence(clp, cred, NULL, true); task 8875 fs/nfs/nfs4proc.c if (IS_ERR(task)) { task 8876 fs/nfs/nfs4proc.c ret = PTR_ERR(task); task 8879 fs/nfs/nfs4proc.c ret = rpc_wait_for_completion_task(task); task 8881 fs/nfs/nfs4proc.c ret = task->tk_status; task 8882 fs/nfs/nfs4proc.c rpc_put_task(task); task 8894 fs/nfs/nfs4proc.c static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data) task 8901 fs/nfs/nfs4proc.c task); task 8904 fs/nfs/nfs4proc.c static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp) task 8906 fs/nfs/nfs4proc.c switch(task->tk_status) { task 8914 fs/nfs/nfs4proc.c rpc_delay(task, NFS4_POLL_RETRY_MAX); task 8928 fs/nfs/nfs4proc.c static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data) task 8935 fs/nfs/nfs4proc.c if (!nfs41_sequence_done(task, res)) task 8938 fs/nfs/nfs4proc.c trace_nfs4_reclaim_complete(clp, task->tk_status); task 8939 fs/nfs/nfs4proc.c if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) { task 8940 fs/nfs/nfs4proc.c rpc_restart_call_prepare(task); task 8996 fs/nfs/nfs4proc.c nfs4_layoutget_prepare(struct rpc_task *task, void *calldata) task 9003 fs/nfs/nfs4proc.c &lgp->res.seq_res, task); task 9007 fs/nfs/nfs4proc.c static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) task 9012 fs/nfs/nfs4proc.c nfs41_sequence_process(task, &lgp->res.seq_res); task 9017 fs/nfs/nfs4proc.c nfs4_layoutget_handle_exception(struct rpc_task *task, task 9023 fs/nfs/nfs4proc.c int nfs4err = task->tk_status; task 9027 fs/nfs/nfs4proc.c dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status); task 9134 fs/nfs/nfs4proc.c struct rpc_task *task; task 9162 fs/nfs/nfs4proc.c task = rpc_run_task(&task_setup_data); task 9163 fs/nfs/nfs4proc.c if (IS_ERR(task)) task 9164 fs/nfs/nfs4proc.c return ERR_CAST(task); task 9165 fs/nfs/nfs4proc.c status = rpc_wait_for_completion_task(task); task 9169 fs/nfs/nfs4proc.c if (task->tk_status < 0) { task 9170 fs/nfs/nfs4proc.c status = nfs4_layoutget_handle_exception(task, lgp, &exception); task 9184 fs/nfs/nfs4proc.c rpc_put_task(task); task 9192 fs/nfs/nfs4proc.c nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) task 9200 fs/nfs/nfs4proc.c task); task 9202 fs/nfs/nfs4proc.c rpc_exit(task, 0); task 9205 fs/nfs/nfs4proc.c static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) task 9212 fs/nfs/nfs4proc.c if (!nfs41_sequence_process(task, &lrp->res.seq_res)) task 9219 fs/nfs/nfs4proc.c if (task->tk_rpc_status != 0 && RPC_WAS_SENT(task)) { task 9225 fs/nfs/nfs4proc.c switch (task->tk_status) { task 9233 fs/nfs/nfs4proc.c task->tk_status = 0; task 9238 fs/nfs/nfs4proc.c if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN) task 9245 fs/nfs/nfs4proc.c task->tk_status = 0; 
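The rpc_run_task() call sites indexed above (for example the _nfs41_proc_sequence callers and the layoutget setup in fs/nfs/nfs4proc.c) repeat the same synchronous run/wait/put sequence. A sketch of that sequence, assuming the caller has already filled in a struct rpc_task_setup; example_run_sync is a hypothetical name:

#include <linux/err.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>

/* Hypothetical wrapper showing the run/wait/put sequence quoted above. */
static int example_run_sync(struct rpc_task_setup *setup)
{
        struct rpc_task *task;
        int status;

        task = rpc_run_task(setup);             /* allocate and queue the task */
        if (IS_ERR(task))
                return PTR_ERR(task);           /* task never started */
        status = rpc_wait_for_completion_task(task); /* -ERESTARTSYS if signalled */
        if (status == 0)
                status = task->tk_status;       /* result of the RPC itself */
        rpc_put_task(task);                     /* drop the caller's reference */
        return status;
}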
task 9247 fs/nfs/nfs4proc.c rpc_restart_call_prepare(task); task 9275 fs/nfs/nfs4proc.c struct rpc_task *task; task 9304 fs/nfs/nfs4proc.c task = rpc_run_task(&task_setup_data); task 9305 fs/nfs/nfs4proc.c if (IS_ERR(task)) task 9306 fs/nfs/nfs4proc.c return PTR_ERR(task); task 9308 fs/nfs/nfs4proc.c status = task->tk_status; task 9311 fs/nfs/nfs4proc.c rpc_put_task(task); task 9364 fs/nfs/nfs4proc.c static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata) task 9372 fs/nfs/nfs4proc.c task); task 9376 fs/nfs/nfs4proc.c nfs4_layoutcommit_done(struct rpc_task *task, void *calldata) task 9381 fs/nfs/nfs4proc.c if (!nfs41_sequence_done(task, &data->res.seq_res)) task 9384 fs/nfs/nfs4proc.c switch (task->tk_status) { /* Just ignore these failures */ task 9389 fs/nfs/nfs4proc.c task->tk_status = 0; task 9393 fs/nfs/nfs4proc.c if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) { task 9394 fs/nfs/nfs4proc.c rpc_restart_call_prepare(task); task 9428 fs/nfs/nfs4proc.c .task = &data->task, task 9434 fs/nfs/nfs4proc.c struct rpc_task *task; task 9451 fs/nfs/nfs4proc.c task = rpc_run_task(&task_setup_data); task 9452 fs/nfs/nfs4proc.c if (IS_ERR(task)) task 9453 fs/nfs/nfs4proc.c return PTR_ERR(task); task 9455 fs/nfs/nfs4proc.c status = task->tk_status; task 9458 fs/nfs/nfs4proc.c rpc_put_task(task); task 9706 fs/nfs/nfs4proc.c static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata) task 9712 fs/nfs/nfs4proc.c task); task 9715 fs/nfs/nfs4proc.c static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata) task 9719 fs/nfs/nfs4proc.c nfs41_sequence_done(task, &data->res.seq_res); task 9721 fs/nfs/nfs4proc.c switch (task->tk_status) { task 9723 fs/nfs/nfs4proc.c if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN) task 9724 fs/nfs/nfs4proc.c rpc_restart_call_prepare(task); task 9765 fs/nfs/nfs4proc.c struct rpc_task *task; task 9782 fs/nfs/nfs4proc.c task = rpc_run_task(&task_setup); task 9783 fs/nfs/nfs4proc.c if (IS_ERR(task)) task 9784 fs/nfs/nfs4proc.c return PTR_ERR(task); task 9785 fs/nfs/nfs4proc.c rpc_put_task(task); task 357 fs/nfs/nfs4session.c static bool nfs41_assign_slot(struct rpc_task *task, void *pslot) task 359 fs/nfs/nfs4session.c struct nfs4_sequence_args *args = task->tk_msg.rpc_argp; task 360 fs/nfs/nfs4session.c struct nfs4_sequence_res *res = task->tk_msg.rpc_resp; task 1080 fs/nfs/nfs4state.c new->task = NULL; task 1098 fs/nfs/nfs4state.c rpc_wake_up_queued_task(&sequence->wait, next->task); task 1168 fs/nfs/nfs4state.c int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task) task 1177 fs/nfs/nfs4state.c seqid->task = task; task 1182 fs/nfs/nfs4state.c rpc_sleep_on(&sequence->wait, task, NULL); task 1206 fs/nfs/nfs4state.c struct task_struct *task; task 1222 fs/nfs/nfs4state.c task = kthread_run(nfs4_run_state_manager, clp, "%s", buf); task 1223 fs/nfs/nfs4state.c if (IS_ERR(task)) { task 1225 fs/nfs/nfs4state.c __func__, PTR_ERR(task)); task 153 fs/nfs/nfs4super.c const struct task_struct *task; task 165 fs/nfs/nfs4super.c if (p->task == current) task 181 fs/nfs/nfs4super.c new->task = current; task 585 fs/nfs/nfs4trace.h const struct rpc_task *task = rqstp->rq_task; task 587 fs/nfs/nfs4trace.h __entry->task_id = task->tk_pid; task 588 fs/nfs/nfs4trace.h __entry->client_id = task->tk_client->cl_clid; task 1155 fs/nfs/nfstrace.h const struct rpc_task *task = rqstp->rq_task; task 1157 fs/nfs/nfstrace.h __entry->task_id = task->tk_pid; task 1158 fs/nfs/nfstrace.h 
__entry->client_id = task->tk_client->cl_clid; task 116 fs/nfs/pagelist.c nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx) task 122 fs/nfs/pagelist.c rpc_sleep_on(&NFS_SERVER(inode)->uoc_rpcwaitq, task, NULL); task 127 fs/nfs/pagelist.c rpc_wake_up_queued_task(&NFS_SERVER(inode)->uoc_rpcwaitq, task); task 604 fs/nfs/pagelist.c static void nfs_pgio_prepare(struct rpc_task *task, void *calldata) task 608 fs/nfs/pagelist.c err = NFS_PROTO(hdr->inode)->pgio_rpc_prepare(task, hdr); task 610 fs/nfs/pagelist.c rpc_exit(task, err); task 617 fs/nfs/pagelist.c struct rpc_task *task; task 625 fs/nfs/pagelist.c .task = &hdr->task, task 643 fs/nfs/pagelist.c task = rpc_run_task(&task_setup_data); task 644 fs/nfs/pagelist.c if (IS_ERR(task)) { task 645 fs/nfs/pagelist.c ret = PTR_ERR(task); task 649 fs/nfs/pagelist.c ret = rpc_wait_for_completion_task(task); task 651 fs/nfs/pagelist.c ret = task->tk_status; task 653 fs/nfs/pagelist.c rpc_put_task(task); task 734 fs/nfs/pagelist.c static void nfs_pgio_result(struct rpc_task *task, void *calldata) task 740 fs/nfs/pagelist.c task->tk_pid, task->tk_status); task 742 fs/nfs/pagelist.c if (hdr->rw_ops->rw_done(task, hdr, inode) != 0) task 744 fs/nfs/pagelist.c if (task->tk_status < 0) task 745 fs/nfs/pagelist.c nfs_set_pgio_error(hdr, task->tk_status, hdr->args.offset); task 747 fs/nfs/pagelist.c hdr->rw_ops->rw_result(task, hdr); task 1453 fs/nfs/pnfs.c int pnfs_roc_done(struct rpc_task *task, struct inode *inode, task 1470 fs/nfs/pnfs.c if (task->tk_rpc_status == 0) task 1473 fs/nfs/pnfs.c if (!RPC_WAS_SENT(task)) task 1526 fs/nfs/pnfs.c bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task) task 1537 fs/nfs/pnfs.c rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL); task 2627 fs/nfs/pnfs.c hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr); task 2638 fs/nfs/pnfs.c hdr->mds_ops->rpc_call_done(&hdr->task, hdr); task 2753 fs/nfs/pnfs.c hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr); task 2762 fs/nfs/pnfs.c hdr->mds_ops->rpc_call_done(&hdr->task, hdr); task 2820 fs/nfs/pnfs.c hdr->task.tk_status = nfs_pageio_resend(&pgio, hdr); task 285 fs/nfs/pnfs.h int pnfs_roc_done(struct rpc_task *task, struct inode *inode, task 292 fs/nfs/pnfs.h bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task); task 377 fs/nfs/pnfs.h void pnfs_generic_write_commit_done(struct rpc_task *task, void *data); task 709 fs/nfs/pnfs.h pnfs_roc_done(struct rpc_task *task, struct inode *inode, task 725 fs/nfs/pnfs.h pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task) task 36 fs/nfs/pnfs_nfs.c data->task.tk_status = 0; task 42 fs/nfs/pnfs_nfs.c void pnfs_generic_write_commit_done(struct rpc_task *task, void *data) task 47 fs/nfs/pnfs_nfs.c wdata->mds_ops->rpc_call_done(task, data); task 332 fs/nfs/proc.c static void nfs_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) task 334 fs/nfs/proc.c rpc_call_start(task); task 337 fs/nfs/proc.c static int nfs_proc_unlink_done(struct rpc_task *task, struct inode *dir) task 351 fs/nfs/proc.c static void nfs_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) task 353 fs/nfs/proc.c rpc_call_start(task); task 357 fs/nfs/proc.c nfs_proc_rename_done(struct rpc_task *task, struct inode *old_dir, task 587 fs/nfs/proc.c static int nfs_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) task 592 fs/nfs/proc.c if (task->tk_status >= 0) { task 610 fs/nfs/proc.c static int nfs_proc_pgio_rpc_prepare(struct 
rpc_task *task, task 613 fs/nfs/proc.c rpc_call_start(task); task 617 fs/nfs/proc.c static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) task 619 fs/nfs/proc.c if (task->tk_status >= 0) { task 635 fs/nfs/proc.c static void nfs_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) task 241 fs/nfs/read.c static int nfs_readpage_done(struct rpc_task *task, task 245 fs/nfs/read.c int status = NFS_PROTO(inode)->read_done(task, hdr); task 250 fs/nfs/read.c trace_nfs_readpage_done(inode, task->tk_status, task 253 fs/nfs/read.c if (task->tk_status == -ESTALE) { task 260 fs/nfs/read.c static void nfs_readpage_retry(struct rpc_task *task, task 275 fs/nfs/read.c if (!task->tk_ops) { task 285 fs/nfs/read.c rpc_restart_call_prepare(task); task 288 fs/nfs/read.c static void nfs_readpage_result(struct rpc_task *task, task 301 fs/nfs/read.c nfs_readpage_retry(task, hdr); task 46 fs/nfs/unlink.c static void nfs_async_unlink_done(struct rpc_task *task, void *calldata) task 51 fs/nfs/unlink.c trace_nfs_sillyrename_unlink(data, task->tk_status); task 52 fs/nfs/unlink.c if (!NFS_PROTO(dir)->unlink_done(task, dir)) task 53 fs/nfs/unlink.c rpc_restart_call_prepare(task); task 76 fs/nfs/unlink.c static void nfs_unlink_prepare(struct rpc_task *task, void *calldata) task 80 fs/nfs/unlink.c NFS_PROTO(dir)->unlink_rpc_prepare(task, data); task 103 fs/nfs/unlink.c struct rpc_task *task; task 112 fs/nfs/unlink.c task = rpc_run_task(&task_setup_data); task 113 fs/nfs/unlink.c if (!IS_ERR(task)) task 114 fs/nfs/unlink.c rpc_put_task_async(task); task 258 fs/nfs/unlink.c static void nfs_async_rename_done(struct rpc_task *task, void *calldata) task 266 fs/nfs/unlink.c new_dir, data->new_dentry, task->tk_status); task 267 fs/nfs/unlink.c if (!NFS_PROTO(old_dir)->rename_done(task, old_dir, new_dir)) { task 268 fs/nfs/unlink.c rpc_restart_call_prepare(task); task 273 fs/nfs/unlink.c data->complete(task, data); task 310 fs/nfs/unlink.c static void nfs_rename_prepare(struct rpc_task *task, void *calldata) task 313 fs/nfs/unlink.c NFS_PROTO(data->old_dir)->rename_rpc_prepare(task, data); task 391 fs/nfs/unlink.c nfs_complete_sillyrename(struct rpc_task *task, struct nfs_renamedata *data) task 395 fs/nfs/unlink.c if (task->tk_status != 0) { task 438 fs/nfs/unlink.c struct rpc_task *task; task 485 fs/nfs/unlink.c task = nfs_async_rename(dir, dir, dentry, sdentry, task 487 fs/nfs/unlink.c if (IS_ERR(task)) { task 494 fs/nfs/unlink.c error = rpc_wait_for_completion_task(task); task 496 fs/nfs/unlink.c error = task->tk_status; task 515 fs/nfs/unlink.c rpc_put_task(task); task 1494 fs/nfs/write.c void nfs_commit_prepare(struct rpc_task *task, void *calldata) task 1498 fs/nfs/write.c NFS_PROTO(data->inode)->commit_rpc_prepare(task, data); task 1562 fs/nfs/write.c static int nfs_writeback_done(struct rpc_task *task, task 1575 fs/nfs/write.c status = NFS_PROTO(inode)->write_done(task, hdr); task 1580 fs/nfs/write.c trace_nfs_writeback_done(inode, task->tk_status, task 1584 fs/nfs/write.c task->tk_status >= 0) { task 1617 fs/nfs/write.c static void nfs_writeback_result(struct rpc_task *task, task 1638 fs/nfs/write.c task->tk_status = -EIO; task 1643 fs/nfs/write.c if (!task->tk_ops) { task 1661 fs/nfs/write.c rpc_restart_call_prepare(task); task 1694 fs/nfs/write.c struct rpc_task *task; task 1702 fs/nfs/write.c .task = &data->task, task 1717 fs/nfs/write.c task = rpc_run_task(&task_setup_data); task 1718 fs/nfs/write.c if (IS_ERR(task)) task 1719 fs/nfs/write.c return PTR_ERR(task); task 
1721 fs/nfs/write.c rpc_wait_for_completion_task(task); task 1722 fs/nfs/write.c rpc_put_task(task); task 1827 fs/nfs/write.c static void nfs_commit_done(struct rpc_task *task, void *calldata) task 1832 fs/nfs/write.c task->tk_pid, task->tk_status); task 1835 fs/nfs/write.c NFS_PROTO(data->inode)->commit_done(task, data); task 1843 fs/nfs/write.c int status = data->task.tk_status; task 930 fs/nfsd/nfs4callback.c static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata) task 934 fs/nfsd/nfs4callback.c if (task->tk_status) task 935 fs/nfsd/nfs4callback.c nfsd4_mark_cb_down(clp, task->tk_status); task 978 fs/nfsd/nfs4callback.c static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task) task 981 fs/nfsd/nfs4callback.c rpc_sleep_on(&clp->cl_cb_waitq, task, NULL); task 987 fs/nfsd/nfs4callback.c rpc_wake_up_queued_task(&clp->cl_cb_waitq, task); task 996 fs/nfsd/nfs4callback.c static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata) task 1009 fs/nfsd/nfs4callback.c if (!cb->cb_holds_slot && !nfsd41_cb_get_slot(clp, task)) task 1013 fs/nfsd/nfs4callback.c rpc_call_start(task); task 1016 fs/nfsd/nfs4callback.c static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback *cb) task 1032 fs/nfsd/nfs4callback.c if (RPC_SIGNALLED(task)) task 1061 fs/nfsd/nfs4callback.c if (!rpc_restart_call(task)) task 1064 fs/nfsd/nfs4callback.c rpc_delay(task, 2 * HZ); task 1085 fs/nfsd/nfs4callback.c if (RPC_SIGNALLED(task)) task 1090 fs/nfsd/nfs4callback.c if (rpc_restart_call_prepare(task)) task 1094 fs/nfsd/nfs4callback.c task->tk_status = 0; task 1099 fs/nfsd/nfs4callback.c static void nfsd4_cb_done(struct rpc_task *task, void *calldata) task 1107 fs/nfsd/nfs4callback.c if (!nfsd4_cb_sequence_done(task, cb)) task 1111 fs/nfsd/nfs4callback.c WARN_ON_ONCE(task->tk_status); task 1112 fs/nfsd/nfs4callback.c task->tk_status = cb->cb_status; task 1115 fs/nfsd/nfs4callback.c switch (cb->cb_ops->done(cb, task)) { task 1117 fs/nfsd/nfs4callback.c task->tk_status = 0; task 1118 fs/nfsd/nfs4callback.c rpc_restart_call_prepare(task); task 1121 fs/nfsd/nfs4callback.c switch (task->tk_status) { task 1124 fs/nfsd/nfs4callback.c nfsd4_mark_cb_down(clp, task->tk_status); task 652 fs/nfsd/nfs4layouts.c nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task) task 661 fs/nfsd/nfs4layouts.c switch (task->tk_status) { task 677 fs/nfsd/nfs4layouts.c cutoff = ktime_add_ns(task->tk_start, task 681 fs/nfsd/nfs4layouts.c rpc_delay(task, HZ/100); /* 10 milliseconds */ task 699 fs/nfsd/nfs4layouts.c task->tk_status = 0; task 1147 fs/nfsd/nfs4proc.c struct rpc_task *task) task 326 fs/nfsd/nfs4state.c nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task) task 333 fs/nfsd/nfs4state.c switch (task->tk_status) { task 335 fs/nfsd/nfs4state.c rpc_delay(task, 1 * HZ); task 4390 fs/nfsd/nfs4state.c struct rpc_task *task) task 4397 fs/nfsd/nfs4state.c switch (task->tk_status) { task 4401 fs/nfsd/nfs4state.c rpc_delay(task, 2 * HZ); task 4410 fs/nfsd/nfs4state.c rpc_delay(task, 2 * HZ); task 124 fs/nsfs.c struct task_struct *task; task 131 fs/nsfs.c return args->ns_ops->get(args->task); task 134 fs/nsfs.c void *ns_get_path(struct path *path, struct task_struct *task, task 139 fs/nsfs.c .task = task, task 215 fs/nsfs.c int ns_get_name(char *buf, size_t size, struct task_struct *task, task 221 fs/nsfs.c ns = ns_ops->get(task); task 135 fs/ocfs2/cluster/tcp.c u32 msgkey, struct task_struct *task, u8 node) task 138 fs/ocfs2/cluster/tcp.c nst->st_task = task;
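The nfsd41_cb_get_slot entries above (and nfs_async_iocounter_wait in fs/nfs/pagelist.c earlier in this index) pair rpc_sleep_on() with rpc_wake_up_queued_task() to park an rpc_task until a single shared slot frees up. A sketch of that shape, assuming the hypothetical names example_get_slot and slot_busy; note the race-breaker re-test after queueing, which the quoted code also performs:

#include <linux/bitops.h>
#include <linux/sunrpc/sched.h>

/* Hypothetical slot claim: returns true if the task may proceed now. */
static bool example_get_slot(struct rpc_task *task,
                             struct rpc_wait_queue *waitq,
                             unsigned long *slot_busy)
{
        if (test_and_set_bit(0, slot_busy)) {
                rpc_sleep_on(waitq, task, NULL); /* park the task; returns at once */
                /* Race breaker: the holder may have released the slot meanwhile. */
                if (!test_and_set_bit(0, slot_busy)) {
                        rpc_wake_up_queued_task(waitq, task);
                        return true;
                }
                return false;
        }
        return true;
}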
task 1542 fs/ocfs2/ocfs2_trace.h TP_PROTO(void *task, void *dc_task, unsigned long long ino, task 1544 fs/ocfs2/ocfs2_trace.h TP_ARGS(task, dc_task, ino, flags), task 1546 fs/ocfs2/ocfs2_trace.h __field(void *, task) task 1552 fs/ocfs2/ocfs2_trace.h __entry->task = task; task 1557 fs/ocfs2/ocfs2_trace.h TP_printk("%p %p %llu %u", __entry->task, __entry->dc_task, task 381 fs/proc/array.c static void task_cpus_allowed(struct seq_file *m, struct task_struct *task) task 384 fs/proc/array.c cpumask_pr_args(task->cpus_ptr)); task 386 fs/proc/array.c cpumask_pr_args(task->cpus_ptr)); task 405 fs/proc/array.c struct pid *pid, struct task_struct *task) task 407 fs/proc/array.c struct mm_struct *mm = get_task_mm(task); task 410 fs/proc/array.c proc_task_name(m, task, true); task 413 fs/proc/array.c task_state(m, ns, pid, task); task 421 fs/proc/array.c task_sig(m, task); task 422 fs/proc/array.c task_cap(m, task); task 423 fs/proc/array.c task_seccomp(m, task); task 424 fs/proc/array.c task_cpus_allowed(m, task); task 425 fs/proc/array.c cpuset_task_status_allowed(m, task); task 426 fs/proc/array.c task_context_switch_counts(m, task); task 431 fs/proc/array.c struct pid *pid, struct task_struct *task, int whole) task 450 fs/proc/array.c state = *get_task_state(task); task 452 fs/proc/array.c permitted = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS | PTRACE_MODE_NOAUDIT); task 453 fs/proc/array.c mm = get_task_mm(task); task 465 fs/proc/array.c if (permitted && (task->flags & (PF_EXITING|PF_DUMPCORE))) { task 466 fs/proc/array.c if (try_get_task_stack(task)) { task 467 fs/proc/array.c eip = KSTK_EIP(task); task 468 fs/proc/array.c esp = KSTK_ESP(task); task 469 fs/proc/array.c put_task_stack(task); task 479 fs/proc/array.c if (lock_task_sighand(task, &flags)) { task 480 fs/proc/array.c struct signal_struct *sig = task->signal; task 489 fs/proc/array.c num_threads = get_nr_threads(task); task 490 fs/proc/array.c collect_sigign_sigcatch(task, &sigign, &sigcatch); task 501 fs/proc/array.c struct task_struct *t = task; task 506 fs/proc/array.c } while_each_thread(task, t); task 510 fs/proc/array.c thread_group_cputime_adjusted(task, &utime, &stime); task 514 fs/proc/array.c sid = task_session_nr_ns(task, ns); task 515 fs/proc/array.c ppid = task_tgid_nr_ns(task->real_parent, ns); task 516 fs/proc/array.c pgid = task_pgrp_nr_ns(task, ns); task 518 fs/proc/array.c unlock_task_sighand(task, &flags); task 522 fs/proc/array.c wchan = get_wchan(task); task 524 fs/proc/array.c min_flt = task->min_flt; task 525 fs/proc/array.c maj_flt = task->maj_flt; task 526 fs/proc/array.c task_cputime_adjusted(task, &utime, &stime); task 527 fs/proc/array.c gtime = task_gtime(task); task 532 fs/proc/array.c priority = task_prio(task); task 533 fs/proc/array.c nice = task_nice(task); task 536 fs/proc/array.c start_time = nsec_to_clock_t(task->real_start_time); task 540 fs/proc/array.c proc_task_name(m, task, false); task 548 fs/proc/array.c seq_put_decimal_ull(m, " ", task->flags); task 574 fs/proc/array.c seq_put_decimal_ull(m, " ", task->pending.signal.sig[0] & 0x7fffffffUL); task 575 fs/proc/array.c seq_put_decimal_ull(m, " ", task->blocked.sig[0] & 0x7fffffffUL); task 593 fs/proc/array.c seq_put_decimal_ll(m, " ", task->exit_signal); task 594 fs/proc/array.c seq_put_decimal_ll(m, " ", task_cpu(task)); task 595 fs/proc/array.c seq_put_decimal_ull(m, " ", task->rt_priority); task 596 fs/proc/array.c seq_put_decimal_ull(m, " ", task->policy); task 597 fs/proc/array.c seq_put_decimal_ull(m, " ", 
delayacct_blkio_ticks(task)); task 613 fs/proc/array.c seq_put_decimal_ll(m, " ", task->exit_code); task 624 fs/proc/array.c struct pid *pid, struct task_struct *task) task 626 fs/proc/array.c return do_task_stat(m, ns, pid, task, 0); task 630 fs/proc/array.c struct pid *pid, struct task_struct *task) task 632 fs/proc/array.c return do_task_stat(m, ns, pid, task, 1); task 636 fs/proc/array.c struct pid *pid, struct task_struct *task) task 639 fs/proc/array.c struct mm_struct *mm = get_task_mm(task); task 667 fs/proc/array.c struct task_struct *start, *task; task 681 fs/proc/array.c task = pid_task(pid_prev, PIDTYPE_PID); task 682 fs/proc/array.c if (task && task->real_parent == start && task 683 fs/proc/array.c !(list_empty(&task->sibling))) { task 684 fs/proc/array.c if (list_is_last(&task->sibling, &start->children)) task 686 fs/proc/array.c task = list_first_entry(&task->sibling, task 688 fs/proc/array.c pid = get_pid(task_pid(task)); task 708 fs/proc/array.c list_for_each_entry(task, &start->children, sibling) { task 710 fs/proc/array.c pid = get_pid(task_pid(task)); task 170 fs/proc/base.c static int get_task_root(struct task_struct *task, struct path *root) task 174 fs/proc/base.c task_lock(task); task 175 fs/proc/base.c if (task->fs) { task 176 fs/proc/base.c get_fs_root(task->fs, root); task 179 fs/proc/base.c task_unlock(task); task 185 fs/proc/base.c struct task_struct *task = get_proc_task(d_inode(dentry)); task 188 fs/proc/base.c if (task) { task 189 fs/proc/base.c task_lock(task); task 190 fs/proc/base.c if (task->fs) { task 191 fs/proc/base.c get_fs_pwd(task->fs, path); task 194 fs/proc/base.c task_unlock(task); task 195 fs/proc/base.c put_task_struct(task); task 202 fs/proc/base.c struct task_struct *task = get_proc_task(d_inode(dentry)); task 205 fs/proc/base.c if (task) { task 206 fs/proc/base.c result = get_task_root(task, path); task 207 fs/proc/base.c put_task_struct(task); task 384 fs/proc/base.c struct pid *pid, struct task_struct *task) task 389 fs/proc/base.c if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) task 392 fs/proc/base.c wchan = get_wchan(task); task 404 fs/proc/base.c static int lock_trace(struct task_struct *task) task 406 fs/proc/base.c int err = mutex_lock_killable(&task->signal->cred_guard_mutex); task 409 fs/proc/base.c if (!ptrace_may_access(task, PTRACE_MODE_ATTACH_FSCREDS)) { task 410 fs/proc/base.c mutex_unlock(&task->signal->cred_guard_mutex); task 416 fs/proc/base.c static void unlock_trace(struct task_struct *task) task 418 fs/proc/base.c mutex_unlock(&task->signal->cred_guard_mutex); task 426 fs/proc/base.c struct pid *pid, struct task_struct *task) task 450 fs/proc/base.c err = lock_trace(task); task 454 fs/proc/base.c nr_entries = stack_trace_save_tsk(task, entries, task 461 fs/proc/base.c unlock_trace(task); task 474 fs/proc/base.c struct pid *pid, struct task_struct *task) task 480 fs/proc/base.c (unsigned long long)task->se.sum_exec_runtime, task 481 fs/proc/base.c (unsigned long long)task->sched_info.run_delay, task 482 fs/proc/base.c task->sched_info.pcount); task 493 fs/proc/base.c struct task_struct *task = get_proc_task(inode); task 495 fs/proc/base.c if (!task) task 499 fs/proc/base.c struct latency_record *lr = &task->latency_record[i]; task 515 fs/proc/base.c put_task_struct(task); task 527 fs/proc/base.c struct task_struct *task = get_proc_task(file_inode(file)); task 529 fs/proc/base.c if (!task) task 531 fs/proc/base.c clear_tsk_latency_tracing(task); task 532 fs/proc/base.c put_task_struct(task); task 548 
fs/proc/base.c struct pid *pid, struct task_struct *task) task 553 fs/proc/base.c points = oom_badness(task, totalpages) * 1000 / totalpages; task 585 fs/proc/base.c struct pid *pid, struct task_struct *task) task 592 fs/proc/base.c if (!lock_task_sighand(task, &flags)) task 594 fs/proc/base.c memcpy(rlim, task->signal->rlim, sizeof(struct rlimit) * RLIM_NLIMITS); task 595 fs/proc/base.c unlock_task_sighand(task, &flags); task 629 fs/proc/base.c struct pid *pid, struct task_struct *task) task 635 fs/proc/base.c res = lock_trace(task); task 639 fs/proc/base.c if (task_current_syscall(task, &info)) task 650 fs/proc/base.c unlock_trace(task); task 663 fs/proc/base.c struct task_struct *task; task 669 fs/proc/base.c task = get_proc_task(inode); task 670 fs/proc/base.c if (task) { task 671 fs/proc/base.c allowed = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS); task 672 fs/proc/base.c put_task_struct(task); task 699 fs/proc/base.c struct task_struct *task, task 706 fs/proc/base.c return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS); task 713 fs/proc/base.c struct task_struct *task; task 716 fs/proc/base.c task = get_proc_task(inode); task 717 fs/proc/base.c if (!task) task 719 fs/proc/base.c has_perms = has_pid_permissions(pid, task, HIDEPID_NO_ACCESS); task 720 fs/proc/base.c put_task_struct(task); task 749 fs/proc/base.c struct task_struct *task; task 752 fs/proc/base.c task = get_pid_task(pid, PIDTYPE_PID); task 753 fs/proc/base.c if (!task) task 756 fs/proc/base.c ret = PROC_I(inode)->op.proc_show(m, ns, pid, task); task 758 fs/proc/base.c put_task_struct(task); task 777 fs/proc/base.c struct task_struct *task = get_proc_task(inode); task 780 fs/proc/base.c if (task) { task 781 fs/proc/base.c mm = mm_access(task, mode | PTRACE_MODE_FSCREDS); task 782 fs/proc/base.c put_task_struct(task); task 1020 fs/proc/base.c struct task_struct *task = get_proc_task(file_inode(file)); task 1025 fs/proc/base.c if (!task) task 1027 fs/proc/base.c if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MAX) task 1030 fs/proc/base.c oom_adj = (task->signal->oom_score_adj * -OOM_DISABLE) / task 1032 fs/proc/base.c put_task_struct(task); task 1041 fs/proc/base.c struct task_struct *task; task 1044 fs/proc/base.c task = get_proc_task(file_inode(file)); task 1045 fs/proc/base.c if (!task) task 1050 fs/proc/base.c if (oom_adj < task->signal->oom_score_adj && task 1060 fs/proc/base.c current->comm, task_pid_nr(current), task_pid_nr(task), task 1061 fs/proc/base.c task_pid_nr(task)); task 1063 fs/proc/base.c if ((short)oom_adj < task->signal->oom_score_adj_min && task 1075 fs/proc/base.c if (!task->vfork_done) { task 1076 fs/proc/base.c struct task_struct *p = find_lock_task_mm(task); task 1087 fs/proc/base.c task->signal->oom_score_adj = oom_adj; task 1089 fs/proc/base.c task->signal->oom_score_adj_min = (short)oom_adj; task 1090 fs/proc/base.c trace_oom_score_adj_update(task); task 1097 fs/proc/base.c if (same_thread_group(task, p)) task 1117 fs/proc/base.c put_task_struct(task); task 1178 fs/proc/base.c struct task_struct *task = get_proc_task(file_inode(file)); task 1183 fs/proc/base.c if (!task) task 1185 fs/proc/base.c oom_score_adj = task->signal->oom_score_adj; task 1186 fs/proc/base.c put_task_struct(task); task 1232 fs/proc/base.c struct task_struct *task = get_proc_task(inode); task 1236 fs/proc/base.c if (!task) task 1240 fs/proc/base.c audit_get_loginuid(task))); task 1241 fs/proc/base.c put_task_struct(task); task 1294 fs/proc/base.c struct task_struct *task = get_proc_task(inode); task 1298 
fs/proc/base.c if (!task)
task 1301 fs/proc/base.c audit_get_sessionid(task);
task 1302 fs/proc/base.c put_task_struct(task);
task 1316 fs/proc/base.c struct task_struct *task = get_proc_task(file_inode(file));
task 1321 fs/proc/base.c if (!task)
task 1323 fs/proc/base.c make_it_fail = task->make_it_fail;
task 1324 fs/proc/base.c put_task_struct(task);
task 1334 fs/proc/base.c struct task_struct *task;
task 1352 fs/proc/base.c task = get_proc_task(file_inode(file));
task 1353 fs/proc/base.c if (!task)
task 1355 fs/proc/base.c task->make_it_fail = make_it_fail;
task 1356 fs/proc/base.c put_task_struct(task);
task 1370 fs/proc/base.c struct task_struct *task;
task 1378 fs/proc/base.c task = get_proc_task(file_inode(file));
task 1379 fs/proc/base.c if (!task)
task 1381 fs/proc/base.c task->fail_nth = n;
task 1382 fs/proc/base.c put_task_struct(task);
task 1390 fs/proc/base.c struct task_struct *task;
task 1394 fs/proc/base.c task = get_proc_task(file_inode(file));
task 1395 fs/proc/base.c if (!task)
task 1397 fs/proc/base.c len = snprintf(numbuf, sizeof(numbuf), "%u\n", task->fail_nth);
task 1398 fs/proc/base.c put_task_struct(task);
task 1594 fs/proc/base.c struct task_struct *task;
task 1597 fs/proc/base.c task = get_proc_task(d_inode(dentry));
task 1598 fs/proc/base.c if (!task)
task 1600 fs/proc/base.c exe_file = get_task_exe_file(task);
task 1601 fs/proc/base.c put_task_struct(task);
task 1688 fs/proc/base.c void task_dump_owner(struct task_struct *task, umode_t mode,
task 1698 fs/proc/base.c if (unlikely(task->flags & PF_KTHREAD)) {
task 1706 fs/proc/base.c cred = __task_cred(task);
task 1721 fs/proc/base.c task_lock(task);
task 1722 fs/proc/base.c mm = task->mm;
task 1740 fs/proc/base.c task_unlock(task);
task 1747 fs/proc/base.c struct task_struct *task, umode_t mode)
task 1768 fs/proc/base.c ei->pid = get_task_pid(task, PIDTYPE_PID);
task 1772 fs/proc/base.c task_dump_owner(task, 0, &inode->i_uid, &inode->i_gid);
task 1773 fs/proc/base.c security_task_to_inode(task, inode);
task 1788 fs/proc/base.c struct task_struct *task;
task 1795 fs/proc/base.c task = pid_task(proc_pid(inode), PIDTYPE_PID);
task 1796 fs/proc/base.c if (task) {
task 1797 fs/proc/base.c if (!has_pid_permissions(pid, task, HIDEPID_INVISIBLE)) {
task 1805 fs/proc/base.c task_dump_owner(task, inode->i_mode, &stat->uid, &stat->gid);
task 1816 fs/proc/base.c void pid_update_inode(struct task_struct *task, struct inode *inode)
task 1818 fs/proc/base.c task_dump_owner(task, inode->i_mode, &inode->i_uid, &inode->i_gid);
task 1821 fs/proc/base.c security_task_to_inode(task, inode);
task 1832 fs/proc/base.c struct task_struct *task;
task 1838 fs/proc/base.c task = get_proc_task(inode);
task 1840 fs/proc/base.c if (task) {
task 1841 fs/proc/base.c pid_update_inode(task, inode);
task 1842 fs/proc/base.c put_task_struct(task);
task 1884 fs/proc/base.c instantiate_t instantiate, struct task_struct *task, const void *ptr)
task 1900 fs/proc/base.c res = instantiate(child, task, ptr);
task 1965 fs/proc/base.c struct task_struct *task;
task 1973 fs/proc/base.c task = get_proc_task(inode);
task 1974 fs/proc/base.c if (!task)
task 1977 fs/proc/base.c mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
task 1993 fs/proc/base.c task_dump_owner(task, 0, &inode->i_uid, &inode->i_gid);
task 1995 fs/proc/base.c security_task_to_inode(task, inode);
task 2000 fs/proc/base.c put_task_struct(task);
task 2015 fs/proc/base.c struct task_struct *task;
task 2020 fs/proc/base.c task = get_proc_task(d_inode(dentry));
task 2021
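Every fs/proc/base.c site above follows the same lifetime rule: get_proc_task() returns a counted task_struct reference (or NULL once the task has been reaped), and every exit path drops it with put_task_struct(). A minimal sketch of that pattern follows; it is illustrative only. example_show_vm() and the VmTotal label are made up, while get_proc_task(), get_task_mm(), mmput() and put_task_struct() are the real calls shown in the listing.

/*
 * Hypothetical helper, not from the tree: shows the reference
 * discipline used by the fs/proc/base.c sites above.
 */
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>	/* get_task_mm(), mmput() */
#include <linux/sched/task.h>	/* put_task_struct() */
#include <linux/seq_file.h>
#include "internal.h"		/* get_proc_task() is private to fs/proc */

static int example_show_vm(struct seq_file *m, struct inode *inode)
{
	struct task_struct *task;
	struct mm_struct *mm;

	task = get_proc_task(inode);	/* counted reference; NULL if the task is gone */
	if (!task)
		return -ESRCH;

	mm = get_task_mm(task);		/* pins the mm; NULL for kernel threads */
	if (mm) {
		seq_printf(m, "VmTotal:\t%8lu kB\n",
			   mm->total_vm << (PAGE_SHIFT - 10));
		mmput(mm);		/* balances get_task_mm() */
	}

	put_task_struct(task);		/* balances get_proc_task() */
	return 0;
}

Taking the mm through get_task_mm() rather than reading task->mm directly is the same choice visible at the sites above; mm_access(task, PTRACE_MODE_READ_FSCREDS), also in the listing, is the stricter variant used when the caller will go on to read the other process's memory.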
task 2024 fs/proc/base.c mm = get_task_mm(task); task 2025 fs/proc/base.c put_task_struct(task); task 2085 fs/proc/base.c struct task_struct *task, const void *ptr) task 2091 fs/proc/base.c inode = proc_pid_make_inode(dentry->d_sb, task, S_IFLNK | task 2112 fs/proc/base.c struct task_struct *task; task 2117 fs/proc/base.c task = get_proc_task(dir); task 2118 fs/proc/base.c if (!task) task 2122 fs/proc/base.c if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) task 2129 fs/proc/base.c mm = get_task_mm(task); task 2143 fs/proc/base.c result = proc_map_files_instantiate(dentry, task, task 2151 fs/proc/base.c put_task_struct(task); task 2166 fs/proc/base.c struct task_struct *task; task 2176 fs/proc/base.c task = get_proc_task(file_inode(file)); task 2177 fs/proc/base.c if (!task) task 2181 fs/proc/base.c if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) task 2188 fs/proc/base.c mm = get_task_mm(task); task 2240 fs/proc/base.c task, task 2247 fs/proc/base.c put_task_struct(task); task 2262 fs/proc/base.c struct task_struct *task; task 2272 fs/proc/base.c tp->task = get_pid_task(tp->pid, PIDTYPE_PID); task 2273 fs/proc/base.c if (!tp->task) task 2276 fs/proc/base.c tp->sighand = lock_task_sighand(tp->task, &tp->flags); task 2280 fs/proc/base.c return seq_list_start(&tp->task->signal->posix_timers, *pos); task 2286 fs/proc/base.c return seq_list_next(v, &tp->task->signal->posix_timers, pos); task 2294 fs/proc/base.c unlock_task_sighand(tp->task, &tp->flags); task 2298 fs/proc/base.c if (tp->task) { task 2299 fs/proc/base.c put_task_struct(tp->task); task 2300 fs/proc/base.c tp->task = NULL; task 2453 fs/proc/base.c struct task_struct *task, const void *ptr) task 2459 fs/proc/base.c inode = proc_pid_make_inode(dentry->d_sb, task, p->mode); task 2471 fs/proc/base.c pid_update_inode(task, inode); task 2481 fs/proc/base.c struct task_struct *task = get_proc_task(dir); task 2484 fs/proc/base.c if (!task) task 2495 fs/proc/base.c res = proc_pident_instantiate(dentry, task, p); task 2499 fs/proc/base.c put_task_struct(task); task 2507 fs/proc/base.c struct task_struct *task = get_proc_task(file_inode(file)); task 2510 fs/proc/base.c if (!task) task 2521 fs/proc/base.c proc_pident_instantiate, task, p)) task 2526 fs/proc/base.c put_task_struct(task); task 2537 fs/proc/base.c struct task_struct *task = get_proc_task(inode); task 2539 fs/proc/base.c if (!task) task 2542 fs/proc/base.c length = security_getprocattr(task, PROC_I(inode)->op.lsm, task 2545 fs/proc/base.c put_task_struct(task); task 2556 fs/proc/base.c struct task_struct *task; task 2561 fs/proc/base.c task = pid_task(proc_pid(inode), PIDTYPE_PID); task 2562 fs/proc/base.c if (!task) { task 2567 fs/proc/base.c if (current != task) { task 2693 fs/proc/base.c struct task_struct *task = get_proc_task(file_inode(file)); task 2699 fs/proc/base.c if (!task) task 2703 fs/proc/base.c mm = get_task_mm(task); task 2712 fs/proc/base.c put_task_struct(task); task 2722 fs/proc/base.c struct task_struct *task; task 2734 fs/proc/base.c task = get_proc_task(file_inode(file)); task 2735 fs/proc/base.c if (!task) task 2738 fs/proc/base.c mm = get_task_mm(task); task 2752 fs/proc/base.c put_task_struct(task); task 2767 fs/proc/base.c static int do_io_accounting(struct task_struct *task, struct seq_file *m, int whole) task 2769 fs/proc/base.c struct task_io_accounting acct = task->ioac; task 2773 fs/proc/base.c result = mutex_lock_killable(&task->signal->cred_guard_mutex); task 2777 fs/proc/base.c if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) 
{ task 2782 fs/proc/base.c if (whole && lock_task_sighand(task, &flags)) { task 2783 fs/proc/base.c struct task_struct *t = task; task 2785 fs/proc/base.c task_io_accounting_add(&acct, &task->signal->ioac); task 2786 fs/proc/base.c while_each_thread(task, t) task 2789 fs/proc/base.c unlock_task_sighand(task, &flags); task 2809 fs/proc/base.c mutex_unlock(&task->signal->cred_guard_mutex); task 2814 fs/proc/base.c struct pid *pid, struct task_struct *task) task 2816 fs/proc/base.c return do_io_accounting(task, m, 0); task 2820 fs/proc/base.c struct pid *pid, struct task_struct *task) task 2822 fs/proc/base.c return do_io_accounting(task, m, 1); task 2831 fs/proc/base.c struct task_struct *task; task 2835 fs/proc/base.c task = get_proc_task(inode); task 2836 fs/proc/base.c if (task) { task 2838 fs/proc/base.c ns = get_user_ns(task_cred_xxx(task, user_ns)); task 2840 fs/proc/base.c put_task_struct(task); task 2909 fs/proc/base.c struct task_struct *task; task 2913 fs/proc/base.c task = get_proc_task(inode); task 2914 fs/proc/base.c if (task) { task 2916 fs/proc/base.c ns = get_user_ns(task_cred_xxx(task, user_ns)); task 2918 fs/proc/base.c put_task_struct(task); task 2959 fs/proc/base.c struct pid *pid, struct task_struct *task) task 2961 fs/proc/base.c int err = lock_trace(task); task 2963 fs/proc/base.c seq_printf(m, "%08x\n", task->personality); task 2964 fs/proc/base.c unlock_trace(task); task 2971 fs/proc/base.c struct pid *pid, struct task_struct *task) task 2973 fs/proc/base.c seq_printf(m, "%d\n", task->patch_state); task 2980 fs/proc/base.c struct pid *pid, struct task_struct *task) task 2983 fs/proc/base.c (task->prev_lowest_stack & (THREAD_SIZE - 1)); task 2985 fs/proc/base.c (task->lowest_stack & (THREAD_SIZE - 1)); task 3206 fs/proc/base.c void proc_flush_task(struct task_struct *task) task 3212 fs/proc/base.c pid = task_pid(task); task 3213 fs/proc/base.c tgid = task_tgid(task); task 3223 fs/proc/base.c struct task_struct *task, const void *ptr) task 3227 fs/proc/base.c inode = proc_pid_make_inode(dentry->d_sb, task, S_IFDIR | S_IRUGO | S_IXUGO); task 3236 fs/proc/base.c pid_update_inode(task, inode); task 3244 fs/proc/base.c struct task_struct *task; task 3255 fs/proc/base.c task = find_task_by_pid_ns(tgid, ns); task 3256 fs/proc/base.c if (task) task 3257 fs/proc/base.c get_task_struct(task); task 3259 fs/proc/base.c if (!task) task 3262 fs/proc/base.c result = proc_pid_instantiate(dentry, task, NULL); task 3263 fs/proc/base.c put_task_struct(task); task 3274 fs/proc/base.c struct task_struct *task; task 3280 fs/proc/base.c if (iter.task) task 3281 fs/proc/base.c put_task_struct(iter.task); task 3284 fs/proc/base.c iter.task = NULL; task 3288 fs/proc/base.c iter.task = pid_task(pid, PIDTYPE_PID); task 3301 fs/proc/base.c if (!iter.task || !has_group_leader_pid(iter.task)) { task 3305 fs/proc/base.c get_task_struct(iter.task); task 3336 fs/proc/base.c iter.task = NULL; task 3338 fs/proc/base.c iter.task; task 3344 fs/proc/base.c if (!has_pid_permissions(ns, iter.task, HIDEPID_INVISIBLE)) task 3350 fs/proc/base.c proc_pid_instantiate, iter.task, NULL)) { task 3351 fs/proc/base.c put_task_struct(iter.task); task 3374 fs/proc/base.c struct task_struct *task; task 3376 fs/proc/base.c task = get_proc_task(inode); task 3377 fs/proc/base.c if (!task) task 3379 fs/proc/base.c is_same_tgroup = same_thread_group(current, task); task 3380 fs/proc/base.c put_task_struct(task); task 3518 fs/proc/base.c struct task_struct *task, const void *ptr) task 3521 fs/proc/base.c inode = 
proc_pid_make_inode(dentry->d_sb, task, S_IFDIR | S_IRUGO | S_IXUGO); task 3530 fs/proc/base.c pid_update_inode(task, inode); task 3538 fs/proc/base.c struct task_struct *task; task 3553 fs/proc/base.c task = find_task_by_pid_ns(tid, ns); task 3554 fs/proc/base.c if (task) task 3555 fs/proc/base.c get_task_struct(task); task 3557 fs/proc/base.c if (!task) task 3559 fs/proc/base.c if (!same_thread_group(leader, task)) task 3562 fs/proc/base.c result = proc_task_instantiate(dentry, task, NULL); task 3564 fs/proc/base.c put_task_struct(task); task 3586 fs/proc/base.c struct task_struct *pos, *task; task 3593 fs/proc/base.c task = pid_task(pid, PIDTYPE_PID); task 3594 fs/proc/base.c if (!task) task 3600 fs/proc/base.c if (pos && same_thread_group(pos, task)) task 3605 fs/proc/base.c if (nr >= get_nr_threads(task)) task 3611 fs/proc/base.c pos = task = task->group_leader; task 3615 fs/proc/base.c } while_each_thread(task, pos); task 3652 fs/proc/base.c struct task_struct *task; task 3668 fs/proc/base.c for (task = first_tid(proc_pid(inode), tid, ctx->pos - 2, ns); task 3669 fs/proc/base.c task; task 3670 fs/proc/base.c task = next_tid(task), ctx->pos++) { task 3673 fs/proc/base.c tid = task_pid_nr_ns(task, ns); task 3676 fs/proc/base.c proc_task_instantiate, task, NULL)) { task 3680 fs/proc/base.c put_task_struct(task); task 25 fs/proc/fd.c struct task_struct *task; task 27 fs/proc/fd.c task = get_proc_task(m->private); task 28 fs/proc/fd.c if (!task) task 31 fs/proc/fd.c files = get_files_struct(task); task 32 fs/proc/fd.c put_task_struct(task); task 84 fs/proc/fd.c static bool tid_fd_mode(struct task_struct *task, unsigned fd, fmode_t *mode) task 86 fs/proc/fd.c struct files_struct *files = get_files_struct(task); task 101 fs/proc/fd.c static void tid_fd_update_inode(struct task_struct *task, struct inode *inode, task 104 fs/proc/fd.c task_dump_owner(task, 0, &inode->i_uid, &inode->i_gid); task 114 fs/proc/fd.c security_task_to_inode(task, inode); task 119 fs/proc/fd.c struct task_struct *task; task 127 fs/proc/fd.c task = get_proc_task(inode); task 130 fs/proc/fd.c if (task) { task 132 fs/proc/fd.c if (tid_fd_mode(task, fd, &f_mode)) { task 133 fs/proc/fd.c tid_fd_update_inode(task, inode, f_mode); task 134 fs/proc/fd.c put_task_struct(task); task 137 fs/proc/fd.c put_task_struct(task); task 150 fs/proc/fd.c struct task_struct *task; task 153 fs/proc/fd.c task = get_proc_task(d_inode(dentry)); task 154 fs/proc/fd.c if (task) { task 155 fs/proc/fd.c files = get_files_struct(task); task 156 fs/proc/fd.c put_task_struct(task); task 183 fs/proc/fd.c struct task_struct *task, const void *ptr) task 189 fs/proc/fd.c inode = proc_pid_make_inode(dentry->d_sb, task, S_IFLNK); task 200 fs/proc/fd.c tid_fd_update_inode(task, inode, data->mode); task 210 fs/proc/fd.c struct task_struct *task = get_proc_task(dir); task 214 fs/proc/fd.c if (!task) task 218 fs/proc/fd.c if (!tid_fd_mode(task, data.fd, &data.mode)) task 221 fs/proc/fd.c result = instantiate(dentry, task, &data); task 223 fs/proc/fd.c put_task_struct(task); task 322 fs/proc/fd.c struct task_struct *task, const void *ptr) task 328 fs/proc/fd.c inode = proc_pid_make_inode(dentry->d_sb, task, S_IFREG | S_IRUSR); task 336 fs/proc/fd.c tid_fd_update_inode(task, inode, 0); task 80 fs/proc/internal.h struct task_struct *task); task 124 fs/proc/internal.h void task_dump_owner(struct task_struct *task, umode_t mode, task 278 fs/proc/internal.h struct task_struct *task; task 43 fs/proc/namespaces.c struct task_struct *task; task 50 fs/proc/namespaces.c 
task = get_proc_task(inode); task 51 fs/proc/namespaces.c if (!task) task 54 fs/proc/namespaces.c if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) { task 55 fs/proc/namespaces.c error = ns_get_path(&ns_path, task, ns_ops); task 59 fs/proc/namespaces.c put_task_struct(task); task 67 fs/proc/namespaces.c struct task_struct *task; task 71 fs/proc/namespaces.c task = get_proc_task(inode); task 72 fs/proc/namespaces.c if (!task) task 75 fs/proc/namespaces.c if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) { task 76 fs/proc/namespaces.c res = ns_get_name(name, sizeof(name), task, ns_ops); task 80 fs/proc/namespaces.c put_task_struct(task); task 91 fs/proc/namespaces.c struct task_struct *task, const void *ptr) task 97 fs/proc/namespaces.c inode = proc_pid_make_inode(dentry->d_sb, task, S_IFLNK | S_IRWXUGO); task 104 fs/proc/namespaces.c pid_update_inode(task, inode); task 112 fs/proc/namespaces.c struct task_struct *task = get_proc_task(file_inode(file)); task 115 fs/proc/namespaces.c if (!task) task 127 fs/proc/namespaces.c proc_ns_instantiate, task, ops)) task 133 fs/proc/namespaces.c put_task_struct(task); task 146 fs/proc/namespaces.c struct task_struct *task = get_proc_task(dir); task 151 fs/proc/namespaces.c if (!task) task 164 fs/proc/namespaces.c res = proc_ns_instantiate(dentry, task, *entry); task 166 fs/proc/namespaces.c put_task_struct(task); task 256 fs/proc/proc_net.c struct task_struct *task; task 261 fs/proc/proc_net.c task = pid_task(proc_pid(dir), PIDTYPE_PID); task 262 fs/proc/proc_net.c if (task != NULL) { task 263 fs/proc/proc_net.c task_lock(task); task 264 fs/proc/proc_net.c ns = task->nsproxy; task 267 fs/proc/proc_net.c task_unlock(task); task 106 fs/proc/task_mmu.c struct task_struct *task = priv->task; task 108 fs/proc/task_mmu.c task_lock(task); task 109 fs/proc/task_mmu.c priv->task_mempolicy = get_task_policy(task); task 111 fs/proc/task_mmu.c task_unlock(task); task 161 fs/proc/task_mmu.c priv->task = get_proc_task(priv->inode); task 162 fs/proc/task_mmu.c if (!priv->task) task 220 fs/proc/task_mmu.c if (priv->task) { task 221 fs/proc/task_mmu.c put_task_struct(priv->task); task 222 fs/proc/task_mmu.c priv->task = NULL; task 864 fs/proc/task_mmu.c priv->task = get_proc_task(priv->inode); task 865 fs/proc/task_mmu.c if (!priv->task) task 900 fs/proc/task_mmu.c put_task_struct(priv->task); task 901 fs/proc/task_mmu.c priv->task = NULL; task 1137 fs/proc/task_mmu.c struct task_struct *task; task 1158 fs/proc/task_mmu.c task = get_proc_task(file_inode(file)); task 1159 fs/proc/task_mmu.c if (!task) task 1161 fs/proc/task_mmu.c mm = get_task_mm(task); task 1236 fs/proc/task_mmu.c put_task_struct(task); task 206 fs/proc/task_nommu.c priv->task = get_proc_task(priv->inode); task 207 fs/proc/task_nommu.c if (!priv->task) task 237 fs/proc/task_nommu.c if (priv->task) { task 238 fs/proc/task_nommu.c put_task_struct(priv->task); task 239 fs/proc/task_nommu.c priv->task = NULL; task 241 fs/proc_namespace.c struct task_struct *task = get_proc_task(inode); task 249 fs/proc_namespace.c if (!task) task 252 fs/proc_namespace.c task_lock(task); task 253 fs/proc_namespace.c nsp = task->nsproxy; task 255 fs/proc_namespace.c task_unlock(task); task 256 fs/proc_namespace.c put_task_struct(task); task 261 fs/proc_namespace.c if (!task->fs) { task 262 fs/proc_namespace.c task_unlock(task); task 263 fs/proc_namespace.c put_task_struct(task); task 267 fs/proc_namespace.c get_fs_root(task->fs, &root); task 268 fs/proc_namespace.c task_unlock(task); task 269 fs/proc_namespace.c 
put_task_struct(task); task 7 include/asm-generic/current.h #define get_current() (current_thread_info()->task) task 30 include/asm-generic/mmu_context.h static inline void deactivate_mm(struct task_struct *task, task 38 include/asm-generic/syscall.h int syscall_get_nr(struct task_struct *task, struct pt_regs *regs); task 56 include/asm-generic/syscall.h void syscall_rollback(struct task_struct *task, struct pt_regs *regs); task 68 include/asm-generic/syscall.h long syscall_get_error(struct task_struct *task, struct pt_regs *regs); task 81 include/asm-generic/syscall.h long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs); task 98 include/asm-generic/syscall.h void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, task 113 include/asm-generic/syscall.h void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, task 128 include/asm-generic/syscall.h void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, task 143 include/asm-generic/syscall.h int syscall_get_arch(struct task_struct *task); task 84 include/drm/drm_flip_work.h struct drm_flip_task *task); task 80 include/linux/amd-iommu.h struct task_struct *task); task 263 include/linux/audit.h extern int audit_alloc(struct task_struct *task); task 264 include/linux/audit.h extern void __audit_free(struct task_struct *task); task 282 include/linux/audit.h static inline void audit_set_context(struct task_struct *task, struct audit_context *ctx) task 284 include/linux/audit.h task->audit_context = ctx; task 297 include/linux/audit.h static inline void audit_free(struct task_struct *task) task 299 include/linux/audit.h if (unlikely(task->audit_context)) task 300 include/linux/audit.h __audit_free(task); task 519 include/linux/audit.h static inline int audit_alloc(struct task_struct *task) task 523 include/linux/audit.h static inline void audit_free(struct task_struct *task) task 535 include/linux/audit.h static inline void audit_set_context(struct task_struct *task, struct audit_context *ctx) task 1801 include/linux/blkdev.h static inline void blk_flush_plug(struct task_struct *task) task 1805 include/linux/blkdev.h static inline void blk_schedule_flush_plug(struct task_struct *task) task 644 include/linux/cgroup-defs.h int (*can_fork)(struct task_struct *task); task 645 include/linux/cgroup-defs.h void (*cancel_fork)(struct task_struct *task); task 646 include/linux/cgroup-defs.h void (*fork)(struct task_struct *task); task 647 include/linux/cgroup-defs.h void (*exit)(struct task_struct *task); task 648 include/linux/cgroup-defs.h void (*release)(struct task_struct *task); task 119 include/linux/cgroup.h int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen); task 283 include/linux/cgroup.h #define cgroup_taskset_for_each(task, dst_css, tset) \ task 284 include/linux/cgroup.h for ((task) = cgroup_taskset_first((tset), &(dst_css)); \ task 285 include/linux/cgroup.h (task); \ task 286 include/linux/cgroup.h (task) = cgroup_taskset_next((tset), &(dst_css))) task 445 include/linux/cgroup.h #define task_css_set_check(task, __c) \ task 446 include/linux/cgroup.h rcu_dereference_check((task)->cgroups, \ task 449 include/linux/cgroup.h ((task)->flags & PF_EXITING) || (__c)) task 451 include/linux/cgroup.h #define task_css_set_check(task, __c) \ task 452 include/linux/cgroup.h rcu_dereference((task)->cgroups) task 464 include/linux/cgroup.h #define task_css_check(task, subsys_id, __c) \ task 465 include/linux/cgroup.h task_css_set_check((task), 
(__c))->subsys[(subsys_id)] task 473 include/linux/cgroup.h static inline struct css_set *task_css_set(struct task_struct *task) task 475 include/linux/cgroup.h return task_css_set_check(task, false); task 485 include/linux/cgroup.h static inline struct cgroup_subsys_state *task_css(struct task_struct *task, task 488 include/linux/cgroup.h return task_css_check(task, subsys_id, false); task 501 include/linux/cgroup.h task_get_css(struct task_struct *task, int subsys_id) task 507 include/linux/cgroup.h css = task_css(task, subsys_id); task 530 include/linux/cgroup.h static inline bool task_css_is_root(struct task_struct *task, int subsys_id) task 532 include/linux/cgroup.h return task_css_check(task, subsys_id, true) == task 536 include/linux/cgroup.h static inline struct cgroup *task_cgroup(struct task_struct *task, task 539 include/linux/cgroup.h return task_css(task, subsys_id)->cgroup; task 542 include/linux/cgroup.h static inline struct cgroup *task_dfl_cgroup(struct task_struct *task) task 544 include/linux/cgroup.h return task_css_set(task)->dfl_cgrp; task 603 include/linux/cgroup.h static inline bool task_under_cgroup_hierarchy(struct task_struct *task, task 606 include/linux/cgroup.h struct css_set *cset = task_css_set(task); task 738 include/linux/cgroup.h static inline bool task_under_cgroup_hierarchy(struct task_struct *task, task 774 include/linux/cgroup.h static inline void cgroup_account_cputime(struct task_struct *task, task 779 include/linux/cgroup.h cpuacct_charge(task, delta_exec); task 782 include/linux/cgroup.h cgrp = task_dfl_cgroup(task); task 788 include/linux/cgroup.h static inline void cgroup_account_cputime_field(struct task_struct *task, task 794 include/linux/cgroup.h cpuacct_account_field(task, index, delta_exec); task 797 include/linux/cgroup.h cgrp = task_dfl_cgroup(task); task 805 include/linux/cgroup.h static inline void cgroup_account_cputime(struct task_struct *task, task 807 include/linux/cgroup.h static inline void cgroup_account_cputime_field(struct task_struct *task, task 905 include/linux/cgroup.h void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src, task 908 include/linux/cgroup.h static inline bool cgroup_task_freeze(struct task_struct *task) task 912 include/linux/cgroup.h if (task->flags & PF_KTHREAD) task 916 include/linux/cgroup.h ret = test_bit(CGRP_FREEZE, &task_dfl_cgroup(task)->flags); task 922 include/linux/cgroup.h static inline bool cgroup_task_frozen(struct task_struct *task) task 924 include/linux/cgroup.h return task->frozen; task 931 include/linux/cgroup.h static inline bool cgroup_task_freeze(struct task_struct *task) task 935 include/linux/cgroup.h static inline bool cgroup_task_frozen(struct task_struct *task) task 23 include/linux/cn_proc.h void proc_fork_connector(struct task_struct *task); task 24 include/linux/cn_proc.h void proc_exec_connector(struct task_struct *task); task 25 include/linux/cn_proc.h void proc_id_connector(struct task_struct *task, int which_id); task 26 include/linux/cn_proc.h void proc_sid_connector(struct task_struct *task); task 27 include/linux/cn_proc.h void proc_ptrace_connector(struct task_struct *task, int which_id); task 28 include/linux/cn_proc.h void proc_comm_connector(struct task_struct *task); task 29 include/linux/cn_proc.h void proc_coredump_connector(struct task_struct *task); task 30 include/linux/cn_proc.h void proc_exit_connector(struct task_struct *task); task 32 include/linux/cn_proc.h static inline void proc_fork_connector(struct task_struct *task) task 35 
include/linux/cn_proc.h static inline void proc_exec_connector(struct task_struct *task) task 38 include/linux/cn_proc.h static inline void proc_id_connector(struct task_struct *task, task 42 include/linux/cn_proc.h static inline void proc_sid_connector(struct task_struct *task) task 45 include/linux/cn_proc.h static inline void proc_comm_connector(struct task_struct *task) task 48 include/linux/cn_proc.h static inline void proc_ptrace_connector(struct task_struct *task, task 52 include/linux/cn_proc.h static inline void proc_coredump_connector(struct task_struct *task) task 55 include/linux/cn_proc.h static inline void proc_exit_connector(struct task_struct *task) task 100 include/linux/cpuset.h struct task_struct *task); task 231 include/linux/cpuset.h struct task_struct *task) task 319 include/linux/cred.h #define __task_cred(task) \ task 320 include/linux/cred.h rcu_dereference((task)->real_cred) task 362 include/linux/cred.h #define task_cred_xxx(task, xxx) \ task 366 include/linux/cred.h ___val = __task_cred((task))->xxx; \ task 371 include/linux/cred.h #define task_uid(task) (task_cred_xxx((task), uid)) task 372 include/linux/cred.h #define task_euid(task) (task_cred_xxx((task), euid)) task 53 include/linux/debug_locks.h extern void debug_show_held_locks(struct task_struct *task); task 61 include/linux/debug_locks.h static inline void debug_show_held_locks(struct task_struct *task) task 74 include/linux/freezer.h extern bool cgroup_freezing(struct task_struct *task); task 76 include/linux/freezer.h static inline bool cgroup_freezing(struct task_struct *task) task 604 include/linux/fs.h uncached_acl_sentinel(struct task_struct *task) task 606 include/linux/fs.h return (void *)task + 1; task 238 include/linux/fsl/bestcomm/bestcomm_priv.h extern int bcom_load_image(int task, u32 *task_image); task 239 include/linux/fsl/bestcomm/bestcomm_priv.h extern void bcom_set_initiator(int task, int initiator); task 261 include/linux/fsl/bestcomm/bestcomm_priv.h bcom_enable_task(int task) task 264 include/linux/fsl/bestcomm/bestcomm_priv.h reg = in_be16(&bcom_eng->regs->tcr[task]); task 265 include/linux/fsl/bestcomm/bestcomm_priv.h out_be16(&bcom_eng->regs->tcr[task], reg | TASK_ENABLE); task 269 include/linux/fsl/bestcomm/bestcomm_priv.h bcom_disable_task(int task) task 271 include/linux/fsl/bestcomm/bestcomm_priv.h u16 reg = in_be16(&bcom_eng->regs->tcr[task]); task 272 include/linux/fsl/bestcomm/bestcomm_priv.h out_be16(&bcom_eng->regs->tcr[task], reg & ~TASK_ENABLE); task 277 include/linux/fsl/bestcomm/bestcomm_priv.h bcom_task_desc(int task) task 279 include/linux/fsl/bestcomm/bestcomm_priv.h return bcom_sram_pa2va(bcom_eng->tdt[task].start); task 283 include/linux/fsl/bestcomm/bestcomm_priv.h bcom_task_num_descs(int task) task 285 include/linux/fsl/bestcomm/bestcomm_priv.h return (bcom_eng->tdt[task].stop - bcom_eng->tdt[task].start)/sizeof(u32) + 1; task 289 include/linux/fsl/bestcomm/bestcomm_priv.h bcom_task_var(int task) task 291 include/linux/fsl/bestcomm/bestcomm_priv.h return bcom_sram_pa2va(bcom_eng->tdt[task].var); task 295 include/linux/fsl/bestcomm/bestcomm_priv.h bcom_task_inc(int task) task 297 include/linux/fsl/bestcomm/bestcomm_priv.h return &bcom_task_var(task)[BCOM_MAX_VAR]; task 328 include/linux/fsl/bestcomm/bestcomm_priv.h bcom_set_task_pragma(int task, int pragma) task 330 include/linux/fsl/bestcomm/bestcomm_priv.h u32 *fdt = &bcom_eng->tdt[task].fdt; task 335 include/linux/fsl/bestcomm/bestcomm_priv.h bcom_set_task_auto_start(int task, int next_task) task 337 
include/linux/fsl/bestcomm/bestcomm_priv.h u16 __iomem *tcr = &bcom_eng->regs->tcr[task]; task 342 include/linux/fsl/bestcomm/bestcomm_priv.h bcom_set_tcr_initiator(int task, int initiator) task 344 include/linux/fsl/bestcomm/bestcomm_priv.h u16 __iomem *tcr = &bcom_eng->regs->tcr[task]; task 785 include/linux/ftrace.h ftrace_graph_get_ret_stack(struct task_struct *task, int idx); task 787 include/linux/ftrace.h unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, task 836 include/linux/ftrace.h ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret, task 297 include/linux/hp_sdc.h struct tasklet_struct task; task 137 include/linux/hrtimer.h struct task_struct *task; task 150 include/linux/iocontext.h void exit_io_context(struct task_struct *task); task 151 include/linux/iocontext.h struct io_context *get_task_io_context(struct task_struct *task, task 156 include/linux/iocontext.h static inline void exit_io_context(struct task_struct *task) { } task 54 include/linux/ioprio.h static inline int task_nice_ioprio(struct task_struct *task) task 56 include/linux/ioprio.h return (task_nice(task) + 20) / 5; task 63 include/linux/ioprio.h static inline int task_nice_ioclass(struct task_struct *task) task 65 include/linux/ioprio.h if (task->policy == SCHED_IDLE) task 67 include/linux/ioprio.h else if (task_is_realtime(task)) task 91 include/linux/ioprio.h extern int set_task_ioprio(struct task_struct *task, int ioprio); task 40 include/linux/kasan.h void kasan_unpoison_task_stack(struct task_struct *task); task 93 include/linux/kasan.h static inline void kasan_unpoison_task_stack(struct task_struct *task) {} task 162 include/linux/kprobes.h struct task_struct *task; task 91 include/linux/kthread.h struct task_struct *task; task 31 include/linux/latencytop.h void __account_scheduler_latency(struct task_struct *task, int usecs, int inter); task 33 include/linux/latencytop.h account_scheduler_latency(struct task_struct *task, int usecs, int inter) task 36 include/linux/latencytop.h __account_scheduler_latency(task, usecs, inter); task 47 include/linux/latencytop.h account_scheduler_latency(struct task_struct *task, int usecs, int inter) task 192 include/linux/livepatch.h void klp_update_patch_state(struct task_struct *task); task 194 include/linux/livepatch.h static inline bool klp_patch_pending(struct task_struct *task) task 196 include/linux/livepatch.h return test_tsk_thread_flag(task, TIF_PATCH_PENDING); task 224 include/linux/livepatch.h static inline bool klp_patch_pending(struct task_struct *task) { return false; } task 225 include/linux/livepatch.h static inline void klp_update_patch_state(struct task_struct *task) {} task 286 include/linux/lockdep.h extern void lockdep_set_selftest_task(struct task_struct *task); task 288 include/linux/lockdep.h extern void lockdep_init_task(struct task_struct *task); task 414 include/linux/lockdep.h static inline void lockdep_init_task(struct task_struct *task) task 426 include/linux/lockdep.h static inline void lockdep_set_selftest_task(struct task_struct *task) task 510 include/linux/lockdep.h static inline void lockdep_free_task(struct task_struct *task) {} task 1619 include/linux/lsm_hooks.h int (*task_alloc)(struct task_struct *task, unsigned long clone_flags); task 1620 include/linux/lsm_hooks.h void (*task_free)(struct task_struct *task); task 308 include/linux/mempolicy.h static inline void mpol_put_task_policy(struct task_struct *task) task 1154 include/linux/mm.h #define cpupid_match_pid(task, cpupid) 
__cpupid_match_pid(task->pid, cpupid) task 1222 include/linux/mm.h static inline bool cpupid_match_pid(struct task_struct *task, int cpupid) task 1549 include/linux/mm.h struct task_struct *task, bool bypass_rlim); task 1622 include/linux/mm.h int get_cmdline(struct task_struct *task, char *buffer, int buflen); task 2322 include/linux/mm.h extern struct file *get_task_exe_file(struct task_struct *task); task 359 include/linux/mm_types.h struct task_struct *task; task 268 include/linux/mmc/host.h struct task_struct *task; task 74 include/linux/mutex.h struct task_struct *task; task 309 include/linux/nfs_xdr.h struct rpc_task task; task 1531 include/linux/nfs_xdr.h struct rpc_task task; task 1567 include/linux/nfs_xdr.h struct rpc_task task; task 1584 include/linux/nfs_xdr.h int (*commit_done_cb) (struct rpc_task *task, struct nfs_commit_data *data); task 1667 include/linux/nfs_xdr.h void (*rename_rpc_prepare)(struct rpc_task *task, struct nfs_renamedata *); task 1668 include/linux/nfs_xdr.h int (*rename_done) (struct rpc_task *task, struct inode *old_dir, struct inode *new_dir); task 62 include/linux/nospec.h int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which); task 63 include/linux/nospec.h int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, task 66 include/linux/nospec.h void arch_seccomp_spec_mitigate(struct task_struct *task); task 113 include/linux/oprofile.h struct task_struct *task); task 773 include/linux/perf_event.h struct task_struct *task; task 868 include/linux/perf_event.h perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx) task 870 include/linux/perf_event.h return container_of(task_css_check(task, perf_event_cgrp_id, task 895 include/linux/perf_event.h struct task_struct *task); task 900 include/linux/perf_event.h extern void perf_event_free_task(struct task_struct *task); task 901 include/linux/perf_event.h extern void perf_event_delayed_put(struct task_struct *task); task 921 include/linux/perf_event.h struct task_struct *task, task 1140 include/linux/perf_event.h static inline void perf_event_task_migrate(struct task_struct *task) task 1143 include/linux/perf_event.h task->sched_migrated = 1; task 1147 include/linux/perf_event.h struct task_struct *task) task 1150 include/linux/perf_event.h __perf_event_task_sched_in(prev, task); task 1152 include/linux/perf_event.h if (perf_sw_migrate_enabled() && task->sched_migrated) { task 1157 include/linux/perf_event.h task->sched_migrated = 0; task 1263 include/linux/perf_event.h struct task_struct *task); task 1352 include/linux/perf_event.h perf_event_task_migrate(struct task_struct *task) { } task 1355 include/linux/perf_event.h struct task_struct *task) { } task 1361 include/linux/perf_event.h static inline void perf_event_free_task(struct task_struct *task) { } task 1362 include/linux/perf_event.h static inline void perf_event_delayed_put(struct task_struct *task) { } task 21 include/linux/perf_regs.h u64 perf_reg_abi(struct task_struct *task); task 39 include/linux/perf_regs.h static inline u64 perf_reg_abi(struct task_struct *task) task 90 include/linux/pid.h extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type); task 95 include/linux/pid.h extern void attach_pid(struct task_struct *task, enum pid_type); task 96 include/linux/pid.h extern void detach_pid(struct task_struct *task, enum pid_type); task 97 include/linux/pid.h extern void change_pid(struct task_struct *task, enum pid_type, task 178 include/linux/pid.h #define 
do_each_pid_task(pid, type, task) \
task 181 include/linux/pid.h hlist_for_each_entry_rcu((task), \
task 188 include/linux/pid.h #define while_each_pid_task(pid, type, task) \
task 194 include/linux/pid.h #define do_each_pid_thread(pid, type, task) \
task 195 include/linux/pid.h do_each_pid_task(pid, type, task) { \
task 196 include/linux/pid.h struct task_struct *tg___ = task; \
task 197 include/linux/pid.h for_each_thread(tg___, task) {
task 199 include/linux/pid.h #define while_each_pid_thread(pid, type, task) \
task 201 include/linux/pid.h task = tg___; \
task 202 include/linux/pid.h } while_each_pid_task(pid, type, task)
task 72 include/linux/posix-timers.h struct task_struct *task;
task 225 include/linux/posix-timers.h void posix_cpu_timers_exit(struct task_struct *task);
task 226 include/linux/posix-timers.h void posix_cpu_timers_exit_group(struct task_struct *task);
task 227 include/linux/posix-timers.h void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
task 230 include/linux/posix-timers.h void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new);
task 84 include/linux/proc_fs.h struct pid *pid, struct task_struct *task);
task 93 include/linux/proc_fs.h static inline void proc_flush_task(struct task_struct *task)
task 20 include/linux/proc_ns.h struct ns_common *(*get)(struct task_struct *task);
task 79 include/linux/proc_ns.h extern void *ns_get_path(struct path *path, struct task_struct *task,
task 85 include/linux/proc_ns.h extern int ns_get_name(char *buf, size_t size, struct task_struct *task,
task 71 include/linux/profile.h void profile_task_exit(struct task_struct * task);
task 76 include/linux/profile.h int profile_handoff_task(struct task_struct * task);
task 19 include/linux/psi.h void psi_task_change(struct task_struct *task, int clear, int set);
task 21 include/linux/psi.h void psi_memstall_tick(struct task_struct *task, int cpu);
task 95 include/linux/ptrace.h extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
task 124 include/linux/ptrace.h static inline struct task_struct *ptrace_parent(struct task_struct *task)
task 126 include/linux/ptrace.h if (unlikely(task->ptrace))
task 127 include/linux/ptrace.h return rcu_dereference(task->parent);
task 140 include/linux/ptrace.h static inline bool ptrace_event_enabled(struct task_struct *task, int event)
task 142 include/linux/ptrace.h return task->ptrace & PT_EVENT_FLAG(event);
task 235 include/linux/ptrace.h static inline void ptrace_release_task(struct task_struct *task)
task 237 include/linux/ptrace.h BUG_ON(!list_empty(&task->ptraced));
task 238 include/linux/ptrace.h ptrace_unlink(task);
task 239 include/linux/ptrace.h BUG_ON(!list_empty(&task->ptrace_entry));
task 295 include/linux/ptrace.h static inline void user_enable_single_step(struct task_struct *task)
task 309 include/linux/ptrace.h static inline void user_disable_single_step(struct task_struct *task)
task 338 include/linux/ptrace.h static inline void user_enable_block_step(struct task_struct *task)
task 16 include/linux/rcuwait.h struct task_struct __rcu *task;
task 20 include/linux/rcuwait.h { .task = NULL, }
task 24 include/linux/rcuwait.h w->task = NULL;
task 35 include/linux/rcuwait.h rcu_assign_pointer((w)->task, current); \
task 48 include/linux/rcuwait.h WRITE_ONCE((w)->task, NULL); \
task 51 include/linux/rtmutex.h extern void rt_mutex_debug_check_no_locks_held(struct task_struct *task);
task 58 include/linux/rtmutex.h # define rt_mutex_debug_check_no_locks_held(task) do { } while (0)
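The do_each_pid_task()/while_each_pid_task() pair declared in include/linux/pid.h above expands to an RCU hlist walk (note the hlist_for_each_entry_rcu() in its body), so the caller must hold rcu_read_lock() or an equivalent lock for the duration. A hedged sketch of the usual shape; example_print_pgrp() is invented, the macros and PIDTYPE_PGID are the real API:

/*
 * Illustrative only: walks every task attached to a struct pid as a
 * process group, under rcu_read_lock() (some in-tree callers hold
 * tasklist_lock instead).
 */
#include <linux/pid.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

static void example_print_pgrp(struct pid *pgrp)
{
	struct task_struct *task;

	rcu_read_lock();
	do_each_pid_task(pgrp, PIDTYPE_PGID, task) {
		pr_info("pgrp member %s[%d]\n", task->comm, task->pid);
	} while_each_pid_task(pgrp, PIDTYPE_PGID, task);
	rcu_read_unlock();
}

The thread-level variants listed above, do_each_pid_thread()/while_each_pid_thread(), wrap this same walk with for_each_thread() so the visit covers every thread of each task, not just the tasks directly attached to the pid.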
task 110 include/linux/sched.h #define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) task 112 include/linux/sched.h #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) task 114 include/linux/sched.h #define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) task 116 include/linux/sched.h #define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ task 117 include/linux/sched.h (task->flags & PF_FROZEN) == 0 && \ task 118 include/linux/sched.h (task->state & TASK_NOLOAD) == 0) task 1288 include/linux/sched.h static inline struct pid *task_pid(struct task_struct *task) task 1290 include/linux/sched.h return task->thread_pid; task 1304 include/linux/sched.h pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns); task 1631 include/linux/sched.h struct task_struct task; task 1646 include/linux/sched.h static inline struct thread_info *task_thread_info(struct task_struct *task) task 1648 include/linux/sched.h return &task->thread_info; task 1651 include/linux/sched.h # define task_thread_info(task) ((struct thread_info *)(task)->stack) task 189 include/linux/sched/cputime.h task_sched_runtime(struct task_struct *task); task 33 include/linux/sched/debug.h extern void show_stack(struct task_struct *task, unsigned long *sp); task 35 include/linux/sched/jobctl.h extern bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask); task 36 include/linux/sched/jobctl.h extern void task_clear_jobctl_trapping(struct task_struct *task); task 37 include/linux/sched/jobctl.h extern void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask); task 113 include/linux/sched/mm.h extern struct mm_struct *get_task_mm(struct task_struct *task); task 119 include/linux/sched/mm.h extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); task 47 include/linux/sched/rt.h static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task) task 266 include/linux/sched/signal.h extern int dequeue_signal(struct task_struct *task, task 271 include/linux/sched/signal.h struct task_struct *task = current; task 275 include/linux/sched/signal.h spin_lock_irq(&task->sighand->siglock); task 276 include/linux/sched/signal.h ret = dequeue_signal(task, &task->blocked, &__info); task 277 include/linux/sched/signal.h spin_unlock_irq(&task->sighand->siglock); task 393 include/linux/sched/signal.h void task_join_group_stop(struct task_struct *task); task 417 include/linux/sched/signal.h static inline void clear_tsk_restore_sigmask(struct task_struct *task) task 419 include/linux/sched/signal.h clear_tsk_thread_flag(task, TIF_RESTORE_SIGMASK); task 426 include/linux/sched/signal.h static inline bool test_tsk_restore_sigmask(struct task_struct *task) task 428 include/linux/sched/signal.h return test_tsk_thread_flag(task, TIF_RESTORE_SIGMASK); task 446 include/linux/sched/signal.h static inline void clear_tsk_restore_sigmask(struct task_struct *task) task 448 include/linux/sched/signal.h task->restore_sigmask = false; task 458 include/linux/sched/signal.h static inline bool test_tsk_restore_sigmask(struct task_struct *task) task 460 include/linux/sched/signal.h return task->restore_sigmask; task 594 include/linux/sched/signal.h struct pid *task_pid_type(struct task_struct *task, enum pid_type type) task 598 include/linux/sched/signal.h pid = task_pid(task); task 600 include/linux/sched/signal.h pid = task->signal->pids[type]; task 604 
include/linux/sched/signal.h static inline struct pid *task_tgid(struct task_struct *task) task 606 include/linux/sched/signal.h return task->signal->pids[PIDTYPE_TGID]; task 614 include/linux/sched/signal.h static inline struct pid *task_pgrp(struct task_struct *task) task 616 include/linux/sched/signal.h return task->signal->pids[PIDTYPE_PGID]; task 619 include/linux/sched/signal.h static inline struct pid *task_session(struct task_struct *task) task 621 include/linux/sched/signal.h return task->signal->pids[PIDTYPE_SID]; task 624 include/linux/sched/signal.h static inline int get_nr_threads(struct task_struct *task) task 626 include/linux/sched/signal.h return task->signal->nr_threads; task 665 include/linux/sched/signal.h extern struct sighand_struct *__lock_task_sighand(struct task_struct *task, task 668 include/linux/sched/signal.h static inline struct sighand_struct *lock_task_sighand(struct task_struct *task, task 673 include/linux/sched/signal.h ret = __lock_task_sighand(task, flags); task 674 include/linux/sched/signal.h (void)__cond_lock(&task->sighand->siglock, ret); task 678 include/linux/sched/signal.h static inline void unlock_task_sighand(struct task_struct *task, task 681 include/linux/sched/signal.h spin_unlock_irqrestore(&task->sighand->siglock, *flags); task 684 include/linux/sched/signal.h static inline unsigned long task_rlimit(const struct task_struct *task, task 687 include/linux/sched/signal.h return READ_ONCE(task->signal->rlim[limit].rlim_cur); task 690 include/linux/sched/signal.h static inline unsigned long task_rlimit_max(const struct task_struct *task, task 693 include/linux/sched/signal.h return READ_ONCE(task->signal->rlim[limit].rlim_max); task 122 include/linux/sched/task.h void put_task_struct_rcu_user(struct task_struct *task); task 19 include/linux/sched/task_stack.h static inline void *task_stack_page(const struct task_struct *task) task 21 include/linux/sched/task_stack.h return task->stack; task 26 include/linux/sched/task_stack.h static inline unsigned long *end_of_stack(const struct task_struct *task) task 28 include/linux/sched/task_stack.h return task->stack; task 33 include/linux/sched/task_stack.h #define task_stack_page(task) ((void *)(task)->stack) task 38 include/linux/sched/task_stack.h task_thread_info(p)->task = p; task 78 include/linux/sched/task_stack.h #define task_stack_end_corrupted(task) \ task 79 include/linux/sched/task_stack.h (*(end_of_stack(task)) != STACK_END_MAGIC) task 59 include/linux/sched/wake_q.h extern void wake_q_add(struct wake_q_head *head, struct task_struct *task); task 60 include/linux/sched/wake_q.h extern void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task); task 98 include/linux/seccomp.h extern long seccomp_get_filter(struct task_struct *task, task 100 include/linux/seccomp.h extern long seccomp_get_metadata(struct task_struct *task, task 103 include/linux/seccomp.h static inline long seccomp_get_filter(struct task_struct *task, task 108 include/linux/seccomp.h static inline long seccomp_get_metadata(struct task_struct *task, task 375 include/linux/security.h int security_task_alloc(struct task_struct *task, unsigned long clone_flags); task 376 include/linux/security.h void security_task_free(struct task_struct *task); task 959 include/linux/security.h static inline int security_task_alloc(struct task_struct *task, task 965 include/linux/security.h static inline void security_task_free(struct task_struct *task) task 20 include/linux/shm.h void exit_shm(struct task_struct *task); task 21 
include/linux/shm.h #define shm_init_task(task) INIT_LIST_HEAD(&(task)->sysvshm.shm_clist) task 37 include/linux/shm.h static inline void exit_shm(struct task_struct *task) task 40 include/linux/shm.h static inline void shm_init_task(struct task_struct *task) task 18 include/linux/stacktrace.h unsigned int stack_trace_save_tsk(struct task_struct *task, task 57 include/linux/stacktrace.h struct task_struct *task, struct pt_regs *regs); task 59 include/linux/stacktrace.h struct task_struct *task); task 79 include/linux/string_helpers.h char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp); task 132 include/linux/sunrpc/auth.h int (*crmarshal)(struct rpc_task *task, task 135 include/linux/sunrpc/auth.h int (*crvalidate)(struct rpc_task *task, task 137 include/linux/sunrpc/auth.h int (*crwrap_req)(struct rpc_task *task, task 139 include/linux/sunrpc/auth.h int (*crunwrap_resp)(struct rpc_task *task, task 168 include/linux/sunrpc/auth.h int rpcauth_marshcred(struct rpc_task *task, task 170 include/linux/sunrpc/auth.h int rpcauth_checkverf(struct rpc_task *task, task 172 include/linux/sunrpc/auth.h int rpcauth_wrap_req_encode(struct rpc_task *task, task 174 include/linux/sunrpc/auth.h int rpcauth_wrap_req(struct rpc_task *task, task 176 include/linux/sunrpc/auth.h int rpcauth_unwrap_resp_decode(struct rpc_task *task, task 178 include/linux/sunrpc/auth.h int rpcauth_unwrap_resp(struct rpc_task *task, task 180 include/linux/sunrpc/auth.h bool rpcauth_xmit_need_reencode(struct rpc_task *task); task 37 include/linux/sunrpc/bc_xprt.h void xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task); task 226 include/linux/sunrpc/clnt.h const char *rpc_proc_name(const struct rpc_task *task); task 234 include/linux/sunrpc/clnt.h static inline int rpc_reply_expected(struct rpc_task *task) task 236 include/linux/sunrpc/clnt.h return (task->tk_msg.rpc_proc != NULL) && task 237 include/linux/sunrpc/clnt.h (task->tk_msg.rpc_proc->p_decode != NULL); task 240 include/linux/sunrpc/clnt.h static inline void rpc_task_close_connection(struct rpc_task *task) task 242 include/linux/sunrpc/clnt.h if (task->tk_xprt) task 243 include/linux/sunrpc/clnt.h xprt_force_disconnect(task->tk_xprt); task 96 include/linux/sunrpc/metrics.h static inline void rpc_count_iostats(const struct rpc_task *task, task 98 include/linux/sunrpc/metrics.h static inline void rpc_count_iostats_metrics(const struct rpc_task *task, task 107 include/linux/sunrpc/sched.h struct rpc_task *task; task 231 include/linux/sunrpc/sched.h unsigned long rpc_task_timeout(const struct rpc_task *task); task 233 include/linux/sunrpc/sched.h struct rpc_task *task, task 239 include/linux/sunrpc/sched.h struct rpc_task *task, task 265 include/linux/sunrpc/sched.h int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *); task 274 include/linux/sunrpc/sched.h void rpc_prepare_task(struct rpc_task *task); task 276 include/linux/sunrpc/sched.h static inline int rpc_wait_for_completion_task(struct rpc_task *task) task 278 include/linux/sunrpc/sched.h return __rpc_wait_for_completion_task(task, NULL); task 134 include/linux/sunrpc/xprt.h int (*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task); task 135 include/linux/sunrpc/xprt.h void (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task); task 136 include/linux/sunrpc/xprt.h void (*alloc_slot)(struct rpc_xprt *xprt, struct rpc_task *task); task 139 include/linux/sunrpc/xprt.h void (*rpcbind)(struct rpc_task *task); task 141 include/linux/sunrpc/xprt.h void 
(*connect)(struct rpc_xprt *xprt, struct rpc_task *task); task 142 include/linux/sunrpc/xprt.h int (*buf_alloc)(struct rpc_task *task); task 143 include/linux/sunrpc/xprt.h void (*buf_free)(struct rpc_task *task); task 146 include/linux/sunrpc/xprt.h void (*wait_for_reply_request)(struct rpc_task *task); task 147 include/linux/sunrpc/xprt.h void (*timer)(struct rpc_xprt *xprt, struct rpc_task *task); task 148 include/linux/sunrpc/xprt.h void (*release_request)(struct rpc_task *task); task 339 include/linux/sunrpc/xprt.h void xprt_connect(struct rpc_task *task); task 343 include/linux/sunrpc/xprt.h void xprt_reserve(struct rpc_task *task); task 344 include/linux/sunrpc/xprt.h void xprt_retry_reserve(struct rpc_task *task); task 345 include/linux/sunrpc/xprt.h int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task); task 346 include/linux/sunrpc/xprt.h int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task); task 347 include/linux/sunrpc/xprt.h void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task); task 351 include/linux/sunrpc/xprt.h bool xprt_prepare_transmit(struct rpc_task *task); task 352 include/linux/sunrpc/xprt.h void xprt_request_enqueue_transmit(struct rpc_task *task); task 353 include/linux/sunrpc/xprt.h void xprt_request_enqueue_receive(struct rpc_task *task); task 354 include/linux/sunrpc/xprt.h void xprt_request_wait_receive(struct rpc_task *task); task 355 include/linux/sunrpc/xprt.h void xprt_request_dequeue_xprt(struct rpc_task *task); task 356 include/linux/sunrpc/xprt.h bool xprt_request_need_retransmit(struct rpc_task *task); task 357 include/linux/sunrpc/xprt.h void xprt_transmit(struct rpc_task *task); task 358 include/linux/sunrpc/xprt.h void xprt_end_transmit(struct rpc_task *task); task 360 include/linux/sunrpc/xprt.h void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task); task 361 include/linux/sunrpc/xprt.h void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task); task 362 include/linux/sunrpc/xprt.h void xprt_release(struct rpc_task *task); task 388 include/linux/sunrpc/xprt.h void xprt_wait_for_reply_request_def(struct rpc_task *task); task 389 include/linux/sunrpc/xprt.h void xprt_wait_for_reply_request_rtt(struct rpc_task *task); task 393 include/linux/sunrpc/xprt.h void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result); task 395 include/linux/sunrpc/xprt.h void xprt_update_rtt(struct rpc_task *task); task 396 include/linux/sunrpc/xprt.h void xprt_complete_rqst(struct rpc_task *task, int copied); task 399 include/linux/sunrpc/xprt.h void xprt_release_rqst_cong(struct rpc_task *task); task 62 include/linux/swait.h struct task_struct *task; task 67 include/linux/swait.h .task = current, \ task 16 include/linux/task_work.h int task_work_add(struct task_struct *task, struct callback_head *twork, bool); task 20 include/linux/task_work.h static inline void exit_task_work(struct task_struct *task) task 635 include/linux/trace_events.h struct task_struct *task); task 640 include/linux/trace_events.h struct task_struct *task) task 642 include/linux/trace_events.h perf_tp_event(type, count, raw_data, size, regs, head, rctx, task); task 158 include/linux/tracehook.h static inline void set_notify_resume(struct task_struct *task) task 161 include/linux/tracehook.h if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME)) task 162 include/linux/tracehook.h kick_process(task); task 512 include/linux/wait.h if (!__t.task) { \ task 477 include/linux/workqueue.h extern void 
print_worker_info(const char *log_lvl, struct task_struct *task); task 479 include/linux/workqueue.h extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task); task 32 include/linux/ww_mutex.h struct task_struct *task; task 132 include/linux/ww_mutex.h ctx->task = current; task 116 include/misc/cxl.h struct task_struct *task); task 109 include/misc/cxllib.h int cxllib_get_PE_attributes(struct task_struct *task, task 128 include/net/dn_nsp.h __u8 task; task 88 include/rdma/restrack.h struct task_struct *task; task 135 include/scsi/libiscsi.h static inline int iscsi_task_has_unsol_data(struct iscsi_task *task) task 137 include/scsi/libiscsi.h return task->unsol_r2t.data_length > task->unsol_r2t.sent; task 140 include/scsi/libiscsi.h static inline void* iscsi_next_hdr(struct iscsi_task *task) task 142 include/scsi/libiscsi.h return (void*)task->hdr + task->hdr_len; task 184 include/scsi/libiscsi.h struct iscsi_task *task; /* xmit task in progress */ task 441 include/scsi/libiscsi.h extern void iscsi_prep_data_out_pdu(struct iscsi_task *task, task 453 include/scsi/libiscsi.h extern void iscsi_requeue_task(struct iscsi_task *task); task 454 include/scsi/libiscsi.h extern void iscsi_put_task(struct iscsi_task *task); task 455 include/scsi/libiscsi.h extern void __iscsi_put_task(struct iscsi_task *task); task 456 include/scsi/libiscsi.h extern void __iscsi_get_task(struct iscsi_task *task); task 457 include/scsi/libiscsi.h extern void iscsi_complete_scsi_task(struct iscsi_task *task, task 89 include/scsi/libiscsi_tcp.h extern void iscsi_tcp_cleanup_task(struct iscsi_task *task); task 90 include/scsi/libiscsi_tcp.h extern int iscsi_tcp_task_init(struct iscsi_task *task); task 91 include/scsi/libiscsi_tcp.h extern int iscsi_tcp_task_xmit(struct iscsi_task *task); task 610 include/scsi/libsas.h struct sas_task *task; task 621 include/scsi/libsas.h extern void sas_free_task(struct sas_task *task); task 703 include/scsi/libsas.h extern void sas_ssp_task_response(struct device *dev, struct sas_task *task, task 26 include/scsi/sas_ata.h void sas_ata_task_abort(struct sas_task *task); task 47 include/scsi/sas_ata.h static inline void sas_ata_task_abort(struct sas_task *task) task 109 include/scsi/scsi_transport_iscsi.h int (*init_task) (struct iscsi_task *task); task 110 include/scsi/scsi_transport_iscsi.h int (*xmit_task) (struct iscsi_task *task); task 111 include/scsi/scsi_transport_iscsi.h void (*cleanup_task) (struct iscsi_task *task); task 113 include/scsi/scsi_transport_iscsi.h int (*alloc_pdu) (struct iscsi_task *task, uint8_t opcode); task 114 include/scsi/scsi_transport_iscsi.h int (*xmit_pdu) (struct iscsi_task *task); task 115 include/scsi/scsi_transport_iscsi.h int (*init_pdu) (struct iscsi_task *task, unsigned int offset, task 157 include/scsi/scsi_transport_iscsi.h u8 (*check_protection)(struct iscsi_task *task, sector_t *sector); task 123 include/trace/events/cgroup.h struct task_struct *task, bool threadgroup), task 125 include/trace/events/cgroup.h TP_ARGS(dst_cgrp, path, task, threadgroup), task 133 include/trace/events/cgroup.h __string( comm, task->comm ) task 141 include/trace/events/cgroup.h __entry->pid = task->pid; task 142 include/trace/events/cgroup.h __assign_str(comm, task->comm); task 153 include/trace/events/cgroup.h struct task_struct *task, bool threadgroup), task 155 include/trace/events/cgroup.h TP_ARGS(dst_cgrp, path, task, threadgroup) task 161 include/trace/events/cgroup.h struct task_struct *task, bool threadgroup), task 163 
task 12 include/trace/events/oom.h TP_PROTO(struct task_struct *task),
task 14 include/trace/events/oom.h TP_ARGS(task),
task 23 include/trace/events/oom.h __entry->pid = task->pid;
task 24 include/trace/events/oom.h memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
task 25 include/trace/events/oom.h __entry->oom_score_adj = task->signal->oom_score_adj;
task 73 include/trace/events/rpcgss.h const struct rpc_task *task,
task 77 include/trace/events/rpcgss.h TP_ARGS(task, maj_stat),
task 87 include/trace/events/rpcgss.h __entry->task_id = task->tk_pid;
task 88 include/trace/events/rpcgss.h __entry->client_id = task->tk_client->cl_clid;
task 101 include/trace/events/rpcgss.h const struct rpc_task *task, \
task 104 include/trace/events/rpcgss.h TP_ARGS(task, maj_stat))
task 136 include/trace/events/rpcgss.h const struct rpc_task *task
task 139 include/trace/events/rpcgss.h TP_ARGS(task),
task 147 include/trace/events/rpcgss.h __entry->task_id = task->tk_pid;
task 148 include/trace/events/rpcgss.h __entry->client_id = task->tk_client->cl_clid;
task 156 include/trace/events/rpcgss.h const struct rpc_task *task,
task 161 include/trace/events/rpcgss.h TP_ARGS(task, expected, received),
task 171 include/trace/events/rpcgss.h __entry->task_id = task->tk_pid;
task 172 include/trace/events/rpcgss.h __entry->client_id = task->tk_client->cl_clid;
task 184 include/trace/events/rpcgss.h const struct rpc_task *task
task 187 include/trace/events/rpcgss.h TP_ARGS(task),
task 197 include/trace/events/rpcgss.h const struct rpc_rqst *rqst = task->tk_rqstp;
task 199 include/trace/events/rpcgss.h __entry->task_id = task->tk_pid;
task 200 include/trace/events/rpcgss.h __entry->client_id = task->tk_client->cl_clid;
task 212 include/trace/events/rpcgss.h const struct rpc_task *task,
task 217 include/trace/events/rpcgss.h TP_ARGS(task, seq_xmit, ret),
task 229 include/trace/events/rpcgss.h __entry->task_id = task->tk_pid;
task 230 include/trace/events/rpcgss.h __entry->client_id = task->tk_client->cl_clid;
task 231 include/trace/events/rpcgss.h __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
task 233 include/trace/events/rpcgss.h __entry->seqno = task->tk_rqstp->rq_seqno;
task 90 include/trace/events/rpcrdma.h const struct rpc_task *task,
task 96 include/trace/events/rpcrdma.h TP_ARGS(task, pos, mr, nsegs),
task 110 include/trace/events/rpcrdma.h __entry->task_id = task->tk_pid;
task 111 include/trace/events/rpcrdma.h __entry->client_id = task->tk_client->cl_clid;
task 131 include/trace/events/rpcrdma.h const struct rpc_task *task, \
task 136 include/trace/events/rpcrdma.h TP_ARGS(task, pos, mr, nsegs))
task 140 include/trace/events/rpcrdma.h const struct rpc_task *task,
task 145 include/trace/events/rpcrdma.h TP_ARGS(task, mr, nsegs),
task 158 include/trace/events/rpcrdma.h __entry->task_id = task->tk_pid;
task 159 include/trace/events/rpcrdma.h __entry->client_id = task->tk_client->cl_clid;
task 178 include/trace/events/rpcrdma.h const struct rpc_task *task, \
task 182 include/trace/events/rpcrdma.h TP_ARGS(task, mr, nsegs))
task 982 include/trace/events/rpcrdma.h const struct rpc_task *task,
task 988 include/trace/events/rpcrdma.h TP_ARGS(task, rep, req, credits),
task 1000 include/trace/events/rpcrdma.h __entry->task_id = task->tk_pid;
task 1001 include/trace/events/rpcrdma.h __entry->client_id = task->tk_client->cl_clid;
task 1145 include/trace/events/rpcrdma.h const struct rpc_task *task,
task 1149 include/trace/events/rpcrdma.h TP_ARGS(task, req),
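The recurring TP_PROTO/TP_ARGS/__entry fragments above are pieces of the TRACE_EVENT() macro. A minimal skeleton following the oom.h fields quoted above (the event name is hypothetical, and the usual trace-header boilerplate around CREATE_TRACE_POINTS is omitted):

    TRACE_EVENT(sample_task_event,          /* hypothetical event name */
            TP_PROTO(struct task_struct *task),
            TP_ARGS(task),
            TP_STRUCT__entry(
                    __field(pid_t, pid)
                    __array(char, comm, TASK_COMM_LEN)
            ),
            TP_fast_assign(
                    __entry->pid = task->pid;
                    memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
            ),
            TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
    );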
task 1160 include/trace/events/rpcrdma.h __entry->task_id = task->tk_pid;
task 1161 include/trace/events/rpcrdma.h __entry->client_id = task->tk_client->cl_clid;
task 1163 include/trace/events/rpcrdma.h __entry->callsize = task->tk_rqstp->rq_callsize;
task 1164 include/trace/events/rpcrdma.h __entry->rcvsize = task->tk_rqstp->rq_rcvsize;
task 1175 include/trace/events/rpcrdma.h const struct rpc_task *task,
task 1179 include/trace/events/rpcrdma.h TP_ARGS(task, req),
task 1189 include/trace/events/rpcrdma.h __entry->task_id = task->tk_pid;
task 1190 include/trace/events/rpcrdma.h __entry->client_id = task->tk_client->cl_clid;
task 52 include/trace/events/signal.h TP_PROTO(int sig, struct kernel_siginfo *info, struct task_struct *task,
task 55 include/trace/events/signal.h TP_ARGS(sig, info, task, group, result),
task 70 include/trace/events/signal.h memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
task 71 include/trace/events/signal.h __entry->pid = task->pid;
task 19 include/trace/events/sunrpc.h TP_PROTO(const struct rpc_task *task),
task 21 include/trace/events/sunrpc.h TP_ARGS(task),
task 30 include/trace/events/sunrpc.h __entry->task_id = task->tk_pid;
task 31 include/trace/events/sunrpc.h __entry->client_id = task->tk_client->cl_clid;
task 32 include/trace/events/sunrpc.h __entry->status = task->tk_status;
task 42 include/trace/events/sunrpc.h const struct rpc_task *task \
task 44 include/trace/events/sunrpc.h TP_ARGS(task))
task 51 include/trace/events/sunrpc.h TP_PROTO(const struct rpc_task *task),
task 53 include/trace/events/sunrpc.h TP_ARGS(task),
task 60 include/trace/events/sunrpc.h __string(progname, task->tk_client->cl_program->name)
task 61 include/trace/events/sunrpc.h __string(procname, rpc_proc_name(task))
task 65 include/trace/events/sunrpc.h __entry->task_id = task->tk_pid;
task 66 include/trace/events/sunrpc.h __entry->client_id = task->tk_client->cl_clid;
task 67 include/trace/events/sunrpc.h __entry->version = task->tk_client->cl_vers;
task 68 include/trace/events/sunrpc.h __entry->async = RPC_IS_ASYNC(task);
task 69 include/trace/events/sunrpc.h __assign_str(progname, task->tk_client->cl_program->name)
task 70 include/trace/events/sunrpc.h __assign_str(procname, rpc_proc_name(task))
task 126 include/trace/events/sunrpc.h TP_PROTO(const struct rpc_task *task, const void *action),
task 128 include/trace/events/sunrpc.h TP_ARGS(task, action),
task 140 include/trace/events/sunrpc.h __entry->client_id = task->tk_client ?
task 141 include/trace/events/sunrpc.h task->tk_client->cl_clid : -1;
task 142 include/trace/events/sunrpc.h __entry->task_id = task->tk_pid;
task 144 include/trace/events/sunrpc.h __entry->runstate = task->tk_runstate;
task 145 include/trace/events/sunrpc.h __entry->status = task->tk_status;
task 146 include/trace/events/sunrpc.h __entry->flags = task->tk_flags;
task 160 include/trace/events/sunrpc.h const struct rpc_task *task, \
task 163 include/trace/events/sunrpc.h TP_ARGS(task, action))
task 171 include/trace/events/sunrpc.h TP_PROTO(const struct rpc_task *task, const struct rpc_wait_queue *q),
task 173 include/trace/events/sunrpc.h TP_ARGS(task, q),
task 186 include/trace/events/sunrpc.h __entry->client_id = task->tk_client ?
task 187 include/trace/events/sunrpc.h task->tk_client->cl_clid : -1;
task 188 include/trace/events/sunrpc.h __entry->task_id = task->tk_pid;
task 189 include/trace/events/sunrpc.h __entry->timeout = rpc_task_timeout(task);
task 190 include/trace/events/sunrpc.h __entry->runstate = task->tk_runstate;
task 191 include/trace/events/sunrpc.h __entry->status = task->tk_status;
task 192 include/trace/events/sunrpc.h __entry->flags = task->tk_flags;
task 208 include/trace/events/sunrpc.h const struct rpc_task *task, \
task 211 include/trace/events/sunrpc.h TP_ARGS(task, q))
task 218 include/trace/events/sunrpc.h TP_PROTO(const struct rpc_task *task),
task 220 include/trace/events/sunrpc.h TP_ARGS(task),
task 228 include/trace/events/sunrpc.h __entry->task_id = task->tk_pid;
task 229 include/trace/events/sunrpc.h __entry->client_id = task->tk_client->cl_clid;
task 239 include/trace/events/sunrpc.h const struct rpc_task *task \
task 241 include/trace/events/sunrpc.h TP_ARGS(task))
task 249 include/trace/events/sunrpc.h const struct rpc_task *task
task 252 include/trace/events/sunrpc.h TP_ARGS(task),
task 258 include/trace/events/sunrpc.h __string(progname, task->tk_client->cl_program->name)
task 260 include/trace/events/sunrpc.h __string(procname, rpc_proc_name(task))
task 261 include/trace/events/sunrpc.h __string(servername, task->tk_xprt->servername)
task 265 include/trace/events/sunrpc.h __entry->task_id = task->tk_pid;
task 266 include/trace/events/sunrpc.h __entry->client_id = task->tk_client->cl_clid;
task 267 include/trace/events/sunrpc.h __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
task 268 include/trace/events/sunrpc.h __assign_str(progname, task->tk_client->cl_program->name)
task 269 include/trace/events/sunrpc.h __entry->version = task->tk_client->cl_vers;
task 270 include/trace/events/sunrpc.h __assign_str(procname, rpc_proc_name(task))
task 271 include/trace/events/sunrpc.h __assign_str(servername, task->tk_xprt->servername)
task 283 include/trace/events/sunrpc.h const struct rpc_task *task \
task 285 include/trace/events/sunrpc.h TP_ARGS(task))
task 300 include/trace/events/sunrpc.h const struct rpc_task *task,
task 306 include/trace/events/sunrpc.h TP_ARGS(task, backlog, rtt, execute),
task 313 include/trace/events/sunrpc.h __string(progname, task->tk_client->cl_program->name)
task 314 include/trace/events/sunrpc.h __string(procname, rpc_proc_name(task))
task 321 include/trace/events/sunrpc.h __entry->client_id = task->tk_client->cl_clid;
task 322 include/trace/events/sunrpc.h __entry->task_id = task->tk_pid;
task 323 include/trace/events/sunrpc.h __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
task 324 include/trace/events/sunrpc.h __entry->version = task->tk_client->cl_vers;
task 325 include/trace/events/sunrpc.h __assign_str(progname, task->tk_client->cl_program->name)
task 326 include/trace/events/sunrpc.h __assign_str(procname, rpc_proc_name(task))
task 367 include/trace/events/sunrpc.h const struct rpc_task *task = xdr->rqst->rq_task;
task 369 include/trace/events/sunrpc.h __entry->task_id = task->tk_pid;
task 370 include/trace/events/sunrpc.h __entry->client_id = task->tk_client->cl_clid;
task 372 include/trace/events/sunrpc.h task->tk_client->cl_program->name)
task 373 include/trace/events/sunrpc.h __entry->version = task->tk_client->cl_vers;
task 374 include/trace/events/sunrpc.h __assign_str(procedure, task->tk_msg.rpc_proc->p_name)
task 433 include/trace/events/sunrpc.h const struct rpc_task *task = xdr->rqst->rq_task;
task 435 include/trace/events/sunrpc.h __entry->task_id = task->tk_pid;
task 436 include/trace/events/sunrpc.h __entry->client_id = task->tk_client->cl_clid;
task 438 include/trace/events/sunrpc.h task->tk_client->cl_program->name)
task 439 include/trace/events/sunrpc.h __entry->version = task->tk_client->cl_vers;
task 440 include/trace/events/sunrpc.h __assign_str(procedure, task->tk_msg.rpc_proc->p_name)
task 730 include/trace/events/sunrpc.h const struct rpc_task *task,
task 734 include/trace/events/sunrpc.h TP_ARGS(task, stage),
task 745 include/trace/events/sunrpc.h __entry->task_id = task->tk_pid;
task 746 include/trace/events/sunrpc.h __entry->client_id = task->tk_client ?
task 747 include/trace/events/sunrpc.h task->tk_client->cl_clid : -1;
task 748 include/trace/events/sunrpc.h __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
task 749 include/trace/events/sunrpc.h __entry->seqno = task->tk_rqstp->rq_seqno;
task 3 include/trace/events/task.h #define TRACE_SYSTEM task
task 11 include/trace/events/task.h TP_PROTO(struct task_struct *task, unsigned long clone_flags),
task 13 include/trace/events/task.h TP_ARGS(task, clone_flags),
task 23 include/trace/events/task.h __entry->pid = task->pid;
task 24 include/trace/events/task.h memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
task 26 include/trace/events/task.h __entry->oom_score_adj = task->signal->oom_score_adj;
task 36 include/trace/events/task.h TP_PROTO(struct task_struct *task, const char *comm),
task 38 include/trace/events/task.h TP_ARGS(task, comm),
task 48 include/trace/events/task.h __entry->pid = task->pid;
task 49 include/trace/events/task.h memcpy(entry->oldcomm, task->comm, TASK_COMM_LEN);
task 51 include/trace/events/task.h __entry->oom_score_adj = task->signal->oom_score_adj;
task 348 include/uapi/linux/perf_event.h task : 1, /* trace fork/exit */
task 457 include/video/imx-ipu-v3.h struct ipu_ic *ipu_ic_get(struct ipu_soc *ipu, enum ipu_ic_task task);
task 67 ipc/mqueue.c struct task_struct *task;
task 627 ipc/mqueue.c if (walk->task->prio <= current->prio) {
task 718 ipc/mqueue.c struct task_struct *task;
task 742 ipc/mqueue.c task = pid_task(info->notify_owner, PIDTYPE_TGID);
task 743 ipc/mqueue.c if (task && task->self_exec_id ==
task 746 ipc/mqueue.c &sig_i, task, PIDTYPE_TGID);
task 948 ipc/mqueue.c wake_q_add(wake_q, receiver->task);
task 976 ipc/mqueue.c wake_q_add(wake_q, sender->task);
task 1062 ipc/mqueue.c wait.task = current;
task 1166 ipc/mqueue.c wait.task = current;
task 161 ipc/namespace.c static struct ns_common *ipcns_get(struct task_struct *task)
task 166 ipc/namespace.c task_lock(task);
task 167 ipc/namespace.c nsproxy = task->nsproxy;
task 170 ipc/namespace.c task_unlock(task);
task 383 ipc/shm.c void exit_shm(struct task_struct *task)
task 385 ipc/shm.c struct ipc_namespace *ns = task->nsproxy->ipc_ns;
task 388 ipc/shm.c if (list_empty(&task->sysvshm.shm_clist))
task 398 ipc/shm.c list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
task 404 ipc/shm.c list_del(&task->sysvshm.shm_clist);
task 415 ipc/shm.c list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
task 425 ipc/shm.c list_del(&task->sysvshm.shm_clist);
task 212 kernel/audit.c int auditd_test_task(struct task_struct *task)
task 219 kernel/audit.c rc = (ac && ac->pid == task_tgid(task) ? 1 : 0);
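The ipc/mqueue.c lines above show the wake_q pattern: a sleeper publishes itself via wait.task = current, and a waker batches wakeups with wake_q_add() under the lock, then issues them with wake_up_q() after dropping it. A hedged kernel-style sketch with a hypothetical waiter type:

    #include <linux/sched/wake_q.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct my_waiter {                      /* hypothetical waiter, like mqueue's */
            struct list_head list;
            struct task_struct *task;
    };

    static void wake_all(struct list_head *waiters, spinlock_t *lock)
    {
            DEFINE_WAKE_Q(wake_q);
            struct my_waiter *w;

            spin_lock(lock);
            list_for_each_entry(w, waiters, list)
                    wake_q_add(&wake_q, w->task);   /* takes a task reference */
            spin_unlock(lock);

            wake_up_q(&wake_q);     /* actual wakeups, after the lock is dropped */
    }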
task 203 kernel/audit.h extern int auditd_test_task(struct task_struct *task);
task 157 kernel/bpf/helpers.c struct task_struct *task = current;
task 159 kernel/bpf/helpers.c if (unlikely(!task))
task 162 kernel/bpf/helpers.c return (u64) task->tgid << 32 | task->pid;
task 173 kernel/bpf/helpers.c struct task_struct *task = current;
task 177 kernel/bpf/helpers.c if (unlikely(!task))
task 193 kernel/bpf/helpers.c struct task_struct *task = current;
task 195 kernel/bpf/helpers.c if (unlikely(!task))
task 198 kernel/bpf/helpers.c strncpy(buf, task->comm, size);
task 2771 kernel/bpf/syscall.c struct task_struct *task;
task 2784 kernel/bpf/syscall.c task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
task 2785 kernel/bpf/syscall.c if (!task)
task 2788 kernel/bpf/syscall.c files = get_files_struct(task);
task 2789 kernel/bpf/syscall.c put_task_struct(task);
task 211 kernel/cgroup/cgroup-internal.h struct cgroup *task_cgroup_from_root(struct task_struct *task,
task 236 kernel/cgroup/cgroup-internal.h void cgroup_procs_write_finish(struct task_struct *task)
task 103 kernel/cgroup/cgroup-v1.c struct task_struct *task;
task 135 kernel/cgroup/cgroup-v1.c task = css_task_iter_next(&it);
task 136 kernel/cgroup/cgroup-v1.c } while (task && (task->flags & PF_EXITING));
task 138 kernel/cgroup/cgroup-v1.c if (task)
task 139 kernel/cgroup/cgroup-v1.c get_task_struct(task);
task 142 kernel/cgroup/cgroup-v1.c if (task) {
task 143 kernel/cgroup/cgroup-v1.c ret = cgroup_migrate(task, false, &mgctx);
task 145 kernel/cgroup/cgroup-v1.c TRACE_CGROUP_PATH(transfer_tasks, to, task, false);
task 146 kernel/cgroup/cgroup-v1.c put_task_struct(task);
task 148 kernel/cgroup/cgroup-v1.c } while (task && !ret);
task 496 kernel/cgroup/cgroup-v1.c struct task_struct *task;
task 504 kernel/cgroup/cgroup-v1.c task = cgroup_procs_write_start(buf, threadgroup);
task 505 kernel/cgroup/cgroup-v1.c ret = PTR_ERR_OR_ZERO(task);
task 514 kernel/cgroup/cgroup-v1.c tcred = get_task_cred(task);
task 523 kernel/cgroup/cgroup-v1.c ret = cgroup_attach_task(cgrp, task, threadgroup);
task 526 kernel/cgroup/cgroup-v1.c cgroup_procs_write_finish(task);
task 219 kernel/cgroup/cgroup.c struct task_struct *task);
task 855 kernel/cgroup/cgroup.c struct task_struct *task)
task 860 kernel/cgroup/cgroup.c css_task_iter_skip(it, task);
task 878 kernel/cgroup/cgroup.c static void css_set_move_task(struct task_struct *task,
task 888 kernel/cgroup/cgroup.c WARN_ON_ONCE(list_empty(&task->cg_list));
task 890 kernel/cgroup/cgroup.c css_set_skip_task_iters(from_cset, task);
task 891 kernel/cgroup/cgroup.c list_del_init(&task->cg_list);
task 895 kernel/cgroup/cgroup.c WARN_ON_ONCE(!list_empty(&task->cg_list));
task 905 kernel/cgroup/cgroup.c WARN_ON_ONCE(task->flags & PF_EXITING);
task 907 kernel/cgroup/cgroup.c cgroup_move_task(task, to_cset);
task 908 kernel/cgroup/cgroup.c list_add_tail(&task->cg_list, use_mg_tasks ? &to_cset->mg_tasks :
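The bpf_get_current_pid_tgid() lines above pack the thread-group id in the upper 32 bits and the thread id in the lower 32. A runnable userspace illustration of the same packing and unpacking:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t tgid = 1234, pid = 5678;
            /* same expression as the helper: tgid << 32 | pid */
            uint64_t packed = (uint64_t)tgid << 32 | pid;

            printf("tgid=%u pid=%u\n",
                   (uint32_t)(packed >> 32), (uint32_t)packed);
            return 0;
    }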
task 1429 kernel/cgroup/cgroup.c struct cgroup *task_cgroup_from_root(struct task_struct *task,
task 1437 kernel/cgroup/cgroup.c return cset_cgroup_from_root(task_css_set(task), root);
task 2328 kernel/cgroup/cgroup.c int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
task 2341 kernel/cgroup/cgroup.c cgrp = task_cgroup_from_root(task, root);
task 2364 kernel/cgroup/cgroup.c static void cgroup_migrate_add_task(struct task_struct *task,
task 2372 kernel/cgroup/cgroup.c if (task->flags & PF_EXITING)
task 2376 kernel/cgroup/cgroup.c if (list_empty(&task->cg_list))
task 2379 kernel/cgroup/cgroup.c cset = task_css_set(task);
task 2385 kernel/cgroup/cgroup.c list_move_tail(&task->cg_list, &cset->mg_tasks);
task 2422 kernel/cgroup/cgroup.c struct task_struct *task = tset->cur_task;
task 2425 kernel/cgroup/cgroup.c if (!task)
task 2426 kernel/cgroup/cgroup.c task = list_first_entry(&cset->mg_tasks,
task 2429 kernel/cgroup/cgroup.c task = list_next_entry(task, cg_list);
task 2431 kernel/cgroup/cgroup.c if (&task->cg_list != &cset->mg_tasks) {
task 2433 kernel/cgroup/cgroup.c tset->cur_task = task;
task 2446 kernel/cgroup/cgroup.c return task;
task 2450 kernel/cgroup/cgroup.c task = NULL;
task 2469 kernel/cgroup/cgroup.c struct task_struct *task, *tmp_task;
task 2494 kernel/cgroup/cgroup.c list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
task 2495 kernel/cgroup/cgroup.c struct css_set *from_cset = task_css_set(task);
task 2500 kernel/cgroup/cgroup.c css_set_move_task(task, from_cset, to_cset, true);
task 2506 kernel/cgroup/cgroup.c cgroup_freezer_migrate_task(task, from_cset->dfl_cgrp,
task 2763 kernel/cgroup/cgroup.c struct task_struct *task;
task 2772 kernel/cgroup/cgroup.c task = leader;
task 2774 kernel/cgroup/cgroup.c cgroup_migrate_add_task(task, mgctx);
task 2777 kernel/cgroup/cgroup.c } while_each_thread(leader, task);
task 2796 kernel/cgroup/cgroup.c struct task_struct *task;
task 2806 kernel/cgroup/cgroup.c task = leader;
task 2808 kernel/cgroup/cgroup.c cgroup_migrate_add_src(task_css_set(task), dst_cgrp, &mgctx);
task 2811 kernel/cgroup/cgroup.c } while_each_thread(leader, task);
task 2874 kernel/cgroup/cgroup.c void cgroup_procs_write_finish(struct task_struct *task)
task 2881 kernel/cgroup/cgroup.c put_task_struct(task);
task 2961 kernel/cgroup/cgroup.c struct task_struct *task, *ntask;
task 2964 kernel/cgroup/cgroup.c list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
task 2965 kernel/cgroup/cgroup.c cgroup_migrate_add_task(task, &mgctx);
task 4504 kernel/cgroup/cgroup.c struct task_struct *task)
task 4508 kernel/cgroup/cgroup.c if (it->task_pos == &task->cg_list) {
task 4516 kernel/cgroup/cgroup.c struct task_struct *task;
task 4549 kernel/cgroup/cgroup.c task = list_entry(it->task_pos, struct task_struct, cg_list);
task 4553 kernel/cgroup/cgroup.c if (!thread_group_leader(task))
task 4558 kernel/cgroup/cgroup.c !atomic_read(&task->signal->live))
task 4770 kernel/cgroup/cgroup.c struct task_struct *task;
task 4777 kernel/cgroup/cgroup.c task = cgroup_procs_write_start(buf, true);
task 4778 kernel/cgroup/cgroup.c ret = PTR_ERR_OR_ZERO(task);
task 4784 kernel/cgroup/cgroup.c src_cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
task 4792 kernel/cgroup/cgroup.c ret = cgroup_attach_task(dst_cgrp, task, true);
task 4795 kernel/cgroup/cgroup.c cgroup_procs_write_finish(task);
task 4811 kernel/cgroup/cgroup.c struct task_struct *task;
task 4820 kernel/cgroup/cgroup.c task = cgroup_procs_write_start(buf, false);
task 4821 kernel/cgroup/cgroup.c ret = PTR_ERR_OR_ZERO(task);
task 4827 kernel/cgroup/cgroup.c src_cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
task 4841 kernel/cgroup/cgroup.c ret = cgroup_attach_task(dst_cgrp, task, false);
task 4844 kernel/cgroup/cgroup.c cgroup_procs_write_finish(task);
task 6137 kernel/cgroup/cgroup.c void cgroup_release(struct task_struct *task)
task 6143 kernel/cgroup/cgroup.c ss->release(task);
task 6148 kernel/cgroup/cgroup.c css_set_skip_task_iters(task_css_set(task), task);
task 6149 kernel/cgroup/cgroup.c list_del_init(&task->cg_list);
task 6154 kernel/cgroup/cgroup.c void cgroup_free(struct task_struct *task)
task 6156 kernel/cgroup/cgroup.c struct css_set *cset = task_css_set(task);
task 198 kernel/cgroup/cpuset.c static inline struct cpuset *task_cs(struct task_struct *task)
task 200 kernel/cgroup/cpuset.c return css_cs(task_css(task, cpuset_cgrp_id));
task 913 kernel/cgroup/cpuset.c struct task_struct *task;
task 917 kernel/cgroup/cpuset.c while ((task = css_task_iter_next(&it)))
task 918 kernel/cgroup/cpuset.c dl_add_task_root_domain(task);
task 1036 kernel/cgroup/cpuset.c struct task_struct *task;
task 1039 kernel/cgroup/cpuset.c while ((task = css_task_iter_next(&it)))
task 1040 kernel/cgroup/cpuset.c set_cpus_allowed_ptr(task, cs->effective_cpus);
task 1649 kernel/cgroup/cpuset.c struct task_struct *task;
task 1666 kernel/cgroup/cpuset.c while ((task = css_task_iter_next(&it))) {
task 1670 kernel/cgroup/cpuset.c cpuset_change_task_nodemask(task, &newmems);
task 1672 kernel/cgroup/cpuset.c mm = get_task_mm(task);
task 1855 kernel/cgroup/cpuset.c struct task_struct *task;
task 1858 kernel/cgroup/cpuset.c while ((task = css_task_iter_next(&it)))
task 1859 kernel/cgroup/cpuset.c cpuset_update_task_spread_flag(cs, task);
task 2109 kernel/cgroup/cpuset.c struct task_struct *task;
task 2124 kernel/cgroup/cpuset.c cgroup_taskset_for_each(task, css, tset) {
task 2125 kernel/cgroup/cpuset.c ret = task_can_attach(task, cs->cpus_allowed);
task 2128 kernel/cgroup/cpuset.c ret = security_task_setscheduler(task);
task 2166 kernel/cgroup/cpuset.c struct task_struct *task;
task 2185 kernel/cgroup/cpuset.c cgroup_taskset_for_each(task, css, tset) {
task 2190 kernel/cgroup/cpuset.c WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
task 2192 kernel/cgroup/cpuset.c cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
task 2193 kernel/cgroup/cpuset.c cpuset_update_task_spread_flag(cs, task);
task 2860 kernel/cgroup/cpuset.c static void cpuset_fork(struct task_struct *task)
task 2862 kernel/cgroup/cpuset.c if (task_css_is_root(task, cpuset_cgrp_id))
task 2865 kernel/cgroup/cpuset.c set_cpus_allowed_ptr(task, current->cpus_ptr);
task 2866 kernel/cgroup/cpuset.c task->mems_allowed = current->mems_allowed;
task 3615 kernel/cgroup/cpuset.c void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
task 3618 kernel/cgroup/cpuset.c nodemask_pr_args(&task->mems_allowed));
task 3620 kernel/cgroup/cpuset.c nodemask_pr_args(&task->mems_allowed));
task 124 kernel/cgroup/debug.c struct task_struct *task;
task 164 kernel/cgroup/debug.c list_for_each_entry(task, &cset->tasks, cg_list) {
task 167 kernel/cgroup/debug.c task_pid_vnr(task));
task 170 kernel/cgroup/debug.c list_for_each_entry(task, &cset->mg_tasks, cg_list) {
task 173 kernel/cgroup/debug.c task_pid_vnr(task));
task 155 kernel/cgroup/freezer.c static void cgroup_freeze_task(struct task_struct *task, bool freeze)
task 160 kernel/cgroup/freezer.c if (!lock_task_sighand(task, &flags))
task 164 kernel/cgroup/freezer.c task->jobctl |= JOBCTL_TRAP_FREEZE;
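Several of the cpuset.c entries above iterate every task in a cgroup with a css_task_iter. A hedged sketch of that canonical loop (the css pointer and the per-task action are placeholders):

    #include <linux/cgroup.h>

    static void for_each_css_task(struct cgroup_subsys_state *css)
    {
            struct css_task_iter it;
            struct task_struct *task;

            css_task_iter_start(css, 0, &it);
            while ((task = css_task_iter_next(&it))) {
                    /* act on each task in the cgroup, e.g. rebind CPUs or nodes */
            }
            css_task_iter_end(&it);
    }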
task 165 kernel/cgroup/freezer.c signal_wake_up(task, false);
task 167 kernel/cgroup/freezer.c task->jobctl &= ~JOBCTL_TRAP_FREEZE;
task 168 kernel/cgroup/freezer.c wake_up_process(task);
task 171 kernel/cgroup/freezer.c unlock_task_sighand(task, &flags);
task 180 kernel/cgroup/freezer.c struct task_struct *task;
task 197 kernel/cgroup/freezer.c while ((task = css_task_iter_next(&it))) {
task 202 kernel/cgroup/freezer.c if (task->flags & PF_KTHREAD)
task 204 kernel/cgroup/freezer.c cgroup_freeze_task(task, freeze);
task 222 kernel/cgroup/freezer.c void cgroup_freezer_migrate_task(struct task_struct *task,
task 230 kernel/cgroup/freezer.c if (task->flags & PF_KTHREAD)
task 239 kernel/cgroup/freezer.c !task->frozen)
task 247 kernel/cgroup/freezer.c if (task->frozen) {
task 257 kernel/cgroup/freezer.c cgroup_freeze_task(task, test_bit(CGRP_FREEZE, &dst->flags));
task 55 kernel/cgroup/legacy_freezer.c static inline struct freezer *task_freezer(struct task_struct *task)
task 57 kernel/cgroup/legacy_freezer.c return css_freezer(task_css(task, freezer_cgrp_id));
task 65 kernel/cgroup/legacy_freezer.c bool cgroup_freezing(struct task_struct *task)
task 70 kernel/cgroup/legacy_freezer.c ret = task_freezer(task)->state & CGROUP_FREEZING;
task 160 kernel/cgroup/legacy_freezer.c struct task_struct *task;
task 175 kernel/cgroup/legacy_freezer.c cgroup_taskset_for_each(task, new_css, tset) {
task 179 kernel/cgroup/legacy_freezer.c __thaw_task(task);
task 181 kernel/cgroup/legacy_freezer.c freeze_task(task);
task 203 kernel/cgroup/legacy_freezer.c static void freezer_fork(struct task_struct *task)
task 214 kernel/cgroup/legacy_freezer.c if (task_css_is_root(task, freezer_cgrp_id))
task 220 kernel/cgroup/legacy_freezer.c freezer = task_freezer(task);
task 222 kernel/cgroup/legacy_freezer.c freeze_task(task);
task 249 kernel/cgroup/legacy_freezer.c struct task_struct *task;
task 273 kernel/cgroup/legacy_freezer.c while ((task = css_task_iter_next(&it))) {
task 274 kernel/cgroup/legacy_freezer.c if (freezing(task)) {
task 281 kernel/cgroup/legacy_freezer.c if (!frozen(task) && !freezer_should_skip(task))
task 321 kernel/cgroup/legacy_freezer.c struct task_struct *task;
task 324 kernel/cgroup/legacy_freezer.c while ((task = css_task_iter_next(&it)))
task 325 kernel/cgroup/legacy_freezer.c freeze_task(task);
task 332 kernel/cgroup/legacy_freezer.c struct task_struct *task;
task 335 kernel/cgroup/legacy_freezer.c while ((task = css_task_iter_next(&it)))
task 336 kernel/cgroup/legacy_freezer.c __thaw_task(task);
task 117 kernel/cgroup/namespace.c static struct ns_common *cgroupns_get(struct task_struct *task)
task 122 kernel/cgroup/namespace.c task_lock(task);
task 123 kernel/cgroup/namespace.c nsproxy = task->nsproxy;
task 128 kernel/cgroup/namespace.c task_unlock(task);
task 172 kernel/cgroup/pids.c struct task_struct *task;
task 175 kernel/cgroup/pids.c cgroup_taskset_for_each(task, dst_css, tset) {
task 185 kernel/cgroup/pids.c old_css = task_css(task, pids_cgrp_id);
task 197 kernel/cgroup/pids.c struct task_struct *task;
task 200 kernel/cgroup/pids.c cgroup_taskset_for_each(task, dst_css, tset) {
task 205 kernel/cgroup/pids.c old_css = task_css(task, pids_cgrp_id);
task 217 kernel/cgroup/pids.c static int pids_can_fork(struct task_struct *task)
task 238 kernel/cgroup/pids.c static void pids_cancel_fork(struct task_struct *task)
task 248 kernel/cgroup/pids.c static void pids_release(struct task_struct *task)
task 250 kernel/cgroup/pids.c struct pids_cgroup *pids = css_pids(task_css(task, pids_cgrp_id));
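The cgroup_freeze_task() lines above show the lock_task_sighand() pattern: take the target's sighand lock (failing gracefully if the task is exiting), flip the jobctl trap bit, and poke the task so it notices. A sketch reassembled from the quoted lines:

    #include <linux/sched/signal.h>

    static void poke_task(struct task_struct *task, bool freeze)
    {
            unsigned long flags;

            if (!lock_task_sighand(task, &flags))
                    return;         /* task is exiting; nothing to do */

            if (freeze) {
                    task->jobctl |= JOBCTL_TRAP_FREEZE;
                    signal_wake_up(task, false);
            } else {
                    task->jobctl &= ~JOBCTL_TRAP_FREEZE;
                    wake_up_process(task);
            }

            unlock_task_sighand(task, &flags);
    }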
task 193 kernel/cred.c const struct cred *get_task_cred(struct task_struct *task)
task 200 kernel/cred.c cred = __task_cred((task));
task 252 kernel/cred.c struct task_struct *task = current;
task 264 kernel/cred.c old = task->cred;
task 436 kernel/cred.c struct task_struct *task = current;
task 437 kernel/cred.c const struct cred *old = task->real_cred;
task 443 kernel/cred.c BUG_ON(task->cred != old);
task 459 kernel/cred.c if (task->mm)
task 460 kernel/cred.c set_dumpable(task->mm, suid_dumpable);
task 461 kernel/cred.c task->pdeath_signal = 0;
task 487 kernel/cred.c rcu_assign_pointer(task->real_cred, new);
task 488 kernel/cred.c rcu_assign_pointer(task->cred, new);
task 498 kernel/cred.c proc_id_connector(task, PROC_EVENT_UID);
task 504 kernel/cred.c proc_id_connector(task, PROC_EVENT_GID);
task 556 kernel/debug/debug_core.c kgdb_info[cpu].task = current;
task 596 kernel/debug/debug_core.c kgdb_info[cpu].task = NULL;
task 616 kernel/debug/debug_core.c (kgdb_info[cpu].task &&
task 617 kernel/debug/debug_core.c kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
task 719 kernel/debug/debug_core.c if (kgdb_info[sstep_cpu].task)
task 720 kernel/debug/debug_core.c kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
task 730 kernel/debug/debug_core.c kgdb_info[cpu].task = NULL;
task 40 kernel/debug/debug_core.h struct task_struct *task;
task 441 kernel/debug/gdbstub.c if (kgdb_info[-tid - 2].task)
task 442 kernel/debug/gdbstub.c return kgdb_info[-tid - 2].task;
task 502 kernel/debug/gdbstub.c thread = kgdb_info[ks->cpu].task;
task 513 kernel/debug/gdbstub.c if (thread == kgdb_info[i].task)
task 951 kernel/debug/gdbstub.c kgdb_usethread = kgdb_info[ks->cpu].task;
task 952 kernel/debug/gdbstub.c ks->kgdb_usethreadid = shadow_pid(kgdb_info[ks->cpu].task->pid);
task 40 kernel/debug/kdb/kdb_debugger.c kdb_current_task = kgdb_info[ks->cpu].task;
task 225 kernel/debug/kdb/kdb_private.h #define KDB_TSK(cpu) kgdb_info[cpu].task
task 223 kernel/events/core.c if (ctx->task) {
task 224 kernel/events/core.c if (ctx->task != current) {
task 256 kernel/events/core.c struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
task 272 kernel/events/core.c if (!task) {
task 277 kernel/events/core.c if (task == TASK_TOMBSTONE)
task 281 kernel/events/core.c if (!task_function_call(task, event_function, &efs))
task 289 kernel/events/core.c task = ctx->task;
task 290 kernel/events/core.c if (task == TASK_TOMBSTONE) {
task 310 kernel/events/core.c struct task_struct *task = READ_ONCE(ctx->task);
task 315 kernel/events/core.c if (task) {
task 316 kernel/events/core.c if (task == TASK_TOMBSTONE)
task 324 kernel/events/core.c task = ctx->task;
task 325 kernel/events/core.c if (task == TASK_TOMBSTONE)
task 328 kernel/events/core.c if (task) {
task 335 kernel/events/core.c if (WARN_ON_ONCE(task != current))
task 569 kernel/events/core.c struct task_struct *task);
task 764 kernel/events/core.c perf_cgroup_set_timestamp(struct task_struct *task,
task 776 kernel/events/core.c if (!task || !ctx->nr_cgroups)
task 779 kernel/events/core.c cgrp = perf_cgroup_from_task(task, ctx);
task 799 kernel/events/core.c static void perf_cgroup_switch(struct task_struct *task, int mode)
task 836 kernel/events/core.c cpuctx->cgrp = perf_cgroup_from_task(task,
task 838 kernel/events/core.c cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
task 847 kernel/events/core.c static inline void perf_cgroup_sched_out(struct task_struct *task,
task 859 kernel/events/core.c cgrp1 = perf_cgroup_from_task(task, NULL);
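get_task_cred() in the kernel/cred.c entries reads another task's credentials under RCU via __task_cred(). A hedged sketch of that access pattern (the helper name is hypothetical):

    #include <linux/cred.h>
    #include <linux/sched.h>

    static uid_t task_euid_of(struct task_struct *task)
    {
            const struct cred *cred;
            uid_t uid;

            rcu_read_lock();
            cred = __task_cred(task);       /* RCU-protected snapshot */
            uid = from_kuid(&init_user_ns, cred->euid);
            rcu_read_unlock();

            return uid;
    }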
task 868 kernel/events/core.c perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
task 874 kernel/events/core.c struct task_struct *task)
task 885 kernel/events/core.c cgrp1 = perf_cgroup_from_task(task, NULL);
task 894 kernel/events/core.c perf_cgroup_switch(task, PERF_CGROUP_SWIN);
task 1016 kernel/events/core.c static inline void perf_cgroup_sched_out(struct task_struct *task,
task 1022 kernel/events/core.c struct task_struct *task)
task 1034 kernel/events/core.c perf_cgroup_set_timestamp(struct task_struct *task,
task 1040 kernel/events/core.c perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
task 1197 kernel/events/core.c if (ctx->task && ctx->task != TASK_TOMBSTONE)
task 1198 kernel/events/core.c put_task_struct(ctx->task);
task 1373 kernel/events/core.c perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
task 1389 kernel/events/core.c ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
task 1402 kernel/events/core.c if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
task 1409 kernel/events/core.c if (ctx->task == TASK_TOMBSTONE ||
task 1414 kernel/events/core.c WARN_ON_ONCE(ctx->task != task);
task 1429 kernel/events/core.c perf_pin_task_context(struct task_struct *task, int ctxn)
task 1434 kernel/events/core.c ctx = perf_lock_task_context(task, ctxn, &flags);
task 1487 kernel/events/core.c if (!ctx->task)
task 2174 kernel/events/core.c if (ctx->task) {
task 2493 kernel/events/core.c struct task_struct *task);
task 2510 kernel/events/core.c struct task_struct *task)
task 2512 kernel/events/core.c cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
task 2514 kernel/events/core.c ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
task 2515 kernel/events/core.c cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
task 2517 kernel/events/core.c ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
task 2597 kernel/events/core.c if (ctx->task) {
task 2601 kernel/events/core.c reprogram = (ctx->task == current);
task 2610 kernel/events/core.c if (task_curr(ctx->task) && !reprogram) {
task 2659 kernel/events/core.c struct task_struct *task = READ_ONCE(ctx->task);
task 2674 kernel/events/core.c if (!task) {
task 2682 kernel/events/core.c if (WARN_ON_ONCE(task == TASK_TOMBSTONE))
task 2717 kernel/events/core.c if (!task_function_call(task, __perf_install_in_context, event))
task 2721 kernel/events/core.c task = ctx->task;
task 2722 kernel/events/core.c if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) {
task 2735 kernel/events/core.c if (task_curr(task)) {
task 2781 kernel/events/core.c if (ctx->task)
task 3014 kernel/events/core.c if (ctx->task)
task 3023 kernel/events/core.c if (ctx->task) {
task 3171 kernel/events/core.c static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
task 3174 kernel/events/core.c struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
task 3212 kernel/events/core.c WRITE_ONCE(ctx->task, next);
task 3213 kernel/events/core.c WRITE_ONCE(next_ctx->task, task);
task 3224 kernel/events/core.c RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], next_ctx);
task 3301 kernel/events/core.c static void perf_event_switch(struct task_struct *task,
task 3318 kernel/events/core.c void __perf_event_task_sched_out(struct task_struct *task,
task 3324 kernel/events/core.c perf_pmu_sched_task(task, next, false);
task 3327 kernel/events/core.c perf_event_switch(task, next, false);
task 3330 kernel/events/core.c perf_event_context_sched_out(task, ctxn, next);
task 3338 kernel/events/core.c perf_cgroup_sched_out(task, next);
task 3469 kernel/events/core.c struct task_struct *task)
task 3480 kernel/events/core.c if (ctx->task) {
task 3493 kernel/events/core.c perf_cgroup_set_timestamp(task, ctx);
task 3510 kernel/events/core.c struct task_struct *task)
task 3514 kernel/events/core.c ctx_sched_in(ctx, cpuctx, event_type, task);
task 3518 kernel/events/core.c struct task_struct *task)
task 3545 kernel/events/core.c perf_event_sched_in(cpuctx, ctx, task);
task 3564 kernel/events/core.c struct task_struct *task)
task 3577 kernel/events/core.c perf_cgroup_sched_in(prev, task);
task 3580 kernel/events/core.c ctx = task->perf_event_ctxp[ctxn];
task 3584 kernel/events/core.c perf_event_context_sched_in(ctx, task);
task 3588 kernel/events/core.c perf_event_switch(task, prev, true);
task 3591 kernel/events/core.c perf_pmu_sched_task(prev, task, true);
task 3970 kernel/events/core.c if (ctx->task && cpuctx->task_ctx != ctx)
task 4185 kernel/events/core.c alloc_perf_context(struct pmu *pmu, struct task_struct *task)
task 4194 kernel/events/core.c if (task)
task 4195 kernel/events/core.c ctx->task = get_task_struct(task);
task 4204 kernel/events/core.c struct task_struct *task;
task 4208 kernel/events/core.c task = current;
task 4210 kernel/events/core.c task = find_task_by_vpid(vpid);
task 4211 kernel/events/core.c if (task)
task 4212 kernel/events/core.c get_task_struct(task);
task 4215 kernel/events/core.c if (!task)
task 4218 kernel/events/core.c return task;
task 4225 kernel/events/core.c find_get_context(struct pmu *pmu, struct task_struct *task,
task 4235 kernel/events/core.c if (!task) {
task 4262 kernel/events/core.c ctx = perf_lock_task_context(task, ctxn, &flags);
task 4276 kernel/events/core.c ctx = alloc_perf_context(pmu, task);
task 4287 kernel/events/core.c mutex_lock(&task->perf_event_mutex);
task 4292 kernel/events/core.c if (task->flags & PF_EXITING)
task 4294 kernel/events/core.c else if (task->perf_event_ctxp[ctxn])
task 4299 kernel/events/core.c rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
task 4301 kernel/events/core.c mutex_unlock(&task->perf_event_mutex);
task 4358 kernel/events/core.c attr->task || attr->ksymbol ||
task 4417 kernel/events/core.c if (event->attr.task)
task 6567 kernel/events/core.c bool crosstask = event->ctx->task && event->ctx->task != current;
task 6769 kernel/events/core.c struct task_struct *task)
task 6779 kernel/events/core.c .pid = perf_event_pid(event, task),
task 6780 kernel/events/core.c .tid = perf_event_tid(event, task),
task 7019 kernel/events/core.c struct task_struct *task;
task 7037 kernel/events/core.c event->attr.task;
task 7046 kernel/events/core.c struct task_struct *task = task_event->task;
task 7059 kernel/events/core.c task_event->event_id.pid = perf_event_pid(event, task);
task 7060 kernel/events/core.c task_event->event_id.tid = perf_event_tid(event, task);
task 7064 kernel/events/core.c task->real_parent);
task 7066 kernel/events/core.c task->real_parent);
task 7083 kernel/events/core.c static void perf_event_task(struct task_struct *task,
task 7095 kernel/events/core.c .task = task,
task 7116 kernel/events/core.c void perf_event_fork(struct task_struct *task)
task 7118 kernel/events/core.c perf_event_task(task, NULL, 1);
task 7119 kernel/events/core.c perf_event_namespaces(task);
task 7127 kernel/events/core.c struct task_struct *task;
task 7163 kernel/events/core.c comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
task 7164 kernel/events/core.c comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
task 7183 kernel/events/core.c strlcpy(comm, comm_event->task->comm, sizeof(comm));
task 7196 kernel/events/core.c void perf_event_comm(struct task_struct *task, bool exec)
task 7204 kernel/events/core.c .task = task,
task 7226 kernel/events/core.c struct task_struct *task;
task 7263 kernel/events/core.c namespaces_event->task);
task 7265 kernel/events/core.c namespaces_event->task);
task 7277 kernel/events/core.c struct task_struct *task,
task 7284 kernel/events/core.c error = ns_get_path(&ns_path, task, ns_ops);
task 7293 kernel/events/core.c void perf_event_namespaces(struct task_struct *task)
task 7302 kernel/events/core.c .task = task,
task 7319 kernel/events/core.c task, &mntns_operations);
task 7323 kernel/events/core.c task, &userns_operations);
task 7327 kernel/events/core.c task, &netns_operations);
task 7331 kernel/events/core.c task, &utsns_operations);
task 7335 kernel/events/core.c task, &ipcns_operations);
task 7339 kernel/events/core.c task, &pidns_operations);
task 7343 kernel/events/core.c task, &cgroupns_operations);
task 7770 kernel/events/core.c struct task_struct *task;
task 7796 kernel/events/core.c if (event->ctx->task) {
task 7814 kernel/events/core.c if (event->ctx->task)
task 7824 kernel/events/core.c static void perf_event_switch(struct task_struct *task,
task 7832 kernel/events/core.c .task = task,
task 7845 kernel/events/core.c if (!sched_in && task->state == TASK_RUNNING)
task 8708 kernel/events/core.c struct task_struct *task)
task 8718 kernel/events/core.c rctx, task);
task 8724 kernel/events/core.c struct task_struct *task)
task 8750 kernel/events/core.c if (task && task != current) {
task 8755 kernel/events/core.c ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
task 9236 kernel/events/core.c struct task_struct *task = READ_ONCE(event->ctx->task);
task 9246 kernel/events/core.c if (task == TASK_TOMBSTONE)
task 9250 kernel/events/core.c mm = get_task_mm(event->ctx->task);
task 9454 kernel/events/core.c if (!event->ctx->task)
task 10366 kernel/events/core.c if (event->attr.task)
task 10421 kernel/events/core.c struct task_struct *task,
task 10433 kernel/events/core.c if (!task || cpu != -1)
task 10482 kernel/events/core.c if (task) {
task 10489 kernel/events/core.c event->hw.target = get_task_struct(task);
task 10909 kernel/events/core.c struct task_struct *task = NULL;
task 10983 kernel/events/core.c task = find_lively_task_by_vpid(pid);
task 10984 kernel/events/core.c if (IS_ERR(task)) {
task 10985 kernel/events/core.c err = PTR_ERR(task);
task 10990 kernel/events/core.c if (task && group_leader &&
task 10996 kernel/events/core.c if (task) {
task 10997 kernel/events/core.c err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
task 11010 kernel/events/core.c if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
task 11017 kernel/events/core.c event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
task 11073 kernel/events/core.c ctx = find_get_context(pmu, task, event);
task 11108 kernel/events/core.c if (group_leader->ctx->task != ctx->task)
task 11143 kernel/events/core.c if (gctx->task == TASK_TOMBSTONE) {
task 11182 kernel/events/core.c if (ctx->task == TASK_TOMBSTONE) {
task 11192 kernel/events/core.c if (!task) {
task 11292 kernel/events/core.c if (task) {
task 11293 kernel/events/core.c mutex_unlock(&task->signal->cred_guard_mutex);
task 11294 kernel/events/core.c put_task_struct(task);
task 11328 kernel/events/core.c if (task)
task 11329 kernel/events/core.c mutex_unlock(&task->signal->cred_guard_mutex);
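The "attr.task" checks in the events/core.c entries above (and the uapi bitfield quoted earlier, "task : 1, /* trace fork/exit */") enable fork/exit sideband records. A hedged userspace sketch of requesting them through perf_event_open(2), using a dummy software event:

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <string.h>
    #include <unistd.h>

    /* returns a perf fd whose ring buffer will carry
       PERF_RECORD_FORK / PERF_RECORD_EXIT records for `pid` */
    static int open_task_events(pid_t pid)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_SOFTWARE;
            attr.config = PERF_COUNT_SW_DUMMY;
            attr.task = 1;                  /* trace fork/exit */

            return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
    }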
task 11331 kernel/events/core.c if (task)
task 11332 kernel/events/core.c put_task_struct(task);
task 11349 kernel/events/core.c struct task_struct *task,
task 11364 kernel/events/core.c event = perf_event_alloc(attr, cpu, task, NULL, NULL,
task 11377 kernel/events/core.c ctx = find_get_context(event->pmu, task, event);
task 11385 kernel/events/core.c if (ctx->task == TASK_TOMBSTONE) {
task 11390 kernel/events/core.c if (!task) {
task 11607 kernel/events/core.c WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE);
task 11696 kernel/events/core.c void perf_event_free_task(struct task_struct *task)
task 11703 kernel/events/core.c ctx = task->perf_event_ctxp[ctxn];
task 11715 kernel/events/core.c RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL);
task 11716 kernel/events/core.c WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
task 11717 kernel/events/core.c put_task_struct(task); /* cannot be last */
task 11744 kernel/events/core.c void perf_event_delayed_put(struct task_struct *task)
task 11749 kernel/events/core.c WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
task 12328 kernel/events/core.c struct task_struct *task = info;
task 12330 kernel/events/core.c perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
task 12337 kernel/events/core.c struct task_struct *task;
task 12340 kernel/events/core.c cgroup_taskset_for_each(task, css, tset)
task 12341 kernel/events/core.c task_function_call(task, __perf_cgroup_move, task);
task 507 kernel/events/hw_breakpoint.c if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
task 185 kernel/exit.c void put_task_struct_rcu_user(struct task_struct *task)
task 187 kernel/exit.c if (refcount_dec_and_test(&task->rcu_users))
task 188 kernel/exit.c call_rcu(&task->rcu, delayed_put_task_struct);
task 239 kernel/exit.c struct task_struct *task;
task 256 kernel/exit.c task = rcu_dereference(w->task);
task 257 kernel/exit.c if (task)
task 258 kernel/exit.c wake_up_process(task);
task 458 kernel/exit.c self.task = current;
task 469 kernel/exit.c if (!self.task) /* see coredump_finish() */
task 1177 kernel/fork.c struct file *get_task_exe_file(struct task_struct *task)
task 1182 kernel/fork.c task_lock(task);
task 1183 kernel/fork.c mm = task->mm;
task 1185 kernel/fork.c if (!(task->flags & PF_KTHREAD))
task 1188 kernel/fork.c task_unlock(task);
task 1202 kernel/fork.c struct mm_struct *get_task_mm(struct task_struct *task)
task 1206 kernel/fork.c task_lock(task);
task 1207 kernel/fork.c mm = task->mm;
task 1209 kernel/fork.c if (task->flags & PF_KTHREAD)
task 1214 kernel/fork.c task_unlock(task);
task 1219 kernel/fork.c struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
task 1224 kernel/fork.c err = mutex_lock_killable(&task->signal->cred_guard_mutex);
task 1228 kernel/fork.c mm = get_task_mm(task);
task 1230 kernel/fork.c !ptrace_may_access(task, mode)) {
task 1234 kernel/fork.c mutex_unlock(&task->signal->cred_guard_mutex);
task 1643 kernel/fork.c static inline void init_task_pid_links(struct task_struct *task)
task 1648 kernel/fork.c INIT_HLIST_NODE(&task->pid_links[type]);
task 1653 kernel/fork.c init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
task 1656 kernel/fork.c task->thread_pid = pid;
task 1658 kernel/fork.c task->signal->pids[type] = pid;
task 1709 kernel/fork.c struct task_struct *task;
task 1716 kernel/fork.c task = pid_task(pid, PIDTYPE_PID);
task 1722 kernel/fork.c if (!task || (task->exit_state && thread_group_empty(task)))
task 2302 kernel/fork.c struct task_struct *task;
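get_task_mm() in the kernel/fork.c entries takes a reference on the task's mm (returning NULL for kernel threads); every successful call must be balanced by mmput(). A hedged sketch with a hypothetical helper:

    #include <linux/sched/mm.h>

    static unsigned long vsize_of(struct task_struct *task)
    {
            struct mm_struct *mm = get_task_mm(task);
            unsigned long pages = 0;

            if (mm) {
                    pages = mm->total_vm;   /* address-space size in pages */
                    mmput(mm);              /* balance get_task_mm() */
            }
            return pages;
    }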
task 2307 kernel/fork.c task = copy_process(&init_struct_pid, 0, cpu_to_node(cpu), &args);
task 2308 kernel/fork.c if (!IS_ERR(task)) {
task 2309 kernel/fork.c init_idle_pids(task);
task 2310 kernel/fork.c init_idle(task, cpu);
task 2313 kernel/fork.c return task;
task 2938 kernel/fork.c struct task_struct *task = current;
task 2947 kernel/fork.c *displaced = task->files;
task 2948 kernel/fork.c task_lock(task);
task 2949 kernel/fork.c task->files = copy;
task 2950 kernel/fork.c task_unlock(task);
task 233 kernel/futex.c struct task_struct *task;
task 1440 kernel/futex.c struct task_struct *task,
task 1444 kernel/futex.c u32 uval, newval, vpid = task_pid_vnr(task);
task 1542 kernel/futex.c struct task_struct *p = q->task;
task 1919 kernel/futex.c wake_up_state(q->task, TASK_NORMAL);
task 1987 kernel/futex.c vpid = task_pid_vnr(top_waiter->task);
task 1988 kernel/futex.c ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
task 2250 kernel/futex.c this->task);
task 2362 kernel/futex.c q->task = current;
task 2720 kernel/futex.c if (!timeout || timeout->task)
task 2835 kernel/futex.c if (to && !to->task)
task 3263 kernel/futex.c if (timeout && !timeout->task)
task 62 kernel/kcmp.c get_file_raw_ptr(struct task_struct *task, unsigned int idx)
task 66 kernel/kcmp.c task_lock(task);
task 69 kernel/kcmp.c if (task->files)
task 70 kernel/kcmp.c file = fcheck_files(task->files, idx);
task 73 kernel/kcmp.c task_unlock(task);
task 1261 kernel/kprobes.c if (ri->task == tk)
task 1885 kernel/kprobes.c ri->task = current;
task 163 kernel/kthread.c void *kthread_data(struct task_struct *task)
task 165 kernel/kthread.c return to_kthread(task)->data;
task 177 kernel/kthread.c void *kthread_probe_data(struct task_struct *task)
task 179 kernel/kthread.c struct kthread *kthread = to_kthread(task);
task 299 kernel/kthread.c struct task_struct *task;
task 334 kernel/kthread.c task = create->result;
task 335 kernel/kthread.c if (!IS_ERR(task)) {
task 344 kernel/kthread.c set_task_comm(task, name);
task 349 kernel/kthread.c sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
task 350 kernel/kthread.c set_cpus_allowed_ptr(task, cpu_all_mask);
task 353 kernel/kthread.c return task;
task 384 kernel/kthread.c struct task_struct *task;
task 388 kernel/kthread.c task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
task 391 kernel/kthread.c return task;
task 642 kernel/kthread.c WARN_ON(worker->task && worker->task != current);
task 643 kernel/kthread.c worker->task = current;
task 654 kernel/kthread.c worker->task = NULL;
task 686 kernel/kthread.c struct task_struct *task;
task 698 kernel/kthread.c task = __kthread_create_on_node(kthread_worker_fn, worker,
task 700 kernel/kthread.c if (IS_ERR(task))
task 704 kernel/kthread.c kthread_bind(task, cpu);
task 707 kernel/kthread.c worker->task = task;
task 708 kernel/kthread.c wake_up_process(task);
task 713 kernel/kthread.c return ERR_CAST(task);
task 802 kernel/kthread.c if (!worker->current_work && likely(worker->task))
task 803 kernel/kthread.c wake_up_process(worker->task);
task 1180 kernel/kthread.c struct task_struct *task;
task 1182 kernel/kthread.c task = worker->task;
task 1183 kernel/kthread.c if (WARN_ON(!task))
task 1187 kernel/kthread.c kthread_stop(task);
task 73 kernel/livepatch/transition.c struct task_struct *g, *task;
task 110 kernel/livepatch/transition.c for_each_process_thread(g, task) {
task 111 kernel/livepatch/transition.c WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
task 112 kernel/livepatch/transition.c task->patch_state = KLP_UNDEFINED;
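The kernel/kthread.c entries above show a kthread_worker's backing thread being created, stored in worker->task, and woken when work is queued. A hedged usage sketch of the public worker API that wraps that machinery (function names in the body are hypothetical):

    #include <linux/kthread.h>

    static void my_work_fn(struct kthread_work *work)
    {
            /* runs in the dedicated worker thread */
    }

    static int demo(void)
    {
            struct kthread_worker *worker;
            struct kthread_work work;

            worker = kthread_create_worker(0, "demo-worker");
            if (IS_ERR(worker))
                    return PTR_ERR(worker);

            kthread_init_work(&work, my_work_fn);
            kthread_queue_work(worker, &work);      /* wakes worker->task */
            kthread_flush_work(&work);
            kthread_destroy_worker(worker);
            return 0;
    }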
task 117 kernel/livepatch/transition.c task = idle_task(cpu);
task 118 kernel/livepatch/transition.c WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
task 119 kernel/livepatch/transition.c task->patch_state = KLP_UNDEFINED;
task 163 kernel/livepatch/transition.c void klp_update_patch_state(struct task_struct *task)
task 183 kernel/livepatch/transition.c if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
task 184 kernel/livepatch/transition.c task->patch_state = READ_ONCE(klp_target_state);
task 242 kernel/livepatch/transition.c static int klp_check_stack(struct task_struct *task, char *err_buf)
task 249 kernel/livepatch/transition.c ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
task 253 kernel/livepatch/transition.c __func__, task->comm, task->pid);
task 266 kernel/livepatch/transition.c __func__, task->comm, task->pid,
task 281 kernel/livepatch/transition.c static bool klp_try_switch_task(struct task_struct *task)
task 292 kernel/livepatch/transition.c if (task->patch_state == klp_target_state)
task 307 kernel/livepatch/transition.c rq = task_rq_lock(task, &flags);
task 309 kernel/livepatch/transition.c if (task_running(rq, task) && task != current) {
task 311 kernel/livepatch/transition.c "%s: %s:%d is running\n", __func__, task->comm,
task 312 kernel/livepatch/transition.c task->pid);
task 316 kernel/livepatch/transition.c ret = klp_check_stack(task, err_buf);
task 322 kernel/livepatch/transition.c clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
task 323 kernel/livepatch/transition.c task->patch_state = klp_target_state;
task 326 kernel/livepatch/transition.c task_rq_unlock(rq, task, &flags);
task 345 kernel/livepatch/transition.c struct task_struct *g, *task;
task 351 kernel/livepatch/transition.c for_each_process_thread(g, task) {
task 352 kernel/livepatch/transition.c if (!klp_patch_pending(task))
task 361 kernel/livepatch/transition.c if (task->flags & PF_KTHREAD) {
task 366 kernel/livepatch/transition.c wake_up_state(task, TASK_INTERRUPTIBLE);
task 372 kernel/livepatch/transition.c spin_lock_irq(&task->sighand->siglock);
task 373 kernel/livepatch/transition.c signal_wake_up(task, 0);
task 374 kernel/livepatch/transition.c spin_unlock_irq(&task->sighand->siglock);
task 391 kernel/livepatch/transition.c struct task_struct *g, *task;
task 407 kernel/livepatch/transition.c for_each_process_thread(g, task)
task 408 kernel/livepatch/transition.c if (!klp_try_switch_task(task))
task 417 kernel/livepatch/transition.c task = idle_task(cpu);
task 419 kernel/livepatch/transition.c if (!klp_try_switch_task(task))
task 421 kernel/livepatch/transition.c } else if (task->patch_state != klp_target_state) {
task 423 kernel/livepatch/transition.c clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
task 424 kernel/livepatch/transition.c task->patch_state = klp_target_state;
task 465 kernel/livepatch/transition.c struct task_struct *g, *task;
task 480 kernel/livepatch/transition.c for_each_process_thread(g, task)
task 481 kernel/livepatch/transition.c if (task->patch_state != klp_target_state)
task 482 kernel/livepatch/transition.c set_tsk_thread_flag(task, TIF_PATCH_PENDING);
task 491 kernel/livepatch/transition.c task = idle_task(cpu);
task 492 kernel/livepatch/transition.c if (task->patch_state != klp_target_state)
task 493 kernel/livepatch/transition.c set_tsk_thread_flag(task, TIF_PATCH_PENDING);
task 506 kernel/livepatch/transition.c struct task_struct *g, *task;
task 530 kernel/livepatch/transition.c for_each_process_thread(g, task) {
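The livepatch transition code above repeatedly walks every thread in the system with for_each_process_thread() to set or clear TIF_PATCH_PENDING. A hedged sketch of that traversal under the tasklist lock (the function name is hypothetical):

    #include <linux/sched/signal.h>
    #include <linux/sched/task.h>

    static void mark_all_threads(void)
    {
            struct task_struct *g, *task;

            read_lock(&tasklist_lock);
            for_each_process_thread(g, task)
                    set_tsk_thread_flag(task, TIF_PATCH_PENDING);
            read_unlock(&tasklist_lock);
    }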
task 531 kernel/livepatch/transition.c WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
task 532 kernel/livepatch/transition.c task->patch_state = initial_state;
task 540 kernel/livepatch/transition.c task = idle_task(cpu);
task 541 kernel/livepatch/transition.c WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
task 542 kernel/livepatch/transition.c task->patch_state = initial_state;
task 581 kernel/livepatch/transition.c struct task_struct *g, *task;
task 598 kernel/livepatch/transition.c for_each_process_thread(g, task)
task 599 kernel/livepatch/transition.c clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
task 631 kernel/livepatch/transition.c struct task_struct *g, *task;
task 637 kernel/livepatch/transition.c for_each_process_thread(g, task)
task 638 kernel/livepatch/transition.c klp_update_patch_state(task);
task 373 kernel/locking/lockdep.c void lockdep_init_task(struct task_struct *task)
task 375 kernel/locking/lockdep.c task->lockdep_depth = 0; /* no locks held yet */
task 376 kernel/locking/lockdep.c task->curr_chain_key = INITIAL_CHAIN_KEY;
task 377 kernel/locking/lockdep.c task->lockdep_recursion = 0;
task 392 kernel/locking/lockdep.c void lockdep_set_selftest_task(struct task_struct *task)
task 394 kernel/locking/lockdep.c lockdep_selftest_task_struct = task;
task 3601 kernel/locking/lockdep.c static inline unsigned int task_irq_context(struct task_struct *task)
task 3603 kernel/locking/lockdep.c return 2 * !!task->hardirq_context + !!task->softirq_context;
task 3697 kernel/locking/lockdep.c static inline unsigned int task_irq_context(struct task_struct *task)
task 5364 kernel/locking/lockdep.c void debug_show_held_locks(struct task_struct *task)
task 5370 kernel/locking/lockdep.c lockdep_print_held_locks(task);
task 52 kernel/locking/mutex-debug.c struct task_struct *task)
task 57 kernel/locking/mutex-debug.c task->blocked_on = waiter;
task 61 kernel/locking/mutex-debug.c struct task_struct *task)
task 64 kernel/locking/mutex-debug.c DEBUG_LOCKS_WARN_ON(waiter->task != task);
task 65 kernel/locking/mutex-debug.c DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
task 66 kernel/locking/mutex-debug.c task->blocked_on = NULL;
task 69 kernel/locking/mutex-debug.c waiter->task = NULL;
task 24 kernel/locking/mutex-debug.h struct task_struct *task);
task 26 kernel/locking/mutex-debug.h struct task_struct *task);
task 114 kernel/locking/mutex.c unsigned long task = owner & ~MUTEX_FLAGS;
task 116 kernel/locking/mutex.c if (task) {
task 117 kernel/locking/mutex.c if (likely(task != curr))
task 224 kernel/locking/mutex.c static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
task 237 kernel/locking/mutex.c new |= (unsigned long)task;
task 238 kernel/locking/mutex.c if (task)
task 376 kernel/locking/mutex.c wake_up_process(waiter->task);
task 1003 kernel/locking/mutex.c waiter.task = current;
task 1269 kernel/locking/mutex.c next = waiter->task;
task 13 kernel/locking/mutex.h #define mutex_remove_waiter(lock, waiter, task) \
task 60 kernel/locking/rtmutex-debug.c void rt_mutex_debug_task_free(struct task_struct *task)
task 62 kernel/locking/rtmutex-debug.c DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root));
task 63 kernel/locking/rtmutex-debug.c DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
task 75 kernel/locking/rtmutex-debug.c struct task_struct *task;
task 80 kernel/locking/rtmutex-debug.c task = rt_mutex_owner(act_waiter->lock);
task 81 kernel/locking/rtmutex-debug.c if (task && task != current) {
task 82 kernel/locking/rtmutex-debug.c act_waiter->deadlock_task_pid = get_pid(task_pid(task));
task 89 kernel/locking/rtmutex-debug.c struct task_struct *task;
task 95 kernel/locking/rtmutex-debug.c task = pid_task(waiter->deadlock_task_pid, PIDTYPE_PID);
task 96 kernel/locking/rtmutex-debug.c if (!task) {
task 112 kernel/locking/rtmutex-debug.c task->comm, task_pid_nr(task),
task 120 kernel/locking/rtmutex-debug.c task->comm, task_pid_nr(task));
task 124 kernel/locking/rtmutex-debug.c debug_show_held_locks(task);
task 127 kernel/locking/rtmutex-debug.c task->comm, task_pid_nr(task));
task 128 kernel/locking/rtmutex-debug.c show_stack(task, NULL);
task 306 kernel/locking/rtmutex.c rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
task 308 kernel/locking/rtmutex.c struct rb_node **link = &task->pi_waiters.rb_root.rb_node;
task 325 kernel/locking/rtmutex.c rb_insert_color_cached(&waiter->pi_tree_entry, &task->pi_waiters, leftmost);
task 329 kernel/locking/rtmutex.c rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
task 334 kernel/locking/rtmutex.c rb_erase_cached(&waiter->pi_tree_entry, &task->pi_waiters);
task 345 kernel/locking/rtmutex.c pi_task = task_top_pi_waiter(p)->task;
task 449 kernel/locking/rtmutex.c static int rt_mutex_adjust_prio_chain(struct task_struct *task,
task 488 kernel/locking/rtmutex.c put_task_struct(task);
task 503 kernel/locking/rtmutex.c raw_spin_lock_irq(&task->pi_lock);
task 508 kernel/locking/rtmutex.c waiter = task->pi_blocked_on;
task 547 kernel/locking/rtmutex.c if (!task_has_pi_waiters(task))
task 555 kernel/locking/rtmutex.c if (top_waiter != task_top_pi_waiter(task)) {
task 570 kernel/locking/rtmutex.c if (rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
task 587 kernel/locking/rtmutex.c raw_spin_unlock_irq(&task->pi_lock);
task 618 kernel/locking/rtmutex.c raw_spin_unlock(&task->pi_lock);
task 619 kernel/locking/rtmutex.c put_task_struct(task);
task 631 kernel/locking/rtmutex.c task = get_task_struct(rt_mutex_owner(lock));
task 632 kernel/locking/rtmutex.c raw_spin_lock(&task->pi_lock);
task 640 kernel/locking/rtmutex.c next_lock = task_blocked_on_lock(task);
task 647 kernel/locking/rtmutex.c raw_spin_unlock(&task->pi_lock);
task 682 kernel/locking/rtmutex.c waiter->prio = task->prio;
task 683 kernel/locking/rtmutex.c waiter->deadline = task->dl.deadline;
task 688 kernel/locking/rtmutex.c raw_spin_unlock(&task->pi_lock);
task 689 kernel/locking/rtmutex.c put_task_struct(task);
task 705 kernel/locking/rtmutex.c wake_up_process(rt_mutex_top_waiter(lock)->task);
task 711 kernel/locking/rtmutex.c task = get_task_struct(rt_mutex_owner(lock));
task 712 kernel/locking/rtmutex.c raw_spin_lock(&task->pi_lock);
task 722 kernel/locking/rtmutex.c rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
task 723 kernel/locking/rtmutex.c rt_mutex_enqueue_pi(task, waiter);
task 724 kernel/locking/rtmutex.c rt_mutex_adjust_prio(task);
task 737 kernel/locking/rtmutex.c rt_mutex_dequeue_pi(task, waiter);
task 739 kernel/locking/rtmutex.c rt_mutex_enqueue_pi(task, waiter);
task 740 kernel/locking/rtmutex.c rt_mutex_adjust_prio(task);
task 758 kernel/locking/rtmutex.c next_lock = task_blocked_on_lock(task);
task 766 kernel/locking/rtmutex.c raw_spin_unlock(&task->pi_lock);
task 790 kernel/locking/rtmutex.c raw_spin_unlock_irq(&task->pi_lock);
task 792 kernel/locking/rtmutex.c put_task_struct(task);
task 807 kernel/locking/rtmutex.c static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
task 871 kernel/locking/rtmutex.c if (!rt_mutex_waiter_less(task_to_waiter(task),
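rt_mutex_enqueue_pi() above keeps each task's priority-inheritance waiters in a cached rbtree so the top waiter is always the leftmost node. A hedged generic sketch of that rb_insert_color_cached() idiom, with a hypothetical node type ordered by an integer key:

    #include <linux/rbtree.h>

    struct item {                   /* hypothetical node ordered by key */
            struct rb_node node;
            int key;
    };

    static void insert_cached(struct rb_root_cached *root, struct item *it)
    {
            struct rb_node **link = &root->rb_root.rb_node, *parent = NULL;
            bool leftmost = true;

            while (*link) {
                    struct item *cur = rb_entry(*link, struct item, node);

                    parent = *link;
                    if (it->key < cur->key) {
                            link = &parent->rb_left;
                    } else {
                            link = &parent->rb_right;
                            leftmost = false;   /* went right at least once */
                    }
            }

            rb_link_node(&it->node, parent, link);
            rb_insert_color_cached(&it->node, root, leftmost);
    }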
871 kernel/locking/rtmutex.c if (!rt_mutex_waiter_less(task_to_waiter(task), task 897 kernel/locking/rtmutex.c raw_spin_lock(&task->pi_lock); task 898 kernel/locking/rtmutex.c task->pi_blocked_on = NULL; task 905 kernel/locking/rtmutex.c rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock)); task 906 kernel/locking/rtmutex.c raw_spin_unlock(&task->pi_lock); task 916 kernel/locking/rtmutex.c rt_mutex_set_owner(lock, task); task 930 kernel/locking/rtmutex.c struct task_struct *task, task 949 kernel/locking/rtmutex.c if (owner == task) task 952 kernel/locking/rtmutex.c raw_spin_lock(&task->pi_lock); task 953 kernel/locking/rtmutex.c waiter->task = task; task 955 kernel/locking/rtmutex.c waiter->prio = task->prio; task 956 kernel/locking/rtmutex.c waiter->deadline = task->dl.deadline; task 963 kernel/locking/rtmutex.c task->pi_blocked_on = waiter; task 965 kernel/locking/rtmutex.c raw_spin_unlock(&task->pi_lock); task 1004 kernel/locking/rtmutex.c next_lock, waiter, task); task 1057 kernel/locking/rtmutex.c wake_q_add(wake_q, waiter->task); task 1125 kernel/locking/rtmutex.c void rt_mutex_adjust_pi(struct task_struct *task) task 1131 kernel/locking/rtmutex.c raw_spin_lock_irqsave(&task->pi_lock, flags); task 1133 kernel/locking/rtmutex.c waiter = task->pi_blocked_on; task 1134 kernel/locking/rtmutex.c if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) { task 1135 kernel/locking/rtmutex.c raw_spin_unlock_irqrestore(&task->pi_lock, flags); task 1139 kernel/locking/rtmutex.c raw_spin_unlock_irqrestore(&task->pi_lock, flags); task 1142 kernel/locking/rtmutex.c get_task_struct(task); task 1144 kernel/locking/rtmutex.c rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL, task 1145 kernel/locking/rtmutex.c next_lock, NULL, task); task 1153 kernel/locking/rtmutex.c waiter->task = NULL; task 1186 kernel/locking/rtmutex.c if (timeout && !timeout->task) task 1749 kernel/locking/rtmutex.c struct task_struct *task) task 1755 kernel/locking/rtmutex.c if (try_to_take_rt_mutex(lock, task, NULL)) task 1759 kernel/locking/rtmutex.c ret = task_blocks_on_rt_mutex(lock, waiter, task, task 1798 kernel/locking/rtmutex.c struct task_struct *task) task 1803 kernel/locking/rtmutex.c ret = __rt_mutex_start_proxy_lock(lock, waiter, task); task 1828 kernel/locking/rtmutex.c return rt_mutex_top_waiter(lock)->task; task 30 kernel/locking/rtmutex_common.h struct task_struct *task; task 141 kernel/locking/rtmutex_common.h struct task_struct *task); task 144 kernel/locking/rtmutex_common.h struct task_struct *task); task 354 kernel/locking/rwsem.c struct task_struct *task; task 426 kernel/locking/rwsem.c wake_q_add(wake_q, waiter->task); task 470 kernel/locking/rwsem.c owner = waiter->task; task 537 kernel/locking/rwsem.c tsk = waiter->task; task 546 kernel/locking/rwsem.c smp_store_release(&waiter->task, NULL); task 1039 kernel/locking/rwsem.c waiter.task = current; task 1090 kernel/locking/rwsem.c if (!smp_load_acquire(&waiter.task)) { task 1096 kernel/locking/rwsem.c if (waiter.task) task 1166 kernel/locking/rwsem.c waiter.task = current; task 195 kernel/locking/semaphore.c struct task_struct *task; task 210 kernel/locking/semaphore.c waiter.task = current; task 261 kernel/locking/semaphore.c wake_up_process(waiter->task); task 273 kernel/pid.c static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type) task 276 kernel/pid.c &task->thread_pid : task 277 kernel/pid.c &task->signal->pids[type]; task 283 kernel/pid.c void attach_pid(struct task_struct *task, enum pid_type type) task 
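
The rwsem.c hits show a lock-free variant of the same handshake: the waker publishes the grant with smp_store_release(&waiter->task, NULL) (line 546) and the sleeping reader polls it with smp_load_acquire(&waiter.task) (lines 1090-1096), so the wakeup path needs no second trip through the wait-queue lock. A reduced sketch of that acquire/release protocol, with invented names; the reference counting is kept because the waiter lives on the sleeper's stack:

#include <linux/sched.h>
#include <linux/sched/task.h>

struct rw_waiter {
	struct task_struct *task;	/* NULL once the lock is granted */
};

/* Waker: pin the task first, because the waiter struct lives on the
 * sleeper's stack and may vanish right after the release store. */
static void grant_and_wake(struct rw_waiter *w)
{
	struct task_struct *tsk = w->task;

	get_task_struct(tsk);
	smp_store_release(&w->task, NULL);	/* publish the grant */
	wake_up_process(tsk);
	put_task_struct(tsk);
}

/* Sleeper: the acquire load pairs with the release store above. */
static void wait_for_grant(struct rw_waiter *w)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!smp_load_acquire(&w->task))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}
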
285 kernel/pid.c struct pid *pid = *task_pid_ptr(task, type); task 286 kernel/pid.c hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]); task 289 kernel/pid.c static void __change_pid(struct task_struct *task, enum pid_type type, task 292 kernel/pid.c struct pid **pid_ptr = task_pid_ptr(task, type); task 298 kernel/pid.c hlist_del_rcu(&task->pid_links[type]); task 308 kernel/pid.c void detach_pid(struct task_struct *task, enum pid_type type) task 310 kernel/pid.c __change_pid(task, type, NULL); task 313 kernel/pid.c void change_pid(struct task_struct *task, enum pid_type type, task 316 kernel/pid.c __change_pid(task, type, pid); task 317 kernel/pid.c attach_pid(task, type); task 360 kernel/pid.c struct task_struct *task; task 363 kernel/pid.c task = find_task_by_vpid(nr); task 364 kernel/pid.c if (task) task 365 kernel/pid.c get_task_struct(task); task 368 kernel/pid.c return task; task 371 kernel/pid.c struct pid *get_task_pid(struct task_struct *task, enum pid_type type) task 375 kernel/pid.c pid = get_pid(rcu_dereference(*task_pid_ptr(task, type))); task 425 kernel/pid.c pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, task 433 kernel/pid.c if (likely(pid_alive(task))) task 434 kernel/pid.c nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns); task 185 kernel/pid_namespace.c struct task_struct *task, *me = current; task 218 kernel/pid_namespace.c task = pid_task(pid, PIDTYPE_PID); task 219 kernel/pid_namespace.c if (task && !__fatal_signal_pending(task)) task 220 kernel/pid_namespace.c group_send_sig_info(SIGKILL, SEND_SIG_PRIV, task, PIDTYPE_MAX); task 342 kernel/pid_namespace.c static struct ns_common *pidns_get(struct task_struct *task) task 347 kernel/pid_namespace.c ns = task_active_pid_ns(task); task 355 kernel/pid_namespace.c static struct ns_common *pidns_for_children_get(struct task_struct *task) task 359 kernel/pid_namespace.c task_lock(task); task 360 kernel/pid_namespace.c if (task->nsproxy) { task 361 kernel/pid_namespace.c ns = task->nsproxy->pid_ns_for_children; task 364 kernel/pid_namespace.c task_unlock(task); task 141 kernel/profile.c void profile_task_exit(struct task_struct *task) task 143 kernel/profile.c blocking_notifier_call_chain(&task_exit_notifier, 0, task); task 146 kernel/profile.c int profile_handoff_task(struct task_struct *task) task 149 kernel/profile.c ret = atomic_notifier_call_chain(&task_free_notifier, 0, task); task 173 kernel/ptrace.c static bool ptrace_freeze_traced(struct task_struct *task) task 178 kernel/ptrace.c if (task->jobctl & JOBCTL_LISTENING) task 181 kernel/ptrace.c spin_lock_irq(&task->sighand->siglock); task 182 kernel/ptrace.c if (task_is_traced(task) && !__fatal_signal_pending(task)) { task 183 kernel/ptrace.c task->state = __TASK_TRACED; task 186 kernel/ptrace.c spin_unlock_irq(&task->sighand->siglock); task 191 kernel/ptrace.c static void ptrace_unfreeze_traced(struct task_struct *task) task 193 kernel/ptrace.c if (task->state != __TASK_TRACED) task 196 kernel/ptrace.c WARN_ON(!task->ptrace || task->parent != current); task 202 kernel/ptrace.c spin_lock_irq(&task->sighand->siglock); task 203 kernel/ptrace.c if (task->state == __TASK_TRACED) { task 204 kernel/ptrace.c if (__fatal_signal_pending(task)) task 205 kernel/ptrace.c wake_up_state(task, __TASK_TRACED); task 207 kernel/ptrace.c task->state = TASK_TRACED; task 209 kernel/ptrace.c spin_unlock_irq(&task->sighand->siglock); task 281 kernel/ptrace.c static int __ptrace_may_access(struct task_struct *task, unsigned int mode) task 303 
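
pid.c lines 360-368 above are the canonical safe pid-to-task lookup: resolve the number under rcu_read_lock() and pin the result with get_task_struct() before leaving the RCU section. Sketched as a self-contained helper (essentially what find_get_task_by_vpid() does):

#include <linux/pid.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/task.h>

static struct task_struct *lookup_task(pid_t nr)
{
	struct task_struct *task;

	rcu_read_lock();
	task = find_task_by_vpid(nr);	/* valid only inside this section */
	if (task)
		get_task_struct(task);	/* safe to use after unlock */
	rcu_read_unlock();

	return task;	/* caller must put_task_struct() when done */
}
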
kernel/ptrace.c if (same_thread_group(task, current)) task 321 kernel/ptrace.c tcred = __task_cred(task); task 345 kernel/ptrace.c mm = task->mm; task 351 kernel/ptrace.c return security_ptrace_access_check(task, mode); task 354 kernel/ptrace.c bool ptrace_may_access(struct task_struct *task, unsigned int mode) task 357 kernel/ptrace.c task_lock(task); task 358 kernel/ptrace.c err = __ptrace_may_access(task, mode); task 359 kernel/ptrace.c task_unlock(task); task 363 kernel/ptrace.c static int ptrace_attach(struct task_struct *task, long request, task 381 kernel/ptrace.c audit_ptrace(task); task 384 kernel/ptrace.c if (unlikely(task->flags & PF_KTHREAD)) task 386 kernel/ptrace.c if (same_thread_group(task, current)) task 395 kernel/ptrace.c if (mutex_lock_interruptible(&task->signal->cred_guard_mutex)) task 398 kernel/ptrace.c task_lock(task); task 399 kernel/ptrace.c retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS); task 400 kernel/ptrace.c task_unlock(task); task 406 kernel/ptrace.c if (unlikely(task->exit_state)) task 408 kernel/ptrace.c if (task->ptrace) task 413 kernel/ptrace.c task->ptrace = flags; task 415 kernel/ptrace.c ptrace_link(task, current); task 419 kernel/ptrace.c send_sig_info(SIGSTOP, SEND_SIG_PRIV, task); task 421 kernel/ptrace.c spin_lock(&task->sighand->siglock); task 440 kernel/ptrace.c if (task_is_stopped(task) && task 441 kernel/ptrace.c task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING)) task 442 kernel/ptrace.c signal_wake_up_state(task, __TASK_STOPPED); task 444 kernel/ptrace.c spin_unlock(&task->sighand->siglock); task 450 kernel/ptrace.c mutex_unlock(&task->signal->cred_guard_mutex); task 460 kernel/ptrace.c wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE); task 461 kernel/ptrace.c proc_ptrace_connector(task, PTRACE_ATTACH); task 879 kernel/ptrace.c static int ptrace_regset(struct task_struct *task, int req, unsigned int type, task 882 kernel/ptrace.c const struct user_regset_view *view = task_user_regset_view(task); task 894 kernel/ptrace.c return copy_regset_to_user(task, view, regset_no, 0, task 897 kernel/ptrace.c return copy_regset_from_user(task, view, regset_no, 0, task 413 kernel/sched/core.c static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task) task 415 kernel/sched/core.c struct wake_q_node *node = &task->wake_q; task 449 kernel/sched/core.c void wake_q_add(struct wake_q_head *head, struct task_struct *task) task 451 kernel/sched/core.c if (__wake_q_add(head, task)) task 452 kernel/sched/core.c get_task_struct(task); task 472 kernel/sched/core.c void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task) task 474 kernel/sched/core.c if (!__wake_q_add(head, task)) task 475 kernel/sched/core.c put_task_struct(task); task 483 kernel/sched/core.c struct task_struct *task; task 485 kernel/sched/core.c task = container_of(node, struct task_struct, wake_q); task 486 kernel/sched/core.c BUG_ON(!task); task 489 kernel/sched/core.c task->wake_q.next = NULL; task 495 kernel/sched/core.c wake_up_process(task); task 496 kernel/sched/core.c put_task_struct(task); task 1506 kernel/sched/core.c struct task_struct *task; task 1540 kernel/sched/core.c struct task_struct *p = arg->task; task 7129 kernel/sched/core.c static void cpu_cgroup_fork(struct task_struct *task) task 7134 kernel/sched/core.c rq = task_rq_lock(task, &rf); task 7137 kernel/sched/core.c sched_change_group(task, TASK_SET_GROUP); task 7139 kernel/sched/core.c task_rq_unlock(rq, task, &rf); task 7144 kernel/sched/core.c 
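
sched/core.c lines 413-496 above implement wake_q, the deferred-wakeup list that the mutex, rwsem and rtmutex code earlier in this listing feed via wake_q_add(): wakeups are queued while a lock is held and issued after it is dropped, keeping wake_up_process() out of the critical section. Typical usage, sketched with an invented waiter list:

#include <linux/list.h>
#include <linux/sched/wake_q.h>
#include <linux/spinlock.h>

struct qitem {
	struct list_head list;
	struct task_struct *task;
};

static void wake_all_waiters(spinlock_t *lock, struct list_head *waiters)
{
	DEFINE_WAKE_Q(wake_q);
	struct qitem *w, *tmp;

	spin_lock(lock);
	list_for_each_entry_safe(w, tmp, waiters, list) {
		list_del_init(&w->list);
		wake_q_add(&wake_q, w->task);	/* takes a task reference */
	}
	spin_unlock(lock);

	wake_up_q(&wake_q);	/* the real wake_up_process() calls happen here */
}
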
struct task_struct *task; task 7148 kernel/sched/core.c cgroup_taskset_for_each(task, css, tset) { task 7150 kernel/sched/core.c if (!sched_rt_can_attach(css_tg(css), task)) task 7157 kernel/sched/core.c raw_spin_lock_irq(&task->pi_lock); task 7163 kernel/sched/core.c if (task->state == TASK_NEW) task 7165 kernel/sched/core.c raw_spin_unlock_irq(&task->pi_lock); task 7175 kernel/sched/core.c struct task_struct *task; task 7178 kernel/sched/core.c cgroup_taskset_for_each(task, css, tset) task 7179 kernel/sched/core.c sched_move_task(task); task 527 kernel/sched/deadline.c static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq); task 1599 kernel/sched/deadline.c static int find_later_rq(struct task_struct *task); task 1876 kernel/sched/deadline.c static int find_later_rq(struct task_struct *task) task 1881 kernel/sched/deadline.c int cpu = task_cpu(task); task 1887 kernel/sched/deadline.c if (task->nr_cpus_allowed == 1) task 1894 kernel/sched/deadline.c if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask)) task 1964 kernel/sched/deadline.c static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) task 1971 kernel/sched/deadline.c cpu = find_later_rq(task); task 1979 kernel/sched/deadline.c !dl_time_before(task->dl.deadline, task 1992 kernel/sched/deadline.c if (unlikely(task_rq(task) != rq || task 1993 kernel/sched/deadline.c !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) || task 1994 kernel/sched/deadline.c task_running(rq, task) || task 1995 kernel/sched/deadline.c !dl_task(task) || task 1996 kernel/sched/deadline.c !task_on_rq_queued(task))) { task 2009 kernel/sched/deadline.c dl_time_before(task->dl.deadline, task 2081 kernel/sched/deadline.c struct task_struct *task; task 2088 kernel/sched/deadline.c task = pick_next_pushable_dl_task(rq); task 2089 kernel/sched/deadline.c if (task == next_task) { task 2097 kernel/sched/deadline.c if (!task) task 2102 kernel/sched/deadline.c next_task = task; task 1300 kernel/sched/fair.c int maxdist, bool task) task 1339 kernel/sched/fair.c if (task) task 10447 kernel/sched/fair.c static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task) task 10449 kernel/sched/fair.c struct sched_entity *se = &task->se; task 443 kernel/sched/idle.c static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task) task 723 kernel/sched/psi.c static struct psi_group *iterate_groups(struct task_struct *task, void **iter) task 729 kernel/sched/psi.c cgroup = task->cgroups->dfl_cgrp; task 747 kernel/sched/psi.c void psi_task_change(struct task_struct *task, int clear, int set) task 749 kernel/sched/psi.c int cpu = task_cpu(task); task 754 kernel/sched/psi.c if (!task->pid) task 757 kernel/sched/psi.c if (((task->psi_flags & set) || task 758 kernel/sched/psi.c (task->psi_flags & clear) != clear) && task 761 kernel/sched/psi.c task->pid, task->comm, cpu, task 762 kernel/sched/psi.c task->psi_flags, clear, set); task 766 kernel/sched/psi.c task->psi_flags &= ~clear; task 767 kernel/sched/psi.c task->psi_flags |= set; task 776 kernel/sched/psi.c (task->flags & PF_WQ_WORKER) && task 777 kernel/sched/psi.c wq_worker_last_func(task) == psi_avgs_work)) task 780 kernel/sched/psi.c while ((group = iterate_groups(task, &iter))) { task 791 kernel/sched/psi.c void psi_memstall_tick(struct task_struct *task, int cpu) task 796 kernel/sched/psi.c while ((group = iterate_groups(task, &iter))) { task 902 kernel/sched/psi.c void cgroup_move_task(struct task_struct *task, struct css_set *to) task 913 
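
find_lock_later_rq() (deadline.c lines 1964-2009 above) shows the double-runqueue locking discipline: choose a target CPU, take both runqueue locks, then re-validate every assumption (same rq, CPU still allowed, still a deadline task, still queued), because the task may have moved while the locks were juggled. A skeleton of that validate-after-relock loop; cpu_rq() and double_lock_balance() are scheduler-internal helpers, so this only illustrates the shape, and the real code also bounds the retries:

/* Skeleton only: cpu_rq() and double_lock_balance() live in
 * kernel/sched/sched.h and are not available to ordinary code. */
static struct rq *lock_later_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *later_rq;
	int cpu;

	for (;;) {
		cpu = find_later_rq(task);
		if (cpu == -1)
			return NULL;
		later_rq = cpu_rq(cpu);

		double_lock_balance(rq, later_rq);	/* may drop rq->lock */

		/* rq->lock was dropped above: re-check every assumption. */
		if (task_rq(task) == rq &&
		    cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) &&
		    !task_running(rq, task) &&
		    dl_task(task) &&
		    task_on_rq_queued(task))
			return later_rq;		/* both locks held */

		double_unlock_balance(rq, later_rq);
	}
}
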
kernel/sched/psi.c rcu_assign_pointer(task->cgroups, to); task 917 kernel/sched/psi.c rq = task_rq_lock(task, &rf); task 919 kernel/sched/psi.c if (task_on_rq_queued(task)) task 921 kernel/sched/psi.c else if (task->in_iowait) task 924 kernel/sched/psi.c if (task->flags & PF_MEMSTALL) task 928 kernel/sched/psi.c psi_task_change(task, task_flags, 0); task 931 kernel/sched/psi.c rcu_assign_pointer(task->cgroups, to); task 934 kernel/sched/psi.c psi_task_change(task, 0, task_flags); task 936 kernel/sched/psi.c task_rq_unlock(rq, task, &rf); task 1065 kernel/sched/psi.c sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param); task 1387 kernel/sched/rt.c static int find_lowest_rq(struct task_struct *task); task 1635 kernel/sched/rt.c static int find_lowest_rq(struct task_struct *task) task 1640 kernel/sched/rt.c int cpu = task_cpu(task); task 1646 kernel/sched/rt.c if (task->nr_cpus_allowed == 1) task 1649 kernel/sched/rt.c if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask)) task 1711 kernel/sched/rt.c static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) task 1718 kernel/sched/rt.c cpu = find_lowest_rq(task); task 1725 kernel/sched/rt.c if (lowest_rq->rt.highest_prio.curr <= task->prio) { task 1743 kernel/sched/rt.c if (unlikely(task_rq(task) != rq || task 1744 kernel/sched/rt.c !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr) || task 1745 kernel/sched/rt.c task_running(rq, task) || task 1746 kernel/sched/rt.c !rt_task(task) || task 1747 kernel/sched/rt.c !task_on_rq_queued(task))) { task 1756 kernel/sched/rt.c if (lowest_rq->rt.highest_prio.curr > task->prio) task 1825 kernel/sched/rt.c struct task_struct *task; task 1834 kernel/sched/rt.c task = pick_next_pushable_task(rq); task 1835 kernel/sched/rt.c if (task == next_task) { task 1845 kernel/sched/rt.c if (!task) task 1853 kernel/sched/rt.c next_task = task; task 2349 kernel/sched/rt.c static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task) task 2354 kernel/sched/rt.c if (task->policy == SCHED_RR) task 1744 kernel/sched/sched.h void (*task_woken)(struct rq *this_rq, struct task_struct *task); task 1762 kernel/sched/sched.h void (*switched_from)(struct rq *this_rq, struct task_struct *task); task 1763 kernel/sched/sched.h void (*switched_to) (struct rq *this_rq, struct task_struct *task); task 1764 kernel/sched/sched.h void (*prio_changed) (struct rq *this_rq, struct task_struct *task, task 1768 kernel/sched/sched.h struct task_struct *task); task 109 kernel/sched/stop_task.c get_rr_interval_stop(struct rq *rq, struct task_struct *task) task 30 kernel/sched/swait.c wake_up_process(curr->task); task 59 kernel/sched/swait.c wake_up_state(curr->task, TASK_NORMAL); task 74 kernel/sched/swait.c wait->task = current; task 53 kernel/seccomp.c struct task_struct *task; task 146 kernel/seccomp.c struct task_struct *task = current; task 147 kernel/seccomp.c struct pt_regs *regs = task_pt_regs(task); task 150 kernel/seccomp.c sd->nr = syscall_get_nr(task, regs); task 151 kernel/seccomp.c sd->arch = syscall_get_arch(task); task 152 kernel/seccomp.c syscall_get_arguments(task, regs, args); task 159 kernel/seccomp.c sd->instruction_pointer = KSTK_EIP(task); task 294 kernel/seccomp.c void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { } task 296 kernel/seccomp.c static inline void seccomp_assign_mode(struct task_struct *task, task 300 kernel/seccomp.c assert_spin_locked(&task->sighand->siglock); task 302 kernel/seccomp.c task->seccomp.mode = seccomp_mode; task 310
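
seccomp.c lines 146-159 above show where the arch-specific syscall_get_*() accessors from the start of this listing are consumed: filling a struct seccomp_data for the current task from its saved registers. The same flow as a self-contained sketch:

#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/seccomp.h>
#include <asm/syscall.h>

static void fill_syscall_info(struct seccomp_data *sd)
{
	struct task_struct *task = current;
	struct pt_regs *regs = task_pt_regs(task);
	unsigned long args[6];
	int i;

	sd->nr = syscall_get_nr(task, regs);
	sd->arch = syscall_get_arch(task);
	syscall_get_arguments(task, regs, args);
	for (i = 0; i < 6; i++)
		sd->args[i] = args[i];
	sd->instruction_pointer = KSTK_EIP(task);
}
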
kernel/seccomp.c arch_seccomp_spec_mitigate(task); task 311 kernel/seccomp.c set_tsk_thread_flag(task, TIF_SECCOMP); task 748 kernel/seccomp.c n.task = current; task 1050 kernel/seccomp.c unotif.pid = task_pid_vnr(knotif->task); task 1454 kernel/seccomp.c static struct seccomp_filter *get_nth_filter(struct task_struct *task, task 1464 kernel/seccomp.c spin_lock_irq(&task->sighand->siglock); task 1466 kernel/seccomp.c if (task->seccomp.mode != SECCOMP_MODE_FILTER) { task 1467 kernel/seccomp.c spin_unlock_irq(&task->sighand->siglock); task 1471 kernel/seccomp.c orig = task->seccomp.filter; task 1473 kernel/seccomp.c spin_unlock_irq(&task->sighand->siglock); task 1500 kernel/seccomp.c long seccomp_get_filter(struct task_struct *task, unsigned long filter_off, task 1512 kernel/seccomp.c filter = get_nth_filter(task, filter_off); task 1538 kernel/seccomp.c long seccomp_get_metadata(struct task_struct *task, task 1558 kernel/seccomp.c filter = get_nth_filter(task, kmd.filter_off); task 285 kernel/signal.c bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask) task 291 kernel/signal.c if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING))) task 295 kernel/signal.c task->jobctl &= ~JOBCTL_STOP_SIGMASK; task 297 kernel/signal.c task->jobctl |= mask; task 313 kernel/signal.c void task_clear_jobctl_trapping(struct task_struct *task) task 315 kernel/signal.c if (unlikely(task->jobctl & JOBCTL_TRAPPING)) { task 316 kernel/signal.c task->jobctl &= ~JOBCTL_TRAPPING; task 318 kernel/signal.c wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT); task 337 kernel/signal.c void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask) task 344 kernel/signal.c task->jobctl &= ~mask; task 346 kernel/signal.c if (!(task->jobctl & JOBCTL_PENDING_MASK)) task 347 kernel/signal.c task_clear_jobctl_trapping(task); task 366 kernel/signal.c static bool task_participate_group_stop(struct task_struct *task) task 368 kernel/signal.c struct signal_struct *sig = task->signal; task 369 kernel/signal.c bool consume = task->jobctl & JOBCTL_STOP_CONSUME; task 371 kernel/signal.c WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING)); task 373 kernel/signal.c task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING); task 392 kernel/signal.c void task_join_group_stop(struct task_struct *task) task 400 kernel/signal.c if (task_set_jobctl_pending(task, signr | gstop)) { task 1894 kernel/signal.c static void do_notify_pidfd(struct task_struct *task) task 1898 kernel/signal.c WARN_ON(task->exit_state == 0); task 1899 kernel/signal.c pid = task_pid(task); task 295 kernel/stacktrace.c unsigned int stack_trace_save_tsk(struct task_struct *task, task 303 kernel/stacktrace.c .skip = skipnr + !!(current == task), task 306 kernel/stacktrace.c save_stack_trace_tsk(task, &trace); task 1584 kernel/sys.c static int check_prlimit_permission(struct task_struct *task, task 1590 kernel/sys.c if (current == task) task 1593 kernel/sys.c tcred = __task_cred(task); task 28 kernel/task_work.c task_work_add(struct task_struct *task, struct callback_head *work, bool notify) task 33 kernel/task_work.c head = READ_ONCE(task->task_works); task 37 kernel/task_work.c } while (cmpxchg(&task->task_works, head, work) != head); task 40 kernel/task_work.c set_notify_resume(task); task 56 kernel/task_work.c task_work_cancel(struct task_struct *task, task_work_func_t func) task 58 kernel/task_work.c struct callback_head **pprev = &task->task_works; task 62 kernel/task_work.c if (likely(!task->task_works)) task 70 
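
task_work.c lines 28-40 above push work onto task->task_works with a lock-free cmpxchg loop; the bool notify argument in the signature shown asks for TIF-based notification so the callback runs on the task's way back to user space. A hedged usage sketch (my_callback and queue_on_task are invented names):

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/task_work.h>

static void my_callback(struct callback_head *head)
{
	/* Runs in the target task's context, e.g. on return to user space. */
	kfree(head);
}

static int queue_on_task(struct task_struct *task)
{
	struct callback_head *work;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	init_task_work(work, my_callback);
	if (task_work_add(task, work, true)) {	/* true = notify the task */
		kfree(work);			/* task is already exiting */
		return -ESRCH;
	}
	return 0;
}
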
kernel/task_work.c raw_spin_lock_irqsave(&task->pi_lock, flags); task 77 kernel/task_work.c raw_spin_unlock_irqrestore(&task->pi_lock, flags); task 92 kernel/task_work.c struct task_struct *task = current; task 100 kernel/task_work.c raw_spin_lock_irq(&task->pi_lock); task 102 kernel/task_work.c work = READ_ONCE(task->task_works); task 103 kernel/task_work.c head = !work && (task->flags & PF_EXITING) ? task 105 kernel/task_work.c } while (cmpxchg(&task->task_works, work, head) != work); task 106 kernel/task_work.c raw_spin_unlock_irq(&task->pi_lock); task 716 kernel/time/alarmtimer.c struct task_struct *task = (struct task_struct *)alarm->data; task 719 kernel/time/alarmtimer.c if (task) task 720 kernel/time/alarmtimer.c wake_up_process(task); task 1765 kernel/time/hrtimer.c struct task_struct *task = t->task; task 1767 kernel/time/hrtimer.c t->task = NULL; task 1768 kernel/time/hrtimer.c if (task) task 1769 kernel/time/hrtimer.c wake_up_process(task); task 1828 kernel/time/hrtimer.c sl->task = current; task 1873 kernel/time/hrtimer.c if (likely(t->task)) task 1879 kernel/time/hrtimer.c } while (t->task && !signal_pending(current)); task 1883 kernel/time/hrtimer.c if (!t->task) task 2129 kernel/time/hrtimer.c if (likely(t.task)) task 2137 kernel/time/hrtimer.c return !t.task ? 0 : -EINTR; task 38 kernel/time/posix-cpu-timers.c void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new) task 42 kernel/time/posix-cpu-timers.c spin_lock_irq(&task->sighand->siglock); task 43 kernel/time/posix-cpu-timers.c set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL); task 44 kernel/time/posix-cpu-timers.c spin_unlock_irq(&task->sighand->siglock); task 396 kernel/time/posix-cpu-timers.c new_timer->it.cpu.task = p; task 409 kernel/time/posix-cpu-timers.c struct task_struct *p = ctmr->task; task 492 kernel/time/posix-cpu-timers.c struct task_struct *p = ctmr->task; task 567 kernel/time/posix-cpu-timers.c struct task_struct *p = ctmr->task; task 707 kernel/time/posix-cpu-timers.c struct task_struct *p = ctmr->task; task 980 kernel/time/posix-cpu-timers.c struct task_struct *p = ctmr->task; task 1817 kernel/time/timer.c struct task_struct *task; task 1824 kernel/time/timer.c wake_up_process(timeout->task); task 1892 kernel/time/timer.c timer.task = current; task 623 kernel/trace/bpf_trace.c struct task_struct *task; task 634 kernel/trace/bpf_trace.c group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, PIDTYPE_TGID); task 668 kernel/trace/bpf_trace.c work->task = current; task 247 kernel/trace/fgraph.c ftrace_graph_get_ret_stack(struct task_struct *task, int idx) task 249 kernel/trace/fgraph.c idx = task->curr_ret_stack - idx; task 251 kernel/trace/fgraph.c if (idx >= 0 && idx <= task->curr_ret_stack) task 252 kernel/trace/fgraph.c return &task->ret_stack[idx]; task 273 kernel/trace/fgraph.c unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, task 276 kernel/trace/fgraph.c int index = task->curr_ret_stack; task 286 kernel/trace/fgraph.c if (task->ret_stack[i].retp == retp) task 287 kernel/trace/fgraph.c return task->ret_stack[i].ret; task 292 kernel/trace/fgraph.c unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, task 300 kernel/trace/fgraph.c task_idx = task->curr_ret_stack; task 302 kernel/trace/fgraph.c if (!task->ret_stack || task_idx < *idx) task 308 kernel/trace/fgraph.c return task->ret_stack[task_idx].ret; task 6438 kernel/trace/ftrace.c struct task_struct *task) task 6444 kernel/trace/ftrace.c trace_filter_add_remove_task(pid_list, 
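
timer.c lines 1817-1892 above are the heart of schedule_timeout(): stash current in a timer's payload, arm the timer, sleep, and let the handler call wake_up_process(). The same stash-then-wake shape as an on-stack sketch:

#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/timer.h>

struct wake_timer {
	struct timer_list timer;
	struct task_struct *task;
};

static void wake_timer_fn(struct timer_list *t)
{
	struct wake_timer *wt = from_timer(wt, t, timer);

	wake_up_process(wt->task);	/* as in timer.c:1824 */
}

static void sleep_for(unsigned long timeout_jiffies)
{
	struct wake_timer wt = { .task = current };

	timer_setup_on_stack(&wt.timer, wake_timer_fn, 0);
	wt.timer.expires = jiffies + timeout_jiffies;

	/* State must be set before arming, or the wakeup could be lost. */
	set_current_state(TASK_UNINTERRUPTIBLE);
	add_timer(&wt.timer);
	schedule();			/* returns once the timer fires */

	del_timer_sync(&wt.timer);
	destroy_timer_on_stack(&wt.timer);
}
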
self, task); task 6448 kernel/trace/ftrace.c ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task) task 6454 kernel/trace/ftrace.c trace_filter_add_remove_task(pid_list, NULL, task); task 374 kernel/trace/trace.c trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task) task 383 kernel/trace/trace.c return !trace_find_filtered_pid(filtered_pids, task->pid); task 400 kernel/trace/trace.c struct task_struct *task) task 412 kernel/trace/trace.c if (task->pid >= pid_list->pid_max) task 417 kernel/trace/trace.c set_bit(task->pid, pid_list->pids); task 419 kernel/trace/trace.c clear_bit(task->pid, pid_list->pids); task 2263 kernel/trace/trace.c void tracing_record_taskinfo(struct task_struct *task, int flags) task 2274 kernel/trace/trace.c done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task); task 2275 kernel/trace/trace.c done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task); task 2317 kernel/trace/trace.c void tracing_record_cmdline(struct task_struct *task) task 2319 kernel/trace/trace.c tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE); task 2322 kernel/trace/trace.c void tracing_record_tgid(struct task_struct *task) task 2324 kernel/trace/trace.c tracing_record_taskinfo(task, TRACE_RECORD_TGID); task 770 kernel/trace/trace.h struct task_struct *task); task 773 kernel/trace/trace.h struct task_struct *task); task 504 kernel/trace/trace_events.c event_filter_pid_sched_process_exit(void *data, struct task_struct *task) task 510 kernel/trace/trace_events.c trace_filter_add_remove_task(pid_list, NULL, task); task 516 kernel/trace/trace_events.c struct task_struct *task) task 522 kernel/trace/trace_events.c trace_filter_add_remove_task(pid_list, self, task); task 568 kernel/trace/trace_events.c event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task) task 580 kernel/trace/trace_events.c trace_ignore_this_task(pid_list, task)); task 584 kernel/trace/trace_events.c event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task) task 2249 kernel/trace/trace_events_hist.c static inline void save_comm(char *comm, struct task_struct *task) task 2251 kernel/trace/trace_events_hist.c if (!task->pid) { task 2256 kernel/trace/trace_events_hist.c if (WARN_ON_ONCE(task->pid < 0)) { task 2261 kernel/trace/trace_events_hist.c strncpy(comm, task->comm, TASK_COMM_LEN); task 1097 kernel/trace/trace_output.c struct task_struct *task; task 1103 kernel/trace/trace_output.c task = find_task_by_vpid(field->tgid); task 1104 kernel/trace/trace_output.c if (task) task 1105 kernel/trace/trace_output.c mm = get_task_mm(task); task 366 kernel/trace/trace_sched_wakeup.c probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu) task 368 kernel/trace/trace_sched_wakeup.c if (task != wakeup_task) task 65 kernel/trace/trace_syscalls.c trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs) task 70 kernel/trace/trace_syscalls.c return syscall_get_nr(task, regs); task 74 kernel/trace/trace_syscalls.c trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs) task 76 kernel/trace/trace_syscalls.c return syscall_get_nr(task, regs); task 1240 kernel/user_namespace.c static struct ns_common *userns_get(struct task_struct *task) task 1245 kernel/user_namespace.c user_ns = get_user_ns(__task_cred(task)->user_ns); task 122 kernel/utsname.c static struct ns_common *utsns_get(struct task_struct *task) task 127 kernel/utsname.c task_lock(task); task 128 kernel/utsname.c nsproxy = 
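
trace.c lines 400-419 above maintain the ftrace pid filter as a flat bitmap indexed by pid and bounded by pid_max: set_bit() to start tracing a task, clear_bit() to drop it. Reduced to a sketch with invented wrapper names:

#include <linux/bitops.h>
#include <linux/types.h>

struct pid_filter {
	unsigned long *pids;		/* bitmap, one bit per pid */
	unsigned int pid_max;
};

static void pid_filter_update(struct pid_filter *f, pid_t pid, bool add)
{
	if (pid < 0 || pid >= f->pid_max)
		return;
	if (add)
		set_bit(pid, f->pids);
	else
		clear_bit(pid, f->pids);
}

static bool pid_filter_wants(struct pid_filter *f, pid_t pid)
{
	return pid >= 0 && pid < f->pid_max && test_bit(pid, f->pids);
}
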
task->nsproxy; task 133 kernel/utsname.c task_unlock(task); task 842 kernel/workqueue.c wake_up_process(worker->task); task 851 kernel/workqueue.c void wq_worker_running(struct task_struct *task) task 853 kernel/workqueue.c struct worker *worker = kthread_data(task); task 869 kernel/workqueue.c void wq_worker_sleeping(struct task_struct *task) task 871 kernel/workqueue.c struct worker *next, *worker = kthread_data(task); task 905 kernel/workqueue.c wake_up_process(next->task); task 934 kernel/workqueue.c work_func_t wq_worker_last_func(struct task_struct *task) task 936 kernel/workqueue.c struct worker *worker = kthread_data(task); task 955 kernel/workqueue.c WARN_ON_ONCE(worker->task != current); task 981 kernel/workqueue.c WARN_ON_ONCE(worker->task != current); task 1852 kernel/workqueue.c set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask); task 1932 kernel/workqueue.c worker->task = kthread_create_on_node(worker_thread, worker, pool->node, task 1934 kernel/workqueue.c if (IS_ERR(worker->task)) task 1937 kernel/workqueue.c set_user_nice(worker->task, pool->attrs->nice); task 1938 kernel/workqueue.c kthread_bind_mask(worker->task, pool->attrs->cpumask); task 1947 kernel/workqueue.c wake_up_process(worker->task); task 1986 kernel/workqueue.c wake_up_process(worker->task); task 2033 kernel/workqueue.c wake_up_process(wq->rescuer->task); task 2376 kernel/workqueue.c set_task_comm(worker->task, "kworker/dying"); task 2620 kernel/workqueue.c struct task_struct *task; /* purely informational */ task 2671 kernel/workqueue.c barr->task = current; task 4214 kernel/workqueue.c rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", wq->name); task 4215 kernel/workqueue.c ret = PTR_ERR_OR_ZERO(rescuer->task); task 4222 kernel/workqueue.c kthread_bind_mask(rescuer->task, cpu_possible_mask); task 4223 kernel/workqueue.c wake_up_process(rescuer->task); task 4356 kernel/workqueue.c kthread_stop(rescuer->task); task 4596 kernel/workqueue.c void print_worker_info(const char *log_lvl, struct task_struct *task) task 4605 kernel/workqueue.c if (!(task->flags & PF_WQ_WORKER)) task 4612 kernel/workqueue.c worker = kthread_probe_data(task); task 4648 kernel/workqueue.c task_pid_nr(barr->task)); task 4684 kernel/workqueue.c task_pid_nr(worker->task), task 4787 kernel/workqueue.c task_pid_nr(pool->manager->task)); task 4790 kernel/workqueue.c task_pid_nr(worker->task)); task 4808 kernel/workqueue.c void wq_worker_comm(char *buf, size_t size, struct task_struct *task) task 4813 kernel/workqueue.c off = strscpy(buf, task->comm, size); task 4820 kernel/workqueue.c if (task->flags & PF_WQ_WORKER) { task 4821 kernel/workqueue.c struct worker *worker = kthread_data(task); task 4936 kernel/workqueue.c WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, task 4955 kernel/workqueue.c wake_up_process(worker->task); task 5006 kernel/workqueue.c WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0); task 38 kernel/workqueue_internal.h struct task_struct *task; /* I: worker task */ task 76 kernel/workqueue_internal.h void wq_worker_running(struct task_struct *task); task 77 kernel/workqueue_internal.h void wq_worker_sleeping(struct task_struct *task); task 78 kernel/workqueue_internal.h work_func_t wq_worker_last_func(struct task_struct *task); task 58 lib/fault-inject.c static bool fail_task(struct fault_attr *attr, struct task_struct *task) task 60 lib/fault-inject.c return in_task() && task->make_it_fail; task 17 lib/is_single_threaded.c struct task_struct *task = current; task 18 lib/is_single_threaded.c 
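
workqueue.c lines 1932-1947 (create_worker) and 4214-4223 (the rescuer) above both follow the standard kthread bring-up: create the thread in a non-running state, configure it (nice level, cpumask) while it cannot yet execute, then start it with wake_up_process(); kthread_stop() at line 4356 tears it down. A minimal lifecycle sketch:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int my_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* ... one unit of work ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}

static struct task_struct *start_worker(long nice)
{
	struct task_struct *task;

	task = kthread_create(my_thread_fn, NULL, "my_worker");
	if (IS_ERR(task))
		return task;

	set_user_nice(task, nice);		/* configure before first run */
	kthread_bind_mask(task, cpu_possible_mask);
	wake_up_process(task);			/* now it starts executing */
	return task;
}

/* Shutdown: kthread_stop(task) waits for my_thread_fn to return. */
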
struct mm_struct *mm = task->mm; task 22 lib/is_single_threaded.c if (atomic_read(&task->signal->live) != 1) task 33 lib/is_single_threaded.c if (unlikely(p == task->group_leader)) task 597 lib/string_helpers.c char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp) task 606 lib/string_helpers.c res = get_cmdline(task, buffer, PAGE_SIZE - 1); task 42 lib/test_firmware.c struct task_struct *task; task 646 lib/test_firmware.c req->task = NULL; task 685 lib/test_firmware.c req->task = kthread_run(test_fw_run_batch_request, req, task 687 lib/test_firmware.c if (!req->task || IS_ERR(req->task)) { task 689 lib/test_firmware.c req->task = NULL; task 708 lib/test_firmware.c if (req->task || req->sent) task 75 lib/test_rhashtable.c struct task_struct *task; task 784 lib/test_rhashtable.c tdata[i].task = kthread_run(threadfunc, &tdata[i], task 786 lib/test_rhashtable.c if (IS_ERR(tdata[i].task)) { task 799 lib/test_rhashtable.c if (IS_ERR(tdata[i].task)) task 801 lib/test_rhashtable.c if ((err = kthread_stop(tdata[i].task))) { task 359 lib/test_vmalloc.c struct task_struct *task; task 485 lib/test_vmalloc.c t->task = kthread_run(test_func, t, "vmalloc_test/%d", cpu); task 487 lib/test_vmalloc.c if (!IS_ERR(t->task)) task 513 lib/test_vmalloc.c if (!IS_ERR(t->task)) task 514 lib/test_vmalloc.c kthread_stop(t->task); task 43 mm/hugetlb_cgroup.c struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task) task 45 mm/hugetlb_cgroup.c return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id)); task 172 mm/kasan/common.c static void __kasan_unpoison_stack(struct task_struct *task, const void *sp) task 174 mm/kasan/common.c void *base = task_stack_page(task); task 181 mm/kasan/common.c void kasan_unpoison_task_stack(struct task_struct *task) task 183 mm/kasan/common.c __kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE); task 1224 mm/memcontrol.c struct task_struct *task; task 1227 mm/memcontrol.c while (!ret && (task = css_task_iter_next(&it))) task 1228 mm/memcontrol.c ret = fn(task, arg); task 161 mm/memory.c struct task_struct *task = current; task 163 mm/memory.c if (likely(task->mm == mm)) task 164 mm/memory.c task->rss_stat.count[member] += val; task 173 mm/memory.c static void check_sync_rss_stat(struct task_struct *task) task 175 mm/memory.c if (unlikely(task != current)) task 177 mm/memory.c if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH)) task 178 mm/memory.c sync_mm_rss(task->mm); task 185 mm/memory.c static void check_sync_rss_stat(struct task_struct *task) task 1464 mm/mempolicy.c struct task_struct *task; task 1487 mm/mempolicy.c task = pid ? find_task_by_vpid(pid) : current; task 1488 mm/mempolicy.c if (!task) { task 1493 mm/mempolicy.c get_task_struct(task); task 1501 mm/mempolicy.c if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { task 1508 mm/mempolicy.c task_nodes = cpuset_mems_allowed(task); task 1520 mm/mempolicy.c err = security_task_movememory(task); task 1524 mm/mempolicy.c mm = get_task_mm(task); task 1525 mm/mempolicy.c put_task_struct(task); task 1542 mm/mempolicy.c put_task_struct(task); task 2457 mm/mempolicy.c void mpol_put_task_policy(struct task_struct *task) task 2461 mm/mempolicy.c task_lock(task); task 2462 mm/mempolicy.c pol = task->mempolicy; task 2463 mm/mempolicy.c task->mempolicy = NULL; task 2464 mm/mempolicy.c task_unlock(task); task 1791 mm/migrate.c struct task_struct *task; task 1805 mm/migrate.c task = pid ? 
find_task_by_vpid(pid) : current; task 1806 mm/migrate.c if (!task) { task 1810 mm/migrate.c get_task_struct(task); task 1816 mm/migrate.c if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { task 1823 mm/migrate.c err = security_task_movememory(task); task 1827 mm/migrate.c task_nodes = cpuset_mems_allowed(task); task 1828 mm/migrate.c mm = get_task_mm(task); task 1829 mm/migrate.c put_task_struct(task); task 1844 mm/migrate.c put_task_struct(task); task 309 mm/oom_kill.c static int oom_evaluate_task(struct task_struct *task, void *arg) task 314 mm/oom_kill.c if (oom_unkillable_task(task)) task 318 mm/oom_kill.c if (!is_memcg_oom(oc) && !oom_cpuset_eligible(task, oc)) task 327 mm/oom_kill.c if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) { task 328 mm/oom_kill.c if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags)) task 337 mm/oom_kill.c if (oom_task_origin(task)) { task 342 mm/oom_kill.c points = oom_badness(task, oc->totalpages); task 349 mm/oom_kill.c get_task_struct(task); task 350 mm/oom_kill.c oc->chosen = task; task 383 mm/oom_kill.c struct task_struct *task; task 392 mm/oom_kill.c task = find_lock_task_mm(p); task 393 mm/oom_kill.c if (!task) { task 403 mm/oom_kill.c task->pid, from_kuid(&init_user_ns, task_uid(task)), task 404 mm/oom_kill.c task->tgid, task->mm->total_vm, get_mm_rss(task->mm), task 405 mm/oom_kill.c mm_pgtables_bytes(task->mm), task 406 mm/oom_kill.c get_mm_counter(task->mm, MM_SWAPENTS), task 407 mm/oom_kill.c task->signal->oom_score_adj, task->comm); task 408 mm/oom_kill.c task_unlock(task); task 781 mm/oom_kill.c static inline bool __task_will_free_mem(struct task_struct *task) task 783 mm/oom_kill.c struct signal_struct *sig = task->signal; task 796 mm/oom_kill.c if (thread_group_empty(task) && (task->flags & PF_EXITING)) task 809 mm/oom_kill.c static bool task_will_free_mem(struct task_struct *task) task 811 mm/oom_kill.c struct mm_struct *mm = task->mm; task 823 mm/oom_kill.c if (!__task_will_free_mem(task)) task 845 mm/oom_kill.c if (same_thread_group(task, p)) task 941 mm/oom_kill.c static int oom_kill_memcg_member(struct task_struct *task, void *message) task 943 mm/oom_kill.c if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN && task 944 mm/oom_kill.c !is_global_init(task)) { task 945 mm/oom_kill.c get_task_struct(task); task 946 mm/oom_kill.c __oom_kill_process(task, message); task 79 mm/process_vm_access.c struct task_struct *task, task 109 mm/process_vm_access.c pages = get_user_pages_remote(task, mm, pa, pages, flags, task 156 mm/process_vm_access.c struct task_struct *task; task 197 mm/process_vm_access.c task = find_get_task_by_vpid(pid); task 198 mm/process_vm_access.c if (!task) { task 203 mm/process_vm_access.c mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS); task 218 mm/process_vm_access.c iter, process_pages, mm, task, vm_write); task 232 mm/process_vm_access.c put_task_struct(task); task 425 mm/util.c struct task_struct *task, bool bypass_rlim) task 435 mm/util.c limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT; task 446 mm/util.c pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid, task 448 mm/util.c locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK), task 861 mm/util.c int get_cmdline(struct task_struct *task, char *buffer, int buflen) task 865 mm/util.c struct mm_struct *mm = get_task_mm(task); task 884 mm/util.c res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE); task 898 mm/util.c res += access_process_vm(task, env_start, task 174 mm/vmscan.c static void 
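
mempolicy.c (lines 1487-1525), migrate.c (1805-1829) and process_vm_access.c (197-203) above all repeat one sequence: look up the task, pass a ptrace access check, then pin the target's address space before touching it. The common shape, sketched (mm_access() in process_vm_access.c folds the check and the mm pin into one call):

#include <linux/err.h>
#include <linux/ptrace.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

static struct mm_struct *pin_target_mm(pid_t pid)
{
	struct task_struct *task;
	struct mm_struct *mm;

	task = find_get_task_by_vpid(pid);	/* as in process_vm_access.c:197 */
	if (!task)
		return ERR_PTR(-ESRCH);

	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
		put_task_struct(task);
		return ERR_PTR(-EPERM);
	}

	mm = get_task_mm(task);		/* NULL for kernel threads */
	put_task_struct(task);
	return mm ? mm : ERR_PTR(-EINVAL);	/* caller ends with mmput() */
}
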
set_task_reclaim_state(struct task_struct *task, task 178 mm/vmscan.c WARN_ON_ONCE(rs && task->reclaim_state); task 181 mm/vmscan.c WARN_ON_ONCE(!rs && !task->reclaim_state); task 183 mm/vmscan.c task->reclaim_state = rs; task 152 net/bluetooth/bnep/bnep.h struct task_struct *task; task 630 net/bluetooth/bnep/core.c s->task = kthread_run(bnep_session, s, "kbnepd %s", dev->name); task 631 net/bluetooth/bnep/core.c if (IS_ERR(s->task)) { task 636 net/bluetooth/bnep/core.c err = PTR_ERR(s->task); task 376 net/bluetooth/cmtp/capi.c wake_up_process(session->task); task 85 net/bluetooth/cmtp/cmtp.h struct task_struct *task; task 384 net/bluetooth/cmtp/core.c session->task = kthread_run(cmtp_session, session, "kcmtpd_ctr_%d", task 386 net/bluetooth/cmtp/core.c if (IS_ERR(session->task)) { task 388 net/bluetooth/cmtp/core.c err = PTR_ERR(session->task); task 1060 net/bluetooth/hidp/core.c session->task = kthread_run(hidp_session_thread, session, task 1062 net/bluetooth/hidp/core.c if (IS_ERR(session->task)) task 1063 net/bluetooth/hidp/core.c return PTR_ERR(session->task); task 145 net/bluetooth/hidp/hidp.h struct task_struct *task; task 1334 net/core/net_namespace.c static struct ns_common *netns_get(struct task_struct *task) task 1339 net/core/net_namespace.c task_lock(task); task 1340 net/core/net_namespace.c nsproxy = task->nsproxy; task 1343 net/core/net_namespace.c task_unlock(task); task 2177 net/core/pktgen.c if (likely(t.task)) task 2181 net/core/pktgen.c } while (t.task && pkt_dev->running && !signal_pending(current)); task 198 net/netfilter/ipvs/ip_vs_sync.c struct task_struct *task; task 381 net/netfilter/ipvs/ip_vs_sync.c wake_up_process(ipvs->master_tinfo[id].task); task 1646 net/netfilter/ipvs/ip_vs_sync.c wake_up_process(ipvs->master_tinfo[id].task); task 1753 net/netfilter/ipvs/ip_vs_sync.c struct task_struct *task; task 1878 net/netfilter/ipvs/ip_vs_sync.c task = kthread_run(threadfn, tinfo, name, ipvs->gen, id); task 1879 net/netfilter/ipvs/ip_vs_sync.c if (IS_ERR(task)) { task 1880 net/netfilter/ipvs/ip_vs_sync.c result = PTR_ERR(task); task 1883 net/netfilter/ipvs/ip_vs_sync.c tinfo->task = task; task 1909 net/netfilter/ipvs/ip_vs_sync.c if (tinfo->task) task 1910 net/netfilter/ipvs/ip_vs_sync.c kthread_stop(tinfo->task); task 1977 net/netfilter/ipvs/ip_vs_sync.c task_pid_nr(tinfo->task)); task 1979 net/netfilter/ipvs/ip_vs_sync.c ret = kthread_stop(tinfo->task); task 1999 net/netfilter/ipvs/ip_vs_sync.c task_pid_nr(tinfo->task)); task 2000 net/netfilter/ipvs/ip_vs_sync.c ret = kthread_stop(tinfo->task); task 68 net/sched/sch_atm.c struct tasklet_struct task; /* dequeue tasklet */ task 178 net/sched/sch_atm.c tasklet_schedule(&p->task); task 458 net/sched/sch_atm.c tasklet_schedule(&p->task); task 525 net/sched/sch_atm.c tasklet_schedule(&p->task); task 566 net/sched/sch_atm.c tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch); task 597 net/sched/sch_atm.c tasklet_kill(&p->task); task 659 net/sunrpc/auth.c rpcauth_bind_root_cred(struct rpc_task *task, int lookupflags) task 661 net/sunrpc/auth.c struct rpc_auth *auth = task->tk_client->cl_auth; task 673 net/sunrpc/auth.c rpcauth_bind_machine_cred(struct rpc_task *task, int lookupflags) task 675 net/sunrpc/auth.c struct rpc_auth *auth = task->tk_client->cl_auth; task 677 net/sunrpc/auth.c .principal = task->tk_client->cl_principal, task 687 net/sunrpc/auth.c rpcauth_bind_new_cred(struct rpc_task *task, int lookupflags) task 689 net/sunrpc/auth.c struct rpc_auth *auth = task->tk_client->cl_auth; task 695 
net/sunrpc/auth.c rpcauth_bindcred(struct rpc_task *task, const struct cred *cred, int flags) task 697 net/sunrpc/auth.c struct rpc_rqst *req = task->tk_rqstp; task 700 net/sunrpc/auth.c struct rpc_auth *auth = task->tk_client->cl_auth; task 707 net/sunrpc/auth.c if (task->tk_op_cred) task 709 net/sunrpc/auth.c new = get_rpccred(task->tk_op_cred); task 713 net/sunrpc/auth.c new = rpcauth_bind_machine_cred(task, lookupflags); task 719 net/sunrpc/auth.c new = rpcauth_bind_root_cred(task, lookupflags); task 723 net/sunrpc/auth.c new = rpcauth_bind_new_cred(task, lookupflags); task 772 net/sunrpc/auth.c int rpcauth_marshcred(struct rpc_task *task, struct xdr_stream *xdr) task 774 net/sunrpc/auth.c const struct rpc_credops *ops = task->tk_rqstp->rq_cred->cr_ops; task 776 net/sunrpc/auth.c return ops->crmarshal(task, xdr); task 787 net/sunrpc/auth.c int rpcauth_wrap_req_encode(struct rpc_task *task, struct xdr_stream *xdr) task 789 net/sunrpc/auth.c kxdreproc_t encode = task->tk_msg.rpc_proc->p_encode; task 791 net/sunrpc/auth.c encode(task->tk_rqstp, xdr, task->tk_msg.rpc_argp); task 805 net/sunrpc/auth.c int rpcauth_wrap_req(struct rpc_task *task, struct xdr_stream *xdr) task 807 net/sunrpc/auth.c const struct rpc_credops *ops = task->tk_rqstp->rq_cred->cr_ops; task 809 net/sunrpc/auth.c return ops->crwrap_req(task, xdr); task 822 net/sunrpc/auth.c rpcauth_checkverf(struct rpc_task *task, struct xdr_stream *xdr) task 824 net/sunrpc/auth.c const struct rpc_credops *ops = task->tk_rqstp->rq_cred->cr_ops; task 826 net/sunrpc/auth.c return ops->crvalidate(task, xdr); task 837 net/sunrpc/auth.c rpcauth_unwrap_resp_decode(struct rpc_task *task, struct xdr_stream *xdr) task 839 net/sunrpc/auth.c kxdrdproc_t decode = task->tk_msg.rpc_proc->p_decode; task 841 net/sunrpc/auth.c return decode(task->tk_rqstp, xdr, task->tk_msg.rpc_resp); task 853 net/sunrpc/auth.c rpcauth_unwrap_resp(struct rpc_task *task, struct xdr_stream *xdr) task 855 net/sunrpc/auth.c const struct rpc_credops *ops = task->tk_rqstp->rq_cred->cr_ops; task 857 net/sunrpc/auth.c return ops->crunwrap_resp(task, xdr); task 861 net/sunrpc/auth.c rpcauth_xmit_need_reencode(struct rpc_task *task) task 863 net/sunrpc/auth.c struct rpc_cred *cred = task->tk_rqstp->rq_cred; task 867 net/sunrpc/auth.c return cred->cr_ops->crneed_reencode(task); task 871 net/sunrpc/auth.c rpcauth_refreshcred(struct rpc_task *task) task 876 net/sunrpc/auth.c cred = task->tk_rqstp->rq_cred; task 878 net/sunrpc/auth.c err = rpcauth_bindcred(task, task->tk_msg.rpc_cred, task->tk_flags); task 881 net/sunrpc/auth.c cred = task->tk_rqstp->rq_cred; task 884 net/sunrpc/auth.c err = cred->cr_ops->crrefresh(task); task 887 net/sunrpc/auth.c task->tk_status = err; task 892 net/sunrpc/auth.c rpcauth_invalcred(struct rpc_task *task) task 894 net/sunrpc/auth.c struct rpc_cred *cred = task->tk_rqstp->rq_cred; task 901 net/sunrpc/auth.c rpcauth_uptodatecred(struct rpc_task *task) task 903 net/sunrpc/auth.c struct rpc_cred *cred = task->tk_rqstp->rq_cred; task 402 net/sunrpc/auth_gss/auth_gss.c gss_upcall_callback(struct rpc_task *task) task 404 net/sunrpc/auth_gss/auth_gss.c struct gss_cred *gss_cred = container_of(task->tk_rqstp->rq_cred, task 412 net/sunrpc/auth_gss/auth_gss.c task->tk_status = gss_msg->msg.errno; task 605 net/sunrpc/auth_gss/auth_gss.c gss_refresh_upcall(struct rpc_task *task) task 607 net/sunrpc/auth_gss/auth_gss.c struct rpc_cred *cred = task->tk_rqstp->rq_cred; task 622 net/sunrpc/auth_gss/auth_gss.c task, NULL, jiffies + (15 * HZ)); task 633 
net/sunrpc/auth_gss/auth_gss.c rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL); task 638 net/sunrpc/auth_gss/auth_gss.c rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback); task 1281 net/sunrpc/auth_gss/auth_gss.c struct rpc_task *task; task 1287 net/sunrpc/auth_gss/auth_gss.c task = rpc_call_null(gss_auth->client, &new->gc_base, task 1289 net/sunrpc/auth_gss/auth_gss.c if (!IS_ERR(task)) task 1290 net/sunrpc/auth_gss/auth_gss.c rpc_put_task(task); task 1526 net/sunrpc/auth_gss/auth_gss.c static int gss_marshal(struct rpc_task *task, struct xdr_stream *xdr) task 1528 net/sunrpc/auth_gss/auth_gss.c struct rpc_rqst *req = task->tk_rqstp; task 1554 net/sunrpc/auth_gss/auth_gss.c trace_rpcgss_seqno(task); task 1595 net/sunrpc/auth_gss/auth_gss.c trace_rpcgss_get_mic(task, maj_stat); task 1600 net/sunrpc/auth_gss/auth_gss.c static int gss_renew_cred(struct rpc_task *task) task 1602 net/sunrpc/auth_gss/auth_gss.c struct rpc_cred *oldcred = task->tk_rqstp->rq_cred; task 1616 net/sunrpc/auth_gss/auth_gss.c task->tk_rqstp->rq_cred = new; task 1642 net/sunrpc/auth_gss/auth_gss.c gss_refresh(struct rpc_task *task) task 1644 net/sunrpc/auth_gss/auth_gss.c struct rpc_cred *cred = task->tk_rqstp->rq_cred; task 1652 net/sunrpc/auth_gss/auth_gss.c ret = gss_renew_cred(task); task 1655 net/sunrpc/auth_gss/auth_gss.c cred = task->tk_rqstp->rq_cred; task 1659 net/sunrpc/auth_gss/auth_gss.c ret = gss_refresh_upcall(task); task 1666 net/sunrpc/auth_gss/auth_gss.c gss_refresh_null(struct rpc_task *task) task 1672 net/sunrpc/auth_gss/auth_gss.c gss_validate(struct rpc_task *task, struct xdr_stream *xdr) task 1674 net/sunrpc/auth_gss/auth_gss.c struct rpc_cred *cred = task->tk_rqstp->rq_cred; task 1698 net/sunrpc/auth_gss/auth_gss.c *seq = cpu_to_be32(task->tk_rqstp->rq_seqno); task 1723 net/sunrpc/auth_gss/auth_gss.c trace_rpcgss_verify_mic(task, maj_stat); task 1729 net/sunrpc/auth_gss/auth_gss.c struct rpc_task *task, struct xdr_stream *xdr) task 1731 net/sunrpc/auth_gss/auth_gss.c struct rpc_rqst *rqstp = task->tk_rqstp; task 1743 net/sunrpc/auth_gss/auth_gss.c if (rpcauth_wrap_req_encode(task, xdr)) task 1768 net/sunrpc/auth_gss/auth_gss.c trace_rpcgss_get_mic(task, maj_stat); task 1821 net/sunrpc/auth_gss/auth_gss.c struct rpc_task *task, struct xdr_stream *xdr) task 1823 net/sunrpc/auth_gss/auth_gss.c struct rpc_rqst *rqstp = task->tk_rqstp; task 1839 net/sunrpc/auth_gss/auth_gss.c if (rpcauth_wrap_req_encode(task, xdr)) task 1890 net/sunrpc/auth_gss/auth_gss.c trace_rpcgss_wrap(task, maj_stat); task 1894 net/sunrpc/auth_gss/auth_gss.c static int gss_wrap_req(struct rpc_task *task, struct xdr_stream *xdr) task 1896 net/sunrpc/auth_gss/auth_gss.c struct rpc_cred *cred = task->tk_rqstp->rq_cred; task 1907 net/sunrpc/auth_gss/auth_gss.c status = rpcauth_wrap_req_encode(task, xdr); task 1912 net/sunrpc/auth_gss/auth_gss.c status = rpcauth_wrap_req_encode(task, xdr); task 1915 net/sunrpc/auth_gss/auth_gss.c status = gss_wrap_req_integ(cred, ctx, task, xdr); task 1918 net/sunrpc/auth_gss/auth_gss.c status = gss_wrap_req_priv(cred, ctx, task, xdr); task 1952 net/sunrpc/auth_gss/auth_gss.c gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred, task 2015 net/sunrpc/auth_gss/auth_gss.c trace_rpcgss_unwrap_failed(task); task 2018 net/sunrpc/auth_gss/auth_gss.c trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, seqno); task 2021 net/sunrpc/auth_gss/auth_gss.c trace_rpcgss_verify_mic(task, maj_stat); task 2026 net/sunrpc/auth_gss/auth_gss.c gss_unwrap_resp_priv(struct rpc_task *task, 
struct rpc_cred *cred, task 2064 net/sunrpc/auth_gss/auth_gss.c trace_rpcgss_unwrap_failed(task); task 2067 net/sunrpc/auth_gss/auth_gss.c trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, be32_to_cpup(--p)); task 2070 net/sunrpc/auth_gss/auth_gss.c trace_rpcgss_unwrap(task, maj_stat); task 2081 net/sunrpc/auth_gss/auth_gss.c gss_xmit_need_reencode(struct rpc_task *task) task 2083 net/sunrpc/auth_gss/auth_gss.c struct rpc_rqst *req = task->tk_rqstp; task 2113 net/sunrpc/auth_gss/auth_gss.c trace_rpcgss_need_reencode(task, seq_xmit, ret); task 2118 net/sunrpc/auth_gss/auth_gss.c gss_unwrap_resp(struct rpc_task *task, struct xdr_stream *xdr) task 2120 net/sunrpc/auth_gss/auth_gss.c struct rpc_rqst *rqstp = task->tk_rqstp; task 2134 net/sunrpc/auth_gss/auth_gss.c status = gss_unwrap_resp_integ(task, cred, ctx, rqstp, xdr); task 2137 net/sunrpc/auth_gss/auth_gss.c status = gss_unwrap_resp_priv(task, cred, ctx, rqstp, xdr); task 2144 net/sunrpc/auth_gss/auth_gss.c status = rpcauth_unwrap_resp_decode(task, xdr); task 63 net/sunrpc/auth_null.c nul_marshal(struct rpc_task *task, struct xdr_stream *xdr) task 83 net/sunrpc/auth_null.c nul_refresh(struct rpc_task *task) task 85 net/sunrpc/auth_null.c set_bit(RPCAUTH_CRED_UPTODATE, &task->tk_rqstp->rq_cred->cr_flags); task 90 net/sunrpc/auth_null.c nul_validate(struct rpc_task *task, struct xdr_stream *xdr) task 103 net/sunrpc/auth_unix.c unx_marshal(struct rpc_task *task, struct xdr_stream *xdr) task 105 net/sunrpc/auth_unix.c struct rpc_clnt *clnt = task->tk_client; task 106 net/sunrpc/auth_unix.c struct rpc_cred *cred = task->tk_rqstp->rq_cred; task 158 net/sunrpc/auth_unix.c unx_refresh(struct rpc_task *task) task 160 net/sunrpc/auth_unix.c set_bit(RPCAUTH_CRED_UPTODATE, &task->tk_rqstp->rq_cred->cr_flags); task 165 net/sunrpc/auth_unix.c unx_validate(struct rpc_task *task, struct xdr_stream *xdr) task 167 net/sunrpc/auth_unix.c struct rpc_auth *auth = task->tk_rqstp->rq_cred->cr_auth; task 61 net/sunrpc/clnt.c static void call_start(struct rpc_task *task); task 62 net/sunrpc/clnt.c static void call_reserve(struct rpc_task *task); task 63 net/sunrpc/clnt.c static void call_reserveresult(struct rpc_task *task); task 64 net/sunrpc/clnt.c static void call_allocate(struct rpc_task *task); task 65 net/sunrpc/clnt.c static void call_encode(struct rpc_task *task); task 66 net/sunrpc/clnt.c static void call_decode(struct rpc_task *task); task 67 net/sunrpc/clnt.c static void call_bind(struct rpc_task *task); task 68 net/sunrpc/clnt.c static void call_bind_status(struct rpc_task *task); task 69 net/sunrpc/clnt.c static void call_transmit(struct rpc_task *task); task 70 net/sunrpc/clnt.c static void call_status(struct rpc_task *task); task 71 net/sunrpc/clnt.c static void call_transmit_status(struct rpc_task *task); task 72 net/sunrpc/clnt.c static void call_refresh(struct rpc_task *task); task 73 net/sunrpc/clnt.c static void call_refreshresult(struct rpc_task *task); task 74 net/sunrpc/clnt.c static void call_connect(struct rpc_task *task); task 75 net/sunrpc/clnt.c static void call_connect_status(struct rpc_task *task); task 77 net/sunrpc/clnt.c static int rpc_encode_header(struct rpc_task *task, task 79 net/sunrpc/clnt.c static int rpc_decode_header(struct rpc_task *task, task 82 net/sunrpc/clnt.c static void rpc_check_timeout(struct rpc_task *task); task 1011 net/sunrpc/clnt.c void rpc_task_release_transport(struct rpc_task *task) task 1013 net/sunrpc/clnt.c struct rpc_xprt *xprt = task->tk_xprt; task 1016 net/sunrpc/clnt.c task->tk_xprt = NULL; task 1017 
net/sunrpc/clnt.c if (task->tk_client) task 1018 net/sunrpc/clnt.c rpc_task_release_xprt(task->tk_client, xprt); task 1025 net/sunrpc/clnt.c void rpc_task_release_client(struct rpc_task *task) task 1027 net/sunrpc/clnt.c struct rpc_clnt *clnt = task->tk_client; task 1029 net/sunrpc/clnt.c rpc_task_release_transport(task); task 1033 net/sunrpc/clnt.c list_del(&task->tk_task); task 1035 net/sunrpc/clnt.c task->tk_client = NULL; task 1059 net/sunrpc/clnt.c void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt) task 1061 net/sunrpc/clnt.c if (task->tk_xprt) task 1063 net/sunrpc/clnt.c if (task->tk_flags & RPC_TASK_NO_ROUND_ROBIN) task 1064 net/sunrpc/clnt.c task->tk_xprt = rpc_task_get_first_xprt(clnt); task 1066 net/sunrpc/clnt.c task->tk_xprt = rpc_task_get_next_xprt(clnt); task 1070 net/sunrpc/clnt.c void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt) task 1074 net/sunrpc/clnt.c rpc_task_set_transport(task, clnt); task 1075 net/sunrpc/clnt.c task->tk_client = clnt; task 1078 net/sunrpc/clnt.c task->tk_flags |= RPC_TASK_SOFT; task 1080 net/sunrpc/clnt.c task->tk_flags |= RPC_TASK_TIMEOUT; task 1082 net/sunrpc/clnt.c task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT; task 1084 net/sunrpc/clnt.c task->tk_flags |= RPC_TASK_SWAPPER; task 1087 net/sunrpc/clnt.c list_add_tail(&task->tk_task, &clnt->cl_tasks); task 1093 net/sunrpc/clnt.c rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg) task 1096 net/sunrpc/clnt.c task->tk_msg.rpc_proc = msg->rpc_proc; task 1097 net/sunrpc/clnt.c task->tk_msg.rpc_argp = msg->rpc_argp; task 1098 net/sunrpc/clnt.c task->tk_msg.rpc_resp = msg->rpc_resp; task 1100 net/sunrpc/clnt.c task->tk_msg.rpc_cred = get_cred(msg->rpc_cred); task 1108 net/sunrpc/clnt.c rpc_default_callback(struct rpc_task *task, void *data) task 1122 net/sunrpc/clnt.c struct rpc_task *task; task 1124 net/sunrpc/clnt.c task = rpc_new_task(task_setup_data); task 1126 net/sunrpc/clnt.c rpc_task_set_client(task, task_setup_data->rpc_client); task 1127 net/sunrpc/clnt.c rpc_task_set_rpc_message(task, task_setup_data->rpc_message); task 1129 net/sunrpc/clnt.c if (task->tk_action == NULL) task 1130 net/sunrpc/clnt.c rpc_call_start(task); task 1132 net/sunrpc/clnt.c atomic_inc(&task->tk_count); task 1133 net/sunrpc/clnt.c rpc_execute(task); task 1134 net/sunrpc/clnt.c return task; task 1146 net/sunrpc/clnt.c struct rpc_task *task; task 1162 net/sunrpc/clnt.c task = rpc_run_task(&task_setup_data); task 1163 net/sunrpc/clnt.c if (IS_ERR(task)) task 1164 net/sunrpc/clnt.c return PTR_ERR(task); task 1165 net/sunrpc/clnt.c status = task->tk_status; task 1166 net/sunrpc/clnt.c rpc_put_task(task); task 1183 net/sunrpc/clnt.c struct rpc_task *task; task 1192 net/sunrpc/clnt.c task = rpc_run_task(&task_setup_data); task 1193 net/sunrpc/clnt.c if (IS_ERR(task)) task 1194 net/sunrpc/clnt.c return PTR_ERR(task); task 1195 net/sunrpc/clnt.c rpc_put_task(task); task 1201 net/sunrpc/clnt.c static void call_bc_encode(struct rpc_task *task); task 1210 net/sunrpc/clnt.c struct rpc_task *task; task 1221 net/sunrpc/clnt.c task = rpc_new_task(&task_setup_data); task 1222 net/sunrpc/clnt.c xprt_init_bc_request(req, task); task 1224 net/sunrpc/clnt.c task->tk_action = call_bc_encode; task 1225 net/sunrpc/clnt.c atomic_inc(&task->tk_count); task 1226 net/sunrpc/clnt.c WARN_ON_ONCE(atomic_read(&task->tk_count) != 2); task 1227 net/sunrpc/clnt.c rpc_execute(task); task 1229 net/sunrpc/clnt.c dprintk("RPC: rpc_run_bc_task: task= %p\n", task); task 1230 net/sunrpc/clnt.c 
return task; task 1258 net/sunrpc/clnt.c rpc_call_start(struct rpc_task *task) task 1260 net/sunrpc/clnt.c task->tk_action = call_start; task 1558 net/sunrpc/clnt.c __rpc_restart_call(struct rpc_task *task, void (*action)(struct rpc_task *)) task 1560 net/sunrpc/clnt.c task->tk_status = 0; task 1561 net/sunrpc/clnt.c task->tk_rpc_status = 0; task 1562 net/sunrpc/clnt.c task->tk_action = action; task 1571 net/sunrpc/clnt.c rpc_restart_call(struct rpc_task *task) task 1573 net/sunrpc/clnt.c return __rpc_restart_call(task, call_start); task 1582 net/sunrpc/clnt.c rpc_restart_call_prepare(struct rpc_task *task) task 1584 net/sunrpc/clnt.c if (task->tk_ops->rpc_call_prepare != NULL) task 1585 net/sunrpc/clnt.c return __rpc_restart_call(task, rpc_prepare_task); task 1586 net/sunrpc/clnt.c return rpc_restart_call(task); task 1591 net/sunrpc/clnt.c *rpc_proc_name(const struct rpc_task *task) task 1593 net/sunrpc/clnt.c const struct rpc_procinfo *proc = task->tk_msg.rpc_proc; task 1605 net/sunrpc/clnt.c __rpc_call_rpcerror(struct rpc_task *task, int tk_status, int rpc_status) task 1607 net/sunrpc/clnt.c task->tk_rpc_status = rpc_status; task 1608 net/sunrpc/clnt.c rpc_exit(task, tk_status); task 1612 net/sunrpc/clnt.c rpc_call_rpcerror(struct rpc_task *task, int status) task 1614 net/sunrpc/clnt.c __rpc_call_rpcerror(task, status, status); task 1624 net/sunrpc/clnt.c call_start(struct rpc_task *task) task 1626 net/sunrpc/clnt.c struct rpc_clnt *clnt = task->tk_client; task 1627 net/sunrpc/clnt.c int idx = task->tk_msg.rpc_proc->p_statidx; task 1629 net/sunrpc/clnt.c trace_rpc_request(task); task 1630 net/sunrpc/clnt.c dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task->tk_pid, task 1632 net/sunrpc/clnt.c rpc_proc_name(task), task 1633 net/sunrpc/clnt.c (RPC_IS_ASYNC(task) ? 
"async" : "sync")); task 1639 net/sunrpc/clnt.c task->tk_action = call_reserve; task 1640 net/sunrpc/clnt.c rpc_task_set_transport(task, clnt); task 1647 net/sunrpc/clnt.c call_reserve(struct rpc_task *task) task 1649 net/sunrpc/clnt.c dprint_status(task); task 1651 net/sunrpc/clnt.c task->tk_status = 0; task 1652 net/sunrpc/clnt.c task->tk_action = call_reserveresult; task 1653 net/sunrpc/clnt.c xprt_reserve(task); task 1656 net/sunrpc/clnt.c static void call_retry_reserve(struct rpc_task *task); task 1662 net/sunrpc/clnt.c call_reserveresult(struct rpc_task *task) task 1664 net/sunrpc/clnt.c int status = task->tk_status; task 1666 net/sunrpc/clnt.c dprint_status(task); task 1672 net/sunrpc/clnt.c task->tk_status = 0; task 1674 net/sunrpc/clnt.c if (task->tk_rqstp) { task 1675 net/sunrpc/clnt.c task->tk_action = call_refresh; task 1681 net/sunrpc/clnt.c rpc_call_rpcerror(task, -EIO); task 1689 net/sunrpc/clnt.c if (task->tk_rqstp) { task 1692 net/sunrpc/clnt.c xprt_release(task); task 1697 net/sunrpc/clnt.c rpc_delay(task, HZ >> 2); task 1700 net/sunrpc/clnt.c task->tk_action = call_retry_reserve; task 1709 net/sunrpc/clnt.c rpc_call_rpcerror(task, status); task 1716 net/sunrpc/clnt.c call_retry_reserve(struct rpc_task *task) task 1718 net/sunrpc/clnt.c dprint_status(task); task 1720 net/sunrpc/clnt.c task->tk_status = 0; task 1721 net/sunrpc/clnt.c task->tk_action = call_reserveresult; task 1722 net/sunrpc/clnt.c xprt_retry_reserve(task); task 1729 net/sunrpc/clnt.c call_refresh(struct rpc_task *task) task 1731 net/sunrpc/clnt.c dprint_status(task); task 1733 net/sunrpc/clnt.c task->tk_action = call_refreshresult; task 1734 net/sunrpc/clnt.c task->tk_status = 0; task 1735 net/sunrpc/clnt.c task->tk_client->cl_stats->rpcauthrefresh++; task 1736 net/sunrpc/clnt.c rpcauth_refreshcred(task); task 1743 net/sunrpc/clnt.c call_refreshresult(struct rpc_task *task) task 1745 net/sunrpc/clnt.c int status = task->tk_status; task 1747 net/sunrpc/clnt.c dprint_status(task); task 1749 net/sunrpc/clnt.c task->tk_status = 0; task 1750 net/sunrpc/clnt.c task->tk_action = call_refresh; task 1753 net/sunrpc/clnt.c if (rpcauth_uptodatecred(task)) { task 1754 net/sunrpc/clnt.c task->tk_action = call_allocate; task 1762 net/sunrpc/clnt.c rpc_delay(task, 3*HZ); task 1768 net/sunrpc/clnt.c if (!task->tk_cred_retry) task 1770 net/sunrpc/clnt.c task->tk_cred_retry--; task 1772 net/sunrpc/clnt.c task->tk_pid, __func__); task 1776 net/sunrpc/clnt.c task->tk_pid, __func__, status); task 1777 net/sunrpc/clnt.c rpc_call_rpcerror(task, status); task 1785 net/sunrpc/clnt.c call_allocate(struct rpc_task *task) task 1787 net/sunrpc/clnt.c const struct rpc_auth *auth = task->tk_rqstp->rq_cred->cr_auth; task 1788 net/sunrpc/clnt.c struct rpc_rqst *req = task->tk_rqstp; task 1790 net/sunrpc/clnt.c const struct rpc_procinfo *proc = task->tk_msg.rpc_proc; task 1793 net/sunrpc/clnt.c dprint_status(task); task 1795 net/sunrpc/clnt.c task->tk_status = 0; task 1796 net/sunrpc/clnt.c task->tk_action = call_encode; task 1823 net/sunrpc/clnt.c status = xprt->ops->buf_alloc(task); task 1828 net/sunrpc/clnt.c rpc_call_rpcerror(task, status); task 1832 net/sunrpc/clnt.c dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid); task 1834 net/sunrpc/clnt.c if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) { task 1835 net/sunrpc/clnt.c task->tk_action = call_allocate; task 1836 net/sunrpc/clnt.c rpc_delay(task, HZ>>4); task 1840 net/sunrpc/clnt.c rpc_call_rpcerror(task, -ERESTARTSYS); task 1844 net/sunrpc/clnt.c 
rpc_task_need_encode(struct rpc_task *task) task 1846 net/sunrpc/clnt.c return test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) == 0 && task 1847 net/sunrpc/clnt.c (!(task->tk_flags & RPC_TASK_SENT) || task 1848 net/sunrpc/clnt.c !(task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) || task 1849 net/sunrpc/clnt.c xprt_request_need_retransmit(task)); task 1853 net/sunrpc/clnt.c rpc_xdr_encode(struct rpc_task *task) task 1855 net/sunrpc/clnt.c struct rpc_rqst *req = task->tk_rqstp; task 1870 net/sunrpc/clnt.c if (rpc_encode_header(task, &xdr)) task 1873 net/sunrpc/clnt.c task->tk_status = rpcauth_wrap_req(task, &xdr); task 1880 net/sunrpc/clnt.c call_encode(struct rpc_task *task) task 1882 net/sunrpc/clnt.c if (!rpc_task_need_encode(task)) task 1884 net/sunrpc/clnt.c dprint_status(task); task 1886 net/sunrpc/clnt.c xprt_request_dequeue_xprt(task); task 1888 net/sunrpc/clnt.c rpc_xdr_encode(task); task 1890 net/sunrpc/clnt.c if (task->tk_status != 0) { task 1892 net/sunrpc/clnt.c switch (task->tk_status) { task 1895 net/sunrpc/clnt.c rpc_delay(task, HZ >> 4); task 1898 net/sunrpc/clnt.c if (!task->tk_cred_retry) { task 1899 net/sunrpc/clnt.c rpc_exit(task, task->tk_status); task 1901 net/sunrpc/clnt.c task->tk_action = call_refresh; task 1902 net/sunrpc/clnt.c task->tk_cred_retry--; task 1904 net/sunrpc/clnt.c task->tk_pid, __func__); task 1908 net/sunrpc/clnt.c rpc_call_rpcerror(task, task->tk_status); task 1914 net/sunrpc/clnt.c if (rpc_reply_expected(task)) task 1915 net/sunrpc/clnt.c xprt_request_enqueue_receive(task); task 1916 net/sunrpc/clnt.c xprt_request_enqueue_transmit(task); task 1918 net/sunrpc/clnt.c task->tk_action = call_transmit; task 1920 net/sunrpc/clnt.c if (!xprt_bound(task->tk_xprt)) task 1921 net/sunrpc/clnt.c task->tk_action = call_bind; task 1922 net/sunrpc/clnt.c else if (!xprt_connected(task->tk_xprt)) task 1923 net/sunrpc/clnt.c task->tk_action = call_connect; task 1931 net/sunrpc/clnt.c rpc_task_transmitted(struct rpc_task *task) task 1933 net/sunrpc/clnt.c return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate); task 1937 net/sunrpc/clnt.c rpc_task_handle_transmitted(struct rpc_task *task) task 1939 net/sunrpc/clnt.c xprt_end_transmit(task); task 1940 net/sunrpc/clnt.c task->tk_action = call_transmit_status; task 1947 net/sunrpc/clnt.c call_bind(struct rpc_task *task) task 1949 net/sunrpc/clnt.c struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; task 1951 net/sunrpc/clnt.c if (rpc_task_transmitted(task)) { task 1952 net/sunrpc/clnt.c rpc_task_handle_transmitted(task); task 1957 net/sunrpc/clnt.c task->tk_action = call_connect; task 1961 net/sunrpc/clnt.c dprint_status(task); task 1963 net/sunrpc/clnt.c task->tk_action = call_bind_status; task 1964 net/sunrpc/clnt.c if (!xprt_prepare_transmit(task)) task 1967 net/sunrpc/clnt.c xprt->ops->rpcbind(task); task 1974 net/sunrpc/clnt.c call_bind_status(struct rpc_task *task) task 1976 net/sunrpc/clnt.c struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; task 1979 net/sunrpc/clnt.c if (rpc_task_transmitted(task)) { task 1980 net/sunrpc/clnt.c rpc_task_handle_transmitted(task); task 1984 net/sunrpc/clnt.c dprint_status(task); task 1985 net/sunrpc/clnt.c trace_rpc_bind_status(task); task 1986 net/sunrpc/clnt.c if (task->tk_status >= 0) task 1989 net/sunrpc/clnt.c task->tk_status = 0; task 1993 net/sunrpc/clnt.c switch (task->tk_status) { task 1995 net/sunrpc/clnt.c dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid); task 1996 net/sunrpc/clnt.c rpc_delay(task, HZ >> 2); task 2000 net/sunrpc/clnt.c "unavailable\n", 
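Note: call_reserveresult() above re-arms call_retry_reserve after rpc_delay(task, HZ >> 2) on transient slot exhaustion, and call_allocate() likewise re-installs itself with rpc_delay(task, HZ >> 4) when buffer allocation fails. A hedged sketch of that bounded retry-with-delay shape follows; reserve_slot() and the retry cap of five are illustrative inventions, not kernel values.

```c
/* Bounded retry-with-delay, modeled loosely on the call_reserve ->
 * call_reserveresult -> call_retry_reserve loop. */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int attempts_seen;	/* pretend slot table, initially exhausted */

static int reserve_slot(void)
{
	if (attempts_seen++ < 2)
		return -EAGAIN;		/* transient: try again later */
	return 0;
}

int main(void)
{
	for (int attempt = 0; attempt < 5; attempt++) {
		int err = reserve_slot();
		if (err == 0) {
			puts("slot reserved");
			return 0;
		}
		if (err != -EAGAIN)
			break;			/* hard error: give up */
		usleep(250 * 1000);		/* stands in for rpc_delay() */
	}
	fputs("could not reserve a slot\n", stderr);
	return 1;
}
```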
task->tk_pid); task 2002 net/sunrpc/clnt.c if (task->tk_msg.rpc_proc->p_proc == 0) { task 2006 net/sunrpc/clnt.c if (task->tk_rebind_retry == 0) task 2008 net/sunrpc/clnt.c task->tk_rebind_retry--; task 2009 net/sunrpc/clnt.c rpc_delay(task, 3*HZ); task 2012 net/sunrpc/clnt.c rpc_delay(task, HZ >> 2); task 2018 net/sunrpc/clnt.c task->tk_pid); task 2023 net/sunrpc/clnt.c task->tk_pid); task 2027 net/sunrpc/clnt.c task->tk_pid); task 2039 net/sunrpc/clnt.c task->tk_pid, task->tk_status); task 2040 net/sunrpc/clnt.c if (!RPC_IS_SOFTCONN(task)) { task 2041 net/sunrpc/clnt.c rpc_delay(task, 5*HZ); task 2044 net/sunrpc/clnt.c status = task->tk_status; task 2048 net/sunrpc/clnt.c task->tk_pid, -task->tk_status); task 2051 net/sunrpc/clnt.c rpc_call_rpcerror(task, status); task 2054 net/sunrpc/clnt.c task->tk_action = call_connect; task 2057 net/sunrpc/clnt.c task->tk_status = 0; task 2058 net/sunrpc/clnt.c task->tk_action = call_bind; task 2059 net/sunrpc/clnt.c rpc_check_timeout(task); task 2066 net/sunrpc/clnt.c call_connect(struct rpc_task *task) task 2068 net/sunrpc/clnt.c struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; task 2070 net/sunrpc/clnt.c if (rpc_task_transmitted(task)) { task 2071 net/sunrpc/clnt.c rpc_task_handle_transmitted(task); task 2076 net/sunrpc/clnt.c task->tk_action = call_transmit; task 2081 net/sunrpc/clnt.c task->tk_pid, xprt, task 2084 net/sunrpc/clnt.c task->tk_action = call_connect_status; task 2085 net/sunrpc/clnt.c if (task->tk_status < 0) task 2087 net/sunrpc/clnt.c if (task->tk_flags & RPC_TASK_NOCONNECT) { task 2088 net/sunrpc/clnt.c rpc_call_rpcerror(task, -ENOTCONN); task 2091 net/sunrpc/clnt.c if (!xprt_prepare_transmit(task)) task 2093 net/sunrpc/clnt.c xprt_connect(task); task 2100 net/sunrpc/clnt.c call_connect_status(struct rpc_task *task) task 2102 net/sunrpc/clnt.c struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; task 2103 net/sunrpc/clnt.c struct rpc_clnt *clnt = task->tk_client; task 2104 net/sunrpc/clnt.c int status = task->tk_status; task 2106 net/sunrpc/clnt.c if (rpc_task_transmitted(task)) { task 2107 net/sunrpc/clnt.c rpc_task_handle_transmitted(task); task 2111 net/sunrpc/clnt.c dprint_status(task); task 2112 net/sunrpc/clnt.c trace_rpc_connect_status(task); task 2114 net/sunrpc/clnt.c if (task->tk_status == 0) { task 2119 net/sunrpc/clnt.c task->tk_status = 0; task 2123 net/sunrpc/clnt.c task->tk_status = 0; task 2127 net/sunrpc/clnt.c if (RPC_IS_SOFTCONN(task)) task 2140 net/sunrpc/clnt.c xprt_conditional_disconnect(task->tk_rqstp->rq_xprt, task 2141 net/sunrpc/clnt.c task->tk_rqstp->rq_connect_cookie); task 2142 net/sunrpc/clnt.c if (RPC_IS_SOFTCONN(task)) task 2145 net/sunrpc/clnt.c rpc_delay(task, 3*HZ); task 2153 net/sunrpc/clnt.c rpc_delay(task, HZ >> 2); task 2156 net/sunrpc/clnt.c rpc_call_rpcerror(task, status); task 2159 net/sunrpc/clnt.c task->tk_action = call_transmit; task 2163 net/sunrpc/clnt.c task->tk_action = call_bind; task 2164 net/sunrpc/clnt.c rpc_check_timeout(task); task 2171 net/sunrpc/clnt.c call_transmit(struct rpc_task *task) task 2173 net/sunrpc/clnt.c if (rpc_task_transmitted(task)) { task 2174 net/sunrpc/clnt.c rpc_task_handle_transmitted(task); task 2178 net/sunrpc/clnt.c dprint_status(task); task 2180 net/sunrpc/clnt.c task->tk_action = call_transmit_status; task 2181 net/sunrpc/clnt.c if (!xprt_prepare_transmit(task)) task 2183 net/sunrpc/clnt.c task->tk_status = 0; task 2184 net/sunrpc/clnt.c if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) { task 2185 net/sunrpc/clnt.c if 
(!xprt_connected(task->tk_xprt)) { task 2186 net/sunrpc/clnt.c task->tk_status = -ENOTCONN; task 2189 net/sunrpc/clnt.c xprt_transmit(task); task 2191 net/sunrpc/clnt.c xprt_end_transmit(task); task 2198 net/sunrpc/clnt.c call_transmit_status(struct rpc_task *task) task 2200 net/sunrpc/clnt.c task->tk_action = call_status; task 2206 net/sunrpc/clnt.c if (rpc_task_transmitted(task)) { task 2207 net/sunrpc/clnt.c task->tk_status = 0; task 2208 net/sunrpc/clnt.c xprt_request_wait_receive(task); task 2212 net/sunrpc/clnt.c switch (task->tk_status) { task 2214 net/sunrpc/clnt.c dprint_status(task); task 2217 net/sunrpc/clnt.c task->tk_status = 0; task 2218 net/sunrpc/clnt.c task->tk_action = call_encode; task 2227 net/sunrpc/clnt.c rpc_delay(task, HZ>>2); task 2231 net/sunrpc/clnt.c task->tk_action = call_transmit; task 2232 net/sunrpc/clnt.c task->tk_status = 0; task 2240 net/sunrpc/clnt.c if (RPC_IS_SOFTCONN(task)) { task 2241 net/sunrpc/clnt.c if (!task->tk_msg.rpc_proc->p_proc) task 2242 net/sunrpc/clnt.c trace_xprt_ping(task->tk_xprt, task 2243 net/sunrpc/clnt.c task->tk_status); task 2244 net/sunrpc/clnt.c rpc_call_rpcerror(task, task->tk_status); task 2253 net/sunrpc/clnt.c task->tk_action = call_bind; task 2254 net/sunrpc/clnt.c task->tk_status = 0; task 2257 net/sunrpc/clnt.c rpc_check_timeout(task); task 2261 net/sunrpc/clnt.c static void call_bc_transmit(struct rpc_task *task); task 2262 net/sunrpc/clnt.c static void call_bc_transmit_status(struct rpc_task *task); task 2265 net/sunrpc/clnt.c call_bc_encode(struct rpc_task *task) task 2267 net/sunrpc/clnt.c xprt_request_enqueue_transmit(task); task 2268 net/sunrpc/clnt.c task->tk_action = call_bc_transmit; task 2276 net/sunrpc/clnt.c call_bc_transmit(struct rpc_task *task) task 2278 net/sunrpc/clnt.c task->tk_action = call_bc_transmit_status; task 2279 net/sunrpc/clnt.c if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) { task 2280 net/sunrpc/clnt.c if (!xprt_prepare_transmit(task)) task 2282 net/sunrpc/clnt.c task->tk_status = 0; task 2283 net/sunrpc/clnt.c xprt_transmit(task); task 2285 net/sunrpc/clnt.c xprt_end_transmit(task); task 2289 net/sunrpc/clnt.c call_bc_transmit_status(struct rpc_task *task) task 2291 net/sunrpc/clnt.c struct rpc_rqst *req = task->tk_rqstp; task 2293 net/sunrpc/clnt.c if (rpc_task_transmitted(task)) task 2294 net/sunrpc/clnt.c task->tk_status = 0; task 2296 net/sunrpc/clnt.c dprint_status(task); task 2298 net/sunrpc/clnt.c switch (task->tk_status) { task 2312 net/sunrpc/clnt.c rpc_delay(task, HZ>>2); task 2316 net/sunrpc/clnt.c task->tk_status = 0; task 2317 net/sunrpc/clnt.c task->tk_action = call_bc_transmit; task 2328 net/sunrpc/clnt.c "error: %d\n", task->tk_status); task 2338 net/sunrpc/clnt.c "error: %d\n", task->tk_status); task 2341 net/sunrpc/clnt.c task->tk_action = rpc_exit_task; task 2349 net/sunrpc/clnt.c call_status(struct rpc_task *task) task 2351 net/sunrpc/clnt.c struct rpc_clnt *clnt = task->tk_client; task 2354 net/sunrpc/clnt.c if (!task->tk_msg.rpc_proc->p_proc) task 2355 net/sunrpc/clnt.c trace_xprt_ping(task->tk_xprt, task->tk_status); task 2357 net/sunrpc/clnt.c dprint_status(task); task 2359 net/sunrpc/clnt.c status = task->tk_status; task 2361 net/sunrpc/clnt.c task->tk_action = call_decode; task 2365 net/sunrpc/clnt.c trace_rpc_call_status(task); task 2366 net/sunrpc/clnt.c task->tk_status = 0; task 2373 net/sunrpc/clnt.c if (RPC_IS_SOFTCONN(task)) task 2379 net/sunrpc/clnt.c rpc_delay(task, 3*HZ); task 2390 net/sunrpc/clnt.c rpc_delay(task, 3*HZ); task 2404 net/sunrpc/clnt.c 
task->tk_action = call_encode; task 2405 net/sunrpc/clnt.c rpc_check_timeout(task); task 2408 net/sunrpc/clnt.c rpc_call_rpcerror(task, status); task 2421 net/sunrpc/clnt.c rpc_check_timeout(struct rpc_task *task) task 2423 net/sunrpc/clnt.c struct rpc_clnt *clnt = task->tk_client; task 2425 net/sunrpc/clnt.c if (RPC_SIGNALLED(task)) { task 2426 net/sunrpc/clnt.c rpc_call_rpcerror(task, -ERESTARTSYS); task 2430 net/sunrpc/clnt.c if (xprt_adjust_timeout(task->tk_rqstp) == 0) task 2433 net/sunrpc/clnt.c dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid); task 2434 net/sunrpc/clnt.c task->tk_timeouts++; task 2436 net/sunrpc/clnt.c if (RPC_IS_SOFTCONN(task) && !rpc_check_connected(task->tk_rqstp)) { task 2437 net/sunrpc/clnt.c rpc_call_rpcerror(task, -ETIMEDOUT); task 2441 net/sunrpc/clnt.c if (RPC_IS_SOFT(task)) { task 2447 net/sunrpc/clnt.c if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) && task 2448 net/sunrpc/clnt.c rpc_check_connected(task->tk_rqstp)) task 2455 net/sunrpc/clnt.c task->tk_xprt->servername); task 2457 net/sunrpc/clnt.c if (task->tk_flags & RPC_TASK_TIMEOUT) task 2458 net/sunrpc/clnt.c rpc_call_rpcerror(task, -ETIMEDOUT); task 2460 net/sunrpc/clnt.c __rpc_call_rpcerror(task, -EIO, -ETIMEDOUT); task 2464 net/sunrpc/clnt.c if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) { task 2465 net/sunrpc/clnt.c task->tk_flags |= RPC_CALL_MAJORSEEN; task 2470 net/sunrpc/clnt.c task->tk_xprt->servername); task 2478 net/sunrpc/clnt.c rpcauth_invalcred(task); task 2485 net/sunrpc/clnt.c call_decode(struct rpc_task *task) task 2487 net/sunrpc/clnt.c struct rpc_clnt *clnt = task->tk_client; task 2488 net/sunrpc/clnt.c struct rpc_rqst *req = task->tk_rqstp; task 2492 net/sunrpc/clnt.c dprint_status(task); task 2494 net/sunrpc/clnt.c if (!task->tk_msg.rpc_proc->p_decode) { task 2495 net/sunrpc/clnt.c task->tk_action = rpc_exit_task; task 2499 net/sunrpc/clnt.c if (task->tk_flags & RPC_CALL_MAJORSEEN) { task 2503 net/sunrpc/clnt.c task->tk_xprt->servername); task 2505 net/sunrpc/clnt.c task->tk_flags &= ~RPC_CALL_MAJORSEEN; task 2530 net/sunrpc/clnt.c err = rpc_decode_header(task, &xdr); task 2534 net/sunrpc/clnt.c task->tk_action = rpc_exit_task; task 2535 net/sunrpc/clnt.c task->tk_status = rpcauth_unwrap_resp(task, &xdr); task 2537 net/sunrpc/clnt.c task->tk_pid, __func__, task->tk_status); task 2540 net/sunrpc/clnt.c task->tk_status = 0; task 2541 net/sunrpc/clnt.c if (task->tk_client->cl_discrtry) task 2544 net/sunrpc/clnt.c task->tk_action = call_encode; task 2545 net/sunrpc/clnt.c rpc_check_timeout(task); task 2548 net/sunrpc/clnt.c task->tk_action = call_reserve; task 2549 net/sunrpc/clnt.c rpc_check_timeout(task); task 2550 net/sunrpc/clnt.c rpcauth_invalcred(task); task 2552 net/sunrpc/clnt.c xprt_release(task); task 2557 net/sunrpc/clnt.c rpc_encode_header(struct rpc_task *task, struct xdr_stream *xdr) task 2559 net/sunrpc/clnt.c struct rpc_clnt *clnt = task->tk_client; task 2560 net/sunrpc/clnt.c struct rpc_rqst *req = task->tk_rqstp; task 2573 net/sunrpc/clnt.c *p = cpu_to_be32(task->tk_msg.rpc_proc->p_proc); task 2575 net/sunrpc/clnt.c error = rpcauth_marshcred(task, xdr); task 2580 net/sunrpc/clnt.c trace_rpc_bad_callhdr(task); task 2581 net/sunrpc/clnt.c rpc_call_rpcerror(task, error); task 2586 net/sunrpc/clnt.c rpc_decode_header(struct rpc_task *task, struct xdr_stream *xdr) task 2588 net/sunrpc/clnt.c struct rpc_clnt *clnt = task->tk_client; task 2597 net/sunrpc/clnt.c if (task->tk_rqstp->rq_rcv_buf.len & 3) task 2609 net/sunrpc/clnt.c error = rpcauth_checkverf(task, xdr); 
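Note: the call_bind/call_connect/call_transmit/call_status/call_decode entries above all follow one convention — each handler does a bounded amount of work, stores the next handler in task->tk_action, and returns; the scheduler loop (__rpc_execute() in net/sunrpc/sched.c, listed further below) keeps invoking whatever action is installed. A minimal userspace sketch of that function-pointer state machine, with invented step names:

```c
/* Function-pointer state machine in the style of the call_* handlers:
 * each step does one thing and installs the next step in ->action. */
#include <stdio.h>

struct fsm_task;
typedef void (*action_fn)(struct fsm_task *);

struct fsm_task {
	action_fn action;	/* plays the role of tk_action */
	int status;		/* plays the role of tk_status */
};

static void step_decode(struct fsm_task *t)
{
	puts("decode reply");
	t->action = NULL;		/* NULL action terminates the loop */
}

static void step_transmit(struct fsm_task *t)
{
	puts("transmit request");
	t->action = step_decode;
}

static void step_connect(struct fsm_task *t)
{
	puts("connect transport");
	t->action = step_transmit;
}

/* Simplified stand-in for __rpc_execute(): run until no action is set. */
static void execute(struct fsm_task *t)
{
	while (t->action)
		t->action(t);
}

int main(void)
{
	struct fsm_task t = { .action = step_connect, .status = 0 };
	execute(&t);
	return t.status;
}
```

This structure is what lets error paths such as call_status() "rewind" a task: installing an earlier handler (e.g. task->tk_action = call_encode) restarts the request from that stage without recursion.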
task 2620 net/sunrpc/clnt.c trace_rpc__prog_unavail(task); task 2624 net/sunrpc/clnt.c trace_rpc__prog_mismatch(task); task 2628 net/sunrpc/clnt.c trace_rpc__proc_unavail(task); task 2633 net/sunrpc/clnt.c trace_rpc__garbage_args(task); task 2642 net/sunrpc/clnt.c if (task->tk_garb_retry) { task 2643 net/sunrpc/clnt.c task->tk_garb_retry--; task 2644 net/sunrpc/clnt.c task->tk_action = call_encode; task 2648 net/sunrpc/clnt.c rpc_call_rpcerror(task, error); task 2652 net/sunrpc/clnt.c trace_rpc__unparsable(task); task 2657 net/sunrpc/clnt.c trace_rpc_bad_verifier(task); task 2669 net/sunrpc/clnt.c trace_rpc__mismatch(task); task 2684 net/sunrpc/clnt.c if (!task->tk_cred_retry) task 2686 net/sunrpc/clnt.c task->tk_cred_retry--; task 2687 net/sunrpc/clnt.c trace_rpc__stale_creds(task); task 2692 net/sunrpc/clnt.c if (!task->tk_garb_retry) task 2694 net/sunrpc/clnt.c task->tk_garb_retry--; task 2695 net/sunrpc/clnt.c trace_rpc__bad_creds(task); task 2696 net/sunrpc/clnt.c task->tk_action = call_encode; task 2699 net/sunrpc/clnt.c trace_rpc__auth_tooweak(task); task 2701 net/sunrpc/clnt.c task->tk_xprt->servername); task 2768 net/sunrpc/clnt.c static void rpc_cb_add_xprt_done(struct rpc_task *task, void *calldata) task 2772 net/sunrpc/clnt.c if (task->tk_status == 0) task 2802 net/sunrpc/clnt.c struct rpc_task *task; task 2814 net/sunrpc/clnt.c task = rpc_call_null_helper(clnt, xprt, NULL, task 2817 net/sunrpc/clnt.c if (IS_ERR(task)) task 2818 net/sunrpc/clnt.c return PTR_ERR(task); task 2819 net/sunrpc/clnt.c rpc_put_task(task); task 2848 net/sunrpc/clnt.c struct rpc_task *task; task 2859 net/sunrpc/clnt.c task = rpc_call_null_helper(clnt, xprt, NULL, task 2862 net/sunrpc/clnt.c if (IS_ERR(task)) { task 2863 net/sunrpc/clnt.c status = PTR_ERR(task); task 2866 net/sunrpc/clnt.c status = task->tk_status; task 2867 net/sunrpc/clnt.c rpc_put_task(task); task 3029 net/sunrpc/clnt.c const struct rpc_task *task) task 3033 net/sunrpc/clnt.c if (RPC_IS_QUEUED(task)) task 3034 net/sunrpc/clnt.c rpc_waitq = rpc_qname(task->tk_waitqueue); task 3037 net/sunrpc/clnt.c task->tk_pid, task->tk_flags, task->tk_status, task 3038 net/sunrpc/clnt.c clnt, task->tk_rqstp, rpc_task_timeout(task), task->tk_ops, task 3039 net/sunrpc/clnt.c clnt->cl_program->name, clnt->cl_vers, rpc_proc_name(task), task 3040 net/sunrpc/clnt.c task->tk_action, rpc_waitq); task 3046 net/sunrpc/clnt.c struct rpc_task *task; task 3053 net/sunrpc/clnt.c list_for_each_entry(task, &clnt->cl_tasks, tk_task) { task 3058 net/sunrpc/clnt.c rpc_show_task(clnt, task); task 23 net/sunrpc/debugfs.c struct rpc_task *task = v; task 24 net/sunrpc/debugfs.c struct rpc_clnt *clnt = task->tk_client; task 27 net/sunrpc/debugfs.c if (RPC_IS_QUEUED(task)) task 28 net/sunrpc/debugfs.c rpc_waitq = rpc_qname(task->tk_waitqueue); task 30 net/sunrpc/debugfs.c if (task->tk_rqstp) task 31 net/sunrpc/debugfs.c xid = be32_to_cpu(task->tk_rqstp->rq_xid); task 34 net/sunrpc/debugfs.c task->tk_pid, task->tk_flags, task->tk_status, task 35 net/sunrpc/debugfs.c clnt->cl_clid, xid, rpc_task_timeout(task), task->tk_ops, task 36 net/sunrpc/debugfs.c clnt->cl_program->name, clnt->cl_vers, rpc_proc_name(task), task 37 net/sunrpc/debugfs.c task->tk_action, rpc_waitq); task 47 net/sunrpc/debugfs.c struct rpc_task *task; task 50 net/sunrpc/debugfs.c list_for_each_entry(task, &clnt->cl_tasks, tk_task) task 52 net/sunrpc/debugfs.c return task; task 60 net/sunrpc/debugfs.c struct rpc_task *task = v; task 61 net/sunrpc/debugfs.c struct list_head *next = task->tk_task.next; task 677 
net/sunrpc/rpcb_clnt.c void rpcb_getport_async(struct rpc_task *task) task 692 net/sunrpc/rpcb_clnt.c clnt = rpcb_find_transport_owner(task->tk_client); task 694 net/sunrpc/rpcb_clnt.c xprt = xprt_get(task->tk_xprt); task 697 net/sunrpc/rpcb_clnt.c task->tk_pid, __func__, task 702 net/sunrpc/rpcb_clnt.c rpc_sleep_on_timeout(&xprt->binding, task, task 707 net/sunrpc/rpcb_clnt.c task->tk_pid, __func__); task 716 net/sunrpc/rpcb_clnt.c task->tk_pid, __func__); task 736 net/sunrpc/rpcb_clnt.c task->tk_pid, __func__); task 743 net/sunrpc/rpcb_clnt.c task->tk_pid, __func__); task 748 net/sunrpc/rpcb_clnt.c task->tk_pid, __func__, bind_version); task 758 net/sunrpc/rpcb_clnt.c task->tk_pid, __func__, PTR_ERR(rpcb_clnt)); task 766 net/sunrpc/rpcb_clnt.c task->tk_pid, __func__); task 784 net/sunrpc/rpcb_clnt.c task->tk_pid, __func__); task 801 net/sunrpc/rpcb_clnt.c task->tk_pid, __func__); task 815 net/sunrpc/rpcb_clnt.c task->tk_status = status; task 49 net/sunrpc/sched.c static void rpc_release_task(struct rpc_task *task); task 65 net/sunrpc/sched.c rpc_task_timeout(const struct rpc_task *task) task 67 net/sunrpc/sched.c unsigned long timeout = READ_ONCE(task->tk_timeout); task 84 net/sunrpc/sched.c __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task) task 86 net/sunrpc/sched.c if (list_empty(&task->u.tk_wait.timer_list)) task 88 net/sunrpc/sched.c dprintk("RPC: %5u disabling timer\n", task->tk_pid); task 89 net/sunrpc/sched.c task->tk_timeout = 0; task 90 net/sunrpc/sched.c list_del(&task->u.tk_wait.timer_list); task 111 net/sunrpc/sched.c __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task, task 115 net/sunrpc/sched.c task->tk_pid, jiffies_to_msecs(timeout - jiffies)); task 117 net/sunrpc/sched.c task->tk_timeout = timeout; task 120 net/sunrpc/sched.c list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list); task 140 net/sunrpc/sched.c __rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task) task 145 net/sunrpc/sched.c if (t->tk_owner == task->tk_owner) { task 146 net/sunrpc/sched.c list_add_tail(&task->u.tk_wait.links, task 149 net/sunrpc/sched.c task->u.tk_wait.list.next = q; task 150 net/sunrpc/sched.c task->u.tk_wait.list.prev = NULL; task 154 net/sunrpc/sched.c INIT_LIST_HEAD(&task->u.tk_wait.links); task 155 net/sunrpc/sched.c list_add_tail(&task->u.tk_wait.list, q); task 162 net/sunrpc/sched.c __rpc_list_dequeue_task(struct rpc_task *task) task 167 net/sunrpc/sched.c if (task->u.tk_wait.list.prev == NULL) { task 168 net/sunrpc/sched.c list_del(&task->u.tk_wait.links); task 171 net/sunrpc/sched.c if (!list_empty(&task->u.tk_wait.links)) { task 172 net/sunrpc/sched.c t = list_first_entry(&task->u.tk_wait.links, task 178 net/sunrpc/sched.c list_del(&task->u.tk_wait.links); task 180 net/sunrpc/sched.c list_del(&task->u.tk_wait.list); task 187 net/sunrpc/sched.c struct rpc_task *task, task 192 net/sunrpc/sched.c __rpc_list_enqueue_task(&queue->tasks[queue_priority], task); task 204 net/sunrpc/sched.c struct rpc_task *task, task 207 net/sunrpc/sched.c WARN_ON_ONCE(RPC_IS_QUEUED(task)); task 208 net/sunrpc/sched.c if (RPC_IS_QUEUED(task)) task 211 net/sunrpc/sched.c INIT_LIST_HEAD(&task->u.tk_wait.timer_list); task 213 net/sunrpc/sched.c __rpc_add_wait_queue_priority(queue, task, queue_priority); task 214 net/sunrpc/sched.c else if (RPC_IS_SWAPPER(task)) task 215 net/sunrpc/sched.c list_add(&task->u.tk_wait.list, &queue->tasks[0]); task 217 net/sunrpc/sched.c list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]); task 218 net/sunrpc/sched.c 
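Note: the __rpc_list_enqueue_task() entries above show priority queues grouping tasks by tk_owner — a task whose owner already has a queued task is chained behind it, so wakeups rotate fairly across owners. Below is a deliberately simplified bucket model of that grouping idea; it does not reproduce the kernel's embedded dual-list layout, and all names are invented.

```c
/* Owner-grouped enqueue sketch: tasks sharing an owner collapse into one
 * queue position, in the spirit of __rpc_list_enqueue_task(). */
#include <stdio.h>
#include <stdlib.h>

struct bucket {
	int owner;
	int ntasks;
	struct bucket *next;
};

static struct bucket *head;

static void enqueue(int owner)
{
	struct bucket *b;

	for (b = head; b; b = b->next)
		if (b->owner == owner) {	/* owner already queued: */
			b->ntasks++;		/* chain behind it */
			return;
		}
	b = calloc(1, sizeof(*b));		/* new owner: append a bucket */
	if (!b)
		exit(1);
	b->owner = owner;
	b->ntasks = 1;
	if (!head) {
		head = b;
		return;
	}
	for (struct bucket *t = head; ; t = t->next)
		if (!t->next) {
			t->next = b;
			return;
		}
}

int main(void)
{
	enqueue(100); enqueue(200); enqueue(100); enqueue(300);
	for (struct bucket *b = head; b; b = b->next)
		printf("owner %d: %d task(s)\n", b->owner, b->ntasks);
	return 0;
}
```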
task->tk_waitqueue = queue; task 222 net/sunrpc/sched.c rpc_set_queued(task); task 225 net/sunrpc/sched.c task->tk_pid, queue, rpc_qname(queue)); task 231 net/sunrpc/sched.c static void __rpc_remove_wait_queue_priority(struct rpc_task *task) task 233 net/sunrpc/sched.c __rpc_list_dequeue_task(task); task 240 net/sunrpc/sched.c static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task) task 242 net/sunrpc/sched.c __rpc_disable_timer(queue, task); task 244 net/sunrpc/sched.c __rpc_remove_wait_queue_priority(task); task 246 net/sunrpc/sched.c list_del(&task->u.tk_wait.list); task 249 net/sunrpc/sched.c task->tk_pid, queue, rpc_qname(queue)); task 295 net/sunrpc/sched.c static void rpc_task_set_debuginfo(struct rpc_task *task) task 299 net/sunrpc/sched.c task->tk_pid = atomic_inc_return(&rpc_pid); task 302 net/sunrpc/sched.c static inline void rpc_task_set_debuginfo(struct rpc_task *task) task 307 net/sunrpc/sched.c static void rpc_set_active(struct rpc_task *task) task 309 net/sunrpc/sched.c rpc_task_set_debuginfo(task); task 310 net/sunrpc/sched.c set_bit(RPC_TASK_ACTIVE, &task->tk_runstate); task 311 net/sunrpc/sched.c trace_rpc_task_begin(task, NULL); task 318 net/sunrpc/sched.c static int rpc_complete_task(struct rpc_task *task) task 320 net/sunrpc/sched.c void *m = &task->tk_runstate; task 326 net/sunrpc/sched.c trace_rpc_task_complete(task, NULL); task 329 net/sunrpc/sched.c clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); task 330 net/sunrpc/sched.c ret = atomic_dec_and_test(&task->tk_count); task 344 net/sunrpc/sched.c int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action) task 348 net/sunrpc/sched.c return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE, task 365 net/sunrpc/sched.c struct rpc_task *task) task 367 net/sunrpc/sched.c bool need_wakeup = !rpc_test_and_set_running(task); task 369 net/sunrpc/sched.c rpc_clear_queued(task); task 372 net/sunrpc/sched.c if (RPC_IS_ASYNC(task)) { task 373 net/sunrpc/sched.c INIT_WORK(&task->u.tk_work, rpc_async_schedule); task 374 net/sunrpc/sched.c queue_work(wq, &task->u.tk_work); task 376 net/sunrpc/sched.c wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED); task 386 net/sunrpc/sched.c struct rpc_task *task, task 390 net/sunrpc/sched.c task->tk_pid, rpc_qname(q), jiffies); task 392 net/sunrpc/sched.c trace_rpc_task_sleep(task, q); task 394 net/sunrpc/sched.c __rpc_add_wait_queue(q, task, queue_priority); task 399 net/sunrpc/sched.c struct rpc_task *task, unsigned long timeout, task 403 net/sunrpc/sched.c __rpc_sleep_on_priority(q, task, queue_priority); task 404 net/sunrpc/sched.c __rpc_add_timer(q, task, timeout); task 406 net/sunrpc/sched.c task->tk_status = -ETIMEDOUT; task 409 net/sunrpc/sched.c static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action) task 411 net/sunrpc/sched.c if (action && !WARN_ON_ONCE(task->tk_callback != NULL)) task 412 net/sunrpc/sched.c task->tk_callback = action; task 415 net/sunrpc/sched.c static bool rpc_sleep_check_activated(struct rpc_task *task) task 418 net/sunrpc/sched.c if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) { task 419 net/sunrpc/sched.c task->tk_status = -EIO; task 420 net/sunrpc/sched.c rpc_put_task_async(task); task 426 net/sunrpc/sched.c void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task, task 429 net/sunrpc/sched.c if (!rpc_sleep_check_activated(task)) task 432 net/sunrpc/sched.c rpc_set_tk_callback(task, action); task 438 net/sunrpc/sched.c __rpc_sleep_on_priority_timeout(q, task, 
timeout, task->tk_priority); task 443 net/sunrpc/sched.c void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, task 446 net/sunrpc/sched.c if (!rpc_sleep_check_activated(task)) task 449 net/sunrpc/sched.c rpc_set_tk_callback(task, action); task 451 net/sunrpc/sched.c WARN_ON_ONCE(task->tk_timeout != 0); task 456 net/sunrpc/sched.c __rpc_sleep_on_priority(q, task, task->tk_priority); task 462 net/sunrpc/sched.c struct rpc_task *task, unsigned long timeout, int priority) task 464 net/sunrpc/sched.c if (!rpc_sleep_check_activated(task)) task 472 net/sunrpc/sched.c __rpc_sleep_on_priority_timeout(q, task, timeout, priority); task 477 net/sunrpc/sched.c void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task, task 480 net/sunrpc/sched.c if (!rpc_sleep_check_activated(task)) task 483 net/sunrpc/sched.c WARN_ON_ONCE(task->tk_timeout != 0); task 489 net/sunrpc/sched.c __rpc_sleep_on_priority(q, task, priority); task 504 net/sunrpc/sched.c struct rpc_task *task) task 507 net/sunrpc/sched.c task->tk_pid, jiffies); task 510 net/sunrpc/sched.c if (!RPC_IS_ACTIVATED(task)) { task 511 net/sunrpc/sched.c printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task); task 515 net/sunrpc/sched.c trace_rpc_task_wakeup(task, queue); task 517 net/sunrpc/sched.c __rpc_remove_wait_queue(queue, task); task 519 net/sunrpc/sched.c rpc_make_runnable(wq, task); task 529 net/sunrpc/sched.c struct rpc_wait_queue *queue, struct rpc_task *task, task 532 net/sunrpc/sched.c if (RPC_IS_QUEUED(task)) { task 534 net/sunrpc/sched.c if (task->tk_waitqueue == queue) { task 535 net/sunrpc/sched.c if (action == NULL || action(task, data)) { task 536 net/sunrpc/sched.c __rpc_do_wake_up_task_on_wq(wq, queue, task); task 537 net/sunrpc/sched.c return task; task 548 net/sunrpc/sched.c struct rpc_task *task) task 551 net/sunrpc/sched.c task, NULL, NULL); task 557 net/sunrpc/sched.c void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task) task 559 net/sunrpc/sched.c if (!RPC_IS_QUEUED(task)) task 562 net/sunrpc/sched.c rpc_wake_up_task_queue_locked(queue, task); task 567 net/sunrpc/sched.c static bool rpc_task_action_set_status(struct rpc_task *task, void *status) task 569 net/sunrpc/sched.c task->tk_status = *(int *)status; task 575 net/sunrpc/sched.c struct rpc_task *task, int status) task 578 net/sunrpc/sched.c task, rpc_task_action_set_status, &status); task 592 net/sunrpc/sched.c struct rpc_task *task, int status) task 594 net/sunrpc/sched.c if (!RPC_IS_QUEUED(task)) task 597 net/sunrpc/sched.c rpc_wake_up_task_queue_set_status_locked(queue, task, status); task 607 net/sunrpc/sched.c struct rpc_task *task; task 614 net/sunrpc/sched.c task = list_first_entry(q, struct rpc_task, u.tk_wait.list); task 627 net/sunrpc/sched.c task = list_first_entry(q, struct rpc_task, u.tk_wait.list); task 638 net/sunrpc/sched.c return task; task 657 net/sunrpc/sched.c struct rpc_task *task = NULL; task 662 net/sunrpc/sched.c task = __rpc_find_next_queued(queue); task 663 net/sunrpc/sched.c if (task != NULL) task 664 net/sunrpc/sched.c task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue, task 665 net/sunrpc/sched.c task, func, data); task 668 net/sunrpc/sched.c return task; task 681 net/sunrpc/sched.c static bool rpc_wake_up_next_func(struct rpc_task *task, void *data) task 709 net/sunrpc/sched.c struct rpc_task *task; task 710 net/sunrpc/sched.c task = list_first_entry(head, task 713 net/sunrpc/sched.c rpc_wake_up_task_queue_locked(queue, task); task 738 net/sunrpc/sched.c struct 
rpc_task *task; task 739 net/sunrpc/sched.c task = list_first_entry(head, task 742 net/sunrpc/sched.c task->tk_status = status; task 743 net/sunrpc/sched.c rpc_wake_up_task_queue_locked(queue, task); task 758 net/sunrpc/sched.c struct rpc_task *task, *n; task 763 net/sunrpc/sched.c list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) { task 764 net/sunrpc/sched.c timeo = task->tk_timeout; task 766 net/sunrpc/sched.c dprintk("RPC: %5u timeout\n", task->tk_pid); task 767 net/sunrpc/sched.c task->tk_status = -ETIMEDOUT; task 768 net/sunrpc/sched.c rpc_wake_up_task_queue_locked(queue, task); task 779 net/sunrpc/sched.c static void __rpc_atrun(struct rpc_task *task) task 781 net/sunrpc/sched.c if (task->tk_status == -ETIMEDOUT) task 782 net/sunrpc/sched.c task->tk_status = 0; task 788 net/sunrpc/sched.c void rpc_delay(struct rpc_task *task, unsigned long delay) task 790 net/sunrpc/sched.c rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay); task 797 net/sunrpc/sched.c void rpc_prepare_task(struct rpc_task *task) task 799 net/sunrpc/sched.c task->tk_ops->rpc_call_prepare(task, task->tk_calldata); task 803 net/sunrpc/sched.c rpc_init_task_statistics(struct rpc_task *task) task 806 net/sunrpc/sched.c task->tk_garb_retry = 2; task 807 net/sunrpc/sched.c task->tk_cred_retry = 2; task 808 net/sunrpc/sched.c task->tk_rebind_retry = 2; task 811 net/sunrpc/sched.c task->tk_start = ktime_get(); task 815 net/sunrpc/sched.c rpc_reset_task_statistics(struct rpc_task *task) task 817 net/sunrpc/sched.c task->tk_timeouts = 0; task 818 net/sunrpc/sched.c task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT); task 819 net/sunrpc/sched.c rpc_init_task_statistics(task); task 825 net/sunrpc/sched.c void rpc_exit_task(struct rpc_task *task) task 827 net/sunrpc/sched.c task->tk_action = NULL; task 828 net/sunrpc/sched.c if (task->tk_ops->rpc_count_stats) task 829 net/sunrpc/sched.c task->tk_ops->rpc_count_stats(task, task->tk_calldata); task 830 net/sunrpc/sched.c else if (task->tk_client) task 831 net/sunrpc/sched.c rpc_count_iostats(task, task->tk_client->cl_metrics); task 832 net/sunrpc/sched.c if (task->tk_ops->rpc_call_done != NULL) { task 833 net/sunrpc/sched.c task->tk_ops->rpc_call_done(task, task->tk_calldata); task 834 net/sunrpc/sched.c if (task->tk_action != NULL) { task 836 net/sunrpc/sched.c xprt_release(task); task 837 net/sunrpc/sched.c rpc_reset_task_statistics(task); task 842 net/sunrpc/sched.c void rpc_signal_task(struct rpc_task *task) task 846 net/sunrpc/sched.c if (!RPC_IS_ACTIVATED(task)) task 848 net/sunrpc/sched.c set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate); task 850 net/sunrpc/sched.c queue = READ_ONCE(task->tk_waitqueue); task 852 net/sunrpc/sched.c rpc_wake_up_queued_task_set_status(queue, task, -ERESTARTSYS); task 855 net/sunrpc/sched.c void rpc_exit(struct rpc_task *task, int status) task 857 net/sunrpc/sched.c task->tk_status = status; task 858 net/sunrpc/sched.c task->tk_action = rpc_exit_task; task 859 net/sunrpc/sched.c rpc_wake_up_queued_task(task->tk_waitqueue, task); task 872 net/sunrpc/sched.c static void __rpc_execute(struct rpc_task *task) task 875 net/sunrpc/sched.c int task_is_async = RPC_IS_ASYNC(task); task 879 net/sunrpc/sched.c task->tk_pid, task->tk_flags); task 881 net/sunrpc/sched.c WARN_ON_ONCE(RPC_IS_QUEUED(task)); task 882 net/sunrpc/sched.c if (RPC_IS_QUEUED(task)) task 895 net/sunrpc/sched.c do_action = task->tk_action; task 896 net/sunrpc/sched.c if (task->tk_callback) { task 897 net/sunrpc/sched.c do_action = 
task->tk_callback; task 898 net/sunrpc/sched.c task->tk_callback = NULL; task 902 net/sunrpc/sched.c trace_rpc_task_run_action(task, do_action); task 903 net/sunrpc/sched.c do_action(task); task 908 net/sunrpc/sched.c if (!RPC_IS_QUEUED(task)) task 914 net/sunrpc/sched.c if (RPC_SIGNALLED(task)) { task 915 net/sunrpc/sched.c task->tk_rpc_status = -ERESTARTSYS; task 916 net/sunrpc/sched.c rpc_exit(task, -ERESTARTSYS); task 928 net/sunrpc/sched.c queue = task->tk_waitqueue; task 930 net/sunrpc/sched.c if (!RPC_IS_QUEUED(task)) { task 934 net/sunrpc/sched.c rpc_clear_running(task); task 940 net/sunrpc/sched.c dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid); task 941 net/sunrpc/sched.c status = out_of_line_wait_on_bit(&task->tk_runstate, task 951 net/sunrpc/sched.c dprintk("RPC: %5u got signal\n", task->tk_pid); task 952 net/sunrpc/sched.c set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate); task 953 net/sunrpc/sched.c task->tk_rpc_status = -ERESTARTSYS; task 954 net/sunrpc/sched.c rpc_exit(task, -ERESTARTSYS); task 956 net/sunrpc/sched.c dprintk("RPC: %5u sync task resuming\n", task->tk_pid); task 959 net/sunrpc/sched.c dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status, task 960 net/sunrpc/sched.c task->tk_status); task 962 net/sunrpc/sched.c rpc_release_task(task); task 974 net/sunrpc/sched.c void rpc_execute(struct rpc_task *task) task 976 net/sunrpc/sched.c bool is_async = RPC_IS_ASYNC(task); task 978 net/sunrpc/sched.c rpc_set_active(task); task 979 net/sunrpc/sched.c rpc_make_runnable(rpciod_workqueue, task); task 981 net/sunrpc/sched.c __rpc_execute(task); task 1009 net/sunrpc/sched.c int rpc_malloc(struct rpc_task *task) task 1011 net/sunrpc/sched.c struct rpc_rqst *rqst = task->tk_rqstp; task 1016 net/sunrpc/sched.c if (RPC_IS_SWAPPER(task)) task 1030 net/sunrpc/sched.c task->tk_pid, size, buf); task 1042 net/sunrpc/sched.c void rpc_free(struct rpc_task *task) task 1044 net/sunrpc/sched.c void *buffer = task->tk_rqstp->rq_buffer; task 1064 net/sunrpc/sched.c static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data) task 1066 net/sunrpc/sched.c memset(task, 0, sizeof(*task)); task 1067 net/sunrpc/sched.c atomic_set(&task->tk_count, 1); task 1068 net/sunrpc/sched.c task->tk_flags = task_setup_data->flags; task 1069 net/sunrpc/sched.c task->tk_ops = task_setup_data->callback_ops; task 1070 net/sunrpc/sched.c task->tk_calldata = task_setup_data->callback_data; task 1071 net/sunrpc/sched.c INIT_LIST_HEAD(&task->tk_task); task 1073 net/sunrpc/sched.c task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW; task 1074 net/sunrpc/sched.c task->tk_owner = current->tgid; task 1077 net/sunrpc/sched.c task->tk_workqueue = task_setup_data->workqueue; task 1079 net/sunrpc/sched.c task->tk_xprt = rpc_task_get_xprt(task_setup_data->rpc_client, task 1082 net/sunrpc/sched.c task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred); task 1084 net/sunrpc/sched.c if (task->tk_ops->rpc_call_prepare != NULL) task 1085 net/sunrpc/sched.c task->tk_action = rpc_prepare_task; task 1087 net/sunrpc/sched.c rpc_init_task_statistics(task); task 1104 net/sunrpc/sched.c struct rpc_task *task = setup_data->task; task 1107 net/sunrpc/sched.c if (task == NULL) { task 1108 net/sunrpc/sched.c task = rpc_alloc_task(); task 1112 net/sunrpc/sched.c rpc_init_task(task, setup_data); task 1113 net/sunrpc/sched.c task->tk_flags |= flags; task 1114 net/sunrpc/sched.c dprintk("RPC: allocated task %p\n", task); task 1115 net/sunrpc/sched.c return task; task 1137 
net/sunrpc/sched.c static void rpc_free_task(struct rpc_task *task) task 1139 net/sunrpc/sched.c unsigned short tk_flags = task->tk_flags; task 1141 net/sunrpc/sched.c put_rpccred(task->tk_op_cred); task 1142 net/sunrpc/sched.c rpc_release_calldata(task->tk_ops, task->tk_calldata); task 1145 net/sunrpc/sched.c dprintk("RPC: %5u freeing task\n", task->tk_pid); task 1146 net/sunrpc/sched.c mempool_free(task, rpc_task_mempool); task 1158 net/sunrpc/sched.c static void rpc_release_resources_task(struct rpc_task *task) task 1160 net/sunrpc/sched.c xprt_release(task); task 1161 net/sunrpc/sched.c if (task->tk_msg.rpc_cred) { task 1162 net/sunrpc/sched.c put_cred(task->tk_msg.rpc_cred); task 1163 net/sunrpc/sched.c task->tk_msg.rpc_cred = NULL; task 1165 net/sunrpc/sched.c rpc_task_release_client(task); task 1168 net/sunrpc/sched.c static void rpc_final_put_task(struct rpc_task *task, task 1172 net/sunrpc/sched.c INIT_WORK(&task->u.tk_work, rpc_async_release); task 1173 net/sunrpc/sched.c queue_work(q, &task->u.tk_work); task 1175 net/sunrpc/sched.c rpc_free_task(task); task 1178 net/sunrpc/sched.c static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q) task 1180 net/sunrpc/sched.c if (atomic_dec_and_test(&task->tk_count)) { task 1181 net/sunrpc/sched.c rpc_release_resources_task(task); task 1182 net/sunrpc/sched.c rpc_final_put_task(task, q); task 1186 net/sunrpc/sched.c void rpc_put_task(struct rpc_task *task) task 1188 net/sunrpc/sched.c rpc_do_put_task(task, NULL); task 1192 net/sunrpc/sched.c void rpc_put_task_async(struct rpc_task *task) task 1194 net/sunrpc/sched.c rpc_do_put_task(task, task->tk_workqueue); task 1198 net/sunrpc/sched.c static void rpc_release_task(struct rpc_task *task) task 1200 net/sunrpc/sched.c dprintk("RPC: %5u release task\n", task->tk_pid); task 1202 net/sunrpc/sched.c WARN_ON_ONCE(RPC_IS_QUEUED(task)); task 1204 net/sunrpc/sched.c rpc_release_resources_task(task); task 1211 net/sunrpc/sched.c if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) { task 1213 net/sunrpc/sched.c if (!rpc_complete_task(task)) task 1216 net/sunrpc/sched.c if (!atomic_dec_and_test(&task->tk_count)) task 1219 net/sunrpc/sched.c rpc_final_put_task(task, task->tk_workqueue); task 150 net/sunrpc/stats.c void rpc_count_iostats_metrics(const struct rpc_task *task, task 153 net/sunrpc/stats.c struct rpc_rqst *req = task->tk_rqstp; task 165 net/sunrpc/stats.c op_metrics->om_timeouts += task->tk_timeouts; task 172 net/sunrpc/stats.c backlog = ktime_sub(req->rq_xtime, task->tk_start); task 178 net/sunrpc/stats.c execute = ktime_sub(now, task->tk_start); task 180 net/sunrpc/stats.c if (task->tk_status < 0) task 196 net/sunrpc/stats.c void rpc_count_iostats(const struct rpc_task *task, struct rpc_iostats *stats) task 198 net/sunrpc/stats.c rpc_count_iostats_metrics(task, task 199 net/sunrpc/stats.c &stats[task->tk_msg.rpc_proc->p_statidx]); task 303 net/sunrpc/svc.c svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx) task 319 net/sunrpc/svc.c set_cpus_allowed_ptr(task, cpumask_of(node)); task 324 net/sunrpc/svc.c set_cpus_allowed_ptr(task, cpumask_of_node(node)); task 672 net/sunrpc/svc.c struct task_struct *task = NULL; task 699 net/sunrpc/svc.c task = rqstp->rq_task; task 703 net/sunrpc/svc.c return task; task 711 net/sunrpc/svc.c struct task_struct *task; task 726 net/sunrpc/svc.c task = kthread_create_on_node(serv->sv_ops->svo_function, rqstp, task 728 net/sunrpc/svc.c if (IS_ERR(task)) { task 731 net/sunrpc/svc.c return PTR_ERR(task); task 734 
net/sunrpc/svc.c rqstp->rq_task = task; task 736 net/sunrpc/svc.c svc_pool_map_set_cpumask(task, chosen_pool->sp_id); task 739 net/sunrpc/svc.c wake_up_process(task); task 750 net/sunrpc/svc.c struct task_struct *task; task 755 net/sunrpc/svc.c task = choose_victim(serv, pool, &state); task 756 net/sunrpc/svc.c if (task == NULL) task 758 net/sunrpc/svc.c send_sig(SIGINT, task, 1); task 803 net/sunrpc/svc.c struct task_struct *task; task 808 net/sunrpc/svc.c task = choose_victim(serv, pool, &state); task 809 net/sunrpc/svc.c if (task == NULL) task 811 net/sunrpc/svc.c kthread_stop(task); task 1555 net/sunrpc/svc.c struct rpc_task *task; task 1607 net/sunrpc/svc.c task = rpc_run_bc_task(req); task 1608 net/sunrpc/svc.c if (IS_ERR(task)) { task 1609 net/sunrpc/svc.c error = PTR_ERR(task); task 1613 net/sunrpc/svc.c WARN_ON_ONCE(atomic_read(&task->tk_count) != 1); task 1614 net/sunrpc/svc.c error = task->tk_status; task 1615 net/sunrpc/svc.c rpc_put_task(task); task 202 net/sunrpc/xprt.c int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task) task 204 net/sunrpc/xprt.c struct rpc_rqst *req = task->tk_rqstp; task 207 net/sunrpc/xprt.c if (task == xprt->snd_task) task 213 net/sunrpc/xprt.c xprt->snd_task = task; task 221 net/sunrpc/xprt.c task->tk_pid, xprt); task 222 net/sunrpc/xprt.c task->tk_status = -EAGAIN; task 223 net/sunrpc/xprt.c if (RPC_IS_SOFT(task)) task 224 net/sunrpc/xprt.c rpc_sleep_on_timeout(&xprt->sending, task, NULL, task 227 net/sunrpc/xprt.c rpc_sleep_on(&xprt->sending, task, NULL); task 266 net/sunrpc/xprt.c int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task) task 268 net/sunrpc/xprt.c struct rpc_rqst *req = task->tk_rqstp; task 271 net/sunrpc/xprt.c if (task == xprt->snd_task) task 276 net/sunrpc/xprt.c xprt->snd_task = task; task 282 net/sunrpc/xprt.c xprt->snd_task = task; task 288 net/sunrpc/xprt.c dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt); task 289 net/sunrpc/xprt.c task->tk_status = -EAGAIN; task 290 net/sunrpc/xprt.c if (RPC_IS_SOFT(task)) task 291 net/sunrpc/xprt.c rpc_sleep_on_timeout(&xprt->sending, task, NULL, task 294 net/sunrpc/xprt.c rpc_sleep_on(&xprt->sending, task, NULL); task 299 net/sunrpc/xprt.c static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) task 303 net/sunrpc/xprt.c if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task) task 306 net/sunrpc/xprt.c retval = xprt->ops->reserve_xprt(xprt, task); task 311 net/sunrpc/xprt.c static bool __xprt_lock_write_func(struct rpc_task *task, void *data) task 315 net/sunrpc/xprt.c xprt->snd_task = task; task 354 net/sunrpc/xprt.c void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task) task 356 net/sunrpc/xprt.c if (xprt->snd_task == task) { task 371 net/sunrpc/xprt.c void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task) task 373 net/sunrpc/xprt.c if (xprt->snd_task == task) { task 380 net/sunrpc/xprt.c static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task) task 382 net/sunrpc/xprt.c if (xprt->snd_task != task) task 385 net/sunrpc/xprt.c xprt->ops->release_xprt(xprt, task); task 451 net/sunrpc/xprt.c void xprt_release_rqst_cong(struct rpc_task *task) task 453 net/sunrpc/xprt.c struct rpc_rqst *req = task->tk_rqstp; task 495 net/sunrpc/xprt.c void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result) task 497 net/sunrpc/xprt.c struct rpc_rqst *req = task->tk_rqstp; task 606 net/sunrpc/xprt.c static void xprt_init_majortimeo(struct 
rpc_task *task, struct rpc_rqst *req) task 614 net/sunrpc/xprt.c time_init = xprt_abs_ktime_to_jiffies(task->tk_start); task 615 net/sunrpc/xprt.c req->rq_timeout = task->tk_client->cl_timeout->to_initval; task 713 net/sunrpc/xprt.c xprt_request_retransmit_after_disconnect(struct rpc_task *task) task 715 net/sunrpc/xprt.c struct rpc_rqst *req = task->tk_rqstp; task 780 net/sunrpc/xprt.c struct rpc_task *task, task 788 net/sunrpc/xprt.c if (xprt->snd_task != task) task 817 net/sunrpc/xprt.c void xprt_connect(struct rpc_task *task) task 819 net/sunrpc/xprt.c struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; task 821 net/sunrpc/xprt.c dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid, task 825 net/sunrpc/xprt.c task->tk_status = -EAGAIN; task 828 net/sunrpc/xprt.c if (!xprt_lock_write(xprt, task)) task 835 net/sunrpc/xprt.c task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie; task 836 net/sunrpc/xprt.c rpc_sleep_on_timeout(&xprt->pending, task, NULL, task 837 net/sunrpc/xprt.c xprt_request_timeout(task->tk_rqstp)); task 846 net/sunrpc/xprt.c xprt->ops->connect(xprt, task); task 849 net/sunrpc/xprt.c task->tk_status = 0; task 850 net/sunrpc/xprt.c rpc_wake_up_queued_task(&xprt->pending, task); task 853 net/sunrpc/xprt.c xprt_release_write(xprt, task); task 1025 net/sunrpc/xprt.c xprt_request_data_received(struct rpc_task *task) task 1027 net/sunrpc/xprt.c return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) && task 1028 net/sunrpc/xprt.c READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0; task 1032 net/sunrpc/xprt.c xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req) task 1034 net/sunrpc/xprt.c return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) && task 1035 net/sunrpc/xprt.c READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0; task 1044 net/sunrpc/xprt.c xprt_request_enqueue_receive(struct rpc_task *task) task 1046 net/sunrpc/xprt.c struct rpc_rqst *req = task->tk_rqstp; task 1049 net/sunrpc/xprt.c if (!xprt_request_need_enqueue_receive(task, req)) task 1052 net/sunrpc/xprt.c xprt_request_prepare(task->tk_rqstp); task 1061 net/sunrpc/xprt.c set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate); task 1075 net/sunrpc/xprt.c xprt_request_dequeue_receive_locked(struct rpc_task *task) task 1077 net/sunrpc/xprt.c struct rpc_rqst *req = task->tk_rqstp; task 1079 net/sunrpc/xprt.c if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) task 1089 net/sunrpc/xprt.c void xprt_update_rtt(struct rpc_task *task) task 1091 net/sunrpc/xprt.c struct rpc_rqst *req = task->tk_rqstp; task 1092 net/sunrpc/xprt.c struct rpc_rtt *rtt = task->tk_client->cl_rtt; task 1093 net/sunrpc/xprt.c unsigned int timer = task->tk_msg.rpc_proc->p_timer; task 1111 net/sunrpc/xprt.c void xprt_complete_rqst(struct rpc_task *task, int copied) task 1113 net/sunrpc/xprt.c struct rpc_rqst *req = task->tk_rqstp; task 1117 net/sunrpc/xprt.c task->tk_pid, ntohl(req->rq_xid), copied); task 1127 net/sunrpc/xprt.c xprt_request_dequeue_receive_locked(task); task 1128 net/sunrpc/xprt.c rpc_wake_up_queued_task(&xprt->pending, task); task 1132 net/sunrpc/xprt.c static void xprt_timer(struct rpc_task *task) task 1134 net/sunrpc/xprt.c struct rpc_rqst *req = task->tk_rqstp; task 1137 net/sunrpc/xprt.c if (task->tk_status != -ETIMEDOUT) task 1140 net/sunrpc/xprt.c trace_xprt_timer(xprt, req->rq_xid, task->tk_status); task 1143 net/sunrpc/xprt.c xprt->ops->timer(xprt, task); task 1145 net/sunrpc/xprt.c task->tk_status = 0; task 1157 net/sunrpc/xprt.c void 
xprt_wait_for_reply_request_def(struct rpc_task *task) task 1159 net/sunrpc/xprt.c struct rpc_rqst *req = task->tk_rqstp; task 1161 net/sunrpc/xprt.c rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer, task 1173 net/sunrpc/xprt.c void xprt_wait_for_reply_request_rtt(struct rpc_task *task) task 1175 net/sunrpc/xprt.c int timer = task->tk_msg.rpc_proc->p_timer; task 1176 net/sunrpc/xprt.c struct rpc_clnt *clnt = task->tk_client; task 1178 net/sunrpc/xprt.c struct rpc_rqst *req = task->tk_rqstp; task 1186 net/sunrpc/xprt.c rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer, task 1196 net/sunrpc/xprt.c void xprt_request_wait_receive(struct rpc_task *task) task 1198 net/sunrpc/xprt.c struct rpc_rqst *req = task->tk_rqstp; task 1201 net/sunrpc/xprt.c if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) task 1209 net/sunrpc/xprt.c if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) { task 1210 net/sunrpc/xprt.c xprt->ops->wait_for_reply_request(task); task 1216 net/sunrpc/xprt.c if (xprt_request_retransmit_after_disconnect(task)) task 1218 net/sunrpc/xprt.c task, -ENOTCONN); task 1224 net/sunrpc/xprt.c xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req) task 1226 net/sunrpc/xprt.c return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate); task 1236 net/sunrpc/xprt.c xprt_request_enqueue_transmit(struct rpc_task *task) task 1238 net/sunrpc/xprt.c struct rpc_rqst *pos, *req = task->tk_rqstp; task 1241 net/sunrpc/xprt.c if (xprt_request_need_enqueue_transmit(task, req)) { task 1256 net/sunrpc/xprt.c trace_xprt_enq_xmit(task, 1); task 1259 net/sunrpc/xprt.c } else if (RPC_IS_SWAPPER(task)) { task 1268 net/sunrpc/xprt.c trace_xprt_enq_xmit(task, 2); task 1273 net/sunrpc/xprt.c if (pos->rq_task->tk_owner != task->tk_owner) task 1277 net/sunrpc/xprt.c trace_xprt_enq_xmit(task, 3); task 1283 net/sunrpc/xprt.c trace_xprt_enq_xmit(task, 4); task 1285 net/sunrpc/xprt.c set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate); task 1298 net/sunrpc/xprt.c xprt_request_dequeue_transmit_locked(struct rpc_task *task) task 1300 net/sunrpc/xprt.c struct rpc_rqst *req = task->tk_rqstp; task 1302 net/sunrpc/xprt.c if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) task 1323 net/sunrpc/xprt.c xprt_request_dequeue_transmit(struct rpc_task *task) task 1325 net/sunrpc/xprt.c struct rpc_rqst *req = task->tk_rqstp; task 1329 net/sunrpc/xprt.c xprt_request_dequeue_transmit_locked(task); task 1341 net/sunrpc/xprt.c xprt_request_dequeue_xprt(struct rpc_task *task) task 1343 net/sunrpc/xprt.c struct rpc_rqst *req = task->tk_rqstp; task 1346 net/sunrpc/xprt.c if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) || task 1347 net/sunrpc/xprt.c test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) || task 1350 net/sunrpc/xprt.c xprt_request_dequeue_transmit_locked(task); task 1351 net/sunrpc/xprt.c xprt_request_dequeue_receive_locked(task); task 1353 net/sunrpc/xprt.c set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate); task 1357 net/sunrpc/xprt.c clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate); task 1386 net/sunrpc/xprt.c xprt_request_need_retransmit(struct rpc_task *task) task 1388 net/sunrpc/xprt.c return xprt_request_retransmit_after_disconnect(task); task 1396 net/sunrpc/xprt.c bool xprt_prepare_transmit(struct rpc_task *task) task 1398 net/sunrpc/xprt.c struct rpc_rqst *req = task->tk_rqstp; task 1401 net/sunrpc/xprt.c dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid); task 1403 net/sunrpc/xprt.c if (!xprt_lock_write(xprt, task)) { task 1405 net/sunrpc/xprt.c 
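Note: the xprt_update_rtt() and xprt_wait_for_reply_request_rtt() entries above show per-procedure timers feeding a round-trip-time estimator that sets the reply timeout. The sketch below uses the classic smoothed-RTT form (srtt plus a multiple of the variance, as in RFC 6298); the gains and the 200 ms floor are illustrative choices, not necessarily the kernel's constants.

```c
/* Smoothed-RTT timeout sketch of the kind of estimate xprt_update_rtt()
 * maintains per procedure; parameters here are illustrative. */
#include <stdio.h>

struct rtt_est {
	double srtt;	/* smoothed round-trip time, ms */
	double rttvar;	/* round-trip time variance, ms */
};

static void rtt_update(struct rtt_est *e, double sample_ms)
{
	if (e->srtt == 0.0) {			/* first sample seeds both */
		e->srtt = sample_ms;
		e->rttvar = sample_ms / 2.0;
		return;
	}
	double err = sample_ms - e->srtt;
	e->srtt += err / 8.0;					/* gain 1/8 */
	e->rttvar += ((err < 0 ? -err : err) - e->rttvar) / 4.0; /* gain 1/4 */
}

static double rtt_timeout(const struct rtt_est *e)
{
	double to = e->srtt + 4.0 * e->rttvar;
	return to < 200.0 ? 200.0 : to;	/* floor, in the spirit of to_initval */
}

int main(void)
{
	struct rtt_est e = { 0 };
	double samples[] = { 30, 45, 28, 90, 33 };

	for (int i = 0; i < 5; i++) {
		rtt_update(&e, samples[i]);
		printf("sample %.0f ms -> timeout %.1f ms\n",
		       samples[i], rtt_timeout(&e));
	}
	return 0;
}
```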
if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) task 1407 net/sunrpc/xprt.c task, 0); task 1414 net/sunrpc/xprt.c void xprt_end_transmit(struct rpc_task *task) task 1416 net/sunrpc/xprt.c xprt_release_write(task->tk_rqstp->rq_xprt, task); task 1433 net/sunrpc/xprt.c struct rpc_task *task = req->rq_task; task 1435 net/sunrpc/xprt.c int is_retrans = RPC_WAS_SENT(task); task 1439 net/sunrpc/xprt.c if (xprt_request_data_received(task)) { task 1444 net/sunrpc/xprt.c if (rpcauth_xmit_need_reencode(task)) { task 1448 net/sunrpc/xprt.c if (RPC_SIGNALLED(task)) { task 1470 net/sunrpc/xprt.c task->tk_client->cl_stats->rpcretrans++; task 1474 net/sunrpc/xprt.c task->tk_flags |= RPC_TASK_SENT; task 1487 net/sunrpc/xprt.c xprt_request_dequeue_transmit(task); task 1488 net/sunrpc/xprt.c rpc_wake_up_queued_task_set_status(&xprt->sending, task, status); task 1502 net/sunrpc/xprt.c xprt_transmit(struct rpc_task *task) task 1504 net/sunrpc/xprt.c struct rpc_rqst *next, *req = task->tk_rqstp; task 1514 net/sunrpc/xprt.c status = xprt_request_transmit(next, task); task 1521 net/sunrpc/xprt.c if (!xprt_request_data_received(task) || task 1522 net/sunrpc/xprt.c test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) task 1524 net/sunrpc/xprt.c } else if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) task 1525 net/sunrpc/xprt.c task->tk_status = status; task 1531 net/sunrpc/xprt.c static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task) task 1534 net/sunrpc/xprt.c rpc_sleep_on(&xprt->backlog, task, NULL); task 1543 net/sunrpc/xprt.c static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task) task 1551 net/sunrpc/xprt.c rpc_sleep_on(&xprt->backlog, task, NULL); task 1587 net/sunrpc/xprt.c void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task) task 1604 net/sunrpc/xprt.c task->tk_status = -ENOMEM; task 1607 net/sunrpc/xprt.c xprt_add_backlog(xprt, task); task 1611 net/sunrpc/xprt.c task->tk_status = -EAGAIN; task 1620 net/sunrpc/xprt.c task->tk_status = 0; task 1621 net/sunrpc/xprt.c task->tk_rqstp = req; task 1715 net/sunrpc/xprt.c xprt_request_init(struct rpc_task *task) task 1717 net/sunrpc/xprt.c struct rpc_xprt *xprt = task->tk_xprt; task 1718 net/sunrpc/xprt.c struct rpc_rqst *req = task->tk_rqstp; task 1720 net/sunrpc/xprt.c req->rq_task = task; task 1732 net/sunrpc/xprt.c xprt_init_majortimeo(task, req); task 1733 net/sunrpc/xprt.c dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid, task 1738 net/sunrpc/xprt.c xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task) task 1740 net/sunrpc/xprt.c xprt->ops->alloc_slot(xprt, task); task 1741 net/sunrpc/xprt.c if (task->tk_rqstp != NULL) task 1742 net/sunrpc/xprt.c xprt_request_init(task); task 1753 net/sunrpc/xprt.c void xprt_reserve(struct rpc_task *task) task 1755 net/sunrpc/xprt.c struct rpc_xprt *xprt = task->tk_xprt; task 1757 net/sunrpc/xprt.c task->tk_status = 0; task 1758 net/sunrpc/xprt.c if (task->tk_rqstp != NULL) task 1761 net/sunrpc/xprt.c task->tk_status = -EAGAIN; task 1762 net/sunrpc/xprt.c if (!xprt_throttle_congested(xprt, task)) task 1763 net/sunrpc/xprt.c xprt_do_reserve(xprt, task); task 1775 net/sunrpc/xprt.c void xprt_retry_reserve(struct rpc_task *task) task 1777 net/sunrpc/xprt.c struct rpc_xprt *xprt = task->tk_xprt; task 1779 net/sunrpc/xprt.c task->tk_status = 0; task 1780 net/sunrpc/xprt.c if (task->tk_rqstp != NULL) task 1783 net/sunrpc/xprt.c task->tk_status = -EAGAIN; task 1784 net/sunrpc/xprt.c xprt_do_reserve(xprt, task); task 1792 net/sunrpc/xprt.c void 
xprt_release(struct rpc_task *task) task 1795 net/sunrpc/xprt.c struct rpc_rqst *req = task->tk_rqstp; task 1798 net/sunrpc/xprt.c if (task->tk_client) { task 1799 net/sunrpc/xprt.c xprt = task->tk_xprt; task 1800 net/sunrpc/xprt.c xprt_release_write(xprt, task); task 1806 net/sunrpc/xprt.c xprt_request_dequeue_xprt(task); task 1808 net/sunrpc/xprt.c xprt->ops->release_xprt(xprt, task); task 1810 net/sunrpc/xprt.c xprt->ops->release_request(task); task 1814 net/sunrpc/xprt.c xprt->ops->buf_free(task); task 1820 net/sunrpc/xprt.c task->tk_rqstp = NULL; task 1824 net/sunrpc/xprt.c dprintk("RPC: %5u release request %p\n", task->tk_pid, req); task 1833 net/sunrpc/xprt.c xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task) task 1837 net/sunrpc/xprt.c task->tk_rqstp = req; task 1838 net/sunrpc/xprt.c req->rq_task = task; task 137 net/sunrpc/xprtrdma/svc_rdma_backchannel.c xprt_rdma_bc_allocate(struct rpc_task *task) task 139 net/sunrpc/xprtrdma/svc_rdma_backchannel.c struct rpc_rqst *rqst = task->tk_rqstp; task 163 net/sunrpc/xprtrdma/svc_rdma_backchannel.c xprt_rdma_bc_free(struct rpc_task *task) task 165 net/sunrpc/xprtrdma/svc_rdma_backchannel.c struct rpc_rqst *rqst = task->tk_rqstp; task 487 net/sunrpc/xprtrdma/transport.c xprt_rdma_timer(struct rpc_xprt *xprt, struct rpc_task *task) task 537 net/sunrpc/xprtrdma/transport.c xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task) task 563 net/sunrpc/xprtrdma/transport.c xprt_rdma_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task) task 571 net/sunrpc/xprtrdma/transport.c task->tk_rqstp = &req->rl_slot; task 572 net/sunrpc/xprtrdma/transport.c task->tk_status = 0; task 577 net/sunrpc/xprtrdma/transport.c rpc_sleep_on(&xprt->backlog, task, NULL); task 578 net/sunrpc/xprtrdma/transport.c task->tk_status = -EAGAIN; task 621 net/sunrpc/xprtrdma/transport.c xprt_rdma_allocate(struct rpc_task *task) task 623 net/sunrpc/xprtrdma/transport.c struct rpc_rqst *rqst = task->tk_rqstp; task 629 net/sunrpc/xprtrdma/transport.c if (RPC_IS_SWAPPER(task)) task 641 net/sunrpc/xprtrdma/transport.c trace_xprtrdma_op_allocate(task, req); task 645 net/sunrpc/xprtrdma/transport.c trace_xprtrdma_op_allocate(task, NULL); task 656 net/sunrpc/xprtrdma/transport.c xprt_rdma_free(struct rpc_task *task) task 658 net/sunrpc/xprtrdma/transport.c struct rpc_rqst *rqst = task->tk_rqstp; task 662 net/sunrpc/xprtrdma/transport.c trace_xprtrdma_op_free(task, req); task 1372 net/sunrpc/xprtsock.c struct rpc_task *task; task 1397 net/sunrpc/xprtsock.c task = rovr->rq_task; task 1411 net/sunrpc/xprtsock.c xprt_adjust_cwnd(xprt, task, copied); task 1414 net/sunrpc/xprtsock.c xprt_complete_rqst(task, copied); task 1686 net/sunrpc/xprtsock.c static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task) task 1689 net/sunrpc/xprtsock.c xprt_adjust_cwnd(xprt, task, -ETIMEDOUT); task 1832 net/sunrpc/xprtsock.c static void xs_local_rpcbind(struct rpc_task *task) task 1834 net/sunrpc/xprtsock.c xprt_set_bound(task->tk_xprt); task 2026 net/sunrpc/xprtsock.c static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task) task 2031 net/sunrpc/xprtsock.c if (RPC_IS_ASYNC(task)) { task 2041 net/sunrpc/xprtsock.c task->tk_rpc_status = -ENOTCONN; task 2042 net/sunrpc/xprtsock.c rpc_exit(task, -ENOTCONN); task 2046 net/sunrpc/xprtsock.c if (ret && !RPC_IS_SOFTCONN(task)) task 2440 net/sunrpc/xprtsock.c static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task) task 2445 net/sunrpc/xprtsock.c WARN_ON_ONCE(!xprt_lock_connect(xprt, task, 
task 2602 net/sunrpc/xprtsock.c static int bc_malloc(struct rpc_task *task)
task 2604 net/sunrpc/xprtsock.c struct rpc_rqst *rqst = task->tk_rqstp;
task 2630 net/sunrpc/xprtsock.c static void bc_free(struct rpc_task *task)
task 2632 net/sunrpc/xprtsock.c void *buffer = task->tk_rqstp->rq_buffer;
task 269 samples/bpf/test_lru_dist.c static void do_test_lru_dist(int task, void *data)
task 278 samples/bpf/test_lru_dist.c unsigned long long key_offset = task * dist_key_counts;
task 300 samples/bpf/test_lru_dist.c task, pfect_lru.nr_unique, dist_key_counts, nr_misses,
task 303 samples/bpf/test_lru_dist.c task, pfect_lru.nr_unique, pfect_lru.total,
task 422 samples/bpf/test_lru_dist.c static void do_test_parallel_lru_loss(int task, void *data)
task 434 samples/bpf/test_lru_dist.c stable_base = task * nr_repeats * 2 + 1;
task 465 samples/bpf/test_lru_dist.c printf(" task:%d nr_losses:%u\n", task, nr_losses);
task 35 samples/bpf/tracex2_user.c struct task t;
task 39 samples/bpf/tracex2_user.c #define SIZE sizeof(struct task)
task 41 samples/bpf/tracex2_user.c static void print_hist_for_pid(int fd, void *task)
task 54 samples/bpf/tracex2_user.c if (memcmp(&next_key, task, SIZE)) {
task 84 samples/bpf/tracex2_user.c static struct task tasks[1024];
task 74 security/apparmor/include/cred.h static inline struct aa_label *__aa_task_raw_label(struct task_struct *task)
task 76 security/apparmor/include/cred.h return aa_cred_raw_label(__task_cred(task));
task 36 security/apparmor/include/resource.h int aa_task_setrlimit(struct aa_label *label, struct task_struct *task,
task 13 security/apparmor/include/task.h static inline struct aa_task_ctx *task_ctx(struct task_struct *task)
task 15 security/apparmor/include/task.h return task->security + apparmor_blob_sizes.lbs_task;
task 36 security/apparmor/include/task.h struct aa_label *aa_get_task_label(struct task_struct *task);
task 89 security/apparmor/lsm.c static void apparmor_task_free(struct task_struct *task)
task 92 security/apparmor/lsm.c aa_free_task_ctx(task_ctx(task));
task 95 security/apparmor/lsm.c static int apparmor_task_alloc(struct task_struct *task,
task 98 security/apparmor/lsm.c struct aa_task_ctx *new = task_ctx(task);
task 575 security/apparmor/lsm.c static int apparmor_getprocattr(struct task_struct *task, char *name,
task 580 security/apparmor/lsm.c const struct cred *cred = get_task_cred(task);
task 716 security/apparmor/lsm.c static int apparmor_task_setrlimit(struct task_struct *task,
task 723 security/apparmor/lsm.c error = aa_task_setrlimit(label, task, resource, new_rlim);
task 104 security/apparmor/resource.c int aa_task_setrlimit(struct aa_label *label, struct task_struct *task,
task 112 security/apparmor/resource.c peer = aa_get_newest_cred_label(__task_cred(task));
task 24 security/apparmor/task.c struct aa_label *aa_get_task_label(struct task_struct *task)
task 29 security/apparmor/task.c p = aa_get_newest_label(__aa_task_raw_label(task));
task 51 security/device_cgroup.c static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
task 53 security/device_cgroup.c return css_to_devcgroup(task_css(task, devices_cgrp_id));
task 279 security/security.c static void __init lsm_early_task(struct task_struct *task);
task 570 security/security.c static int lsm_task_alloc(struct task_struct *task)
task 573 security/security.c task->security = NULL;
task 577 security/security.c task->security = kzalloc(blob_sizes.lbs_task, GFP_KERNEL);
task 578 security/security.c if (task->security == NULL)
task 631 security/security.c static void __init lsm_early_task(struct task_struct *task)
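The security/ entries above all lean on one allocation pattern: the LSM core kzalloc()s a single per-task blob of blob_sizes.lbs_task bytes, and each module (AppArmor's task_ctx() here, TOMOYO's tomoyo_task() further down) resolves its own state at a fixed offset into it. A sketch of that pattern under the indexed names (sketch_ helpers are ours):

    /* The core allocates one shared blob per task ... */
    static int sketch_task_blob_alloc(struct task_struct *task)
    {
        task->security = NULL;
        if (blob_sizes.lbs_task == 0)           /* no LSM asked for task state */
            return 0;
        task->security = kzalloc(blob_sizes.lbs_task, GFP_KERNEL);
        return task->security ? 0 : -ENOMEM;
    }

    /* ... and each LSM indexes into it at its boot-assigned offset
     * (apparmor_blob_sizes.lbs_task holds AppArmor's offset). */
    static inline struct aa_task_ctx *sketch_task_ctx(struct task_struct *task)
    {
        return task->security + apparmor_blob_sizes.lbs_task;
    }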
task 633 security/security.c int rc = lsm_task_alloc(task);
task 1504 security/security.c int security_task_alloc(struct task_struct *task, unsigned long clone_flags)
task 1506 security/security.c int rc = lsm_task_alloc(task);
task 1510 security/security.c rc = call_int_hook(task_alloc, 0, task, clone_flags);
task 1512 security/security.c security_task_free(task);
task 1516 security/security.c void security_task_free(struct task_struct *task)
task 1518 security/security.c call_void_hook(task_free, task);
task 1520 security/security.c kfree(task->security);
task 1521 security/security.c task->security = NULL;
task 231 security/selinux/hooks.c static inline u32 task_sid(const struct task_struct *task)
task 236 security/selinux/hooks.c sid = cred_sid(__task_cred(task));
task 3883 security/selinux/hooks.c static int selinux_task_alloc(struct task_struct *task,
task 328 security/smack/smack_lsm.c static void init_task_smack(struct task_smack *tsp, struct smack_known *task,
task 331 security/smack/smack_lsm.c tsp->smk_task = task;
task 941 security/tomoyo/common.c const struct task_struct *task = current;
task 948 security/tomoyo/common.c (!uid_eq(task->cred->uid, GLOBAL_ROOT_UID) ||
task 949 security/tomoyo/common.c !uid_eq(task->cred->euid, GLOBAL_ROOT_UID)))
task 492 security/tomoyo/common.h } task;
task 1220 security/tomoyo/common.h static inline struct tomoyo_task *tomoyo_task(struct task_struct *task)
task 1222 security/tomoyo/common.h return task->security + tomoyo_blob_sizes.lbs_task;
task 25 security/tomoyo/securityfs_if.c return !tomoyo_pathcmp(r->param.task.domainname, acl->domainname);
task 63 security/tomoyo/securityfs_if.c r.param.task.domainname = &name;
task 501 security/tomoyo/tomoyo.c static int tomoyo_task_alloc(struct task_struct *task,
task 505 security/tomoyo/tomoyo.c struct tomoyo_task *new = tomoyo_task(task);
task 518 security/tomoyo/tomoyo.c static void tomoyo_task_free(struct task_struct *task)
task 520 security/tomoyo/tomoyo.c struct tomoyo_task *s = tomoyo_task(task);
task 205 security/yama/yama_lsm.c static void yama_task_free(struct task_struct *task)
task 207 security/yama/yama_lsm.c yama_ptracer_del(task, task);
task 2431 sound/core/oss/pcm_oss.c static int snd_task_name(struct task_struct *task, char *name, size_t size)
task 2435 sound/core/oss/pcm_oss.c if (snd_BUG_ON(!task || !name || size < 2))
task 2437 sound/core/oss/pcm_oss.c for (idx = 0; idx < sizeof(task->comm) && idx + 1 < size; idx++)
task 2438 sound/core/oss/pcm_oss.c name[idx] = task->comm[idx];
task 59 sound/soc/fsl/fsl_esai.c struct tasklet_struct task;
task 90 sound/soc/fsl/fsl_esai.c tasklet_schedule(&esai_priv->task);
task 1037 sound/soc/fsl/fsl_esai.c tasklet_init(&esai_priv->task, fsl_esai_hw_reset,
task 1056 sound/soc/fsl/fsl_esai.c tasklet_kill(&esai_priv->task);
task 394 sound/soc/intel/atom/sst-mfld-dsp.h u8 task;
task 202 sound/soc/intel/atom/sst-mfld-platform-pcm.c str_params->task = map[index].task_id;
task 215 sound/soc/intel/atom/sst-mfld-platform-pcm.c str_params->task = map[index].task_id;
task 75 sound/soc/intel/atom/sst/sst_stream.c sst_drv_ctx->streams[str_id].task_id = str_params->task;
task 348 tools/include/uapi/linux/perf_event.h task : 1, /* trace fork/exit */
task 584 tools/perf/builtin-inject.c evsel->core.attr.task;
task 1047 tools/perf/builtin-kvm.c attr->task = 0;
task 704 tools/perf/builtin-report.c static struct task *tasks_list(struct task *task, struct machine *machine)
task 706 tools/perf/builtin-report.c struct thread *parent_thread, *thread = task->thread;
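security_task_alloc() and security_task_free(), indexed above, sequence the blob and the hook chain: allocate the blob first, then run every registered LSM's task_alloc hook, and unwind through security_task_free() if any hook fails. A condensed paraphrase of those indexed lines:

    int sketch_security_task_alloc(struct task_struct *task,
                                   unsigned long clone_flags)
    {
        int rc = lsm_task_alloc(task);          /* blob before hooks */

        if (rc)
            return rc;
        rc = call_int_hook(task_alloc, 0, task, clone_flags);
        if (unlikely(rc))
            security_task_free(task);           /* task_free hooks + kfree(blob) */
        return rc;
    }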
task 707 tools/perf/builtin-report.c struct task *parent_task;
task 710 tools/perf/builtin-report.c if (!list_empty(&task->list))
task 715 tools/perf/builtin-report.c return task;
task 722 tools/perf/builtin-report.c list_add_tail(&task->list, &parent_task->children);
task 752 tools/perf/builtin-report.c static void task__print_level(struct task *task, FILE *fp, int level)
task 754 tools/perf/builtin-report.c struct thread *thread = task->thread;
task 755 tools/perf/builtin-report.c struct task *child;
task 764 tools/perf/builtin-report.c if (!list_empty(&task->children)) {
task 765 tools/perf/builtin-report.c list_for_each_entry(child, &task->children, list)
task 774 tools/perf/builtin-report.c struct task *tasks, *task;
task 797 tools/perf/builtin-report.c task = tasks + itask++;
task 799 tools/perf/builtin-report.c task->thread = rb_entry(nd, struct thread, rb_node);
task 800 tools/perf/builtin-report.c INIT_LIST_HEAD(&task->children);
task 801 tools/perf/builtin-report.c INIT_LIST_HEAD(&task->list);
task 802 tools/perf/builtin-report.c thread__set_priv(task->thread, task);
task 812 tools/perf/builtin-report.c task = tasks + itask;
task 814 tools/perf/builtin-report.c if (!list_empty(&task->list))
task 817 tools/perf/builtin-report.c task = tasks_list(task, machine);
task 818 tools/perf/builtin-report.c if (IS_ERR(task)) {
task 821 tools/perf/builtin-report.c return PTR_ERR(task);
task 824 tools/perf/builtin-report.c if (task)
task 825 tools/perf/builtin-report.c list_add_tail(&task->list, &list);
task 830 tools/perf/builtin-report.c list_for_each_entry(task, &list, list)
task 831 tools/perf/builtin-report.c task__print_level(task, fp, 0);
task 355 tools/perf/builtin-sched.c get_new_event(struct task_desc *task, u64 timestamp)
task 358 tools/perf/builtin-sched.c unsigned long idx = task->nr_events;
task 364 tools/perf/builtin-sched.c task->nr_events++;
task 365 tools/perf/builtin-sched.c size = sizeof(struct sched_atom *) * task->nr_events;
task 366 tools/perf/builtin-sched.c task->atoms = realloc(task->atoms, size);
task 367 tools/perf/builtin-sched.c BUG_ON(!task->atoms);
task 369 tools/perf/builtin-sched.c task->atoms[idx] = event;
task 374 tools/perf/builtin-sched.c static struct sched_atom *last_event(struct task_desc *task)
task 376 tools/perf/builtin-sched.c if (!task->nr_events)
task 379 tools/perf/builtin-sched.c return task->atoms[task->nr_events - 1];
task 382 tools/perf/builtin-sched.c static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
task 385 tools/perf/builtin-sched.c struct sched_atom *event, *curr_event = last_event(task);
task 397 tools/perf/builtin-sched.c event = get_new_event(task, timestamp);
task 405 tools/perf/builtin-sched.c static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
task 410 tools/perf/builtin-sched.c event = get_new_event(task, timestamp);
task 432 tools/perf/builtin-sched.c static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
task 435 tools/perf/builtin-sched.c struct sched_atom *event = get_new_event(task, timestamp);
task 445 tools/perf/builtin-sched.c struct task_desc *task;
task 460 tools/perf/builtin-sched.c task = sched->pid_to_task[pid];
task 462 tools/perf/builtin-sched.c if (task)
task 463 tools/perf/builtin-sched.c return task;
task 465 tools/perf/builtin-sched.c task = zalloc(sizeof(*task));
task 466 tools/perf/builtin-sched.c task->pid = pid;
task 467 tools/perf/builtin-sched.c task->nr = sched->nr_tasks;
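The builtin-sched entries show a grow-by-one realloc pattern for the per-task event log: task->atoms is a flat array of sched_atom pointers that is extended each time an event is recorded. Condensed from the indexed get_new_event() lines (the timestamp bookkeeping is dropped; the sketch_ name is ours):

    static struct sched_atom *sketch_new_event(struct task_desc *task)
    {
        unsigned long idx = task->nr_events;       /* next free slot */
        struct sched_atom *event = zalloc(sizeof(*event));
        size_t size;

        task->nr_events++;
        size = sizeof(struct sched_atom *) * task->nr_events;
        task->atoms = realloc(task->atoms, size);  /* grow by one pointer */
        BUG_ON(!task->atoms);

        task->atoms[idx] = event;
        return event;
    }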
task 468 tools/perf/builtin-sched.c strcpy(task->comm, comm);
task 473 tools/perf/builtin-sched.c add_sched_event_sleep(sched, task, 0, 0);
task 475 tools/perf/builtin-sched.c sched->pid_to_task[pid] = task;
task 479 tools/perf/builtin-sched.c sched->tasks[task->nr] = task;
task 484 tools/perf/builtin-sched.c return task;
task 490 tools/perf/builtin-sched.c struct task_desc *task;
task 494 tools/perf/builtin-sched.c task = sched->tasks[i];
task 496 tools/perf/builtin-sched.c task->nr, task->comm, task->pid, task->nr_events);
task 610 tools/perf/builtin-sched.c struct task_desc *task;
task 618 tools/perf/builtin-sched.c struct task_desc *this_task = parms->task;
task 661 tools/perf/builtin-sched.c struct task_desc *task;
task 678 tools/perf/builtin-sched.c parms->task = task = sched->tasks[i];
task 681 tools/perf/builtin-sched.c sem_init(&task->sleep_sem, 0, 0);
task 682 tools/perf/builtin-sched.c sem_init(&task->ready_for_work, 0, 0);
task 683 tools/perf/builtin-sched.c sem_init(&task->work_done_sem, 0, 0);
task 684 tools/perf/builtin-sched.c task->curr_event = 0;
task 685 tools/perf/builtin-sched.c err = pthread_create(&task->thread, &attr, thread_func, parms);
task 693 tools/perf/builtin-sched.c struct task_desc *task;
task 701 tools/perf/builtin-sched.c task = sched->tasks[i];
task 702 tools/perf/builtin-sched.c ret = sem_wait(&task->ready_for_work);
task 704 tools/perf/builtin-sched.c sem_init(&task->ready_for_work, 0, 0);
task 714 tools/perf/builtin-sched.c task = sched->tasks[i];
task 715 tools/perf/builtin-sched.c ret = sem_wait(&task->work_done_sem);
task 717 tools/perf/builtin-sched.c sem_init(&task->work_done_sem, 0, 0);
task 718 tools/perf/builtin-sched.c sched->cpu_usage += task->cpu_usage;
task 719 tools/perf/builtin-sched.c task->cpu_usage = 0;
task 737 tools/perf/builtin-sched.c task = sched->tasks[i];
task 738 tools/perf/builtin-sched.c sem_init(&task->sleep_sem, 0, 0);
task 739 tools/perf/builtin-sched.c task->curr_event = 0;
task 119 tools/perf/tests/attr.c WRITE_ASS(task, "d");
task 93 tools/perf/tests/task-exit.c evsel->core.attr.task = 1;
task 1077 tools/perf/util/evsel.c attr->task = track;
task 116 tools/perf/util/perf_event_attr_fprintf.c PRINT_ATTRf(task, p_unsigned);
task 746 tools/perf/util/python.c task = 0,
task 761 tools/perf/util/python.c &enable_on_exec, &task, &watermark,
task 789 tools/perf/util/python.c attr.task = task;
task 181 tools/testing/selftests/bpf/progs/pyperf.h struct task_struct* task = (struct task_struct*)bpf_get_current_task();
task 182 tools/testing/selftests/bpf/progs/pyperf.h void* tls_base = (void*)task;
task 443 tools/testing/selftests/bpf/progs/strobemeta.h static __always_inline void *read_strobe_meta(struct task_struct *task,
task 462 tools/testing/selftests/bpf/progs/strobemeta.h tls_base = (void *)task;
task 499 tools/testing/selftests/bpf/progs/strobemeta.h struct task_struct *task;
task 513 tools/testing/selftests/bpf/progs/strobemeta.h task = (struct task_struct *)bpf_get_current_task();
task 514 tools/testing/selftests/bpf/progs/strobemeta.h sample_end = read_strobe_meta(task, &sample->metadata);
task 23 tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c struct task_struct *task = (void *)bpf_get_current_task();
task 27 tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c if (BPF_CORE_READ(&pid, &task->pid) ||
task 28 tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c BPF_CORE_READ(&tgid, &task->tgid))
task 37 tools/testing/selftests/bpf/test_maps.c static void test_hashmap(unsigned int task, void *data)
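The sem_init/sem_wait entries above form perf sched replay's two-phase barrier: every replay thread posts ready_for_work once its pthread is up, and the coordinator later harvests work_done_sem together with the measured cpu_usage. A condensed sketch of the coordinator side (error checks and the sleep_sem wakeups are omitted; sketch_ is our name):

    static void sketch_wait_for_tasks(struct perf_sched *sched)
    {
        struct task_desc *task;
        unsigned long i;

        for (i = 0; i < sched->nr_tasks; i++) {      /* phase 1: all threads up */
            task = sched->tasks[i];
            sem_wait(&task->ready_for_work);
            sem_init(&task->ready_for_work, 0, 0);   /* re-arm for next round */
        }
        for (i = 0; i < sched->nr_tasks; i++) {      /* phase 2: all work done */
            task = sched->tasks[i];
            sem_wait(&task->work_done_sem);
            sem_init(&task->work_done_sem, 0, 0);
            sched->cpu_usage += task->cpu_usage;     /* aggregate measurement */
            task->cpu_usage = 0;
        }
    }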
task 127 tools/testing/selftests/bpf/test_maps.c static void test_hashmap_sizes(unsigned int task, void *data)
task 147 tools/testing/selftests/bpf/test_maps.c static void test_hashmap_percpu(unsigned int task, void *data)
task 274 tools/testing/selftests/bpf/test_maps.c static void test_hashmap_walk(unsigned int task, void *data)
task 345 tools/testing/selftests/bpf/test_maps.c static void test_arraymap(unsigned int task, void *data)
task 400 tools/testing/selftests/bpf/test_maps.c static void test_arraymap_percpu(unsigned int task, void *data)
task 496 tools/testing/selftests/bpf/test_maps.c static void test_devmap(unsigned int task, void *data)
task 511 tools/testing/selftests/bpf/test_maps.c static void test_devmap_hash(unsigned int task, void *data)
task 526 tools/testing/selftests/bpf/test_maps.c static void test_queuemap(unsigned int task, void *data)
task 584 tools/testing/selftests/bpf/test_maps.c static void test_stackmap(unsigned int task, void *data)
task 1279 tools/testing/selftests/bpf/test_maps.c void (*fn)(unsigned int task, void *data),
task 90 tools/testing/selftests/prctl/disable-tsc-on-off-stress-test.c task();
task 2577 virt/kvm/kvm_main.c struct task_struct *task = NULL;
task 2583 virt/kvm/kvm_main.c task = get_pid_task(pid, PIDTYPE_PID);
task 2585 virt/kvm/kvm_main.c if (!task)
task 2587 virt/kvm/kvm_main.c ret = yield_to(task, 1);
task 2588 virt/kvm/kvm_main.c put_task_struct(task);
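The virt/kvm/kvm_main.c entries close with the standard pid-to-task reference discipline: get_pid_task() returns a referenced task_struct (or NULL), and that reference must be dropped with put_task_struct() whatever yield_to() returns. A sketch using the indexed calls (the sketch_ wrapper is ours, condensed from the vCPU directed-yield path):

    static int sketch_yield_to_pid(struct pid *pid)
    {
        struct task_struct *task;
        int ret;

        task = get_pid_task(pid, PIDTYPE_PID);  /* takes a reference, may be NULL */
        if (!task)
            return 0;
        ret = yield_to(task, 1);                /* directed yield toward task */
        put_task_struct(task);                  /* always drop the reference */
        return ret;
    }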